/*
 * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "ci/ciUtilities.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "jfr/support/jfrIntrinsics.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callGenerator.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/countbitsnode.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/idealKit.hpp"
#include "opto/mathexactnode.hpp"
#include "opto/movenode.hpp"
#include "opto/mulnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/runtime.hpp"
#include "opto/rootnode.hpp"
#include "opto/subnode.hpp"
#include "prims/nativeLookup.hpp"
#include "prims/unsafe.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"


class LibraryIntrinsic : public InlineCallGenerator {
  // Extend the set of intrinsics known to the runtime:
 public:
 private:
  bool             _is_virtual;
  bool             _does_virtual_dispatch;
  int8_t           _predicates_count;  // Intrinsic is predicated by several conditions
  int8_t           _last_predicate;    // Last generated predicate
  vmIntrinsics::ID _intrinsic_id;

 public:
  LibraryIntrinsic(ciMethod* m, bool is_virtual, int predicates_count, bool does_virtual_dispatch, vmIntrinsics::ID id)
    : InlineCallGenerator(m),
      _is_virtual(is_virtual),
      _does_virtual_dispatch(does_virtual_dispatch),
      _predicates_count((int8_t)predicates_count),
      _last_predicate((int8_t)-1),
      _intrinsic_id(id)
  {
  }
  virtual bool is_intrinsic() const { return true; }
  virtual bool is_virtual()   const { return _is_virtual; }
  virtual bool is_predicated() const { return _predicates_count > 0; }
  virtual int  predicates_count() const { return _predicates_count; }
  virtual bool does_virtual_dispatch() const { return _does_virtual_dispatch; }
  virtual JVMState* generate(JVMState* jvms);
  virtual Node* generate_predicate(JVMState* jvms, int predicate);
  vmIntrinsics::ID intrinsic_id() const { return _intrinsic_id; }
};


// Local helper class for LibraryIntrinsic:
class LibraryCallKit : public GraphKit {
 private:
  LibraryIntrinsic* _intrinsic;     // the library intrinsic being called
  Node*             _result;        // the result node, if any
  int               _reexecute_sp;  // the stack pointer when bytecode needs to be reexecuted

  const TypeOopPtr* sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr* adr_type);

 public:
  LibraryCallKit(JVMState* jvms, LibraryIntrinsic* intrinsic)
    : GraphKit(jvms),
      _intrinsic(intrinsic),
      _result(NULL)
  {
    // Check if this is a root compile.  In that case we don't have a caller.
    if (!jvms->has_method()) {
      _reexecute_sp = sp();
    } else {
      // Find out how many arguments the interpreter needs when deoptimizing
      // and save the stack pointer value so it can be used by uncommon_trap.
      // We find the argument count by looking at the declared signature.
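      // (For example, an invokevirtual taking one reference argument
      // re-pushes two stack slots: one for the receiver and one for the
      // argument.)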
      bool ignored_will_link;
      ciSignature* declared_signature = NULL;
      ciMethod* ignored_callee = caller()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
      const int nargs = declared_signature->arg_size_for_bc(caller()->java_code_at_bci(bci()));
      _reexecute_sp = sp() + nargs;  // "push" arguments back on stack
    }
  }

  virtual LibraryCallKit* is_LibraryCallKit() const { return (LibraryCallKit*)this; }

  ciMethod*         caller()    const    { return jvms()->method(); }
  int               bci()       const    { return jvms()->bci(); }
  LibraryIntrinsic* intrinsic() const    { return _intrinsic; }
  vmIntrinsics::ID  intrinsic_id() const { return _intrinsic->intrinsic_id(); }
  ciMethod*         callee()    const    { return _intrinsic->method(); }

  bool  try_to_inline(int predicate);
  Node* try_to_predicate(int predicate);

  void push_result() {
    // Push the result onto the stack.
    if (!stopped() && result() != NULL) {
      BasicType bt = result()->bottom_type()->basic_type();
      push_node(bt, result());
    }
  }

 private:
  void fatal_unexpected_iid(vmIntrinsics::ID iid) {
    fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
  }

  void  set_result(Node* n) { assert(_result == NULL, "only set once"); _result = n; }
  void  set_result(RegionNode* region, PhiNode* value);
  Node* result() { return _result; }

  virtual int reexecute_sp() { return _reexecute_sp; }

  // Helper functions to inline natives
  Node* generate_guard(Node* test, RegionNode* region, float true_prob);
  Node* generate_slow_guard(Node* test, RegionNode* region);
  Node* generate_fair_guard(Node* test, RegionNode* region);
  Node* generate_negative_guard(Node* index, RegionNode* region,
                                // resulting CastII of index:
                                Node** pos_index = NULL);
  Node* generate_limit_guard(Node* offset, Node* subseq_length,
                             Node* array_length,
                             RegionNode* region);
  void  generate_string_range_check(Node* array, Node* offset,
                                    Node* length, bool char_count);
  Node* generate_current_thread(Node*& tls_output);
  Node* load_mirror_from_klass(Node* klass);
  Node* load_klass_from_mirror_common(Node* mirror, bool never_see_null,
                                      RegionNode* region, int null_path,
                                      int offset);
  Node* load_klass_from_mirror(Node* mirror, bool never_see_null,
                               RegionNode* region, int null_path) {
    int offset = java_lang_Class::klass_offset_in_bytes();
    return load_klass_from_mirror_common(mirror, never_see_null,
                                         region, null_path,
                                         offset);
  }
  Node* load_array_klass_from_mirror(Node* mirror, bool never_see_null,
                                     RegionNode* region, int null_path) {
    int offset = java_lang_Class::array_klass_offset_in_bytes();
    return load_klass_from_mirror_common(mirror, never_see_null,
                                         region, null_path,
                                         offset);
  }
  Node* generate_access_flags_guard(Node* kls,
                                    int modifier_mask, int modifier_bits,
                                    RegionNode* region);
  Node* generate_interface_guard(Node* kls, RegionNode* region);
  Node* generate_array_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, false, false);
  }
  Node* generate_non_array_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, false, true);
  }
  Node* generate_objArray_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, true, false);
  }
  Node* generate_non_objArray_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, true, true);
  }
  Node* generate_array_guard_common(Node* kls, RegionNode* region,
                                    bool obj_array, bool not_array);
  Node* generate_virtual_guard(Node* obj_klass, RegionNode* slow_region);
  CallJavaNode* generate_method_call(vmIntrinsics::ID method_id,
                                     bool is_virtual = false, bool is_static = false);
  CallJavaNode* generate_method_call_static(vmIntrinsics::ID method_id) {
    return generate_method_call(method_id, false, true);
  }
  CallJavaNode* generate_method_call_virtual(vmIntrinsics::ID method_id) {
    return generate_method_call(method_id, true, false);
  }
  Node* load_field_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString, bool is_exact, bool is_static, ciInstanceKlass* fromKls);
  Node* field_address_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString, bool is_exact, bool is_static, ciInstanceKlass* fromKls);

  Node* make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2, StrIntrinsicNode::ArgEnc ae);
  bool inline_string_compareTo(StrIntrinsicNode::ArgEnc ae);
  bool inline_string_indexOf(StrIntrinsicNode::ArgEnc ae);
  bool inline_string_indexOfI(StrIntrinsicNode::ArgEnc ae);
  Node* make_indexOf_node(Node* src_start, Node* src_count, Node* tgt_start, Node* tgt_count,
                          RegionNode* region, Node* phi, StrIntrinsicNode::ArgEnc ae);
  bool inline_string_indexOfChar();
  bool inline_string_equals(StrIntrinsicNode::ArgEnc ae);
  bool inline_string_toBytesU();
  bool inline_string_getCharsU();
  bool inline_string_copy(bool compress);
  bool inline_string_char_access(bool is_store);
  Node* round_double_node(Node* n);
  bool runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName);
  bool inline_math_native(vmIntrinsics::ID id);
  bool inline_math(vmIntrinsics::ID id);
  bool inline_double_math(vmIntrinsics::ID id);
  template <typename OverflowOp>
  bool inline_math_overflow(Node* arg1, Node* arg2);
  void inline_math_mathExact(Node* math, Node* test);
  bool inline_math_addExactI(bool is_increment);
  bool inline_math_addExactL(bool is_increment);
  bool inline_math_multiplyExactI();
  bool inline_math_multiplyExactL();
  bool inline_math_multiplyHigh();
  bool inline_math_negateExactI();
  bool inline_math_negateExactL();
  bool inline_math_subtractExactI(bool is_decrement);
  bool inline_math_subtractExactL(bool is_decrement);
  bool inline_min_max(vmIntrinsics::ID id);
  bool inline_notify(vmIntrinsics::ID id);
  Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
  // This returns Type::AnyPtr, RawPtr, or OopPtr.
  int classify_unsafe_addr(Node*& base, Node*& offset, BasicType type);
  Node* make_unsafe_address(Node*& base, Node* offset, DecoratorSet decorators, BasicType type = T_ILLEGAL, bool can_cast = false);

  typedef enum { Relaxed, Opaque, Volatile, Acquire, Release } AccessKind;
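  // Note: the AccessKind values roughly mirror the VarHandle access modes:
  // Relaxed is a plain access, Opaque is coherent but otherwise unordered,
  // Acquire/Release order one side of the access, and Volatile is fully
  // ordered; mo_decorator_for_access_kind() maps each to its DecoratorSet.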
  DecoratorSet mo_decorator_for_access_kind(AccessKind kind);
  bool inline_unsafe_access(bool is_store, BasicType type, AccessKind kind, bool is_unaligned);
  static bool klass_needs_init_guard(Node* kls);
  bool inline_unsafe_allocate();
  bool inline_unsafe_newArray(bool uninitialized);
  bool inline_unsafe_writeback0();
  bool inline_unsafe_writebackSync0(bool is_pre);
  bool inline_unsafe_copyMemory();
  bool inline_native_currentThread();

  bool inline_native_time_funcs(address method, const char* funcName);
#ifdef JFR_HAVE_INTRINSICS
  bool inline_native_classID();
  bool inline_native_getEventWriter();
#endif
  bool inline_native_Class_query(vmIntrinsics::ID id);
  bool inline_native_subtype_check();
  bool inline_native_getLength();
  bool inline_array_copyOf(bool is_copyOfRange);
  bool inline_array_equals(StrIntrinsicNode::ArgEnc ae);
  bool inline_preconditions_checkIndex();
  void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array);
  bool inline_native_clone(bool is_virtual);
  bool inline_native_Reflection_getCallerClass();
  // Helper function for inlining native object hash method
  bool inline_native_hashcode(bool is_virtual, bool is_static);
  bool inline_native_getClass();

  // Helper functions for inlining arraycopy
  bool inline_arraycopy();
  AllocateArrayNode* tightly_coupled_allocation(Node* ptr,
                                                RegionNode* slow_region);
  JVMState* arraycopy_restore_alloc_state(AllocateArrayNode* alloc, int& saved_reexecute_sp);
  void arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms, int saved_reexecute_sp,
                                      uint new_idx);

  typedef enum { LS_get_add, LS_get_set, LS_cmp_swap, LS_cmp_swap_weak, LS_cmp_exchange } LoadStoreKind;
  bool inline_unsafe_load_store(BasicType type, LoadStoreKind kind, AccessKind access_kind);
  bool inline_unsafe_fence(vmIntrinsics::ID id);
  bool inline_onspinwait();
  bool inline_fp_conversions(vmIntrinsics::ID id);
  bool inline_number_methods(vmIntrinsics::ID id);
  bool inline_reference_get();
  bool inline_Class_cast();
  bool inline_aescrypt_Block(vmIntrinsics::ID id);
  bool inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id);
  bool inline_electronicCodeBook_AESCrypt(vmIntrinsics::ID id);
  bool inline_counterMode_AESCrypt(vmIntrinsics::ID id);
  Node* inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting);
  Node* inline_electronicCodeBook_AESCrypt_predicate(bool decrypting);
  Node* inline_counterMode_AESCrypt_predicate();
  Node* get_key_start_from_aescrypt_object(Node* aescrypt_object);
  Node* get_original_key_start_from_aescrypt_object(Node* aescrypt_object);
  bool inline_ghash_processBlocks();
  bool inline_base64_encodeBlock();
  bool inline_sha_implCompress(vmIntrinsics::ID id);
  bool inline_digestBase_implCompressMB(int predicate);
  bool inline_sha_implCompressMB(Node* digestBaseObj, ciInstanceKlass* instklass_SHA,
                                 bool long_state, address stubAddr, const char* stubName,
                                 Node* src_start, Node* ofs, Node* limit);
  Node* get_state_from_sha_object(Node* sha_object);
  Node* get_state_from_sha5_object(Node* sha_object);
  Node* inline_digestBase_implCompressMB_predicate(int predicate);
  bool inline_encodeISOArray();
  bool inline_updateCRC32();
  bool inline_updateBytesCRC32();
  bool inline_updateByteBufferCRC32();
  Node* get_table_from_crc32c_class(ciInstanceKlass* crc32c_class);
  bool inline_updateBytesCRC32C();
  bool inline_updateDirectByteBufferCRC32C();
  bool inline_updateBytesAdler32();
  bool inline_updateByteBufferAdler32();
  bool inline_multiplyToLen();
  bool inline_hasNegatives();
  bool inline_squareToLen();
  bool inline_mulAdd();
  bool inline_montgomeryMultiply();
  bool inline_montgomerySquare();
  bool inline_vectorizedMismatch();
  bool inline_fma(vmIntrinsics::ID id);
  bool inline_character_compare(vmIntrinsics::ID id);
  bool inline_fp_min_max(vmIntrinsics::ID id);

  bool inline_profileBoolean();
  bool inline_isCompileConstant();
  void clear_upper_avx() {
#ifdef X86
    if (UseAVX >= 2) {
      C->set_clear_upper_avx(true);
    }
#endif
  }
};

//---------------------------make_vm_intrinsic----------------------------
CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
  vmIntrinsics::ID id = m->intrinsic_id();
  assert(id != vmIntrinsics::_none, "must be a VM intrinsic");

  if (!m->is_loaded()) {
    // Do not attempt to inline unloaded methods.
    return NULL;
  }

  C2Compiler* compiler = (C2Compiler*)CompileBroker::compiler(CompLevel_full_optimization);
  bool is_available = false;

  {
    // For calling is_intrinsic_supported and is_disabled_by_flags
    // the compiler must transition to '_thread_in_vm' state because both
    // methods access VM-internal data.
    VM_ENTRY_MARK;
    methodHandle mh(THREAD, m->get_Method());
    is_available = compiler != NULL && compiler->is_intrinsic_supported(mh, is_virtual) &&
                   !C->directive()->is_intrinsic_disabled(mh) &&
                   !vmIntrinsics::is_disabled_by_flags(mh);
  }

  if (is_available) {
    assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility");
    assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?");
    return new LibraryIntrinsic(m, is_virtual,
                                vmIntrinsics::predicates_needed(id),
                                vmIntrinsics::does_virtual_dispatch(id),
                                (vmIntrinsics::ID) id);
  } else {
    return NULL;
  }
}

//----------------------register_library_intrinsics-----------------------
// Initialize this file's data structures, for each Compile instance.
void Compile::register_library_intrinsics() {
  // Nothing to do here.
}

JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
  LibraryCallKit kit(jvms, this);
  Compile* C = kit.C;
  int nodes = C->unique();
#ifndef PRODUCT
  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
    char buf[1000];
    const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
    tty->print_cr("Intrinsic %s", str);
  }
#endif
  ciMethod* callee = kit.callee();
  const int bci    = kit.bci();

  // Try to inline the intrinsic.
  if ((CheckIntrinsics ? callee->intrinsic_candidate() : true) &&
      kit.try_to_inline(_last_predicate)) {
    const char *inline_msg = is_virtual() ? "(intrinsic, virtual)"
                                          : "(intrinsic)";
    CompileTask::print_inlining_ul(callee, jvms->depth() - 1, bci, inline_msg);
    if (C->print_intrinsics() || C->print_inlining()) {
      C->print_inlining(callee, jvms->depth() - 1, bci, inline_msg);
    }
    C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
    if (C->log()) {
      C->log()->elem("intrinsic id='%s'%s nodes='%d'",
                     vmIntrinsics::name_at(intrinsic_id()),
                     (is_virtual() ? " virtual='1'" : ""),
                     C->unique() - nodes);
    }
    // Push the result from the inlined method onto the stack.
    kit.push_result();
    C->print_inlining_update(this);
    return kit.transfer_exceptions_into_jvms();
  }

  // The intrinsic bailed out
  if (jvms->has_method()) {
    // Not a root compile.
    const char* msg;
    if (callee->intrinsic_candidate()) {
      msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)";
    } else {
      msg = is_virtual() ? "failed to inline (intrinsic, virtual), method not annotated"
                         : "failed to inline (intrinsic), method not annotated";
    }
    CompileTask::print_inlining_ul(callee, jvms->depth() - 1, bci, msg);
    if (C->print_intrinsics() || C->print_inlining()) {
      C->print_inlining(callee, jvms->depth() - 1, bci, msg);
    }
  } else {
    // Root compile
    ResourceMark rm;
    stringStream msg_stream;
    msg_stream.print("Did not generate intrinsic %s%s at bci:%d in",
                     vmIntrinsics::name_at(intrinsic_id()),
                     is_virtual() ? " (virtual)" : "", bci);
    const char *msg = msg_stream.as_string();
    log_debug(jit, inlining)("%s", msg);
    if (C->print_intrinsics() || C->print_inlining()) {
      tty->print("%s", msg);
    }
  }
  C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
  C->print_inlining_update(this);
  return NULL;
}

Node* LibraryIntrinsic::generate_predicate(JVMState* jvms, int predicate) {
  LibraryCallKit kit(jvms, this);
  Compile* C = kit.C;
  int nodes = C->unique();
  _last_predicate = predicate;
#ifndef PRODUCT
  assert(is_predicated() && predicate < predicates_count(), "sanity");
  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
    char buf[1000];
    const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
    tty->print_cr("Predicate for intrinsic %s", str);
  }
#endif
  ciMethod* callee = kit.callee();
  const int bci    = kit.bci();

  Node* slow_ctl = kit.try_to_predicate(predicate);
  if (!kit.failing()) {
    const char *inline_msg = is_virtual() ? "(intrinsic, virtual, predicate)"
                                          : "(intrinsic, predicate)";
    CompileTask::print_inlining_ul(callee, jvms->depth() - 1, bci, inline_msg);
    if (C->print_intrinsics() || C->print_inlining()) {
      C->print_inlining(callee, jvms->depth() - 1, bci, inline_msg);
    }
    C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
    if (C->log()) {
      C->log()->elem("predicate_intrinsic id='%s'%s nodes='%d'",
                     vmIntrinsics::name_at(intrinsic_id()),
                     (is_virtual() ? " virtual='1'" : ""),
                     C->unique() - nodes);
    }
    return slow_ctl; // Could be NULL if the check folds.
  }

  // The intrinsic bailed out
  if (jvms->has_method()) {
    // Not a root compile.
    const char* msg = "failed to generate predicate for intrinsic";
    CompileTask::print_inlining_ul(kit.callee(), jvms->depth() - 1, bci, msg);
    if (C->print_intrinsics() || C->print_inlining()) {
      C->print_inlining(kit.callee(), jvms->depth() - 1, bci, msg);
    }
  } else {
    // Root compile
    ResourceMark rm;
    stringStream msg_stream;
    msg_stream.print("Did not generate intrinsic %s%s at bci:%d in",
                     vmIntrinsics::name_at(intrinsic_id()),
                     is_virtual() ? " (virtual)" : "", bci);
    const char *msg = msg_stream.as_string();
    log_debug(jit, inlining)("%s", msg);
    if (C->print_intrinsics() || C->print_inlining()) {
      C->print_inlining_stream()->print("%s", msg);
    }
  }
  C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
  return NULL;
}

bool LibraryCallKit::try_to_inline(int predicate) {
  // Handle symbolic names for otherwise undistinguished boolean switches:
  const bool is_store       = true;
  const bool is_compress    = true;
  const bool is_static      = true;
  const bool is_volatile    = true;

  if (!jvms()->has_method()) {
    // Root JVMState has a null method.
    assert(map()->memory()->Opcode() == Op_Parm, "");
    // Insert the memory aliasing node
    set_all_memory(reset_memory());
  }
  assert(merged_memory(), "");

  switch (intrinsic_id()) {
  case vmIntrinsics::_hashCode:                 return inline_native_hashcode(intrinsic()->is_virtual(), !is_static);
  case vmIntrinsics::_identityHashCode:         return inline_native_hashcode(/*!virtual*/ false,         is_static);
  case vmIntrinsics::_getClass:                 return inline_native_getClass();

  case vmIntrinsics::_ceil:
  case vmIntrinsics::_floor:
  case vmIntrinsics::_rint:
  case vmIntrinsics::_dsin:
  case vmIntrinsics::_dcos:
  case vmIntrinsics::_dtan:
  case vmIntrinsics::_dabs:
  case vmIntrinsics::_fabs:
  case vmIntrinsics::_iabs:
  case vmIntrinsics::_labs:
  case vmIntrinsics::_datan2:
  case vmIntrinsics::_dsqrt:
  case vmIntrinsics::_dexp:
  case vmIntrinsics::_dlog:
  case vmIntrinsics::_dlog10:
  case vmIntrinsics::_dpow:                     return inline_math_native(intrinsic_id());

  case vmIntrinsics::_min:
  case vmIntrinsics::_max:                      return inline_min_max(intrinsic_id());

  case vmIntrinsics::_notify:
  case vmIntrinsics::_notifyAll:
    return inline_notify(intrinsic_id());

  case vmIntrinsics::_addExactI:                return inline_math_addExactI(false /* add */);
  case vmIntrinsics::_addExactL:                return inline_math_addExactL(false /* add */);
  case vmIntrinsics::_decrementExactI:          return inline_math_subtractExactI(true /* decrement */);
  case vmIntrinsics::_decrementExactL:          return inline_math_subtractExactL(true /* decrement */);
  case vmIntrinsics::_incrementExactI:          return inline_math_addExactI(true /* increment */);
  case vmIntrinsics::_incrementExactL:          return inline_math_addExactL(true /* increment */);
  case vmIntrinsics::_multiplyExactI:           return inline_math_multiplyExactI();
  case vmIntrinsics::_multiplyExactL:           return inline_math_multiplyExactL();
  case vmIntrinsics::_multiplyHigh:             return inline_math_multiplyHigh();
  case vmIntrinsics::_negateExactI:             return inline_math_negateExactI();
  case vmIntrinsics::_negateExactL:             return inline_math_negateExactL();
  case vmIntrinsics::_subtractExactI:           return inline_math_subtractExactI(false /* subtract */);
  case vmIntrinsics::_subtractExactL:           return inline_math_subtractExactL(false /* subtract */);

  case vmIntrinsics::_arraycopy:                return inline_arraycopy();

  case vmIntrinsics::_compareToL:               return inline_string_compareTo(StrIntrinsicNode::LL);
  case vmIntrinsics::_compareToU:               return inline_string_compareTo(StrIntrinsicNode::UU);
  case vmIntrinsics::_compareToLU:              return inline_string_compareTo(StrIntrinsicNode::LU);
  case vmIntrinsics::_compareToUL:              return inline_string_compareTo(StrIntrinsicNode::UL);

  case vmIntrinsics::_indexOfL:                 return inline_string_indexOf(StrIntrinsicNode::LL);
  case vmIntrinsics::_indexOfU:                 return inline_string_indexOf(StrIntrinsicNode::UU);
  case vmIntrinsics::_indexOfUL:                return inline_string_indexOf(StrIntrinsicNode::UL);
  case vmIntrinsics::_indexOfIL:                return inline_string_indexOfI(StrIntrinsicNode::LL);
  case vmIntrinsics::_indexOfIU:                return inline_string_indexOfI(StrIntrinsicNode::UU);
  case vmIntrinsics::_indexOfIUL:               return inline_string_indexOfI(StrIntrinsicNode::UL);
  case vmIntrinsics::_indexOfU_char:            return inline_string_indexOfChar();

  case vmIntrinsics::_equalsL:                  return inline_string_equals(StrIntrinsicNode::LL);
  case vmIntrinsics::_equalsU:                  return inline_string_equals(StrIntrinsicNode::UU);

  case vmIntrinsics::_toBytesStringU:           return inline_string_toBytesU();
  case vmIntrinsics::_getCharsStringU:          return inline_string_getCharsU();
  case vmIntrinsics::_getCharStringU:           return inline_string_char_access(!is_store);
  case vmIntrinsics::_putCharStringU:           return inline_string_char_access( is_store);

  case vmIntrinsics::_compressStringC:
  case vmIntrinsics::_compressStringB:          return inline_string_copy( is_compress);
  case vmIntrinsics::_inflateStringC:
  case vmIntrinsics::_inflateStringB:           return inline_string_copy(!is_compress);

  case vmIntrinsics::_getReference:             return inline_unsafe_access(!is_store, T_OBJECT,  Relaxed, false);
  case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_store, T_BOOLEAN, Relaxed, false);
  case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_store, T_BYTE,    Relaxed, false);
  case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_store, T_SHORT,   Relaxed, false);
  case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_store, T_CHAR,    Relaxed, false);
  case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_store, T_INT,     Relaxed, false);
  case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_store, T_LONG,    Relaxed, false);
  case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_store, T_FLOAT,   Relaxed, false);
  case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_store, T_DOUBLE,  Relaxed, false);

  case vmIntrinsics::_putReference:             return inline_unsafe_access( is_store, T_OBJECT,  Relaxed, false);
  case vmIntrinsics::_putBoolean:               return inline_unsafe_access( is_store, T_BOOLEAN, Relaxed, false);
  case vmIntrinsics::_putByte:                  return inline_unsafe_access( is_store, T_BYTE,    Relaxed, false);
  case vmIntrinsics::_putShort:                 return inline_unsafe_access( is_store, T_SHORT,   Relaxed, false);
  case vmIntrinsics::_putChar:                  return inline_unsafe_access( is_store, T_CHAR,    Relaxed, false);
  case vmIntrinsics::_putInt:                   return inline_unsafe_access( is_store, T_INT,     Relaxed, false);
  case vmIntrinsics::_putLong:                  return inline_unsafe_access( is_store, T_LONG,    Relaxed, false);
  case vmIntrinsics::_putFloat:                 return inline_unsafe_access( is_store, T_FLOAT,   Relaxed, false);
  case vmIntrinsics::_putDouble:                return inline_unsafe_access( is_store, T_DOUBLE,  Relaxed, false);

  case vmIntrinsics::_getReferenceVolatile:     return inline_unsafe_access(!is_store, T_OBJECT,  Volatile, false);
  case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_store, T_BOOLEAN, Volatile, false);
  case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_store, T_BYTE,    Volatile, false);
  case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_store, T_SHORT,   Volatile, false);
  case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_store, T_CHAR,    Volatile, false);
  case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_store, T_INT,     Volatile, false);
  case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_store, T_LONG,    Volatile, false);
  case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_store, T_FLOAT,   Volatile, false);
  case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_store, T_DOUBLE,  Volatile, false);

  case vmIntrinsics::_putReferenceVolatile:     return inline_unsafe_access( is_store, T_OBJECT,  Volatile, false);
  case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access( is_store, T_BOOLEAN, Volatile, false);
  case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access( is_store, T_BYTE,    Volatile, false);
  case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access( is_store, T_SHORT,   Volatile, false);
  case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access( is_store, T_CHAR,    Volatile, false);
  case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access( is_store, T_INT,     Volatile, false);
  case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access( is_store, T_LONG,    Volatile, false);
  case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access( is_store, T_FLOAT,   Volatile, false);
  case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access( is_store, T_DOUBLE,  Volatile, false);

  case vmIntrinsics::_getShortUnaligned:        return inline_unsafe_access(!is_store, T_SHORT,   Relaxed, true);
  case vmIntrinsics::_getCharUnaligned:         return inline_unsafe_access(!is_store, T_CHAR,    Relaxed, true);
  case vmIntrinsics::_getIntUnaligned:          return inline_unsafe_access(!is_store, T_INT,     Relaxed, true);
  case vmIntrinsics::_getLongUnaligned:         return inline_unsafe_access(!is_store, T_LONG,    Relaxed, true);

  case vmIntrinsics::_putShortUnaligned:        return inline_unsafe_access( is_store, T_SHORT,   Relaxed, true);
  case vmIntrinsics::_putCharUnaligned:         return inline_unsafe_access( is_store, T_CHAR,    Relaxed, true);
  case vmIntrinsics::_putIntUnaligned:          return inline_unsafe_access( is_store, T_INT,     Relaxed, true);
  case vmIntrinsics::_putLongUnaligned:         return inline_unsafe_access( is_store, T_LONG,    Relaxed, true);

  case vmIntrinsics::_getReferenceAcquire:      return inline_unsafe_access(!is_store, T_OBJECT,  Acquire, false);
  case vmIntrinsics::_getBooleanAcquire:        return inline_unsafe_access(!is_store, T_BOOLEAN, Acquire, false);
  case vmIntrinsics::_getByteAcquire:           return inline_unsafe_access(!is_store, T_BYTE,    Acquire, false);
  case vmIntrinsics::_getShortAcquire:          return inline_unsafe_access(!is_store, T_SHORT,   Acquire, false);
  case vmIntrinsics::_getCharAcquire:           return inline_unsafe_access(!is_store, T_CHAR,    Acquire, false);
  case vmIntrinsics::_getIntAcquire:            return inline_unsafe_access(!is_store, T_INT,     Acquire, false);
  case vmIntrinsics::_getLongAcquire:           return inline_unsafe_access(!is_store, T_LONG,    Acquire, false);
  case vmIntrinsics::_getFloatAcquire:          return inline_unsafe_access(!is_store, T_FLOAT,   Acquire, false);
  case vmIntrinsics::_getDoubleAcquire:         return inline_unsafe_access(!is_store, T_DOUBLE,  Acquire, false);

  case vmIntrinsics::_putReferenceRelease:      return inline_unsafe_access( is_store, T_OBJECT,  Release, false);
  case vmIntrinsics::_putBooleanRelease:        return inline_unsafe_access( is_store, T_BOOLEAN, Release, false);
  case vmIntrinsics::_putByteRelease:           return inline_unsafe_access( is_store, T_BYTE,    Release, false);
  case vmIntrinsics::_putShortRelease:          return inline_unsafe_access( is_store, T_SHORT,   Release, false);
  case vmIntrinsics::_putCharRelease:           return inline_unsafe_access( is_store, T_CHAR,    Release, false);
  case vmIntrinsics::_putIntRelease:            return inline_unsafe_access( is_store, T_INT,     Release, false);
  case vmIntrinsics::_putLongRelease:           return inline_unsafe_access( is_store, T_LONG,    Release, false);
  case vmIntrinsics::_putFloatRelease:          return inline_unsafe_access( is_store, T_FLOAT,   Release, false);
  case vmIntrinsics::_putDoubleRelease:         return inline_unsafe_access( is_store, T_DOUBLE,  Release, false);

  case vmIntrinsics::_getReferenceOpaque:       return inline_unsafe_access(!is_store, T_OBJECT,  Opaque, false);
  case vmIntrinsics::_getBooleanOpaque:         return inline_unsafe_access(!is_store, T_BOOLEAN, Opaque, false);
  case vmIntrinsics::_getByteOpaque:            return inline_unsafe_access(!is_store, T_BYTE,    Opaque, false);
  case vmIntrinsics::_getShortOpaque:           return inline_unsafe_access(!is_store, T_SHORT,   Opaque, false);
  case vmIntrinsics::_getCharOpaque:            return inline_unsafe_access(!is_store, T_CHAR,    Opaque, false);
  case vmIntrinsics::_getIntOpaque:             return inline_unsafe_access(!is_store, T_INT,     Opaque, false);
  case vmIntrinsics::_getLongOpaque:            return inline_unsafe_access(!is_store, T_LONG,    Opaque, false);
  case vmIntrinsics::_getFloatOpaque:           return inline_unsafe_access(!is_store, T_FLOAT,   Opaque, false);
  case vmIntrinsics::_getDoubleOpaque:          return inline_unsafe_access(!is_store, T_DOUBLE,  Opaque, false);

  case vmIntrinsics::_putReferenceOpaque:       return inline_unsafe_access( is_store, T_OBJECT,  Opaque, false);
  case vmIntrinsics::_putBooleanOpaque:         return inline_unsafe_access( is_store, T_BOOLEAN, Opaque, false);
  case vmIntrinsics::_putByteOpaque:            return inline_unsafe_access( is_store, T_BYTE,    Opaque, false);
  case vmIntrinsics::_putShortOpaque:           return inline_unsafe_access( is_store, T_SHORT,   Opaque, false);
  case vmIntrinsics::_putCharOpaque:            return inline_unsafe_access( is_store, T_CHAR,    Opaque, false);
  case vmIntrinsics::_putIntOpaque:             return inline_unsafe_access( is_store, T_INT,     Opaque, false);
  case vmIntrinsics::_putLongOpaque:            return inline_unsafe_access( is_store, T_LONG,    Opaque, false);
  case vmIntrinsics::_putFloatOpaque:           return inline_unsafe_access( is_store, T_FLOAT,   Opaque, false);
  case vmIntrinsics::_putDoubleOpaque:          return inline_unsafe_access( is_store, T_DOUBLE,  Opaque, false);

  case vmIntrinsics::_compareAndSetReference:   return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap,      Volatile);
  case vmIntrinsics::_compareAndSetByte:        return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap,      Volatile);
  case vmIntrinsics::_compareAndSetShort:       return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap,      Volatile);
  case vmIntrinsics::_compareAndSetInt:         return inline_unsafe_load_store(T_INT,    LS_cmp_swap,      Volatile);
  case vmIntrinsics::_compareAndSetLong:        return inline_unsafe_load_store(T_LONG,   LS_cmp_swap,      Volatile);

  case vmIntrinsics::_weakCompareAndSetReferencePlain:   return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Relaxed);
  case vmIntrinsics::_weakCompareAndSetReferenceAcquire: return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Acquire);
  case vmIntrinsics::_weakCompareAndSetReferenceRelease: return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Release);
  case vmIntrinsics::_weakCompareAndSetReference:        return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Volatile);
  case vmIntrinsics::_weakCompareAndSetBytePlain:        return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap_weak, Relaxed);
  case vmIntrinsics::_weakCompareAndSetByteAcquire:      return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap_weak, Acquire);
  case vmIntrinsics::_weakCompareAndSetByteRelease:      return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap_weak, Release);
  case vmIntrinsics::_weakCompareAndSetByte:             return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap_weak, Volatile);
  case vmIntrinsics::_weakCompareAndSetShortPlain:       return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap_weak, Relaxed);
  case vmIntrinsics::_weakCompareAndSetShortAcquire:     return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap_weak, Acquire);
  case vmIntrinsics::_weakCompareAndSetShortRelease:     return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap_weak, Release);
  case vmIntrinsics::_weakCompareAndSetShort:            return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap_weak, Volatile);
  case vmIntrinsics::_weakCompareAndSetIntPlain:         return inline_unsafe_load_store(T_INT,    LS_cmp_swap_weak, Relaxed);
  case vmIntrinsics::_weakCompareAndSetIntAcquire:       return inline_unsafe_load_store(T_INT,    LS_cmp_swap_weak, Acquire);
  case vmIntrinsics::_weakCompareAndSetIntRelease:       return inline_unsafe_load_store(T_INT,    LS_cmp_swap_weak, Release);
  case vmIntrinsics::_weakCompareAndSetInt:              return inline_unsafe_load_store(T_INT,    LS_cmp_swap_weak, Volatile);
  case vmIntrinsics::_weakCompareAndSetLongPlain:        return inline_unsafe_load_store(T_LONG,   LS_cmp_swap_weak, Relaxed);
  case vmIntrinsics::_weakCompareAndSetLongAcquire:      return inline_unsafe_load_store(T_LONG,   LS_cmp_swap_weak, Acquire);
  case vmIntrinsics::_weakCompareAndSetLongRelease:      return inline_unsafe_load_store(T_LONG,   LS_cmp_swap_weak, Release);
  case vmIntrinsics::_weakCompareAndSetLong:             return inline_unsafe_load_store(T_LONG,   LS_cmp_swap_weak, Volatile);

  case vmIntrinsics::_compareAndExchangeReference:        return inline_unsafe_load_store(T_OBJECT, LS_cmp_exchange,  Volatile);
  case vmIntrinsics::_compareAndExchangeReferenceAcquire: return inline_unsafe_load_store(T_OBJECT, LS_cmp_exchange,  Acquire);
  case vmIntrinsics::_compareAndExchangeReferenceRelease: return inline_unsafe_load_store(T_OBJECT, LS_cmp_exchange,  Release);
  case vmIntrinsics::_compareAndExchangeByte:             return inline_unsafe_load_store(T_BYTE,   LS_cmp_exchange,  Volatile);
  case vmIntrinsics::_compareAndExchangeByteAcquire:      return inline_unsafe_load_store(T_BYTE,   LS_cmp_exchange,  Acquire);
  case vmIntrinsics::_compareAndExchangeByteRelease:      return inline_unsafe_load_store(T_BYTE,   LS_cmp_exchange,  Release);
  case vmIntrinsics::_compareAndExchangeShort:            return inline_unsafe_load_store(T_SHORT,  LS_cmp_exchange,  Volatile);
  case vmIntrinsics::_compareAndExchangeShortAcquire:     return inline_unsafe_load_store(T_SHORT,  LS_cmp_exchange,  Acquire);
  case vmIntrinsics::_compareAndExchangeShortRelease:     return inline_unsafe_load_store(T_SHORT,  LS_cmp_exchange,  Release);
  case vmIntrinsics::_compareAndExchangeInt:              return inline_unsafe_load_store(T_INT,    LS_cmp_exchange,  Volatile);
  case vmIntrinsics::_compareAndExchangeIntAcquire:       return inline_unsafe_load_store(T_INT,    LS_cmp_exchange,  Acquire);
  case vmIntrinsics::_compareAndExchangeIntRelease:       return inline_unsafe_load_store(T_INT,    LS_cmp_exchange,  Release);
  case vmIntrinsics::_compareAndExchangeLong:             return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange,  Volatile);
  case vmIntrinsics::_compareAndExchangeLongAcquire:      return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange,  Acquire);
  case vmIntrinsics::_compareAndExchangeLongRelease:      return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange,  Release);

  case vmIntrinsics::_getAndAddByte:            return inline_unsafe_load_store(T_BYTE,   LS_get_add,       Volatile);
  case vmIntrinsics::_getAndAddShort:           return inline_unsafe_load_store(T_SHORT,  LS_get_add,       Volatile);
  case vmIntrinsics::_getAndAddInt:             return inline_unsafe_load_store(T_INT,    LS_get_add,       Volatile);
  case vmIntrinsics::_getAndAddLong:            return inline_unsafe_load_store(T_LONG,   LS_get_add,       Volatile);

  case vmIntrinsics::_getAndSetByte:            return inline_unsafe_load_store(T_BYTE,   LS_get_set,       Volatile);
  case vmIntrinsics::_getAndSetShort:           return inline_unsafe_load_store(T_SHORT,  LS_get_set,       Volatile);
  case vmIntrinsics::_getAndSetInt:             return inline_unsafe_load_store(T_INT,    LS_get_set,       Volatile);
  case vmIntrinsics::_getAndSetLong:            return inline_unsafe_load_store(T_LONG,   LS_get_set,       Volatile);
  case vmIntrinsics::_getAndSetReference:       return inline_unsafe_load_store(T_OBJECT, LS_get_set,       Volatile);

  case vmIntrinsics::_loadFence:
  case vmIntrinsics::_storeFence:
  case vmIntrinsics::_fullFence:                return inline_unsafe_fence(intrinsic_id());

  case vmIntrinsics::_onSpinWait:               return inline_onspinwait();

  case vmIntrinsics::_currentThread:            return inline_native_currentThread();

#ifdef JFR_HAVE_INTRINSICS
  case vmIntrinsics::_counterTime:              return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JFR_TIME_FUNCTION), "counterTime");
  case vmIntrinsics::_getClassId:               return inline_native_classID();
  case vmIntrinsics::_getEventWriter:           return inline_native_getEventWriter();
#endif
  case vmIntrinsics::_currentTimeMillis:        return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
  case vmIntrinsics::_nanoTime:                 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
  case vmIntrinsics::_writeback0:               return inline_unsafe_writeback0();
  case vmIntrinsics::_writebackPreSync0:        return inline_unsafe_writebackSync0(true);
  case vmIntrinsics::_writebackPostSync0:       return inline_unsafe_writebackSync0(false);
  case vmIntrinsics::_allocateInstance:         return inline_unsafe_allocate();
  case vmIntrinsics::_copyMemory:               return inline_unsafe_copyMemory();
  case vmIntrinsics::_getLength:                return inline_native_getLength();
  case vmIntrinsics::_copyOf:                   return inline_array_copyOf(false);
  case vmIntrinsics::_copyOfRange:              return inline_array_copyOf(true);
  case vmIntrinsics::_equalsB:                  return inline_array_equals(StrIntrinsicNode::LL);
  case vmIntrinsics::_equalsC:                  return inline_array_equals(StrIntrinsicNode::UU);
  case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex();
  case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());

  case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
  case vmIntrinsics::_newArray:                   return inline_unsafe_newArray(false);

  case vmIntrinsics::_isAssignableFrom:         return inline_native_subtype_check();

  case vmIntrinsics::_isInstance:
  case vmIntrinsics::_getModifiers:
  case vmIntrinsics::_isInterface:
  case vmIntrinsics::_isArray:
  case vmIntrinsics::_isPrimitive:
  case vmIntrinsics::_getSuperclass:
  case vmIntrinsics::_getClassAccessFlags:      return inline_native_Class_query(intrinsic_id());

  case vmIntrinsics::_floatToRawIntBits:
  case vmIntrinsics::_floatToIntBits:
  case vmIntrinsics::_intBitsToFloat:
  case vmIntrinsics::_doubleToRawLongBits:
  case vmIntrinsics::_doubleToLongBits:
  case vmIntrinsics::_longBitsToDouble:         return inline_fp_conversions(intrinsic_id());

  case vmIntrinsics::_numberOfLeadingZeros_i:
  case vmIntrinsics::_numberOfLeadingZeros_l:
  case vmIntrinsics::_numberOfTrailingZeros_i:
  case vmIntrinsics::_numberOfTrailingZeros_l:
  case vmIntrinsics::_bitCount_i:
  case vmIntrinsics::_bitCount_l:
  case vmIntrinsics::_reverseBytes_i:
  case vmIntrinsics::_reverseBytes_l:
  case vmIntrinsics::_reverseBytes_s:
  case vmIntrinsics::_reverseBytes_c:           return inline_number_methods(intrinsic_id());

  case vmIntrinsics::_getCallerClass:           return inline_native_Reflection_getCallerClass();

  case vmIntrinsics::_Reference_get:            return inline_reference_get();

  case vmIntrinsics::_Class_cast:               return inline_Class_cast();

  case vmIntrinsics::_aescrypt_encryptBlock:
  case vmIntrinsics::_aescrypt_decryptBlock:    return inline_aescrypt_Block(intrinsic_id());

  case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
  case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
    return inline_cipherBlockChaining_AESCrypt(intrinsic_id());

  case vmIntrinsics::_electronicCodeBook_encryptAESCrypt:
  case vmIntrinsics::_electronicCodeBook_decryptAESCrypt:
    return inline_electronicCodeBook_AESCrypt(intrinsic_id());

  case vmIntrinsics::_counterMode_AESCrypt:
    return inline_counterMode_AESCrypt(intrinsic_id());

  case vmIntrinsics::_sha_implCompress:
  case vmIntrinsics::_sha2_implCompress:
  case vmIntrinsics::_sha5_implCompress:
    return inline_sha_implCompress(intrinsic_id());

  case vmIntrinsics::_digestBase_implCompressMB:
    return inline_digestBase_implCompressMB(predicate);

  case vmIntrinsics::_multiplyToLen:
    return inline_multiplyToLen();

  case vmIntrinsics::_squareToLen:
    return inline_squareToLen();

  case vmIntrinsics::_mulAdd:
    return inline_mulAdd();

  case vmIntrinsics::_montgomeryMultiply:
    return inline_montgomeryMultiply();
  case vmIntrinsics::_montgomerySquare:
    return inline_montgomerySquare();

  case vmIntrinsics::_vectorizedMismatch:
    return inline_vectorizedMismatch();

  case vmIntrinsics::_ghash_processBlocks:
    return inline_ghash_processBlocks();
  case vmIntrinsics::_base64_encodeBlock:
    return inline_base64_encodeBlock();

  case vmIntrinsics::_encodeISOArray:
  case vmIntrinsics::_encodeByteISOArray:
    return inline_encodeISOArray();

  case vmIntrinsics::_updateCRC32:
    return inline_updateCRC32();
  case vmIntrinsics::_updateBytesCRC32:
    return inline_updateBytesCRC32();
  case vmIntrinsics::_updateByteBufferCRC32:
    return inline_updateByteBufferCRC32();

  case vmIntrinsics::_updateBytesCRC32C:
    return inline_updateBytesCRC32C();
  case vmIntrinsics::_updateDirectByteBufferCRC32C:
    return inline_updateDirectByteBufferCRC32C();

  case vmIntrinsics::_updateBytesAdler32:
    return inline_updateBytesAdler32();
  case vmIntrinsics::_updateByteBufferAdler32:
    return inline_updateByteBufferAdler32();

  case vmIntrinsics::_profileBoolean:
    return inline_profileBoolean();
  case vmIntrinsics::_isCompileConstant:
    return inline_isCompileConstant();

  case vmIntrinsics::_hasNegatives:
    return inline_hasNegatives();

  case vmIntrinsics::_fmaD:
  case vmIntrinsics::_fmaF:
    return inline_fma(intrinsic_id());

  case vmIntrinsics::_isDigit:
  case vmIntrinsics::_isLowerCase:
  case vmIntrinsics::_isUpperCase:
  case vmIntrinsics::_isWhitespace:
    return inline_character_compare(intrinsic_id());

  case vmIntrinsics::_maxF:
  case vmIntrinsics::_minF:
  case vmIntrinsics::_maxD:
  case vmIntrinsics::_minD:
    return inline_fp_min_max(intrinsic_id());

  default:
    // If you get here, it may be that someone has added a new intrinsic
    // to the list in vmSymbols.hpp without implementing it here.
#ifndef PRODUCT
    if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
      tty->print_cr("*** Warning: Unimplemented intrinsic %s(%d)",
                    vmIntrinsics::name_at(intrinsic_id()), intrinsic_id());
    }
#endif
    return false;
  }
}

Node* LibraryCallKit::try_to_predicate(int predicate) {
  if (!jvms()->has_method()) {
    // Root JVMState has a null method.
    assert(map()->memory()->Opcode() == Op_Parm, "");
    // Insert the memory aliasing node
    set_all_memory(reset_memory());
  }
  assert(merged_memory(), "");

  switch (intrinsic_id()) {
  case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
    return inline_cipherBlockChaining_AESCrypt_predicate(false);
  case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
    return inline_cipherBlockChaining_AESCrypt_predicate(true);
  case vmIntrinsics::_electronicCodeBook_encryptAESCrypt:
    return inline_electronicCodeBook_AESCrypt_predicate(false);
  case vmIntrinsics::_electronicCodeBook_decryptAESCrypt:
    return inline_electronicCodeBook_AESCrypt_predicate(true);
  case vmIntrinsics::_counterMode_AESCrypt:
    return inline_counterMode_AESCrypt_predicate();
  case vmIntrinsics::_digestBase_implCompressMB:
    return inline_digestBase_implCompressMB_predicate(predicate);

  default:
    // If you get here, it may be that someone has added a new intrinsic
    // to the list in vmSymbols.hpp without implementing it here.
#ifndef PRODUCT
    if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
      tty->print_cr("*** Warning: Unimplemented predicate for intrinsic %s(%d)",
                    vmIntrinsics::name_at(intrinsic_id()), intrinsic_id());
    }
#endif
    Node* slow_ctl = control();
    set_control(top()); // No fast path intrinsic
    return slow_ctl;
  }
}

//------------------------------set_result-------------------------------
// Helper function for finishing intrinsics.
void LibraryCallKit::set_result(RegionNode* region, PhiNode* value) {
  record_for_igvn(region);
  set_control(_gvn.transform(region));
  set_result( _gvn.transform(value));
  assert(value->type()->basic_type() == result()->bottom_type()->basic_type(), "sanity");
}

//------------------------------generate_guard---------------------------
// Helper function for generating guarded fast-slow graph structures.
// The given 'test', if true, guards a slow path.  If the test fails
// then a fast path can be taken.  (We generally hope it fails.)
// In all cases, GraphKit::control() is updated to the fast path.
// The returned value represents the control for the slow path.
// The return value is never 'top'; it is either a valid control
// or NULL if it is obvious that the slow path can never be taken.
// Also, if region and the slow control are not NULL, the slow edge
// is appended to the region.
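// A typical use, sketched with an illustrative test node 'bol_overflow':
//   Node* slow = generate_slow_guard(bol_overflow, slow_region);
//   if (slow != NULL) { /* emit the out-of-line path under 'slow' */ }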
Node* LibraryCallKit::generate_guard(Node* test, RegionNode* region, float true_prob) {
  if (stopped()) {
    // Already short circuited.
    return NULL;
  }

  // Build an if node and its projections.
  // If test is true we take the slow path, which we assume is uncommon.
  if (_gvn.type(test) == TypeInt::ZERO) {
    // The slow branch is never taken.  No need to build this guard.
    return NULL;
  }

  IfNode* iff = create_and_map_if(control(), test, true_prob, COUNT_UNKNOWN);

  Node* if_slow = _gvn.transform(new IfTrueNode(iff));
  if (if_slow == top()) {
    // The slow branch is never taken.  No need to build this guard.
    return NULL;
  }

  if (region != NULL)
    region->add_req(if_slow);

  Node* if_fast = _gvn.transform(new IfFalseNode(iff));
  set_control(if_fast);

  return if_slow;
}

inline Node* LibraryCallKit::generate_slow_guard(Node* test, RegionNode* region) {
  return generate_guard(test, region, PROB_UNLIKELY_MAG(3));
}
inline Node* LibraryCallKit::generate_fair_guard(Node* test, RegionNode* region) {
  return generate_guard(test, region, PROB_FAIR);
}

inline Node* LibraryCallKit::generate_negative_guard(Node* index, RegionNode* region,
                                                     Node** pos_index) {
  if (stopped())
    return NULL;                // already stopped
  if (_gvn.type(index)->higher_equal(TypeInt::POS)) // [0,maxint]
    return NULL;                // index is already adequately typed
  Node* cmp_lt = _gvn.transform(new CmpINode(index, intcon(0)));
  Node* bol_lt = _gvn.transform(new BoolNode(cmp_lt, BoolTest::lt));
  Node* is_neg = generate_guard(bol_lt, region, PROB_MIN);
  if (is_neg != NULL && pos_index != NULL) {
    // Emulate effect of Parse::adjust_map_after_if.
    Node* ccast = new CastIINode(index, TypeInt::POS);
    ccast->set_req(0, control());
    (*pos_index) = _gvn.transform(ccast);
  }
  return is_neg;
}

// Make sure that 'position' is a valid limit index, in [0..length].
// There are two equivalent plans for checking this:
//   A. (offset + copyLength)  unsigned<=  arrayLength
//   B. offset  <=  (arrayLength - copyLength)
// We require that all of the values above, except for the sum and
// difference, are already known to be non-negative.
// Plan A is robust in the face of overflow, if offset and copyLength
// are both hugely positive.
//
// Plan B is less direct and intuitive, but it does not overflow at
// all, since the difference of two non-negatives is always
// representable.  Whenever Java methods must perform the equivalent
// check they generally use Plan B instead of Plan A.
// For the moment we use Plan A.
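// Concretely, with arrayLength = 10 and copyLength = 5: offset = 3 passes
// Plan A, since (3 + 5) unsigned<= 10; offset = 0x7fffffff fails it, since
// the signed sum wraps to a huge unsigned value that compares above
// arrayLength.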
inline Node* LibraryCallKit::generate_limit_guard(Node* offset,
                                                  Node* subseq_length,
                                                  Node* array_length,
                                                  RegionNode* region) {
  if (stopped())
    return NULL;                // already stopped
  bool zero_offset = _gvn.type(offset) == TypeInt::ZERO;
  if (zero_offset && subseq_length->eqv_uncast(array_length))
    return NULL;                // common case of whole-array copy
  Node* last = subseq_length;
  if (!zero_offset)             // last += offset
    last = _gvn.transform(new AddINode(last, offset));
  Node* cmp_lt = _gvn.transform(new CmpUNode(array_length, last));
  Node* bol_lt = _gvn.transform(new BoolNode(cmp_lt, BoolTest::lt));
  Node* is_over = generate_guard(bol_lt, region, PROB_MIN);
  return is_over;
}

// Emit range checks for the given String.value byte array
void LibraryCallKit::generate_string_range_check(Node* array, Node* offset, Node* count, bool char_count) {
  if (stopped()) {
    return; // already stopped
  }
  RegionNode* bailout = new RegionNode(1);
  record_for_igvn(bailout);
  if (char_count) {
    // Convert char count to byte count
    count = _gvn.transform(new LShiftINode(count, intcon(1)));
  }

  // Offset and count must not be negative
  generate_negative_guard(offset, bailout);
  generate_negative_guard(count, bailout);
  // Offset + count must not exceed length of array
  generate_limit_guard(offset, count, load_array_length(array), bailout);

  if (bailout->req() > 1) {
    PreserveJVMState pjvms(this);
    set_control(_gvn.transform(bailout));
    uncommon_trap(Deoptimization::Reason_intrinsic,
                  Deoptimization::Action_maybe_recompile);
  }
}

//--------------------------generate_current_thread--------------------
Node* LibraryCallKit::generate_current_thread(Node*& tls_output) {
  ciKlass*    thread_klass = env()->Thread_klass();
  const Type* thread_type  = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);
  Node* thread = _gvn.transform(new ThreadLocalNode());
  Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::threadObj_offset()));
  Node* threadObj = _gvn.transform(LoadNode::make(_gvn, NULL, immutable_memory(), p, p->bottom_type()->is_ptr(), thread_type, T_OBJECT, MemNode::unordered));
  tls_output = thread;
  return threadObj;
}


//------------------------------make_string_method_node------------------------
1097 // Helper method for String intrinsic functions. This version is called with
1098 // str1 and str2 pointing to byte[] nodes containing Latin1 or UTF16 encoded
1099 // characters (depending on 'is_byte'). cnt1 and cnt2 are pointing to Int nodes
1100 // containing the lengths of str1 and str2.
1101 Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2, StrIntrinsicNode::ArgEnc ae) {
1102 Node* result = NULL;
1103 switch (opcode) {
1104 case Op_StrIndexOf:
1105 result = new StrIndexOfNode(control(), memory(TypeAryPtr::BYTES),
1106 str1_start, cnt1, str2_start, cnt2, ae);
1107 break;
1108 case Op_StrComp:
1109 result = new StrCompNode(control(), memory(TypeAryPtr::BYTES),
1110 str1_start, cnt1, str2_start, cnt2, ae);
1111 break;
1112 case Op_StrEquals:
1113 // We already know that cnt1 == cnt2 here (checked in 'inline_string_equals').
1114 // Use the constant length if there is one because an optimized match rule may exist.
1115 result = new StrEqualsNode(control(), memory(TypeAryPtr::BYTES),
1116 str1_start, str2_start, cnt2->is_Con() ? cnt2 : cnt1, ae);
1117 break;
1118 default:
1119 ShouldNotReachHere();
1120 return NULL;
1121 }
1122
1123 // All these intrinsics have checks.
1124 C->set_has_split_ifs(true); // Has chance for split-if optimization
1125 clear_upper_avx();
1126
1127 return _gvn.transform(result);
1128 }
1129
1130 //------------------------------inline_string_compareTo------------------------
1131 bool LibraryCallKit::inline_string_compareTo(StrIntrinsicNode::ArgEnc ae) {
1132 Node* arg1 = argument(0);
1133 Node* arg2 = argument(1);
1134
1135 arg1 = must_be_not_null(arg1, true);
1136 arg2 = must_be_not_null(arg2, true);
1137
1138 // Get start addr and length of first argument
1139 Node* arg1_start = array_element_address(arg1, intcon(0), T_BYTE);
1140 Node* arg1_cnt = load_array_length(arg1);
1141
1142 // Get start addr and length of second argument
1143 Node* arg2_start = array_element_address(arg2, intcon(0), T_BYTE);
1144 Node* arg2_cnt = load_array_length(arg2);
1145
1146 Node* result = make_string_method_node(Op_StrComp, arg1_start, arg1_cnt, arg2_start, arg2_cnt, ae);
1147 set_result(result);
1148 return true;
1149 }
1150
1151 //------------------------------inline_string_equals------------------------
1152 bool LibraryCallKit::inline_string_equals(StrIntrinsicNode::ArgEnc ae) {
1153 Node* arg1 = argument(0);
1154 Node* arg2 = argument(1);
1155
1156 // paths (plus control) merge
1157 RegionNode* region = new RegionNode(3);
1158 Node* phi = new PhiNode(region, TypeInt::BOOL);
1159
1160 if (!stopped()) {
1161
1162 arg1 = must_be_not_null(arg1, true);
1163 arg2 = must_be_not_null(arg2, true);
1164
1165 // Get start addr and length of first argument
1166 Node* arg1_start = array_element_address(arg1, intcon(0), T_BYTE);
1167 Node* arg1_cnt = load_array_length(arg1);
1168
1169 // Get start addr and length of second argument
1170 Node* arg2_start = array_element_address(arg2, intcon(0), T_BYTE);
1171 Node* arg2_cnt = load_array_length(arg2);
1172
1173 // Check for arg1_cnt != arg2_cnt
1174 Node* cmp = _gvn.transform(new CmpINode(arg1_cnt, arg2_cnt));
1175 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
1176 Node* if_ne = generate_slow_guard(bol, NULL);
1177 if (if_ne != NULL) {
1178 phi->init_req(2, intcon(0));
1179 region->init_req(2, if_ne);
1180 }
1181
1182 // The check for count == 0 is done by the assembler code for StrEquals.
1183
1184 if (!stopped()) {
1185 Node* equals = make_string_method_node(Op_StrEquals, arg1_start, arg1_cnt, arg2_start, arg2_cnt, ae);
1186 phi->init_req(1, equals);
1187 region->init_req(1, control());
1188 }
1189 }
1190
1191 // post merge
1192 set_control(_gvn.transform(region));
1193 record_for_igvn(region);
1194
1195 set_result(_gvn.transform(phi));
1196 return true;
1197 }
1198
1199 //------------------------------inline_array_equals----------------------------
1200 bool LibraryCallKit::inline_array_equals(StrIntrinsicNode::ArgEnc ae) {
1201 assert(ae == StrIntrinsicNode::UU || ae == StrIntrinsicNode::LL, "unsupported array types");
1202 Node* arg1 = argument(0);
1203 Node* arg2 = argument(1);
1204
1205 const TypeAryPtr* mtype = (ae == StrIntrinsicNode::UU) ? TypeAryPtr::CHARS : TypeAryPtr::BYTES;
1206 set_result(_gvn.transform(new AryEqNode(control(), memory(mtype), arg1, arg2, ae)));
1207 clear_upper_avx();
1208
1209 return true;
1210 }
1211
1212 //------------------------------inline_hasNegatives------------------------------
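// Rough Java equivalent of the semantics this intrinsic replaces (a
// sketch, not the actual library source):
//   for (int i = off; i < off + len; i++) {
//     if (ba[i] < 0) return true;   // found a byte outside 0..0x7f
//   }
//   return false;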
1213 bool LibraryCallKit::inline_hasNegatives() {
1214 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
1215 return false;
1216 }
1217
1218 assert(callee()->signature()->size() == 3, "hasNegatives has 3 parameters");
1219 // no receiver since it is a static method
1220 Node* ba = argument(0);
1221 Node* offset = argument(1);
1222 Node* len = argument(2);
1223
1224 ba = must_be_not_null(ba, true);
1225
1226 // Range checks
1227 generate_string_range_check(ba, offset, len, false);
1228 if (stopped()) {
1229 return true;
1230 }
1231 Node* ba_start = array_element_address(ba, offset, T_BYTE);
1232 Node* result = new HasNegativesNode(control(), memory(TypeAryPtr::BYTES), ba_start, len);
1233 set_result(_gvn.transform(result));
1234 return true;
1235 }
1236
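// Sketch of the guarded contract (Preconditions.checkIndex semantics):
// deoptimize unless length >= 0 and 0 <= index < length (the latter is
// checked with one unsigned compare); on success, return 'index' with
// its type sharpened to a non-negative range.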
1237 bool LibraryCallKit::inline_preconditions_checkIndex() {
1238 Node* index = argument(0);
1239 Node* length = argument(1);
1240 if (too_many_traps(Deoptimization::Reason_intrinsic) || too_many_traps(Deoptimization::Reason_range_check)) {
1241 return false;
1242 }
1243
1244 Node* len_pos_cmp = _gvn.transform(new CmpINode(length, intcon(0)));
1245 Node* len_pos_bol = _gvn.transform(new BoolNode(len_pos_cmp, BoolTest::ge));
1246
1247 {
1248 BuildCutout unless(this, len_pos_bol, PROB_MAX);
1249 uncommon_trap(Deoptimization::Reason_intrinsic,
1250 Deoptimization::Action_make_not_entrant);
1251 }
1252
1253 if (stopped()) {
1254 return false;
1255 }
1256
1257 Node* rc_cmp = _gvn.transform(new CmpUNode(index, length));
1258 BoolTest::mask btest = BoolTest::lt;
1259 Node* rc_bool = _gvn.transform(new BoolNode(rc_cmp, btest));
1260 RangeCheckNode* rc = new RangeCheckNode(control(), rc_bool, PROB_MAX, COUNT_UNKNOWN);
1261 _gvn.set_type(rc, rc->Value(&_gvn));
1262 if (!rc_bool->is_Con()) {
1263 record_for_igvn(rc);
1264 }
1265 set_control(_gvn.transform(new IfTrueNode(rc)));
1266 {
1267 PreserveJVMState pjvms(this);
1268 set_control(_gvn.transform(new IfFalseNode(rc)));
1269 uncommon_trap(Deoptimization::Reason_range_check,
1270 Deoptimization::Action_make_not_entrant);
1271 }
1272
1273 if (stopped()) {
1274 return false;
1275 }
1276
1277 Node* result = new CastIINode(index, TypeInt::make(0, _gvn.type(length)->is_int()->_hi, Type::WidenMax));
1278 result->set_req(0, control());
1279 result = _gvn.transform(result);
1280 set_result(result);
1281 replace_in_map(index, result);
1282 clear_upper_avx();
1283 return true;
1284 }
1285
1286 //------------------------------inline_string_indexOf------------------------
1287 bool LibraryCallKit::inline_string_indexOf(StrIntrinsicNode::ArgEnc ae) {
1288 if (!Matcher::match_rule_supported(Op_StrIndexOf)) {
1289 return false;
1290 }
1291 Node* src = argument(0);
1292 Node* tgt = argument(1);
1293
1294 // Make the merge point
1295 RegionNode* result_rgn = new RegionNode(4);
1296 Node* result_phi = new PhiNode(result_rgn, TypeInt::INT);
1297
1298 src = must_be_not_null(src, true);
1299 tgt = must_be_not_null(tgt, true);
1300
1301 // Get start addr and length of source string
1302 Node* src_start = array_element_address(src, intcon(0), T_BYTE);
1303 Node* src_count = load_array_length(src);
1304
1305 // Get start addr and length of substring
1306 Node* tgt_start = array_element_address(tgt, intcon(0), T_BYTE);
1307 Node* tgt_count = load_array_length(tgt);
1308
1309 if (ae == StrIntrinsicNode::UU || ae == StrIntrinsicNode::UL) {
1310 // Divide src size by 2 if String is UTF16 encoded
1311 src_count = _gvn.transform(new RShiftINode(src_count, intcon(1)));
1312 }
1313 if (ae == StrIntrinsicNode::UU) {
1314 // Divide substring size by 2 if String is UTF16 encoded
1315 tgt_count = _gvn.transform(new RShiftINode(tgt_count, intcon(1)));
1316 }
1317
1318 Node* result = make_indexOf_node(src_start, src_count, tgt_start, tgt_count, result_rgn, result_phi, ae);
1319 if (result != NULL) {
1320 result_phi->init_req(3, result);
1321 result_rgn->init_req(3, control());
1322 }
1323 set_control(_gvn.transform(result_rgn));
1324 record_for_igvn(result_rgn);
1325 set_result(_gvn.transform(result_phi));
1326
1327 return true;
1328 }
1329
1330 //-----------------------------inline_string_indexOf-----------------------
1331 bool LibraryCallKit::inline_string_indexOfI(StrIntrinsicNode::ArgEnc ae) {
1332 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
1333 return false;
1334 }
1335 if (!Matcher::match_rule_supported(Op_StrIndexOf)) {
1336 return false;
1337 }
1338 assert(callee()->signature()->size() == 5, "String.indexOf() has 5 arguments");
1339 Node* src = argument(0); // byte[]
1340 Node* src_count = argument(1); // char count
1341 Node* tgt = argument(2); // byte[]
1342 Node* tgt_count = argument(3); // char count
1343 Node* from_index = argument(4); // char index
1344
1345 src = must_be_not_null(src, true);
1346 tgt = must_be_not_null(tgt, true);
1347
1348 // Multiply byte array index by 2 if String is UTF16 encoded
1349 Node* src_offset = (ae == StrIntrinsicNode::LL) ? from_index : _gvn.transform(new LShiftINode(from_index, intcon(1)));
1350 src_count = _gvn.transform(new SubINode(src_count, from_index));
1351 Node* src_start = array_element_address(src, src_offset, T_BYTE);
1352 Node* tgt_start = array_element_address(tgt, intcon(0), T_BYTE);
1353
1354 // Range checks
1355 generate_string_range_check(src, src_offset, src_count, ae != StrIntrinsicNode::LL);
1356 generate_string_range_check(tgt, intcon(0), tgt_count, ae == StrIntrinsicNode::UU);
1357 if (stopped()) {
1358 return true;
1359 }
1360
1361 RegionNode* region = new RegionNode(5);
1362 Node* phi = new PhiNode(region, TypeInt::INT);
1363
1364 Node* result = make_indexOf_node(src_start, src_count, tgt_start, tgt_count, region, phi, ae);
1365 if (result != NULL) {
1366 // The result is the index relative to from_index if the substring was found, -1 otherwise.
1367 // Generate code which will fold into cmove.
1368 Node* cmp = _gvn.transform(new CmpINode(result, intcon(0)));
1369 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::lt));
1370
1371 Node* if_lt = generate_slow_guard(bol, NULL);
1372 if (if_lt != NULL) {
1373 // result == -1
1374 phi->init_req(3, result);
1375 region->init_req(3, if_lt);
1376 }
1377 if (!stopped()) {
1378 result = _gvn.transform(new AddINode(result, from_index));
1379 phi->init_req(4, result);
1380 region->init_req(4, control());
1381 }
1382 }
1383
1384 set_control(_gvn.transform(region));
1385 record_for_igvn(region);
1386 set_result(_gvn.transform(phi));
1387 clear_upper_avx();
1388
1389 return true;
1390 }
1391
1392 // Create StrIndexOfNode with fast path checks
1393 Node* LibraryCallKit::make_indexOf_node(Node* src_start, Node* src_count, Node* tgt_start, Node* tgt_count,
1394 RegionNode* region, Node* phi, StrIntrinsicNode::ArgEnc ae) {
1395 // Check for substr count > string count
1396 Node* cmp = _gvn.transform(new CmpINode(tgt_count, src_count));
1397 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::gt));
1398 Node* if_gt = generate_slow_guard(bol, NULL);
1399 if (if_gt != NULL) {
1400 phi->init_req(1, intcon(-1));
1401 region->init_req(1, if_gt);
1402 }
1403 if (!stopped()) {
1404 // Check for substr count == 0
1405 cmp = _gvn.transform(new CmpINode(tgt_count, intcon(0)));
1406 bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
1407 Node* if_zero = generate_slow_guard(bol, NULL);
1408 if (if_zero != NULL) {
1409 phi->init_req(2, intcon(0));
1410 region->init_req(2, if_zero);
1411 }
1412 }
1413 if (!stopped()) {
1414 return make_string_method_node(Op_StrIndexOf, src_start, src_count, tgt_start, tgt_count, ae);
1415 }
1416 return NULL;
1417 }
1418
1419 //-----------------------------inline_string_indexOfChar-----------------------
1420 bool LibraryCallKit::inline_string_indexOfChar() {
1421 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
1422 return false;
1423 }
1424 if (!Matcher::match_rule_supported(Op_StrIndexOfChar)) {
1425 return false;
1426 }
1427 assert(callee()->signature()->size() == 4, "String.indexOfChar() has 4 arguments");
1428 Node* src = argument(0); // byte[]
1429 Node* tgt = argument(1); // tgt is int ch
1430 Node* from_index = argument(2);
1431 Node* max = argument(3);
1432
1433 src = must_be_not_null(src, true);
1434
1435 Node* src_offset = _gvn.transform(new LShiftINode(from_index, intcon(1)));
1436 Node* src_start = array_element_address(src, src_offset, T_BYTE);
1437 Node* src_count = _gvn.transform(new SubINode(max, from_index));
1438
1439 // Range checks
1440 generate_string_range_check(src, src_offset, src_count, true);
1441 if (stopped()) {
1442 return true;
1443 }
1444
1445 RegionNode* region = new RegionNode(3);
1446 Node* phi = new PhiNode(region, TypeInt::INT);
1447
1448 Node* result = new StrIndexOfCharNode(control(), memory(TypeAryPtr::BYTES), src_start, src_count, tgt, StrIntrinsicNode::none);
1449 C->set_has_split_ifs(true); // Has chance for split-if optimization
1450 _gvn.transform(result);
1451
1452 Node* cmp = _gvn.transform(new CmpINode(result, intcon(0)));
1453 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::lt));
1454
1455 Node* if_lt = generate_slow_guard(bol, NULL);
1456 if (if_lt != NULL) {
1457 // result == -1
1458 phi->init_req(2, result);
1459 region->init_req(2, if_lt);
1460 }
1461 if (!stopped()) {
1462 result = _gvn.transform(new AddINode(result, from_index));
1463 phi->init_req(1, result);
1464 region->init_req(1, control());
1465 }
1466 set_control(_gvn.transform(region));
1467 record_for_igvn(region);
1468 set_result(_gvn.transform(phi));
1469
1470 return true;
1471 }
1472 //---------------------------inline_string_copy---------------------
1473 // compress == true --> generate a compressed copy operation (compress char[]/byte[] to byte[])
1474 // int StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len)
1475 // int StringUTF16.compress(byte[] src, int srcOff, byte[] dst, int dstOff, int len)
1476 // compress == false --> generate an inflated copy operation (inflate byte[] to char[]/byte[])
1477 // void StringLatin1.inflate(byte[] src, int srcOff, char[] dst, int dstOff, int len)
1478 // void StringLatin1.inflate(byte[] src, int srcOff, byte[] dst, int dstOff, int len)
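// Illustrative example (not taken from the library source): compressing
// the UTF16 bytes {0x61,0x00, 0x62,0x00} ("ab") yields the Latin1 bytes
// {0x61, 0x62}; inflation performs the inverse expansion.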
1479 bool LibraryCallKit::inline_string_copy(bool compress) {
1480 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
1481 return false;
1482 }
1483 int nargs = 5; // 2 oops, 3 ints
1484 assert(callee()->signature()->size() == nargs, "string copy has 5 arguments");
1485
1486 Node* src = argument(0);
1487 Node* src_offset = argument(1);
1488 Node* dst = argument(2);
1489 Node* dst_offset = argument(3);
1490 Node* length = argument(4);
1491
1492 // Check for allocation before we add nodes that would confuse
1493 // tightly_coupled_allocation()
1494 AllocateArrayNode* alloc = tightly_coupled_allocation(dst, NULL);
1495
1496 // Figure out the size and type of the elements we will be copying.
1497 const Type* src_type = src->Value(&_gvn);
1498 const Type* dst_type = dst->Value(&_gvn);
1499 BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
1500 BasicType dst_elem = dst_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
1501 assert((compress && dst_elem == T_BYTE && (src_elem == T_BYTE || src_elem == T_CHAR)) ||
1502 (!compress && src_elem == T_BYTE && (dst_elem == T_BYTE || dst_elem == T_CHAR)),
1503 "Unsupported array types for inline_string_copy");
1504
1505 src = must_be_not_null(src, true);
1506 dst = must_be_not_null(dst, true);
1507
1508 // Convert char[] offsets to byte[] offsets
1509 bool convert_src = (compress && src_elem == T_BYTE);
1510 bool convert_dst = (!compress && dst_elem == T_BYTE);
1511 if (convert_src) {
1512 src_offset = _gvn.transform(new LShiftINode(src_offset, intcon(1)));
1513 } else if (convert_dst) {
1514 dst_offset = _gvn.transform(new LShiftINode(dst_offset, intcon(1)));
1515 }
1516
1517 // Range checks
1518 generate_string_range_check(src, src_offset, length, convert_src);
1519 generate_string_range_check(dst, dst_offset, length, convert_dst);
1520 if (stopped()) {
1521 return true;
1522 }
1523
1524 Node* src_start = array_element_address(src, src_offset, src_elem);
1525 Node* dst_start = array_element_address(dst, dst_offset, dst_elem);
1526 // 'src_start' points to src array + scaled offset
1527 // 'dst_start' points to dst array + scaled offset
1528 Node* count = NULL;
1529 if (compress) {
1530 count = compress_string(src_start, TypeAryPtr::get_array_body_type(src_elem), dst_start, length);
1531 } else {
1532 inflate_string(src_start, dst_start, TypeAryPtr::get_array_body_type(dst_elem), length);
1533 }
1534
1535 if (alloc != NULL) {
1536 if (alloc->maybe_set_complete(&_gvn)) {
1537 // "You break it, you buy it."
1538 InitializeNode* init = alloc->initialization();
1539 assert(init->is_complete(), "we just did this");
1540 init->set_complete_with_arraycopy();
1541 assert(dst->is_CheckCastPP(), "sanity");
1542 assert(dst->in(0)->in(0) == init, "dest pinned");
1543 }
1544 // Do not let stores that initialize this object be reordered with
1545 // a subsequent store that would make this object accessible by
1546 // other threads.
1547 // Record what AllocateNode this StoreStore protects so that
1548 // escape analysis can go from the MemBarStoreStoreNode to the
1549 // AllocateNode and eliminate the MemBarStoreStoreNode if possible
1550 // based on the escape status of the AllocateNode.
1551 insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
1552 }
1553 if (compress) {
1554 set_result(_gvn.transform(count));
1555 }
1556 clear_upper_avx();
1557
1558 return true;
1559 }
1560
1561 #ifdef _LP64
1562 #define XTOP ,top() /*additional argument*/
1563 #else //_LP64
1564 #define XTOP /*no additional argument*/
1565 #endif //_LP64
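// XTOP pads the argument list of a leaf runtime call with top() on LP64,
// where a long-sized value occupies two argument positions; on 32-bit
// builds the value is an int and no padding slot is needed.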
1566
1567 //------------------------inline_string_toBytesU--------------------------
1568 // public static byte[] StringUTF16.toBytes(char[] value, int off, int len)
1569 bool LibraryCallKit::inline_string_toBytesU() {
1570 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
1571 return false;
1572 }
1573 // Get the arguments.
1574 Node* value = argument(0);
1575 Node* offset = argument(1);
1576 Node* length = argument(2);
1577
1578 Node* newcopy = NULL;
1579
1580 // Set the original stack and the reexecute bit for the interpreter to reexecute
1581 // the bytecode that invokes StringUTF16.toBytes() if deoptimization happens.
1582 { PreserveReexecuteState preexecs(this);
1583 jvms()->set_should_reexecute(true);
1584
1585 // Check if a null path was taken unconditionally.
1586 value = null_check(value);
1587
1588 RegionNode* bailout = new RegionNode(1);
1589 record_for_igvn(bailout);
1590
1591 // Range checks
1592 generate_negative_guard(offset, bailout);
1593 generate_negative_guard(length, bailout);
1594 generate_limit_guard(offset, length, load_array_length(value), bailout);
1595 // Make sure that resulting byte[] length does not overflow Integer.MAX_VALUE
1596 generate_limit_guard(length, intcon(0), intcon(max_jint/2), bailout);
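    // (Illustrative: with length = 0x40000000 the byte count 2*length
    // would wrap negative, so any length above max_jint/2 bails out here.)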
1597
1598 if (bailout->req() > 1) {
1599 PreserveJVMState pjvms(this);
1600 set_control(_gvn.transform(bailout));
1601 uncommon_trap(Deoptimization::Reason_intrinsic,
1602 Deoptimization::Action_maybe_recompile);
1603 }
1604 if (stopped()) {
1605 return true;
1606 }
1607
1608 Node* size = _gvn.transform(new LShiftINode(length, intcon(1)));
1609 Node* klass_node = makecon(TypeKlassPtr::make(ciTypeArrayKlass::make(T_BYTE)));
1610 newcopy = new_array(klass_node, size, 0); // no arguments to push
1611 AllocateArrayNode* alloc = tightly_coupled_allocation(newcopy, NULL);
1612
1613 // Calculate starting addresses.
1614 Node* src_start = array_element_address(value, offset, T_CHAR);
1615 Node* dst_start = basic_plus_adr(newcopy, arrayOopDesc::base_offset_in_bytes(T_BYTE));
1616
1617 // Check if src array address is aligned to HeapWordSize (dst is always aligned)
1618 const TypeInt* toffset = gvn().type(offset)->is_int();
1619 bool aligned = toffset->is_con() && ((toffset->get_con() * type2aelembytes(T_CHAR)) % HeapWordSize == 0);
1620
1621 // Figure out which arraycopy runtime method to call (disjoint, uninitialized).
1622 const char* copyfunc_name = "arraycopy";
1623 address copyfunc_addr = StubRoutines::select_arraycopy_function(T_CHAR, aligned, true, copyfunc_name, true);
1624 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
1625 OptoRuntime::fast_arraycopy_Type(),
1626 copyfunc_addr, copyfunc_name, TypeRawPtr::BOTTOM,
1627 src_start, dst_start, ConvI2X(length) XTOP);
1628 // Do not let reads from the cloned object float above the arraycopy.
1629 if (alloc != NULL) {
1630 if (alloc->maybe_set_complete(&_gvn)) {
1631 // "You break it, you buy it."
1632 InitializeNode* init = alloc->initialization();
1633 assert(init->is_complete(), "we just did this");
1634 init->set_complete_with_arraycopy();
1635 assert(newcopy->is_CheckCastPP(), "sanity");
1636 assert(newcopy->in(0)->in(0) == init, "dest pinned");
1637 }
1638 // Do not let stores that initialize this object be reordered with
1639 // a subsequent store that would make this object accessible by
1640 // other threads.
1641 // Record what AllocateNode this StoreStore protects so that
1642 // escape analysis can go from the MemBarStoreStoreNode to the
1643 // AllocateNode and eliminate the MemBarStoreStoreNode if possible
1644 // based on the escape status of the AllocateNode.
1645 insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
1646 } else {
1647 insert_mem_bar(Op_MemBarCPUOrder);
1648 }
1649 } // original reexecute is set back here
1650
1651 C->set_has_split_ifs(true); // Has chance for split-if optimization
1652 if (!stopped()) {
1653 set_result(newcopy);
1654 }
1655 clear_upper_avx();
1656
1657 return true;
1658 }
1659
1660 //------------------------inline_string_getCharsU--------------------------
1661 // public void StringUTF16.getChars(byte[] src, int srcBegin, int srcEnd, char dst[], int dstBegin)
1662 bool LibraryCallKit::inline_string_getCharsU() {
1663 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
1664 return false;
1665 }
1666
1667 // Get the arguments.
1668 Node* src = argument(0);
1669 Node* src_begin = argument(1);
1670 Node* src_end = argument(2); // exclusive offset (i < src_end)
1671 Node* dst = argument(3);
1672 Node* dst_begin = argument(4);
1673
1674 // Check for allocation before we add nodes that would confuse
1675 // tightly_coupled_allocation()
1676 AllocateArrayNode* alloc = tightly_coupled_allocation(dst, NULL);
1677
1678 // Check if a null path was taken unconditionally.
1679 src = null_check(src);
1680 dst = null_check(dst);
1681 if (stopped()) {
1682 return true;
1683 }
1684
1685 // Get length and convert char[] offset to byte[] offset
1686 Node* length = _gvn.transform(new SubINode(src_end, src_begin));
1687 src_begin = _gvn.transform(new LShiftINode(src_begin, intcon(1)));
1688
1689 // Range checks
1690 generate_string_range_check(src, src_begin, length, true);
1691 generate_string_range_check(dst, dst_begin, length, false);
1692 if (stopped()) {
1693 return true;
1694 }
1695
1696 if (!stopped()) {
1697 // Calculate starting addresses.
1698 Node* src_start = array_element_address(src, src_begin, T_BYTE);
1699 Node* dst_start = array_element_address(dst, dst_begin, T_CHAR);
1700
1701 // Check if array addresses are aligned to HeapWordSize
1702 const TypeInt* tsrc = gvn().type(src_begin)->is_int();
1703 const TypeInt* tdst = gvn().type(dst_begin)->is_int();
1704 bool aligned = tsrc->is_con() && ((tsrc->get_con() * type2aelembytes(T_BYTE)) % HeapWordSize == 0) &&
1705 tdst->is_con() && ((tdst->get_con() * type2aelembytes(T_CHAR)) % HeapWordSize == 0);
1706
1707 // Figure out which arraycopy runtime method to call (disjoint, uninitialized).
1708 const char* copyfunc_name = "arraycopy";
1709 address copyfunc_addr = StubRoutines::select_arraycopy_function(T_CHAR, aligned, true, copyfunc_name, true);
1710 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
1711 OptoRuntime::fast_arraycopy_Type(),
1712 copyfunc_addr, copyfunc_name, TypeRawPtr::BOTTOM,
1713 src_start, dst_start, ConvI2X(length) XTOP);
1714 // Do not let reads from the cloned object float above the arraycopy.
1715 if (alloc != NULL) {
1716 if (alloc->maybe_set_complete(&_gvn)) {
1717 // "You break it, you buy it."
1718 InitializeNode* init = alloc->initialization();
1719 assert(init->is_complete(), "we just did this");
1720 init->set_complete_with_arraycopy();
1721 assert(dst->is_CheckCastPP(), "sanity");
1722 assert(dst->in(0)->in(0) == init, "dest pinned");
1723 }
1724 // Do not let stores that initialize this object be reordered with
1725 // a subsequent store that would make this object accessible by
1726 // other threads.
1727 // Record what AllocateNode this StoreStore protects so that
1728 // escape analysis can go from the MemBarStoreStoreNode to the
1729 // AllocateNode and eliminate the MemBarStoreStoreNode if possible
1730 // based on the escape status of the AllocateNode.
1731 insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
1732 } else {
1733 insert_mem_bar(Op_MemBarCPUOrder);
1734 }
1735 }
1736
1737 C->set_has_split_ifs(true); // Has chance for split-if optimization
1738 return true;
1739 }
1740
1741 //----------------------inline_string_char_access----------------------------
1742 // Store/Load char to/from byte[] array.
1743 // static void StringUTF16.putChar(byte[] val, int index, int c)
1744 // static char StringUTF16.getChar(byte[] val, int index)
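// Illustrative index mapping: char index i addresses byte offsets 2*i and
// 2*i+1 of the backing byte[], which is why the base-offset and scale
// asserts below must hold.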
1745 bool LibraryCallKit::inline_string_char_access(bool is_store) {
1746 Node* value = argument(0);
1747 Node* index = argument(1);
1748 Node* ch = is_store ? argument(2) : NULL;
1749
1750 // This intrinsic accesses byte[] array as char[] array. Computing the offsets
1751 // correctly requires matched array shapes.
1752 assert (arrayOopDesc::base_offset_in_bytes(T_CHAR) == arrayOopDesc::base_offset_in_bytes(T_BYTE),
1753 "sanity: byte[] and char[] bases agree");
1754 assert (type2aelembytes(T_CHAR) == type2aelembytes(T_BYTE)*2,
1755 "sanity: byte[] and char[] scales agree");
1756
1757 // Bail out when getChar over constants is requested: constant folding would
1758 // reject folding a mismatched char access over byte[]. Normal inlining of the
1759 // getChar Java method would constant fold nicely instead.
1760 if (!is_store && value->is_Con() && index->is_Con()) {
1761 return false;
1762 }
1763
1764 value = must_be_not_null(value, true);
1765
1766 Node* adr = array_element_address(value, index, T_CHAR);
1767 if (adr->is_top()) {
1768 return false;
1769 }
1770 if (is_store) {
1771 access_store_at(value, adr, TypeAryPtr::BYTES, ch, TypeInt::CHAR, T_CHAR, IN_HEAP | MO_UNORDERED | C2_MISMATCHED);
1772 } else {
1773 ch = access_load_at(value, adr, TypeAryPtr::BYTES, TypeInt::CHAR, T_CHAR, IN_HEAP | MO_UNORDERED | C2_MISMATCHED | C2_CONTROL_DEPENDENT_LOAD);
1774 set_result(ch);
1775 }
1776 return true;
1777 }
1778
1779 //--------------------------round_double_node--------------------------------
1780 // Round a double node if necessary.
1781 Node* LibraryCallKit::round_double_node(Node* n) {
1782 if (Matcher::strict_fp_requires_explicit_rounding && UseSSE <= 1)
1783 n = _gvn.transform(new RoundDoubleNode(0, n));
1784 return n;
1785 }
1786
1787 //------------------------------inline_math-----------------------------------
1788 // public static double Math.abs(double)
1789 // public static double Math.sqrt(double)
1790 // public static double Math.log(double)
1791 // public static double Math.log10(double)
1792 bool LibraryCallKit::inline_double_math(vmIntrinsics::ID id) {
1793 Node* arg = round_double_node(argument(0));
1794 Node* n = NULL;
1795 switch (id) {
1796 case vmIntrinsics::_dabs: n = new AbsDNode( arg); break;
1797 case vmIntrinsics::_dsqrt: n = new SqrtDNode(C, control(), arg); break;
1798 case vmIntrinsics::_ceil: n = RoundDoubleModeNode::make(_gvn, arg, RoundDoubleModeNode::rmode_ceil); break;
1799 case vmIntrinsics::_floor: n = RoundDoubleModeNode::make(_gvn, arg, RoundDoubleModeNode::rmode_floor); break;
1800 case vmIntrinsics::_rint: n = RoundDoubleModeNode::make(_gvn, arg, RoundDoubleModeNode::rmode_rint); break;
1801 default: fatal_unexpected_iid(id); break;
1802 }
1803 set_result(_gvn.transform(n));
1804 return true;
1805 }
1806
1807 //------------------------------inline_math-----------------------------------
1808 // public static float Math.abs(float)
1809 // public static int Math.abs(int)
1810 // public static long Math.abs(long)
1811 bool LibraryCallKit::inline_math(vmIntrinsics::ID id) {
1812 Node* arg = argument(0);
1813 Node* n = NULL;
1814 switch (id) {
1815 case vmIntrinsics::_fabs: n = new AbsFNode( arg); break;
1816 case vmIntrinsics::_iabs: n = new AbsINode( arg); break;
1817 case vmIntrinsics::_labs: n = new AbsLNode( arg); break;
1818 default: fatal_unexpected_iid(id); break;
1819 }
1820 set_result(_gvn.transform(n));
1821 return true;
1822 }
1823
1824 //------------------------------runtime_math-----------------------------
1825 bool LibraryCallKit::runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName) {
1826 assert(call_type == OptoRuntime::Math_DD_D_Type() || call_type == OptoRuntime::Math_D_D_Type(),
1827 "must be (DD)D or (D)D type");
1828
1829 // Inputs
1830 Node* a = round_double_node(argument(0));
1831 Node* b = (call_type == OptoRuntime::Math_DD_D_Type()) ? round_double_node(argument(2)) : NULL;
1832
1833 const TypePtr* no_memory_effects = NULL;
1834 Node* trig = make_runtime_call(RC_LEAF, call_type, funcAddr, funcName,
1835 no_memory_effects,
1836 a, top(), b, b ? top() : NULL);
1837 Node* value = _gvn.transform(new ProjNode(trig, TypeFunc::Parms+0));
1838 #ifdef ASSERT
1839 Node* value_top = _gvn.transform(new ProjNode(trig, TypeFunc::Parms+1));
1840 assert(value_top == top(), "second value must be top");
1841 #endif
1842
1843 set_result(value);
1844 return true;
1845 }
1846
1847 //------------------------------inline_math_native-----------------------------
1848 bool LibraryCallKit::inline_math_native(vmIntrinsics::ID id) {
1849 #define FN_PTR(f) CAST_FROM_FN_PTR(address, f)
1850 switch (id) {
1851 // These intrinsics are not properly supported on all hardware
1852 case vmIntrinsics::_dsin:
1853 return StubRoutines::dsin() != NULL ?
1854 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dsin(), "dsin") :
1855 runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dsin), "SIN");
1856 case vmIntrinsics::_dcos:
1857 return StubRoutines::dcos() != NULL ?
1858 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dcos(), "dcos") :
1859 runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dcos), "COS");
1860 case vmIntrinsics::_dtan:
1861 return StubRoutines::dtan() != NULL ?
1862 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dtan(), "dtan") :
1863 runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dtan), "TAN");
1864 case vmIntrinsics::_dlog:
1865 return StubRoutines::dlog() != NULL ?
1866 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dlog(), "dlog") :
1867 runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dlog), "LOG");
1868 case vmIntrinsics::_dlog10:
1869 return StubRoutines::dlog10() != NULL ?
1870 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dlog10(), "dlog10") :
1871 runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dlog10), "LOG10");
1872
1873 // These intrinsics are supported on all hardware
1874 case vmIntrinsics::_ceil:
1875 case vmIntrinsics::_floor:
1876 case vmIntrinsics::_rint: return Matcher::match_rule_supported(Op_RoundDoubleMode) ? inline_double_math(id) : false;
1877 case vmIntrinsics::_dsqrt: return Matcher::match_rule_supported(Op_SqrtD) ? inline_double_math(id) : false;
1878 case vmIntrinsics::_dabs: return Matcher::has_match_rule(Op_AbsD) ? inline_double_math(id) : false;
1879 case vmIntrinsics::_fabs: return Matcher::match_rule_supported(Op_AbsF) ? inline_math(id) : false;
1880 case vmIntrinsics::_iabs: return Matcher::match_rule_supported(Op_AbsI) ? inline_math(id) : false;
1881 case vmIntrinsics::_labs: return Matcher::match_rule_supported(Op_AbsL) ? inline_math(id) : false;
1882
1883 case vmIntrinsics::_dexp:
1884 return StubRoutines::dexp() != NULL ?
1885 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dexp(), "dexp") :
1886 runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dexp), "EXP");
1887 case vmIntrinsics::_dpow: {
1888 Node* exp = round_double_node(argument(2));
1889 const TypeD* d = _gvn.type(exp)->isa_double_constant();
1890 if (d != NULL && d->getd() == 2.0) {
1891 // Special case: pow(x, 2.0) => x * x
1892 Node* base = round_double_node(argument(0));
1893 set_result(_gvn.transform(new MulDNode(base, base)));
1894 return true;
1895 }
1896 return StubRoutines::dpow() != NULL ?
1897 runtime_math(OptoRuntime::Math_DD_D_Type(), StubRoutines::dpow(), "dpow") :
1898 runtime_math(OptoRuntime::Math_DD_D_Type(), FN_PTR(SharedRuntime::dpow), "POW");
1899 }
1900 #undef FN_PTR
1901
1902 // These intrinsics are not yet correctly implemented
1903 case vmIntrinsics::_datan2:
1904 return false;
1905
1906 default:
1907 fatal_unexpected_iid(id);
1908 return false;
1909 }
1910 }
1911
1912 static bool is_simple_name(Node* n) {
1913 return (n->req() == 1 // constant
1914 || (n->is_Type() && n->as_Type()->type()->singleton())
1915 || n->is_Proj() // parameter or return value
1916 || n->is_Phi() // local of some sort
1917 );
1918 }
1919
1920 //----------------------------inline_notify-----------------------------------*
1921 bool LibraryCallKit::inline_notify(vmIntrinsics::ID id) {
1922 const TypeFunc* ftype = OptoRuntime::monitor_notify_Type();
1923 address func;
1924 if (id == vmIntrinsics::_notify) {
1925 func = OptoRuntime::monitor_notify_Java();
1926 } else {
1927 func = OptoRuntime::monitor_notifyAll_Java();
1928 }
1929 Node* call = make_runtime_call(RC_NO_LEAF, ftype, func, NULL, TypeRawPtr::BOTTOM, argument(0));
1930 make_slow_call_ex(call, env()->Throwable_klass(), false);
1931 return true;
1932 }
1933
1934
1935 //----------------------------inline_min_max-----------------------------------
1936 bool LibraryCallKit::inline_min_max(vmIntrinsics::ID id) {
1937 set_result(generate_min_max(id, argument(0), argument(1)));
1938 return true;
1939 }
1940
1941 void LibraryCallKit::inline_math_mathExact(Node* math, Node *test) {
1942 Node* bol = _gvn.transform( new BoolNode(test, BoolTest::overflow) );
1943 IfNode* check = create_and_map_if(control(), bol, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
1944 Node* fast_path = _gvn.transform( new IfFalseNode(check));
1945 Node* slow_path = _gvn.transform( new IfTrueNode(check) );
1946
1947 {
1948 PreserveJVMState pjvms(this);
1949 PreserveReexecuteState preexecs(this);
1950 jvms()->set_should_reexecute(true);
1951
1952 set_control(slow_path);
1953 set_i_o(i_o());
1954
1955 uncommon_trap(Deoptimization::Reason_intrinsic,
1956 Deoptimization::Action_none);
1957 }
1958
1959 set_control(fast_path);
1960 set_result(math);
1961 }
1962
1963 template <typename OverflowOp>
1964 bool LibraryCallKit::inline_math_overflow(Node* arg1, Node* arg2) {
1965 typedef typename OverflowOp::MathOp MathOp;
1966
1967 MathOp* mathOp = new MathOp(arg1, arg2);
1968 Node* operation = _gvn.transform( mathOp );
1969 Node* ofcheck = _gvn.transform( new OverflowOp(arg1, arg2) );
1970 inline_math_mathExact(operation, ofcheck);
1971 return true;
1972 }
1973
1974 bool LibraryCallKit::inline_math_addExactI(bool is_increment) {
1975 return inline_math_overflow<OverflowAddINode>(argument(0), is_increment ? intcon(1) : argument(1));
1976 }
1977
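// Note: a long occupies two JVM argument slots, so in the two-long
// variants below the second operand is fetched with argument(2), not
// argument(1) (likewise for multiplyExactL and multiplyHigh).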
1978 bool LibraryCallKit::inline_math_addExactL(bool is_increment) {
1979 return inline_math_overflow<OverflowAddLNode>(argument(0), is_increment ? longcon(1) : argument(2));
1980 }
1981
1982 bool LibraryCallKit::inline_math_subtractExactI(bool is_decrement) {
1983 return inline_math_overflow<OverflowSubINode>(argument(0), is_decrement ? intcon(1) : argument(1));
1984 }
1985
1986 bool LibraryCallKit::inline_math_subtractExactL(bool is_decrement) {
1987 return inline_math_overflow<OverflowSubLNode>(argument(0), is_decrement ? longcon(1) : argument(2));
1988 }
1989
1990 bool LibraryCallKit::inline_math_negateExactI() {
1991 return inline_math_overflow<OverflowSubINode>(intcon(0), argument(0));
1992 }
1993
1994 bool LibraryCallKit::inline_math_negateExactL() {
1995 return inline_math_overflow<OverflowSubLNode>(longcon(0), argument(0));
1996 }
1997
1998 bool LibraryCallKit::inline_math_multiplyExactI() {
1999 return inline_math_overflow<OverflowMulINode>(argument(0), argument(1));
2000 }
2001
2002 bool LibraryCallKit::inline_math_multiplyExactL() {
2003 return inline_math_overflow<OverflowMulLNode>(argument(0), argument(2));
2004 }
2005
2006 bool LibraryCallKit::inline_math_multiplyHigh() {
2007 set_result(_gvn.transform(new MulHiLNode(argument(0), argument(2))));
2008 return true;
2009 }
2010
2011 Node*
2012 LibraryCallKit::generate_min_max(vmIntrinsics::ID id, Node* x0, Node* y0) {
2013 // These are the candidate return values:
2014 Node* xvalue = x0;
2015 Node* yvalue = y0;
2016
2017 if (xvalue == yvalue) {
2018 return xvalue;
2019 }
2020
2021 bool want_max = (id == vmIntrinsics::_max);
2022
2023 const TypeInt* txvalue = _gvn.type(xvalue)->isa_int();
2024 const TypeInt* tyvalue = _gvn.type(yvalue)->isa_int();
2025 if (txvalue == NULL || tyvalue == NULL) return top();
2026 // This is not really necessary, but it is consistent with a
2027 // hypothetical MaxINode::Value method:
2028 int widen = MAX2(txvalue->_widen, tyvalue->_widen);
2029
2030 // %%% This folding logic should (ideally) be in a different place.
2031 // Some of it should be inside IfNode, where there ought to be a more
2032 // reliable transformation of ?: style patterns into cmoves. We also want
2033 // more powerful optimizations around cmove and min/max.
2034
2035 // Try to find a dominating comparison of these guys.
2036 // It can simplify the index computation for Arrays.copyOf
2037 // and similar uses of System.arraycopy.
2038 // First, compute the normalized version of CmpI(x, y).
2039 int cmp_op = Op_CmpI;
2040 Node* xkey = xvalue;
2041 Node* ykey = yvalue;
2042 Node* ideal_cmpxy = _gvn.transform(new CmpINode(xkey, ykey));
2043 if (ideal_cmpxy->is_Cmp()) {
2044 // E.g., if we have CmpI(length - offset, count),
2045 // it might idealize to CmpI(length, count + offset)
2046 cmp_op = ideal_cmpxy->Opcode();
2047 xkey = ideal_cmpxy->in(1);
2048 ykey = ideal_cmpxy->in(2);
2049 }
2050
2051 // Start by locating any relevant comparisons.
2052 Node* start_from = (xkey->outcnt() < ykey->outcnt()) ? xkey : ykey;
2053 Node* cmpxy = NULL;
2054 Node* cmpyx = NULL;
2055 for (DUIterator_Fast kmax, k = start_from->fast_outs(kmax); k < kmax; k++) {
2056 Node* cmp = start_from->fast_out(k);
2057 if (cmp->outcnt() > 0 && // must have prior uses
2058 cmp->in(0) == NULL && // must be context-independent
2059 cmp->Opcode() == cmp_op) { // right kind of compare
2060 if (cmp->in(1) == xkey && cmp->in(2) == ykey) cmpxy = cmp;
2061 if (cmp->in(1) == ykey && cmp->in(2) == xkey) cmpyx = cmp;
2062 }
2063 }
2064
2065 const int NCMPS = 2;
2066 Node* cmps[NCMPS] = { cmpxy, cmpyx };
2067 int cmpn;
2068 for (cmpn = 0; cmpn < NCMPS; cmpn++) {
2069 if (cmps[cmpn] != NULL) break; // find a result
2070 }
2071 if (cmpn < NCMPS) {
2072 // Look for a dominating test that tells us the min and max.
2073 int depth = 0; // Limit search depth for speed
2074 Node* dom = control();
2075 for (; dom != NULL; dom = IfNode::up_one_dom(dom, true)) {
2076 if (++depth >= 100) break;
2077 Node* ifproj = dom;
2078 if (!ifproj->is_Proj()) continue;
2079 Node* iff = ifproj->in(0);
2080 if (!iff->is_If()) continue;
2081 Node* bol = iff->in(1);
2082 if (!bol->is_Bool()) continue;
2083 Node* cmp = bol->in(1);
2084 if (cmp == NULL) continue;
2085 for (cmpn = 0; cmpn < NCMPS; cmpn++)
2086 if (cmps[cmpn] == cmp) break;
2087 if (cmpn == NCMPS) continue;
2088 BoolTest::mask btest = bol->as_Bool()->_test._test;
2089 if (ifproj->is_IfFalse()) btest = BoolTest(btest).negate();
2090 if (cmp->in(1) == ykey) btest = BoolTest(btest).commute();
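      // (Illustrative: a dominating 'if (y <= x)' observed on its false
      // projection first negates le to gt, then, because y is in(1) of the
      // compare, commutes gt to lt, yielding the normalized fact 'x < y'.)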
2091 // At this point, we know that 'x btest y' is true.
2092 switch (btest) {
2093 case BoolTest::eq:
2094 // They are proven equal, so we can collapse the min/max.
2095 // Either value is the answer. Choose the simpler.
2096 if (is_simple_name(yvalue) && !is_simple_name(xvalue))
2097 return yvalue;
2098 return xvalue;
2099 case BoolTest::lt: // x < y
2100 case BoolTest::le: // x <= y
2101 return (want_max ? yvalue : xvalue);
2102 case BoolTest::gt: // x > y
2103 case BoolTest::ge: // x >= y
2104 return (want_max ? xvalue : yvalue);
2105 default:
2106 break;
2107 }
2108 }
2109 }
2110
2111 // We failed to find a dominating test.
2112 // Let's pick a test that might GVN with prior tests.
2113 Node* best_bol = NULL;
2114 BoolTest::mask best_btest = BoolTest::illegal;
2115 for (cmpn = 0; cmpn < NCMPS; cmpn++) {
2116 Node* cmp = cmps[cmpn];
2117 if (cmp == NULL) continue;
2118 for (DUIterator_Fast jmax, j = cmp->fast_outs(jmax); j < jmax; j++) {
2119 Node* bol = cmp->fast_out(j);
2120 if (!bol->is_Bool()) continue;
2121 BoolTest::mask btest = bol->as_Bool()->_test._test;
2122 if (btest == BoolTest::eq || btest == BoolTest::ne) continue;
2123 if (cmp->in(1) == ykey) btest = BoolTest(btest).commute();
2124 if (bol->outcnt() > (best_bol == NULL ? 0 : best_bol->outcnt())) {
2125 best_bol = bol->as_Bool();
2126 best_btest = btest;
2127 }
2128 }
2129 }
2130
2131 Node* answer_if_true = NULL;
2132 Node* answer_if_false = NULL;
2133 switch (best_btest) {
2134 default:
2135 if (cmpxy == NULL)
2136 cmpxy = ideal_cmpxy;
2137 best_bol = _gvn.transform(new BoolNode(cmpxy, BoolTest::lt));
2138 // and fall through:
2139 case BoolTest::lt: // x < y
2140 case BoolTest::le: // x <= y
2141 answer_if_true = (want_max ? yvalue : xvalue);
2142 answer_if_false = (want_max ? xvalue : yvalue);
2143 break;
2144 case BoolTest::gt: // x > y
2145 case BoolTest::ge: // x >= y
2146 answer_if_true = (want_max ? xvalue : yvalue);
2147 answer_if_false = (want_max ? yvalue : xvalue);
2148 break;
2149 }
2150
2151 jint hi, lo;
2152 if (want_max) {
2153 // We can sharpen the minimum.
2154 hi = MAX2(txvalue->_hi, tyvalue->_hi);
2155 lo = MAX2(txvalue->_lo, tyvalue->_lo);
2156 } else {
2157 // We can sharpen the maximum.
2158 hi = MIN2(txvalue->_hi, tyvalue->_hi);
2159 lo = MIN2(txvalue->_lo, tyvalue->_lo);
2160 }
2161
2162 // Use a flow-free graph structure, to avoid creating excess control edges
2163 // which could hinder other optimizations.
2164 // Since Math.min/max is often used with arraycopy, we want
2165 // tightly_coupled_allocation to be able to see beyond min/max expressions.
2166 Node* cmov = CMoveNode::make(NULL, best_bol,
2167 answer_if_false, answer_if_true,
2168 TypeInt::make(lo, hi, widen));
2169
2170 return _gvn.transform(cmov);
2171
2172 /*
2173 // This is not as desirable as it may seem, since Min and Max
2174 // nodes do not have a full set of optimizations.
2175 // And they would interfere, anyway, with 'if' optimizations
2176 // and with CMoveI canonical forms.
2177 switch (id) {
2178 case vmIntrinsics::_min:
2179 result_val = _gvn.transform(new (C, 3) MinINode(x,y)); break;
2180 case vmIntrinsics::_max:
2181 result_val = _gvn.transform(new (C, 3) MaxINode(x,y)); break;
2182 default:
2183 ShouldNotReachHere();
2184 }
2185 */
2186 }
2187
2188 inline int
2189 LibraryCallKit::classify_unsafe_addr(Node* &base, Node* &offset, BasicType type) {
2190 const TypePtr* base_type = TypePtr::NULL_PTR;
2191 if (base != NULL) base_type = _gvn.type(base)->isa_ptr();
2192 if (base_type == NULL) {
2193 // Unknown type.
2194 return Type::AnyPtr;
2195 } else if (base_type == TypePtr::NULL_PTR) {
2196 // Since this is a NULL+long form, we have to switch to a rawptr.
2197 base = _gvn.transform(new CastX2PNode(offset));
2198 offset = MakeConX(0);
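    // (Illustrative: an off-heap access such as Unsafe.getByte(null, addr)
    // lands here; the long 'addr' becomes the raw base pointer and the
    // residual offset is zero.)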
2199 return Type::RawPtr;
2200 } else if (base_type->base() == Type::RawPtr) {
2201 return Type::RawPtr;
2202 } else if (base_type->isa_oopptr()) {
2203 // Base is never null => always a heap address.
2204 if (!TypePtr::NULL_PTR->higher_equal(base_type)) {
2205 return Type::OopPtr;
2206 }
2207 // Offset is small => always a heap address.
2208 const TypeX* offset_type = _gvn.type(offset)->isa_intptr_t();
2209 if (offset_type != NULL &&
2210 base_type->offset() == 0 && // (should always be?)
2211 offset_type->_lo >= 0 &&
2212 !MacroAssembler::needs_explicit_null_check(offset_type->_hi)) {
2213 return Type::OopPtr;
2214 } else if (type == T_OBJECT) {
2215 // An off-heap access to an oop doesn't make any sense; it has to be
2216 // on-heap.
2217 return Type::OopPtr;
2218 }
2219 // Otherwise, it might either be oop+off or NULL+addr.
2220 return Type::AnyPtr;
2221 } else {
2222 // No information:
2223 return Type::AnyPtr;
2224 }
2225 }
2226
2227 inline Node* LibraryCallKit::make_unsafe_address(Node*& base, Node* offset, DecoratorSet decorators, BasicType type, bool can_cast) {
2228 Node* uncasted_base = base;
2229 int kind = classify_unsafe_addr(uncasted_base, offset, type);
2230 if (kind == Type::RawPtr) {
2231 return basic_plus_adr(top(), uncasted_base, offset);
2232 } else if (kind == Type::AnyPtr) {
2233 assert(base == uncasted_base, "unexpected base change");
2234 if (can_cast) {
2235 if (!_gvn.type(base)->speculative_maybe_null() &&
2236 !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
2237 // According to profiling, this access is always on
2238 // heap. Casting the base to not null and thus avoiding membars
2239 // around the access should allow better optimizations
2240 Node* null_ctl = top();
2241 base = null_check_oop(base, &null_ctl, true, true, true);
2242 assert(null_ctl->is_top(), "no null control here");
2243 return basic_plus_adr(base, offset);
2244 } else if (_gvn.type(base)->speculative_always_null() &&
2245 !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
2246 // According to profiling, this access is always off
2247 // heap.
2248 base = null_assert(base);
2249 Node* raw_base = _gvn.transform(new CastX2PNode(offset));
2250 offset = MakeConX(0);
2251 return basic_plus_adr(top(), raw_base, offset);
2252 }
2253 }
2254 // We don't know if it's an on heap or off heap access. Fall back
2255 // to raw memory access.
2256 Node* raw = _gvn.transform(new CheckCastPPNode(control(), base, TypeRawPtr::BOTTOM));
2257 return basic_plus_adr(top(), raw, offset);
2258 } else {
2259 assert(base == uncasted_base, "unexpected base change");
2260 // We know it's an on heap access so base can't be null
2261 if (TypePtr::NULL_PTR->higher_equal(_gvn.type(base))) {
2262 base = must_be_not_null(base, true);
2263 }
2264 return basic_plus_adr(base, offset);
2265 }
2266 }
2267
2268 //--------------------------inline_number_methods-----------------------------
2269 // inline int Integer.numberOfLeadingZeros(int)
2270 // inline int Long.numberOfLeadingZeros(long)
2271 //
2272 // inline int Integer.numberOfTrailingZeros(int)
2273 // inline int Long.numberOfTrailingZeros(long)
2274 //
2275 // inline int Integer.bitCount(int)
2276 // inline int Long.bitCount(long)
2277 //
2278 // inline char Character.reverseBytes(char)
2279 // inline short Short.reverseBytes(short)
2280 // inline int Integer.reverseBytes(int)
2281 // inline long Long.reverseBytes(long)
2282 bool LibraryCallKit::inline_number_methods(vmIntrinsics::ID id) {
2283 Node* arg = argument(0);
2284 Node* n = NULL;
2285 switch (id) {
2286 case vmIntrinsics::_numberOfLeadingZeros_i: n = new CountLeadingZerosINode( arg); break;
2287 case vmIntrinsics::_numberOfLeadingZeros_l: n = new CountLeadingZerosLNode( arg); break;
2288 case vmIntrinsics::_numberOfTrailingZeros_i: n = new CountTrailingZerosINode(arg); break;
2289 case vmIntrinsics::_numberOfTrailingZeros_l: n = new CountTrailingZerosLNode(arg); break;
2290 case vmIntrinsics::_bitCount_i: n = new PopCountINode( arg); break;
2291 case vmIntrinsics::_bitCount_l: n = new PopCountLNode( arg); break;
2292 case vmIntrinsics::_reverseBytes_c: n = new ReverseBytesUSNode(0, arg); break;
2293 case vmIntrinsics::_reverseBytes_s: n = new ReverseBytesSNode( 0, arg); break;
2294 case vmIntrinsics::_reverseBytes_i: n = new ReverseBytesINode( 0, arg); break;
2295 case vmIntrinsics::_reverseBytes_l: n = new ReverseBytesLNode( 0, arg); break;
2296 default: fatal_unexpected_iid(id); break;
2297 }
2298 set_result(_gvn.transform(n));
2299 return true;
2300 }
2301
2302 //----------------------------inline_unsafe_access----------------------------
2303
2304 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2305 // Attempt to infer a sharper value type from the offset and base type.
2306 ciKlass* sharpened_klass = NULL;
2307
2308 // See if it is an instance field, with an object type.
2309 if (alias_type->field() != NULL) {
2310 if (alias_type->field()->type()->is_klass()) {
2311 sharpened_klass = alias_type->field()->type()->as_klass();
2312 }
2313 }
2314
2315 // See if it is a narrow oop array.
2316 if (adr_type->isa_aryptr()) {
2317 if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2318 const TypeOopPtr *elem_type = adr_type->is_aryptr()->elem()->isa_oopptr();
2319 if (elem_type != NULL) {
2320 sharpened_klass = elem_type->klass();
2321 }
2322 }
2323 }
2324
2325 // The sharpened class might be unloaded if there is no class loader
2326 // constraint in place.
2327 if (sharpened_klass != NULL && sharpened_klass->is_loaded()) {
2328 const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
2329
2330 #ifndef PRODUCT
2331 if (C->print_intrinsics() || C->print_inlining()) {
2332 tty->print(" from base type: "); adr_type->dump(); tty->cr();
2333 tty->print(" sharpened value: "); tjp->dump(); tty->cr();
2334 }
2335 #endif
2336 // Sharpen the value type.
2337 return tjp;
2338 }
2339 return NULL;
2340 }
2341
2342 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2343 switch (kind) {
2344 case Relaxed:
2345 return MO_UNORDERED;
2346 case Opaque:
2347 return MO_RELAXED;
2348 case Acquire:
2349 return MO_ACQUIRE;
2350 case Release:
2351 return MO_RELEASE;
2352 case Volatile:
2353 return MO_SEQ_CST;
2354 default:
2355 ShouldNotReachHere();
2356 return 0;
2357 }
2358 }
2359
2360 bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, const AccessKind kind, const bool unaligned) {
2361 if (callee()->is_static()) return false; // caller must have the capability!
2362 DecoratorSet decorators = C2_UNSAFE_ACCESS;
2363 guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
2364 guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2365 assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2366
2367 if (is_reference_type(type)) {
2368 decorators |= ON_UNKNOWN_OOP_REF;
2369 }
2370
2371 if (unaligned) {
2372 decorators |= C2_UNALIGNED;
2373 }
2374
2375 #ifndef PRODUCT
2376 {
2377 ResourceMark rm;
2378 // Check the signatures.
2379 ciSignature* sig = callee()->signature();
2380 #ifdef ASSERT
2381 if (!is_store) {
2382 // Object getReference(Object base, int/long offset), etc.
2383 BasicType rtype = sig->return_type()->basic_type();
2384 assert(rtype == type, "getter must return the expected value");
2385 assert(sig->count() == 2, "oop getter has 2 arguments");
2386 assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2387 assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2388 } else {
2389 // void putReference(Object base, int/long offset, Object x), etc.
2390 assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2391 assert(sig->count() == 3, "oop putter has 3 arguments");
2392 assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2393 assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2394 BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2395 assert(vtype == type, "putter must accept the expected value");
2396 }
2397 #endif // ASSERT
2398 }
2399 #endif //PRODUCT
2400
2401 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
2402
2403 Node* receiver = argument(0); // type: oop
2404
2405 // Build address expression.
2406 Node* adr;
2407 Node* heap_base_oop = top();
2408 Node* offset = top();
2409 Node* val;
2410
2411 // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2412 Node* base = argument(1); // type: oop
2413 // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2414 offset = argument(2); // type: long
2415 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2416 // to be plain byte offsets, which are also the same as those accepted
2417 // by oopDesc::field_addr.
2418 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2419 "fieldOffset must be byte-scaled");
2420 // 32-bit machines ignore the high half!
2421 offset = ConvL2X(offset);
2422 adr = make_unsafe_address(base, offset, is_store ? ACCESS_WRITE : ACCESS_READ, type, kind == Relaxed);
2423
2424 if (_gvn.type(base)->isa_ptr() == TypePtr::NULL_PTR) {
2425 if (type != T_OBJECT) {
2426 decorators |= IN_NATIVE; // off-heap primitive access
2427 } else {
2428 return false; // off-heap oop accesses are not supported
2429 }
2430 } else {
2431 heap_base_oop = base; // on-heap or mixed access
2432 }
2433
2434 // Can base be NULL? Otherwise, always on-heap access.
2435 bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));
2436
2437 if (!can_access_non_heap) {
2438 decorators |= IN_HEAP;
2439 }
2440
2441 val = is_store ? argument(4) : NULL;
2442
2443 const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2444 if (adr_type == TypePtr::NULL_PTR) {
2445 return false; // off-heap access with zero address
2446 }
2447
2448 // Try to categorize the address.
2449 Compile::AliasType* alias_type = C->alias_type(adr_type);
2450 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2451
2452 if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2453 alias_type->adr_type() == TypeAryPtr::RANGE) {
2454 return false; // not supported
2455 }
2456
2457 bool mismatched = false;
2458 BasicType bt = alias_type->basic_type();
2459 if (bt != T_ILLEGAL) {
2460 assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2461 if (bt == T_BYTE && adr_type->isa_aryptr()) {
2462 // Alias type doesn't differentiate between byte[] and boolean[].
2463 // Use address type to get the element type.
2464 bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2465 }
2466 if (bt == T_ARRAY || bt == T_NARROWOOP) {
2467 // accessing an array field with getReference is not a mismatch
2468 bt = T_OBJECT;
2469 }
2470 if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2471 // Don't intrinsify mismatched object accesses
2472 return false;
2473 }
2474 mismatched = (bt != type);
2475 } else if (alias_type->adr_type()->isa_oopptr()) {
2476 mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2477 }
2478
2479 assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2480
2481 if (mismatched) {
2482 decorators |= C2_MISMATCHED;
2483 }
2484
2485 // First guess at the value type.
2486 const Type *value_type = Type::get_const_basic_type(type);
2487
2488 // Figure out the memory ordering.
2489 decorators |= mo_decorator_for_access_kind(kind);
2490
2491 if (!is_store && type == T_OBJECT) {
2492 const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2493 if (tjp != NULL) {
2494 value_type = tjp;
2495 }
2496 }
2497
2498 receiver = null_check(receiver);
2499 if (stopped()) {
2500 return true;
2501 }
2502 // Heap pointers get a null-check from the interpreter,
2503 // as a courtesy. However, this is not guaranteed by Unsafe,
2504 // and it is not possible to fully distinguish unintended nulls
2505 // from intended ones in this API.
2506
2507 if (!is_store) {
2508 Node* p = NULL;
2509 // Try to constant fold a load from a constant field
2510 ciField* field = alias_type->field();
2511 if (heap_base_oop != top() && field != NULL && field->is_constant() && !mismatched) {
2512 // final or stable field
2513 p = make_constant_from_field(field, heap_base_oop);
2514 }
2515
2516 if (p == NULL) { // Could not constant fold the load
2517 p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2518 // Normalize the value returned by getBoolean in the following cases
2519 if (type == T_BOOLEAN &&
2520 (mismatched ||
2521 heap_base_oop == top() || // - heap_base_oop is NULL or
2522 (can_access_non_heap && field == NULL)) // - heap_base_oop is potentially NULL
2523 // and the unsafe access is made to large offset
2524 // (i.e., larger than the maximum offset necessary for any
2525 // field access)
2526 ) {
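// A sketch of what the IdealKit fragment below builds: control flow
// equivalent to (p != 0 ? 1 : 0), so that any non-zero byte pattern
// read through Unsafe is normalized to a canonical Java boolean.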
2527 IdealKit ideal = IdealKit(this);
2528 #define __ ideal.
2529 IdealVariable normalized_result(ideal);
2530 __ declarations_done();
2531 __ set(normalized_result, p);
2532 __ if_then(p, BoolTest::ne, ideal.ConI(0));
2533 __ set(normalized_result, ideal.ConI(1));
2534 ideal.end_if();
2535 final_sync(ideal);
2536 p = __ value(normalized_result);
2537 #undef __
2538 }
2539 }
2540 if (type == T_ADDRESS) {
2541 p = gvn().transform(new CastP2XNode(NULL, p));
2542 p = ConvX2UL(p);
2543 }
2544 // The load node has the control of the preceding MemBarCPUOrder. All
2545 // following nodes will have the control of the MemBarCPUOrder inserted at
2546 // the end of this method. So, pushing the load onto the stack at a later
2547 // point is fine.
2548 set_result(p);
2549 } else {
2550 if (bt == T_ADDRESS) {
2551 // Repackage the long as a pointer.
2552 val = ConvL2X(val);
2553 val = gvn().transform(new CastX2PNode(val));
2554 }
2555 access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2556 }
2557
2558 return true;
2559 }
2560
2561 //----------------------------inline_unsafe_load_store----------------------------
2562 // This method serves a couple of different customers (depending on LoadStoreKind):
2563 //
2564 // LS_cmp_swap:
2565 //
2566 // boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2567 // boolean compareAndSetInt( Object o, long offset, int expected, int x);
2568 // boolean compareAndSetLong( Object o, long offset, long expected, long x);
2569 //
2570 // LS_cmp_swap_weak:
2571 //
2572 // boolean weakCompareAndSetReference( Object o, long offset, Object expected, Object x);
2573 // boolean weakCompareAndSetReferencePlain( Object o, long offset, Object expected, Object x);
2574 // boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2575 // boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2576 //
2577 // boolean weakCompareAndSetInt( Object o, long offset, int expected, int x);
2578 // boolean weakCompareAndSetIntPlain( Object o, long offset, int expected, int x);
2579 // boolean weakCompareAndSetIntAcquire( Object o, long offset, int expected, int x);
2580 // boolean weakCompareAndSetIntRelease( Object o, long offset, int expected, int x);
2581 //
2582 // boolean weakCompareAndSetLong( Object o, long offset, long expected, long x);
2583 // boolean weakCompareAndSetLongPlain( Object o, long offset, long expected, long x);
2584 // boolean weakCompareAndSetLongAcquire( Object o, long offset, long expected, long x);
2585 // boolean weakCompareAndSetLongRelease( Object o, long offset, long expected, long x);
2586 //
2587 // LS_cmp_exchange:
2588 //
2589 // Object compareAndExchangeReferenceVolatile(Object o, long offset, Object expected, Object x);
2590 // Object compareAndExchangeReferenceAcquire( Object o, long offset, Object expected, Object x);
2591 // Object compareAndExchangeReferenceRelease( Object o, long offset, Object expected, Object x);
2592 //
2593 // Object compareAndExchangeIntVolatile( Object o, long offset, Object expected, Object x);
2594 // Object compareAndExchangeIntAcquire( Object o, long offset, Object expected, Object x);
2595 // Object compareAndExchangeIntRelease( Object o, long offset, Object expected, Object x);
2596 //
2597 // Object compareAndExchangeLongVolatile( Object o, long offset, Object expected, Object x);
2598 // Object compareAndExchangeLongAcquire( Object o, long offset, Object expected, Object x);
2599 // Object compareAndExchangeLongRelease( Object o, long offset, Object expected, Object x);
2600 //
2601 // LS_get_add:
2602 //
2603 // int getAndAddInt( Object o, long offset, int delta)
2604 // long getAndAddLong(Object o, long offset, long delta)
2605 //
2606 // LS_get_set:
2607 //
2608 // int getAndSet(Object o, long offset, int newValue)
2609 // long getAndSet(Object o, long offset, long newValue)
2610 // Object getAndSet(Object o, long offset, Object newValue)
2611 //
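// Illustrative (Java) call shapes served here, one per LoadStoreKind:
//   U.compareAndSetInt(o, off, expected, x);  // LS_cmp_swap
//   U.compareAndExchangeInt(o, off, expected, x); // LS_cmp_exchange
//   U.getAndAddLong(o, off, delta);           // LS_get_add
//   U.getAndSet(o, off, newValue);            // LS_get_set
//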
2612 bool LibraryCallKit::inline_unsafe_load_store(const BasicType type, const LoadStoreKind kind, const AccessKind access_kind) {
2613 // This basic scheme here is the same as inline_unsafe_access, but
2614 // differs in enough details that combining them would make the code
2615 // overly confusing. (This is a true fact! I originally combined
2616 // them, but even I was confused by it!) As much code/comments as
2617 // possible are retained from inline_unsafe_access though to make
2618 // the correspondences clearer. - dl
2619
2620 if (callee()->is_static()) return false; // caller must have the capability!
2621
2622 DecoratorSet decorators = C2_UNSAFE_ACCESS;
2623 decorators |= mo_decorator_for_access_kind(access_kind);
2624
2625 #ifndef PRODUCT
2626 BasicType rtype;
2627 {
2628 ResourceMark rm;
2629 // Check the signatures.
2630 ciSignature* sig = callee()->signature();
2631 rtype = sig->return_type()->basic_type();
2632 switch(kind) {
2633 case LS_get_add:
2634 case LS_get_set: {
2635 // Check the signatures.
2636 #ifdef ASSERT
2637 assert(rtype == type, "get and set must return the expected type");
2638 assert(sig->count() == 3, "get and set has 3 arguments");
2639 assert(sig->type_at(0)->basic_type() == T_OBJECT, "get and set base is object");
2640 assert(sig->type_at(1)->basic_type() == T_LONG, "get and set offset is long");
2641 assert(sig->type_at(2)->basic_type() == type, "get and set must take expected type as new value/delta");
2642 assert(access_kind == Volatile, "mo is not passed to intrinsic nodes in current implementation");
2643 #endif // ASSERT
2644 break;
2645 }
2646 case LS_cmp_swap:
2647 case LS_cmp_swap_weak: {
2648 // Check the signatures.
2649 #ifdef ASSERT
2650 assert(rtype == T_BOOLEAN, "CAS must return boolean");
2651 assert(sig->count() == 4, "CAS has 4 arguments");
2652 assert(sig->type_at(0)->basic_type() == T_OBJECT, "CAS base is object");
2653 assert(sig->type_at(1)->basic_type() == T_LONG, "CAS offset is long");
2654 #endif // ASSERT
2655 break;
2656 }
2657 case LS_cmp_exchange: {
2658 // Check the signatures.
2659 #ifdef ASSERT
2660 assert(rtype == type, "CAS must return the expected type");
2661 assert(sig->count() == 4, "CAS has 4 arguments");
2662 assert(sig->type_at(0)->basic_type() == T_OBJECT, "CAS base is object");
2663 assert(sig->type_at(1)->basic_type() == T_LONG, "CAS offset is long");
2664 #endif // ASSERT
2665 break;
2666 }
2667 default:
2668 ShouldNotReachHere();
2669 }
2670 }
2671 #endif //PRODUCT
2672
2673 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
2674
2675 // Get arguments:
2676 Node* receiver = NULL;
2677 Node* base = NULL;
2678 Node* offset = NULL;
2679 Node* oldval = NULL;
2680 Node* newval = NULL;
2681 switch(kind) {
2682 case LS_cmp_swap:
2683 case LS_cmp_swap_weak:
2684 case LS_cmp_exchange: {
2685 const bool two_slot_type = type2size[type] == 2;
2686 receiver = argument(0); // type: oop
2687 base = argument(1); // type: oop
2688 offset = argument(2); // type: long
2689 oldval = argument(4); // type: oop, int, or long
2690 newval = argument(two_slot_type ? 6 : 5); // type: oop, int, or long
2691 break;
2692 }
2693 case LS_get_add:
2694 case LS_get_set: {
2695 receiver = argument(0); // type: oop
2696 base = argument(1); // type: oop
2697 offset = argument(2); // type: long
2698 oldval = NULL;
2699 newval = argument(4); // type: oop, int, or long
2700 break;
2701 }
2702 default:
2703 ShouldNotReachHere();
2704 }
2705
2706 // Build field offset expression.
2707 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2708 // to be plain byte offsets, which are also the same as those accepted
2709 // by oopDesc::field_addr.
2710 assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
2711 // 32-bit machines ignore the high half of long offsets
2712 offset = ConvL2X(offset);
2713 Node* adr = make_unsafe_address(base, offset, ACCESS_WRITE | ACCESS_READ, type, false);
2714 const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2715
2716 Compile::AliasType* alias_type = C->alias_type(adr_type);
2717 BasicType bt = alias_type->basic_type();
2718 if (bt != T_ILLEGAL &&
2719 (is_reference_type(bt) != (type == T_OBJECT))) {
2720 // Don't intrinsify mismatched object accesses.
2721 return false;
2722 }
2723
2724 // For CAS, unlike inline_unsafe_access, there seems no point in
2725 // trying to refine types. Just use the coarse types here.
2726 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2727 const Type *value_type = Type::get_const_basic_type(type);
2728
2729 switch (kind) {
2730 case LS_get_set:
2731 case LS_cmp_exchange: {
2732 if (type == T_OBJECT) {
2733 const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2734 if (tjp != NULL) {
2735 value_type = tjp;
2736 }
2737 }
2738 break;
2739 }
2740 case LS_cmp_swap:
2741 case LS_cmp_swap_weak:
2742 case LS_get_add:
2743 break;
2744 default:
2745 ShouldNotReachHere();
2746 }
2747
2748 // Null check receiver.
2749 receiver = null_check(receiver);
2750 if (stopped()) {
2751 return true;
2752 }
2753
2754 int alias_idx = C->get_alias_index(adr_type);
2755
2756 if (is_reference_type(type)) {
2757 decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
2758
2759 // Transformation of a value which could be a NULL pointer (CastPP #NULL)
2760 // could be delayed during Parse (for example, in adjust_map_after_if()).
2761 // Execute transformation here to avoid barrier generation in such case.
2762 if (_gvn.type(newval) == TypePtr::NULL_PTR)
2763 newval = _gvn.makecon(TypePtr::NULL_PTR);
2764
2765 if (oldval != NULL && _gvn.type(oldval) == TypePtr::NULL_PTR) {
2766 // Refine the value to a null constant, when it is known to be null
2767 oldval = _gvn.makecon(TypePtr::NULL_PTR);
2768 }
2769 }
2770
2771 Node* result = NULL;
2772 switch (kind) {
2773 case LS_cmp_exchange: {
2774 result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
2775 oldval, newval, value_type, type, decorators);
2776 break;
2777 }
2778 case LS_cmp_swap_weak:
2779 decorators |= C2_WEAK_CMPXCHG;
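// fall through: a weak CAS takes the strong-CAS path below; the
// C2_WEAK_CMPXCHG decorator permits spurious failure.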
2780 case LS_cmp_swap: {
2781 result = access_atomic_cmpxchg_bool_at(base, adr, adr_type, alias_idx,
2782 oldval, newval, value_type, type, decorators);
2783 break;
2784 }
2785 case LS_get_set: {
2786 result = access_atomic_xchg_at(base, adr, adr_type, alias_idx,
2787 newval, value_type, type, decorators);
2788 break;
2789 }
2790 case LS_get_add: {
2791 result = access_atomic_add_at(base, adr, adr_type, alias_idx,
2792 newval, value_type, type, decorators);
2793 break;
2794 }
2795 default:
2796 ShouldNotReachHere();
2797 }
2798
2799 assert(type2size[result->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
2800 set_result(result);
2801 return true;
2802 }
2803
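// A sketch (Java) of the ordering guarantee provided by, e.g., storeFence():
//   data = 42;            // store before the fence ...
//   UNSAFE.storeFence();  // ... is not reordered with ...
//   ready = true;         // ... stores after the fence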
2804 bool LibraryCallKit::inline_unsafe_fence(vmIntrinsics::ID id) {
2805 // Regardless of form, don't allow previous ld/st to move down,
2806 // then issue acquire, release, or volatile mem_bar.
2807 insert_mem_bar(Op_MemBarCPUOrder);
2808 switch(id) {
2809 case vmIntrinsics::_loadFence:
2810 insert_mem_bar(Op_LoadFence);
2811 return true;
2812 case vmIntrinsics::_storeFence:
2813 insert_mem_bar(Op_StoreFence);
2814 return true;
2815 case vmIntrinsics::_fullFence:
2816 insert_mem_bar(Op_MemBarVolatile);
2817 return true;
2818 default:
2819 fatal_unexpected_iid(id);
2820 return false;
2821 }
2822 }
2823
2824 bool LibraryCallKit::inline_onspinwait() {
2825 insert_mem_bar(Op_OnSpinWait);
2826 return true;
2827 }
2828
2829 bool LibraryCallKit::klass_needs_init_guard(Node* kls) {
2830 if (!kls->is_Con()) {
2831 return true;
2832 }
2833 const TypeKlassPtr* klsptr = kls->bottom_type()->isa_klassptr();
2834 if (klsptr == NULL) {
2835 return true;
2836 }
2837 ciInstanceKlass* ik = klsptr->klass()->as_instance_klass();
2838 // don't need a guard for a klass that is already initialized
2839 return !ik->is_initialized();
2840 }
2841
2842 //----------------------------inline_unsafe_writeback0-------------------------
2843 // public native void Unsafe.writeback0(long address)
2844 bool LibraryCallKit::inline_unsafe_writeback0() {
2845 if (!Matcher::has_match_rule(Op_CacheWB)) {
2846 return false;
2847 }
2848 #ifndef PRODUCT
2849 assert(Matcher::has_match_rule(Op_CacheWBPreSync), "found match rule for CacheWB but not CacheWBPreSync");
2850 assert(Matcher::has_match_rule(Op_CacheWBPostSync), "found match rule for CacheWB but not CacheWBPostSync");
2851 ciSignature* sig = callee()->signature();
2852 assert(sig->type_at(0)->basic_type() == T_LONG, "Unsafe_writeback0 address is long!");
2853 #endif
2854 null_check_receiver(); // null-check, then ignore
2855 Node *addr = argument(1);
2856 addr = new CastX2PNode(addr);
2857 addr = _gvn.transform(addr);
2858 Node *flush = new CacheWBNode(control(), memory(TypeRawPtr::BOTTOM), addr);
2859 flush = _gvn.transform(flush);
2860 set_memory(flush, TypeRawPtr::BOTTOM);
2861 return true;
2862 }
2863
2864 //------------------------inline_unsafe_writebackSync0-------------------------
2865 // public native void Unsafe.writebackPreSync0() / Unsafe.writebackPostSync0()
2866 bool LibraryCallKit::inline_unsafe_writebackSync0(bool is_pre) {
2867 if (is_pre && !Matcher::has_match_rule(Op_CacheWBPreSync)) {
2868 return false;
2869 }
2870 if (!is_pre && !Matcher::has_match_rule(Op_CacheWBPostSync)) {
2871 return false;
2872 }
2873 #ifndef PRODUCT
2874 assert(Matcher::has_match_rule(Op_CacheWB),
2875 (is_pre ? "found match rule for CacheWBPreSync but not CacheWB"
2876 : "found match rule for CacheWBPostSync but not CacheWB"));
2877
2878 #endif
2879 null_check_receiver(); // null-check, then ignore
2880 Node *sync;
2881 if (is_pre) {
2882 sync = new CacheWBPreSyncNode(control(), memory(TypeRawPtr::BOTTOM));
2883 } else {
2884 sync = new CacheWBPostSyncNode(control(), memory(TypeRawPtr::BOTTOM));
2885 }
2886 sync = _gvn.transform(sync);
2887 set_memory(sync, TypeRawPtr::BOTTOM);
2888 return true;
2889 }
2890
2891 //----------------------------inline_unsafe_allocate---------------------------
2892 // public native Object Unsafe.allocateInstance(Class<?> cls);
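// Illustrative (Java): UNSAFE.allocateInstance(Foo.class) creates an instance
// without running any constructor; the guard below covers the case where
// Foo's class initializer has not yet run.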
2893 bool LibraryCallKit::inline_unsafe_allocate() {
2894 if (callee()->is_static()) return false; // caller must have the capability!
2895
2896 null_check_receiver(); // null-check, then ignore
2897 Node* cls = null_check(argument(1));
2898 if (stopped()) return true;
2899
2900 Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
2901 kls = null_check(kls);
2902 if (stopped()) return true; // argument was like int.class
2903
2904 Node* test = NULL;
2905 if (LibraryCallKit::klass_needs_init_guard(kls)) {
2906 // Note: The argument might still be an illegal value like
2907 // Serializable.class or Object[].class. The runtime will handle it.
2908 // But we must make an explicit check for initialization.
2909 Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
2910 // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
2911 // can generate code to load it as unsigned byte.
2912 Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
2913 Node* bits = intcon(InstanceKlass::fully_initialized);
2914 test = _gvn.transform(new SubINode(inst, bits));
2915 // The 'test' is non-zero if we need to take a slow path.
2916 }
2917
2918 Node* obj = new_instance(kls, test);
2919 set_result(obj);
2920 return true;
2921 }
2922
2923 //------------------------inline_native_time_funcs--------------
2924 // inline code for System.currentTimeMillis() and System.nanoTime()
2925 // these have the same type and signature
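// The call is emitted as a leaf runtime call (RC_LEAF) with no memory
// effects, so it neither safepoints nor kills memory state; funcAddr is
// e.g. os::javaTimeNanos for System.nanoTime().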
2926 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
2927 const TypeFunc* tf = OptoRuntime::void_long_Type();
2928 const TypePtr* no_memory_effects = NULL;
2929 Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
2930 Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
2931 #ifdef ASSERT
2932 Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
2933 assert(value_top == top(), "second value must be top");
2934 #endif
2935 set_result(value);
2936 return true;
2937 }
2938
2939 #ifdef JFR_HAVE_INTRINSICS
2940
2941 /*
2942 * oop -> myklass
2943 * myklass->trace_id |= USED
2944 * return myklass->trace_id & ~0x3
2945 */
2946 bool LibraryCallKit::inline_native_classID() {
2947 Node* cls = null_check(argument(0), T_OBJECT);
2948 Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
2949 kls = null_check(kls, T_OBJECT);
2950
2951 ByteSize offset = KLASS_TRACE_ID_OFFSET;
2952 Node* insp = basic_plus_adr(kls, in_bytes(offset));
2953 Node* tvalue = make_load(NULL, insp, TypeLong::LONG, T_LONG, MemNode::unordered);
2954
2955 Node* clsused = longcon(0x01L); // set the class bit
2956 Node* orl = _gvn.transform(new OrLNode(tvalue, clsused));
2957 const TypePtr *adr_type = _gvn.type(insp)->isa_ptr();
2958 store_to_memory(control(), insp, orl, T_LONG, adr_type, MemNode::unordered);
2959
2960 #ifdef TRACE_ID_META_BITS
2961 Node* mbits = longcon(~TRACE_ID_META_BITS);
2962 tvalue = _gvn.transform(new AndLNode(tvalue, mbits));
2963 #endif
2964 #ifdef TRACE_ID_SHIFT
2965 Node* cbits = intcon(TRACE_ID_SHIFT);
2966 tvalue = _gvn.transform(new URShiftLNode(tvalue, cbits));
2967 #endif
2968
2969 set_result(tvalue);
2970 return true;
2971
2972 }
2973
2974 bool LibraryCallKit::inline_native_getEventWriter() {
2975 Node* tls_ptr = _gvn.transform(new ThreadLocalNode());
2976
2977 Node* jobj_ptr = basic_plus_adr(top(), tls_ptr,
2978 in_bytes(THREAD_LOCAL_WRITER_OFFSET_JFR));
2979
2980 Node* jobj = make_load(control(), jobj_ptr, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered);
2981
2982 Node* jobj_cmp_null = _gvn.transform( new CmpPNode(jobj, null()) );
2983 Node* test_jobj_eq_null = _gvn.transform( new BoolNode(jobj_cmp_null, BoolTest::eq) );
2984
2985 IfNode* iff_jobj_null =
2986 create_and_map_if(control(), test_jobj_eq_null, PROB_MIN, COUNT_UNKNOWN);
2987
2988 enum { _normal_path = 1,
2989 _null_path = 2,
2990 PATH_LIMIT };
2991
2992 RegionNode* result_rgn = new RegionNode(PATH_LIMIT);
2993 PhiNode* result_val = new PhiNode(result_rgn, TypeInstPtr::BOTTOM);
2994
2995 Node* jobj_is_null = _gvn.transform(new IfTrueNode(iff_jobj_null));
2996 result_rgn->init_req(_null_path, jobj_is_null);
2997 result_val->init_req(_null_path, null());
2998
2999 Node* jobj_is_not_null = _gvn.transform(new IfFalseNode(iff_jobj_null));
3000 set_control(jobj_is_not_null);
3001 Node* res = access_load(jobj, TypeInstPtr::NOTNULL, T_OBJECT,
3002 IN_NATIVE | C2_CONTROL_DEPENDENT_LOAD);
3003 result_rgn->init_req(_normal_path, control());
3004 result_val->init_req(_normal_path, res);
3005
3006 set_result(result_rgn, result_val);
3007
3008 return true;
3009 }
3010
3011 #endif // JFR_HAVE_INTRINSICS
3012
3013 //------------------------inline_native_currentThread------------------
3014 bool LibraryCallKit::inline_native_currentThread() {
3015 Node* junk = NULL;
3016 set_result(generate_current_thread(junk));
3017 return true;
3018 }
3019
3020 //---------------------------load_mirror_from_klass----------------------------
3021 // Given a klass oop, load its java mirror (a java.lang.Class oop).
3022 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
3023 Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
3024 Node* load = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
3025 // mirror = ((OopHandle)mirror)->resolve();
3026 return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
3027 }
3028
3029 //-----------------------load_klass_from_mirror_common-------------------------
3030 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3031 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3032 // and branch to the given path on the region.
3033 // If never_see_null, take an uncommon trap on null, so we can optimistically
3034 // compile for the non-null case.
3035 // If the region is NULL, force never_see_null = true.
3036 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
3037 bool never_see_null,
3038 RegionNode* region,
3039 int null_path,
3040 int offset) {
3041 if (region == NULL) never_see_null = true;
3042 Node* p = basic_plus_adr(mirror, offset);
3043 const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL;
3044 Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
3045 Node* null_ctl = top();
3046 kls = null_check_oop(kls, &null_ctl, never_see_null);
3047 if (region != NULL) {
3048 // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).
3049 region->init_req(null_path, null_ctl);
3050 } else {
3051 assert(null_ctl == top(), "no loose ends");
3052 }
3053 return kls;
3054 }
3055
3056 //--------------------(inline_native_Class_query helpers)---------------------
3057 // Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE_FAST, JVM_ACC_HAS_FINALIZER.
3058 // Fall through if (mods & mask) == bits, take the guard otherwise.
3059 Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
3060 // Branch around if the given klass has the given modifier bit set.
3061 // Like generate_guard, adds a new path onto the region.
3062 Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3063 Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT, MemNode::unordered);
3064 Node* mask = intcon(modifier_mask);
3065 Node* bits = intcon(modifier_bits);
3066 Node* mbit = _gvn.transform(new AndINode(mods, mask));
3067 Node* cmp = _gvn.transform(new CmpINode(mbit, bits));
3068 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3069 return generate_fair_guard(bol, region);
3070 }
3071 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3072 return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
3073 }
3074
3075 //-------------------------inline_native_Class_query-------------------
3076 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3077 const Type* return_type = TypeInt::BOOL;
3078 Node* prim_return_value = top(); // what happens if it's a primitive class?
3079 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3080 bool expect_prim = false; // most of these guys expect to work on refs
3081
3082 enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
3083
3084 Node* mirror = argument(0);
3085 Node* obj = top();
3086
3087 switch (id) {
3088 case vmIntrinsics::_isInstance:
3089 // nothing is an instance of a primitive type
3090 prim_return_value = intcon(0);
3091 obj = argument(1);
3092 break;
3093 case vmIntrinsics::_getModifiers:
3094 prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
3095 assert(is_power_of_2((int)JVM_ACC_WRITTEN_FLAGS+1), "change next line");
3096 return_type = TypeInt::make(0, JVM_ACC_WRITTEN_FLAGS, Type::WidenMin);
3097 break;
3098 case vmIntrinsics::_isInterface:
3099 prim_return_value = intcon(0);
3100 break;
3101 case vmIntrinsics::_isArray:
3102 prim_return_value = intcon(0);
3103 expect_prim = true; // cf. ObjectStreamClass.getClassSignature
3104 break;
3105 case vmIntrinsics::_isPrimitive:
3106 prim_return_value = intcon(1);
3107 expect_prim = true; // obviously
3108 break;
3109 case vmIntrinsics::_getSuperclass:
3110 prim_return_value = null();
3111 return_type = TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR);
3112 break;
3113 case vmIntrinsics::_getClassAccessFlags:
3114 prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
3115 return_type = TypeInt::INT; // not bool! 6297094
3116 break;
3117 default:
3118 fatal_unexpected_iid(id);
3119 break;
3120 }
3121
3122 const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
3123 if (mirror_con == NULL) return false; // cannot happen?
3124
3125 #ifndef PRODUCT
3126 if (C->print_intrinsics() || C->print_inlining()) {
3127 ciType* k = mirror_con->java_mirror_type();
3128 if (k) {
3129 tty->print("Inlining %s on constant Class ", vmIntrinsics::name_at(intrinsic_id()));
3130 k->print_name();
3131 tty->cr();
3132 }
3133 }
3134 #endif
3135
3136 // Null-check the mirror, and the mirror's klass ptr (in case it is a primitive).
3137 RegionNode* region = new RegionNode(PATH_LIMIT);
3138 record_for_igvn(region);
3139 PhiNode* phi = new PhiNode(region, return_type);
3140
3141 // The mirror will never be null for Reflection.getClassAccessFlags, however
3142 // it may be null for Class.isInstance or Class.getModifiers. Throw an NPE
3143 // if it is. See bug 4774291.
3144
3145 // For Reflection.getClassAccessFlags(), the null check occurs in
3146 // the wrong place; see inline_unsafe_access(), above, for a similar
3147 // situation.
3148 mirror = null_check(mirror);
3149 // If mirror or obj is dead, only null-path is taken.
3150 if (stopped()) return true;
3151
3152 if (expect_prim) never_see_null = false; // expect nulls (meaning prims)
3153
3154 // Now load the mirror's klass metaobject, and null-check it.
3155 // Side-effects region with the control path if the klass is null.
3156 Node* kls = load_klass_from_mirror(mirror, never_see_null, region, _prim_path);
3157 // If kls is null, we have a primitive mirror.
3158 phi->init_req(_prim_path, prim_return_value);
3159 if (stopped()) { set_result(region, phi); return true; }
3160 bool safe_for_replace = (region->in(_prim_path) == top());
3161
3162 Node* p; // handy temp
3163 Node* null_ctl;
3164
3165 // Now that we have the non-null klass, we can perform the real query.
3166 // For constant classes, the query will constant-fold in LoadNode::Value.
3167 Node* query_value = top();
3168 switch (id) {
3169 case vmIntrinsics::_isInstance:
3170 // nothing is an instance of a primitive type
3171 query_value = gen_instanceof(obj, kls, safe_for_replace);
3172 break;
3173
3174 case vmIntrinsics::_getModifiers:
3175 p = basic_plus_adr(kls, in_bytes(Klass::modifier_flags_offset()));
3176 query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered);
3177 break;
3178
3179 case vmIntrinsics::_isInterface:
3180 // (To verify this code sequence, check the asserts in JVM_IsInterface.)
3181 if (generate_interface_guard(kls, region) != NULL)
3182 // A guard was added. If the guard is taken, it was an interface.
3183 phi->add_req(intcon(1));
3184 // If we fall through, it's a plain class.
3185 query_value = intcon(0);
3186 break;
3187
3188 case vmIntrinsics::_isArray:
3189 // (To verify this code sequence, check the asserts in JVM_IsArrayClass.)
3190 if (generate_array_guard(kls, region) != NULL)
3191 // A guard was added. If the guard is taken, it was an array.
3192 phi->add_req(intcon(1));
3193 // If we fall through, it's a plain class.
3194 query_value = intcon(0);
3195 break;
3196
3197 case vmIntrinsics::_isPrimitive:
3198 query_value = intcon(0); // "normal" path produces false
3199 break;
3200
3201 case vmIntrinsics::_getSuperclass:
3202 // The rules here are somewhat unfortunate, but we can still do better
3203 // with random logic than with a JNI call.
3204 // Interfaces store null or Object as _super, but must report null.
3205 // Arrays store an intermediate super as _super, but must report Object.
3206 // Other types can report the actual _super.
3207 // (To verify this code sequence, check the asserts in JVM_IsInterface.)
3208 if (generate_interface_guard(kls, region) != NULL)
3209 // A guard was added. If the guard is taken, it was an interface.
3210 phi->add_req(null());
3211 if (generate_array_guard(kls, region) != NULL)
3212 // A guard was added. If the guard is taken, it was an array.
3213 phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror())));
3214 // If we fall through, it's a plain class. Get its _super.
3215 p = basic_plus_adr(kls, in_bytes(Klass::super_offset()));
3216 kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL));
3217 null_ctl = top();
3218 kls = null_check_oop(kls, &null_ctl);
3219 if (null_ctl != top()) {
3220 // If the guard is taken, Object.superClass is null (both klass and mirror).
3221 region->add_req(null_ctl);
3222 phi ->add_req(null());
3223 }
3224 if (!stopped()) {
3225 query_value = load_mirror_from_klass(kls);
3226 }
3227 break;
3228
3229 case vmIntrinsics::_getClassAccessFlags:
3230 p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3231 query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered);
3232 break;
3233
3234 default:
3235 fatal_unexpected_iid(id);
3236 break;
3237 }
3238
3239 // Fall-through is the normal case of a query to a real class.
3240 phi->init_req(1, query_value);
3241 region->init_req(1, control());
3242
3243 C->set_has_split_ifs(true); // Has chance for split-if optimization
3244 set_result(region, phi);
3245 return true;
3246 }
3247
3248 //-------------------------inline_Class_cast-------------------
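// Illustrative folding when the mirror is a compile-time constant, e.g.
// String.class.cast(x): if x is statically known to be a String, the cast
// folds to x; if it can never be a String, we bail out and let the bytecode
// throw the ClassCastException.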
3249 bool LibraryCallKit::inline_Class_cast() {
3250 Node* mirror = argument(0); // Class
3251 Node* obj = argument(1);
3252 const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
3253 if (mirror_con == NULL) {
3254 return false; // dead path (mirror->is_top()).
3255 }
3256 if (obj == NULL || obj->is_top()) {
3257 return false; // dead path
3258 }
3259 const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
3260
3261 // First, see if Class.cast() can be folded statically.
3262 // java_mirror_type() returns non-null for compile-time Class constants.
3263 ciType* tm = mirror_con->java_mirror_type();
3264 if (tm != NULL && tm->is_klass() &&
3265 tp != NULL && tp->klass() != NULL) {
3266 if (!tp->klass()->is_loaded()) {
3267 // Don't use intrinsic when class is not loaded.
3268 return false;
3269 } else {
3270 int static_res = C->static_subtype_check(tm->as_klass(), tp->klass());
3271 if (static_res == Compile::SSC_always_true) {
3272 // isInstance() is true - fold the code.
3273 set_result(obj);
3274 return true;
3275 } else if (static_res == Compile::SSC_always_false) {
3276 // Don't use intrinsic, have to throw ClassCastException.
3277 // If the reference is null, the non-intrinsic bytecode will
3278 // be optimized appropriately.
3279 return false;
3280 }
3281 }
3282 }
3283
3284 // Bailout intrinsic and do normal inlining if exception path is frequent.
3285 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
3286 return false;
3287 }
3288
3289 // Generate dynamic checks.
3290 // Class.cast() is the Java implementation of the _checkcast bytecode.
3291 // Do checkcast (Parse::do_checkcast()) optimizations here.
3292
3293 mirror = null_check(mirror);
3294 // If mirror is dead, only null-path is taken.
3295 if (stopped()) {
3296 return true;
3297 }
3298
3299 // Not-subtype or the mirror's klass ptr is NULL (in case it is a primitive).
3300 enum { _bad_type_path = 1, _prim_path = 2, PATH_LIMIT };
3301 RegionNode* region = new RegionNode(PATH_LIMIT);
3302 record_for_igvn(region);
3303
3304 // Now load the mirror's klass metaobject, and null-check it.
3305 // If kls is null, we have a primitive mirror and
3306 // nothing is an instance of a primitive type.
3307 Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
3308
3309 Node* res = top();
3310 if (!stopped()) {
3311 Node* bad_type_ctrl = top();
3312 // Do checkcast optimizations.
3313 res = gen_checkcast(obj, kls, &bad_type_ctrl);
3314 region->init_req(_bad_type_path, bad_type_ctrl);
3315 }
3316 if (region->in(_prim_path) != top() ||
3317 region->in(_bad_type_path) != top()) {
3318 // Let Interpreter throw ClassCastException.
3319 PreserveJVMState pjvms(this);
3320 set_control(_gvn.transform(region));
3321 uncommon_trap(Deoptimization::Reason_intrinsic,
3322 Deoptimization::Action_maybe_recompile);
3323 }
3324 if (!stopped()) {
3325 set_result(res);
3326 }
3327 return true;
3328 }
3329
3330
3331 //--------------------------inline_native_subtype_check------------------------
3332 // This intrinsic takes the JNI calls out of the heart of
3333 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
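// Serves calls of the shape superc.isAssignableFrom(subc); the decision tree
// below special-cases primitive mirrors, whose klass pointer is null.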
3334 bool LibraryCallKit::inline_native_subtype_check() {
3335 // Pull both arguments off the stack.
3336 Node* args[2]; // two java.lang.Class mirrors: superc, subc
3337 args[0] = argument(0);
3338 args[1] = argument(1);
3339 Node* klasses[2]; // corresponding Klasses: superk, subk
3340 klasses[0] = klasses[1] = top();
3341
3342 enum {
3343 // A full decision tree on {superc is prim, subc is prim}:
3344 _prim_0_path = 1, // {P,N} => false
3345 // {P,P} & superc!=subc => false
3346 _prim_same_path, // {P,P} & superc==subc => true
3347 _prim_1_path, // {N,P} => false
3348 _ref_subtype_path, // {N,N} & subtype check wins => true
3349 _both_ref_path, // {N,N} & subtype check loses => false
3350 PATH_LIMIT
3351 };
3352
3353 RegionNode* region = new RegionNode(PATH_LIMIT);
3354 Node* phi = new PhiNode(region, TypeInt::BOOL);
3355 record_for_igvn(region);
3356
3357 const TypePtr* adr_type = TypeRawPtr::BOTTOM; // memory type of loads
3358 const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL;
3359 int class_klass_offset = java_lang_Class::klass_offset_in_bytes();
3360
3361 // First null-check both mirrors and load each mirror's klass metaobject.
3362 int which_arg;
3363 for (which_arg = 0; which_arg <= 1; which_arg++) {
3364 Node* arg = args[which_arg];
3365 arg = null_check(arg);
3366 if (stopped()) break;
3367 args[which_arg] = arg;
3368
3369 Node* p = basic_plus_adr(arg, class_klass_offset);
3370 Node* kls = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, adr_type, kls_type);
3371 klasses[which_arg] = _gvn.transform(kls);
3372 }
3373
3374 // Having loaded both klasses, test each for null.
3375 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3376 for (which_arg = 0; which_arg <= 1; which_arg++) {
3377 Node* kls = klasses[which_arg];
3378 Node* null_ctl = top();
3379 kls = null_check_oop(kls, &null_ctl, never_see_null);
3380 int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
3381 region->init_req(prim_path, null_ctl);
3382 if (stopped()) break;
3383 klasses[which_arg] = kls;
3384 }
3385
3386 if (!stopped()) {
3387 // now we have two reference types, in klasses[0..1]
3388 Node* subk = klasses[1]; // the argument to isAssignableFrom
3389 Node* superk = klasses[0]; // the receiver
3390 region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
3391 // now we have a successful reference subtype check
3392 region->set_req(_ref_subtype_path, control());
3393 }
3394
3395 // If both operands are primitive (both klasses null), then
3396 // we must return true when they are identical primitives.
3397 // It is convenient to test this after the first null klass check.
3398 set_control(region->in(_prim_0_path)); // go back to first null check
3399 if (!stopped()) {
3400 // Since superc is primitive, make a guard for the superc==subc case.
3401 Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
3402 Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
3403 generate_guard(bol_eq, region, PROB_FAIR);
3404 if (region->req() == PATH_LIMIT+1) {
3405 // A guard was added. If the added guard is taken, superc==subc.
3406 region->swap_edges(PATH_LIMIT, _prim_same_path);
3407 region->del_req(PATH_LIMIT);
3408 }
3409 region->set_req(_prim_0_path, control()); // Not equal after all.
3410 }
3411
3412 // these are the only paths that produce 'true':
3413 phi->set_req(_prim_same_path, intcon(1));
3414 phi->set_req(_ref_subtype_path, intcon(1));
3415
3416 // pull together the cases:
3417 assert(region->req() == PATH_LIMIT, "sane region");
3418 for (uint i = 1; i < region->req(); i++) {
3419 Node* ctl = region->in(i);
3420 if (ctl == NULL || ctl == top()) {
3421 region->set_req(i, top());
3422 phi ->set_req(i, top());
3423 } else if (phi->in(i) == NULL) {
3424 phi->set_req(i, intcon(0)); // all other paths produce 'false'
3425 }
3426 }
3427
3428 set_control(_gvn.transform(region));
3429 set_result(_gvn.transform(phi));
3430 return true;
3431 }
3432
3433 //---------------------generate_array_guard_common------------------------
3434 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
3435 bool obj_array, bool not_array) {
3436
3437 if (stopped()) {
3438 return NULL;
3439 }
3440
3441 // If obj_array/not_array==false/false:
3442 // Branch around if the given klass is in fact an array (either obj or prim).
3443 // If obj_array/not_array==false/true:
3444 // Branch around if the given klass is not an array klass of any kind.
3445 // If obj_array/not_array==true/true:
3446 // Branch around if the kls is not an oop array (kls is int[], String, etc.)
3447 // If obj_array/not_array==true/false:
3448 // Branch around if the kls is an oop array (Object[] or subtype)
3449 //
3450 // Like generate_guard, adds a new path onto the region.
3451 jint layout_con = 0;
3452 Node* layout_val = get_layout_helper(kls, layout_con);
3453 if (layout_val == NULL) {
3454 bool query = (obj_array
3455 ? Klass::layout_helper_is_objArray(layout_con)
3456 : Klass::layout_helper_is_array(layout_con));
3457 if (query == not_array) {
3458 return NULL; // never a branch
3459 } else { // always a branch
3460 Node* always_branch = control();
3461 if (region != NULL)
3462 region->add_req(always_branch);
3463 set_control(top());
3464 return always_branch;
3465 }
3466 }
3467 // Now test the correct condition.
3468 jint nval = (obj_array
3469 ? (jint)(Klass::_lh_array_tag_type_value
3470 << Klass::_lh_array_tag_shift)
3471 : Klass::_lh_neutral_value);
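// Array layout helpers are negative (the array tag lives in the high bits),
// so a signed less-than against nval picks out the right klasses: anything
// below _lh_neutral_value is an array, and anything below the type-array tag
// value is an object array. See the _lh_* constants in klass.hpp.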
3472 Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
3473 BoolTest::mask btest = BoolTest::lt; // correct for testing is_[obj]array
3474 // invert the test if we are looking for a non-array
3475 if (not_array) btest = BoolTest(btest).negate();
3476 Node* bol = _gvn.transform(new BoolNode(cmp, btest));
3477 return generate_fair_guard(bol, region);
3478 }
3479
3480
3481 //-----------------------inline_native_newArray--------------------------
3482 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
3483 // private native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
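// Illustrative (Java): Array.newInstance(Foo.class, n) reaches the reflective
// entry point above, while allocateUninitializedArray0 additionally skips
// zeroing of the fresh array (see maybe_set_complete below).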
3484 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
3485 Node* mirror;
3486 Node* count_val;
3487 if (uninitialized) {
3488 mirror = argument(1);
3489 count_val = argument(2);
3490 } else {
3491 mirror = argument(0);
3492 count_val = argument(1);
3493 }
3494
3495 mirror = null_check(mirror);
3496 // If mirror or obj is dead, only null-path is taken.
3497 if (stopped()) return true;
3498
3499 enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
3500 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
3501 PhiNode* result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
3502 PhiNode* result_io = new PhiNode(result_reg, Type::ABIO);
3503 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
3504
3505 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3506 Node* klass_node = load_array_klass_from_mirror(mirror, never_see_null,
3507 result_reg, _slow_path);
3508 Node* normal_ctl = control();
3509 Node* no_array_ctl = result_reg->in(_slow_path);
3510
3511 // Generate code for the slow case. We make a call to newArray().
3512 set_control(no_array_ctl);
3513 if (!stopped()) {
3514 // Either the input type is void.class, or else the
3515 // array klass has not yet been cached. Either the
3516 // ensuing call will throw an exception, or else it
3517 // will cache the array klass for next time.
3518 PreserveJVMState pjvms(this);
3519 CallJavaNode* slow_call = generate_method_call_static(vmIntrinsics::_newArray);
3520 Node* slow_result = set_results_for_java_call(slow_call);
3521 // this->control() comes from set_results_for_java_call
3522 result_reg->set_req(_slow_path, control());
3523 result_val->set_req(_slow_path, slow_result);
3524 result_io ->set_req(_slow_path, i_o());
3525 result_mem->set_req(_slow_path, reset_memory());
3526 }
3527
3528 set_control(normal_ctl);
3529 if (!stopped()) {
3530 // Normal case: The array type has been cached in the java.lang.Class.
3531 // The following call works fine even if the array type is polymorphic.
3532 // It could be a dynamic mix of int[], boolean[], Object[], etc.
3533 Node* obj = new_array(klass_node, count_val, 0); // no arguments to push
3534 result_reg->init_req(_normal_path, control());
3535 result_val->init_req(_normal_path, obj);
3536 result_io ->init_req(_normal_path, i_o());
3537 result_mem->init_req(_normal_path, reset_memory());
3538
3539 if (uninitialized) {
3540 // Mark the allocation so that zeroing is skipped
3541 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(obj, &_gvn);
3542 alloc->maybe_set_complete(&_gvn);
3543 }
3544 }
3545
3546 // Return the combined state.
3547 set_i_o( _gvn.transform(result_io) );
3548 set_all_memory( _gvn.transform(result_mem));
3549
3550 C->set_has_split_ifs(true); // Has chance for split-if optimization
3551 set_result(result_reg, result_val);
3552 return true;
3553 }
3554
3555 //----------------------inline_native_getLength--------------------------
3556 // public static native int java.lang.reflect.Array.getLength(Object array);
3557 bool LibraryCallKit::inline_native_getLength() {
3558 if (too_many_traps(Deoptimization::Reason_intrinsic)) return false;
3559
3560 Node* array = null_check(argument(0));
3561 // If array is dead, only null-path is taken.
3562 if (stopped()) return true;
3563
3564 // Deoptimize if it is a non-array.
3565 Node* non_array = generate_non_array_guard(load_object_klass(array), NULL);
3566
3567 if (non_array != NULL) {
3568 PreserveJVMState pjvms(this);
3569 set_control(non_array);
3570 uncommon_trap(Deoptimization::Reason_intrinsic,
3571 Deoptimization::Action_maybe_recompile);
3572 }
3573
3574 // If control is dead, only non-array-path is taken.
3575 if (stopped()) return true;
3576
3577 // This works fine even if the array type is polymorphic.
3578 // It could be a dynamic mix of int[], boolean[], Object[], etc.
3579 Node* result = load_array_length(array);
3580
3581 C->set_has_split_ifs(true); // Has chance for split-if optimization
3582 set_result(result);
3583 return true;
3584 }
3585
3586 //------------------------inline_array_copyOf----------------------------
3587 // public static <T,U> T[] java.util.Arrays.copyOf( U[] original, int newLength, Class<? extends T[]> newType);
3588 // public static <T,U> T[] java.util.Arrays.copyOfRange(U[] original, int from, int to, Class<? extends T[]> newType);
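// Illustrative element counts: copyOf(orig, n, T[].class) copies
// min(orig.length, n) elements; copyOfRange(orig, from, to, T[].class) copies
// min(orig.length - from, to - from). Either may require a store check when
// T[] is not a supertype of the source array type.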
3589 bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
3590 if (too_many_traps(Deoptimization::Reason_intrinsic)) return false;
3591
3592 // Get the arguments.
3593 Node* original = argument(0);
3594 Node* start = is_copyOfRange? argument(1): intcon(0);
3595 Node* end = is_copyOfRange? argument(2): argument(1);
3596 Node* array_type_mirror = is_copyOfRange? argument(3): argument(2);
3597
3598 Node* newcopy = NULL;
3599
3600 // Set the original stack and the reexecute bit for the interpreter to reexecute
3601 // the bytecode that invokes Arrays.copyOf if deoptimization happens.
3602 { PreserveReexecuteState preexecs(this);
3603 jvms()->set_should_reexecute(true);
3604
3605 array_type_mirror = null_check(array_type_mirror);
3606 original = null_check(original);
3607
3608 // Check if a null path was taken unconditionally.
3609 if (stopped()) return true;
3610
3611 Node* orig_length = load_array_length(original);
3612
3613 Node* klass_node = load_klass_from_mirror(array_type_mirror, false, NULL, 0);
3614 klass_node = null_check(klass_node);
3615
3616 RegionNode* bailout = new RegionNode(1);
3617 record_for_igvn(bailout);
3618
3619 // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
3620 // Bail out if that is so.
3621 Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
3622 if (not_objArray != NULL) {
3623 // Improve the klass node's type from the new optimistic assumption:
3624 ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
3625 const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
3626 Node* cast = new CastPPNode(klass_node, akls);
3627 cast->init_req(0, control());
3628 klass_node = _gvn.transform(cast);
3629 }
3630
3631 // Bail out if either start or end is negative.
3632 generate_negative_guard(start, bailout, &start);
3633 generate_negative_guard(end, bailout, &end);
3634
3635 Node* length = end;
3636 if (_gvn.type(start) != TypeInt::ZERO) {
3637 length = _gvn.transform(new SubINode(end, start));
3638 }
3639
3640 // Bail out if length is negative.
3641 // Without this, new_array would throw a NegativeArraySizeException,
3642 // but an IllegalArgumentException is what should be thrown.
3644 generate_negative_guard(length, bailout, &length);
3645
3646 if (bailout->req() > 1) {
3647 PreserveJVMState pjvms(this);
3648 set_control(_gvn.transform(bailout));
3649 uncommon_trap(Deoptimization::Reason_intrinsic,
3650 Deoptimization::Action_maybe_recompile);
3651 }
3652
3653 if (!stopped()) {
3654 // How many elements will we copy from the original?
3655 // The answer is MinI(orig_length - start, length).
3656 Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
3657 Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
3658
3659 // Generate a direct call to the right arraycopy function(s).
3660 // We know the copy is disjoint but we might not know if the
3661 // oop stores need checking.
3662 // Extreme case: Arrays.copyOf((Integer[])x, 10, String[].class).
3663 // This will fail a store-check if x contains any non-nulls.
3664
3665 // ArrayCopyNode::Ideal may transform the ArrayCopyNode to
3666 // loads/stores but it is legal only if we're sure the
3667 // Arrays.copyOf would succeed. So we need all input arguments
3668 // to the copyOf to be validated, including that the copy to the
3669 // new array won't trigger an ArrayStoreException. That subtype
3670 // check can be optimized if we know something on the type of
3671 // the input array from type speculation.
3672 if (_gvn.type(klass_node)->singleton()) {
3673 ciKlass* subk = _gvn.type(load_object_klass(original))->is_klassptr()->klass();
3674 ciKlass* superk = _gvn.type(klass_node)->is_klassptr()->klass();
3675
3676 int test = C->static_subtype_check(superk, subk);
3677 if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
3678 const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
3679 if (t_original->speculative_type() != NULL) {
3680 original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
3681 }
3682 }
3683 }
3684
3685 bool validated = false;
3686 // Reason_class_check rather than Reason_intrinsic because we
3687 // want to intrinsify even if this traps.
3688 if (!too_many_traps(Deoptimization::Reason_class_check)) {
3689 Node* not_subtype_ctrl = gen_subtype_check(load_object_klass(original),
3690 klass_node);
3691
3692 if (not_subtype_ctrl != top()) {
3693 PreserveJVMState pjvms(this);
3694 set_control(not_subtype_ctrl);
3695 uncommon_trap(Deoptimization::Reason_class_check,
3696 Deoptimization::Action_make_not_entrant);
3697 assert(stopped(), "Should be stopped");
3698 }
3699 validated = true;
3700 }
3701
3702 if (!stopped()) {
3703 newcopy = new_array(klass_node, length, 0); // no arguments to push
3704
3705 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, false,
3706 load_object_klass(original), klass_node);
3707 if (!is_copyOfRange) {
3708 ac->set_copyof(validated);
3709 } else {
3710 ac->set_copyofrange(validated);
3711 }
3712 Node* n = _gvn.transform(ac);
3713 if (n == ac) {
3714 ac->connect_outputs(this);
3715 } else {
3716 assert(validated, "shouldn't transform if all arguments not validated");
3717 set_all_memory(n);
3718 }
3719 }
3720 }
3721 } // original reexecute is set back here
3722
3723 C->set_has_split_ifs(true); // Has chance for split-if optimization
3724 if (!stopped()) {
3725 set_result(newcopy);
3726 }
3727 return true;
3728 }
3729
3730
3731 //----------------------generate_virtual_guard---------------------------
3732 // Helper for hashCode and clone. Peeks inside the vtable to avoid a call.
3733 Node* LibraryCallKit::generate_virtual_guard(Node* obj_klass,
3734 RegionNode* slow_region) {
3735 ciMethod* method = callee();
3736 int vtable_index = method->vtable_index();
3737 assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
3738 "bad index %d", vtable_index);
3739 // Get the Method* out of the appropriate vtable entry.
3740 int entry_offset = in_bytes(Klass::vtable_start_offset()) +
3741 vtable_index*vtableEntry::size_in_bytes() +
3742 vtableEntry::method_offset_in_bytes();
3743 Node* entry_addr = basic_plus_adr(obj_klass, entry_offset);
3744 Node* target_call = make_load(NULL, entry_addr, TypePtr::NOTNULL, T_ADDRESS, MemNode::unordered);
3745
3746 // Compare the target method with the expected method (e.g., Object.hashCode).
3747 const TypePtr* native_call_addr = TypeMetadataPtr::make(method);
3748
3749 Node* native_call = makecon(native_call_addr);
3750 Node* chk_native = _gvn.transform(new CmpPNode(target_call, native_call));
3751 Node* test_native = _gvn.transform(new BoolNode(chk_native, BoolTest::ne));
3752
3753 return generate_slow_guard(test_native, slow_region);
3754 }
3755
3756 //-----------------------generate_method_call----------------------------
3757 // Use generate_method_call to make a slow-call to the real
3758 // method if the fast path fails. An alternative would be to
3759 // use a stub like OptoRuntime::slow_arraycopy_Java.
3760 // This only works for expanding the current library call,
3761 // not another intrinsic. (E.g., don't use this for making an
3762 // arraycopy call inside of the copyOf intrinsic.)
3763 CallJavaNode*
3764 LibraryCallKit::generate_method_call(vmIntrinsics::ID method_id, bool is_virtual, bool is_static) {
3765 // When compiling the intrinsic method itself, do not use this technique.
3766 guarantee(callee() != C->method(), "cannot make slow-call to self");
3767
3768 ciMethod* method = callee();
3769 // ensure the JVMS we have will be correct for this call
3770 guarantee(method_id == method->intrinsic_id(), "must match");
3771
3772 const TypeFunc* tf = TypeFunc::make(method);
3773 CallJavaNode* slow_call;
3774 if (is_static) {
3775 assert(!is_virtual, "");
3776 slow_call = new CallStaticJavaNode(C, tf,
3777 SharedRuntime::get_resolve_static_call_stub(),
3778 method, bci());
3779 } else if (is_virtual) {
3780 null_check_receiver();
3781 int vtable_index = Method::invalid_vtable_index;
3782 if (UseInlineCaches) {
3783 // Suppress the vtable call
3784 } else {
3785 // hashCode and clone are not miranda methods,
3786 // so the vtable index is fixed.
3787 // No need to use the linkResolver to get it.
3788 vtable_index = method->vtable_index();
3789 assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
3790 "bad index %d", vtable_index);
3791 }
3792 slow_call = new CallDynamicJavaNode(tf,
3793 SharedRuntime::get_resolve_virtual_call_stub(),
3794 method, vtable_index, bci());
3795 } else { // neither virtual nor static: opt_virtual
3796 null_check_receiver();
3797 slow_call = new CallStaticJavaNode(C, tf,
3798 SharedRuntime::get_resolve_opt_virtual_call_stub(),
3799 method, bci());
3800 slow_call->set_optimized_virtual(true);
3801 }
3802 if (CallGenerator::is_inlined_method_handle_intrinsic(this->method(), bci(), callee())) {
3803 // To be able to issue a direct call (optimized virtual or virtual)
3804 // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
3805 // about the method being invoked should be attached to the call site to
3806 // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
3807 slow_call->set_override_symbolic_info(true);
3808 }
3809 set_arguments_for_java_call(slow_call);
3810 set_edges_for_java_call(slow_call);
3811 return slow_call;
3812 }
3813
3814
3815 /**
3816 * Build special case code for calls to hashCode on an object. This call may
3817 * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
3818 * slightly different code.
3819 */
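// A sketch of the fast path in pseudocode, assuming an unlocked object
// with an installed identity hash:
//   mark = obj->mark();
//   if ((mark & biased_lock_mask) == unlocked_value &&
//       ((mark >> hash_shift) & hash_mask) != no_hash)     // unsigned shift
//     return (int)((mark >> hash_shift) & hash_mask);
// Any failed guard falls into slow_region and makes a runtime call instead.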
3820 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
3821 assert(is_static == callee()->is_static(), "correct intrinsic selection");
3822 assert(!(is_virtual && is_static), "either virtual, special, or static");
3823
3824 enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
3825
3826 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
3827 PhiNode* result_val = new PhiNode(result_reg, TypeInt::INT);
3828 PhiNode* result_io = new PhiNode(result_reg, Type::ABIO);
3829 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
3830 Node* obj = NULL;
3831 if (!is_static) {
3832 // Check for hashing null object
3833 obj = null_check_receiver();
3834 if (stopped()) return true; // unconditionally null
3835 result_reg->init_req(_null_path, top());
3836 result_val->init_req(_null_path, top());
3837 } else {
3838 // Do a null check, and return zero if null.
3839 // System.identityHashCode(null) == 0
3840 obj = argument(0);
3841 Node* null_ctl = top();
3842 obj = null_check_oop(obj, &null_ctl);
3843 result_reg->init_req(_null_path, null_ctl);
3844 result_val->init_req(_null_path, _gvn.intcon(0));
3845 }
3846
3847 // Unconditionally null? Then return right away.
3848 if (stopped()) {
3849 set_control( result_reg->in(_null_path));
3850 if (!stopped())
3851 set_result(result_val->in(_null_path));
3852 return true;
3853 }
3854
3855 // We only go to the fast case code if we pass a number of guards. The
3856 // paths which do not pass are accumulated in the slow_region.
3857 RegionNode* slow_region = new RegionNode(1);
3858 record_for_igvn(slow_region);
3859
3860 // If this is a virtual call, we generate a funny guard. We pull out
3861 // the vtable entry corresponding to hashCode() from the target object.
3862 // If the target method which we are calling happens to be the native
3863 // Object hashCode() method, we pass the guard. We do not need this
3864 // guard for non-virtual calls -- the caller is known to be the native
3865 // Object hashCode().
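  // In pseudocode, the guard emitted by generate_virtual_guard is roughly
  // (a sketch, not the exact node shape):
  //   if (obj->klass()->vtable[hashCode_vtable_index] != Object::hashCode())
  //     goto slow_region;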
3866 if (is_virtual) {
3867 // After null check, get the object's klass.
3868 Node* obj_klass = load_object_klass(obj);
3869 generate_virtual_guard(obj_klass, slow_region);
3870 }
3871
3872 // Get the header out of the object, use LoadMarkNode when available
3873 Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
3874 // The control of the load must be NULL. Otherwise, the load can move before
3875 // the null check after castPP removal.
3876 Node* no_ctrl = NULL;
3877 Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
3878
3879 // Test the header to see if it is unlocked.
3880 Node *lock_mask = _gvn.MakeConX(markWord::biased_lock_mask_in_place);
3881 Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
3882 Node *unlocked_val = _gvn.MakeConX(markWord::unlocked_value);
3883 Node *chk_unlocked = _gvn.transform(new CmpXNode( lmasked_header, unlocked_val));
3884 Node *test_unlocked = _gvn.transform(new BoolNode( chk_unlocked, BoolTest::ne));
3885
3886 generate_slow_guard(test_unlocked, slow_region);
3887
3888 // Get the hash value and check to see that it has been properly assigned.
3889 // We depend on hash_mask being at most 32 bits and avoid the use of
3890 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
3891 // vm: see markWord.hpp.
3892 Node *hash_mask = _gvn.intcon(markWord::hash_mask);
3893 Node *hash_shift = _gvn.intcon(markWord::hash_shift);
3894 Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift));
3895 // This hack lets the hash bits live anywhere in the mark object now, as long
3896 // as the shift drops the relevant bits into the low 32 bits. Note that
3897 // Java spec says that hashCode is an int so there's no point in capturing
3898 // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
3899 hshifted_header = ConvX2I(hshifted_header);
3900 Node *hash_val = _gvn.transform(new AndINode(hshifted_header, hash_mask));
3901
3902 Node *no_hash_val = _gvn.intcon(markWord::no_hash);
3903 Node *chk_assigned = _gvn.transform(new CmpINode( hash_val, no_hash_val));
3904 Node *test_assigned = _gvn.transform(new BoolNode( chk_assigned, BoolTest::eq));
3905
3906 generate_slow_guard(test_assigned, slow_region);
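  // Taken together, the two guards above correspond to this Java-level
  // sketch of the fast path (assuming the mark word layout in markWord.hpp):
  //   if ((mark & biased_lock_mask) != unlocked_value) goto slow; // locked/biased
  //   int hash = (int)(mark >>> hash_shift) & hash_mask;
  //   if (hash == no_hash) goto slow;  // hash not assigned yet
  //   return hash;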
3907
3908 Node* init_mem = reset_memory();
3909 // fill in the rest of the null path:
3910 result_io ->init_req(_null_path, i_o());
3911 result_mem->init_req(_null_path, init_mem);
3912
3913 result_val->init_req(_fast_path, hash_val);
3914 result_reg->init_req(_fast_path, control());
3915 result_io ->init_req(_fast_path, i_o());
3916 result_mem->init_req(_fast_path, init_mem);
3917
3918 // Generate code for the slow case. We make a call to hashCode().
3919 set_control(_gvn.transform(slow_region));
3920 if (!stopped()) {
3921 // No need for PreserveJVMState, because we're using up the present state.
3922 set_all_memory(init_mem);
3923 vmIntrinsics::ID hashCode_id = is_static ? vmIntrinsics::_identityHashCode : vmIntrinsics::_hashCode;
3924 CallJavaNode* slow_call = generate_method_call(hashCode_id, is_virtual, is_static);
3925 Node* slow_result = set_results_for_java_call(slow_call);
3926 // this->control() comes from set_results_for_java_call
3927 result_reg->init_req(_slow_path, control());
3928 result_val->init_req(_slow_path, slow_result);
3929 result_io ->set_req(_slow_path, i_o());
3930 result_mem ->set_req(_slow_path, reset_memory());
3931 }
3932
3933 // Return the combined state.
3934 set_i_o( _gvn.transform(result_io) );
3935 set_all_memory( _gvn.transform(result_mem));
3936
3937 set_result(result_reg, result_val);
3938 return true;
3939 }
3940
3941 //---------------------------inline_native_getClass----------------------------
3942 // public final native Class<?> java.lang.Object.getClass();
3943 //
3944 // Build special case code for calls to getClass on an object.
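// The inlined form is just two dependent loads (a sketch):
//   Klass* k = obj->klass();        // load_object_klass
//   oop mirror = k->java_mirror();  // load_mirror_from_klass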
3945 bool LibraryCallKit::inline_native_getClass() {
3946 Node* obj = null_check_receiver();
3947 if (stopped()) return true;
3948 set_result(load_mirror_from_klass(load_object_klass(obj)));
3949 return true;
3950 }
3951
3952 //-----------------inline_native_Reflection_getCallerClass---------------------
3953 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
3954 //
3955 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
3956 //
3957 // NOTE: This code must perform the same logic as JVM_GetCallerClass
3958 // in that it must skip particular security frames and checks for
3959 // caller sensitive methods.
3960 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
3961 #ifndef PRODUCT
3962 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
3963 tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
3964 }
3965 #endif
3966
3967 if (!jvms()->has_method()) {
3968 #ifndef PRODUCT
3969 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
3970 tty->print_cr(" Bailing out because intrinsic was inlined at top level");
3971 }
3972 #endif
3973 return false;
3974 }
3975
3976 // Walk back up the JVM state to find the caller at the required
3977 // depth.
3978 JVMState* caller_jvms = jvms();
3979
3980 // Cf. JVM_GetCallerClass
3981 // NOTE: Start the loop at depth 1 because the current JVM state does
3982 // not include the Reflection.getCallerClass() frame.
3983 for (int n = 1; caller_jvms != NULL; caller_jvms = caller_jvms->caller(), n++) {
3984 ciMethod* m = caller_jvms->method();
3985 switch (n) {
3986 case 0:
3987 fatal("current JVM state does not include the Reflection.getCallerClass frame");
3988 break;
3989 case 1:
3990 // Frame 0 and 1 must be caller sensitive (see JVM_GetCallerClass).
3991 if (!m->caller_sensitive()) {
3992 #ifndef PRODUCT
3993 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
3994 tty->print_cr(" Bailing out: CallerSensitive annotation expected at frame %d", n);
3995 }
3996 #endif
3997 return false; // bail-out; let JVM_GetCallerClass do the work
3998 }
3999 break;
4000 default:
4001 if (!m->is_ignored_by_security_stack_walk()) {
4002 // We have reached the desired frame; return the holder class.
4003 // Acquire method holder as java.lang.Class and push as constant.
4004 ciInstanceKlass* caller_klass = caller_jvms->method()->holder();
4005 ciInstance* caller_mirror = caller_klass->java_mirror();
4006 set_result(makecon(TypeInstPtr::make(caller_mirror)));
4007
4008 #ifndef PRODUCT
4009 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4010 tty->print_cr(" Succeeded: caller = %d) %s.%s, JVMS depth = %d", n, caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), jvms()->depth());
4011 tty->print_cr(" JVM state at this point:");
4012 for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
4013 ciMethod* m = jvms()->of_depth(i)->method();
4014 tty->print_cr(" %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
4015 }
4016 }
4017 #endif
4018 return true;
4019 }
4020 break;
4021 }
4022 }
4023
4024 #ifndef PRODUCT
4025 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4026 tty->print_cr(" Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth());
4027 tty->print_cr(" JVM state at this point:");
4028 for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
4029 ciMethod* m = jvms()->of_depth(i)->method();
4030 tty->print_cr(" %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
4031 }
4032 }
4033 #endif
4034
4035 return false; // bail-out; let JVM_GetCallerClass do the work
4036 }
4037
4038 bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) {
4039 Node* arg = argument(0);
4040 Node* result = NULL;
4041
4042 switch (id) {
4043 case vmIntrinsics::_floatToRawIntBits: result = new MoveF2INode(arg); break;
4044 case vmIntrinsics::_intBitsToFloat: result = new MoveI2FNode(arg); break;
4045 case vmIntrinsics::_doubleToRawLongBits: result = new MoveD2LNode(arg); break;
4046 case vmIntrinsics::_longBitsToDouble: result = new MoveL2DNode(arg); break;
4047
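  // The non-raw conversions differ from the Move nodes above only in
  // canonicalizing NaNs, i.e. (a Java-level sketch):
  //   long doubleToLongBits(double d) {
  //     return (d != d) ? 0x7ff8000000000000L : doubleToRawLongBits(d);
  //   }
  // The two cases below build exactly this diamond in Ideal form.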
4048 case vmIntrinsics::_doubleToLongBits: {
4049 // two paths (plus control) merge in a region
4050 RegionNode *r = new RegionNode(3);
4051 Node *phi = new PhiNode(r, TypeLong::LONG);
4052
4053 Node *cmpisnan = _gvn.transform(new CmpDNode(arg, arg));
4054 // Build the boolean node
4055 Node *bolisnan = _gvn.transform(new BoolNode(cmpisnan, BoolTest::ne));
4056
4057 // Branch either way.
4058 // NaN case is less traveled, which makes all the difference.
4059 IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
4060 Node *opt_isnan = _gvn.transform(ifisnan);
4061 assert( opt_isnan->is_If(), "Expect an IfNode");
4062 IfNode *opt_ifisnan = (IfNode*)opt_isnan;
4063 Node *iftrue = _gvn.transform(new IfTrueNode(opt_ifisnan));
4064
4065 set_control(iftrue);
4066
4067 static const jlong nan_bits = CONST64(0x7ff8000000000000);
4068 Node *slow_result = longcon(nan_bits); // return NaN
4069 phi->init_req(1, _gvn.transform( slow_result ));
4070 r->init_req(1, iftrue);
4071
4072 // Else fall through
4073 Node *iffalse = _gvn.transform(new IfFalseNode(opt_ifisnan));
4074 set_control(iffalse);
4075
4076 phi->init_req(2, _gvn.transform(new MoveD2LNode(arg)));
4077 r->init_req(2, iffalse);
4078
4079 // Post merge
4080 set_control(_gvn.transform(r));
4081 record_for_igvn(r);
4082
4083 C->set_has_split_ifs(true); // Has chance for split-if optimization
4084 result = phi;
4085 assert(result->bottom_type()->isa_long(), "must be");
4086 break;
4087 }
4088
4089 case vmIntrinsics::_floatToIntBits: {
4090 // two paths (plus control) merge in a region
4091 RegionNode *r = new RegionNode(3);
4092 Node *phi = new PhiNode(r, TypeInt::INT);
4093
4094 Node *cmpisnan = _gvn.transform(new CmpFNode(arg, arg));
4095 // Build the boolean node
4096 Node *bolisnan = _gvn.transform(new BoolNode(cmpisnan, BoolTest::ne));
4097
4098 // Branch either way.
4099 // NaN case is less traveled, which makes all the difference.
4100 IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
4101 Node *opt_isnan = _gvn.transform(ifisnan);
4102 assert( opt_isnan->is_If(), "Expect an IfNode");
4103 IfNode *opt_ifisnan = (IfNode*)opt_isnan;
4104 Node *iftrue = _gvn.transform(new IfTrueNode(opt_ifisnan));
4105
4106 set_control(iftrue);
4107
4108 static const jint nan_bits = 0x7fc00000;
4109 Node *slow_result = makecon(TypeInt::make(nan_bits)); // return NaN
4110 phi->init_req(1, _gvn.transform( slow_result ));
4111 r->init_req(1, iftrue);
4112
4113 // Else fall through
4114 Node *iffalse = _gvn.transform(new IfFalseNode(opt_ifisnan));
4115 set_control(iffalse);
4116
4117 phi->init_req(2, _gvn.transform(new MoveF2INode(arg)));
4118 r->init_req(2, iffalse);
4119
4120 // Post merge
4121 set_control(_gvn.transform(r));
4122 record_for_igvn(r);
4123
4124 C->set_has_split_ifs(true); // Has chance for split-if optimization
4125 result = phi;
4126 assert(result->bottom_type()->isa_int(), "must be");
4127 break;
4128 }
4129
4130 default:
4131 fatal_unexpected_iid(id);
4132 break;
4133 }
4134 set_result(_gvn.transform(result));
4135 return true;
4136 }
4137
4138 //----------------------inline_unsafe_copyMemory-------------------------
4139 // public native void Unsafe.copyMemory0(Object srcBase, long srcOffset, Object destBase, long destOffset, long bytes);
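// The generated shape brackets the leaf call with stores to the
// thread-local doing_unsafe_access flag, so a fault raised inside the
// copy stub can be reported as a Java error instead of crashing the VM
// (a sketch of what is emitted below):
//   thread->_doing_unsafe_access = true;
//   unsafe_arraycopy(src, dst, size);   // RC_LEAF runtime call
//   thread->_doing_unsafe_access = false;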
4140 bool LibraryCallKit::inline_unsafe_copyMemory() {
4141 if (callee()->is_static()) return false; // caller must have the capability!
4142 null_check_receiver(); // null-check receiver
4143 if (stopped()) return true;
4144
4145 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
4146
4147 Node* src_ptr = argument(1); // type: oop
4148 Node* src_off = ConvL2X(argument(2)); // type: long
4149 Node* dst_ptr = argument(4); // type: oop
4150 Node* dst_off = ConvL2X(argument(5)); // type: long
4151 Node* size = ConvL2X(argument(7)); // type: long
4152
4153 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
4154 "fieldOffset must be byte-scaled");
4155
4156 Node* src = make_unsafe_address(src_ptr, src_off, ACCESS_READ);
4157 Node* dst = make_unsafe_address(dst_ptr, dst_off, ACCESS_WRITE);
4158
4159 // Conservatively insert a memory barrier on all memory slices.
4160 // Do not let writes of the copy source or destination float below the copy.
4161 insert_mem_bar(Op_MemBarCPUOrder);
4162
4163 Node* thread = _gvn.transform(new ThreadLocalNode());
4164 Node* doing_unsafe_access_addr = basic_plus_adr(top(), thread, in_bytes(JavaThread::doing_unsafe_access_offset()));
4165 BasicType doing_unsafe_access_bt = T_BYTE;
4166 assert((sizeof(bool) * CHAR_BIT) == 8, "not implemented");
4167
4168 // update volatile field
4169 store_to_memory(control(), doing_unsafe_access_addr, intcon(1), doing_unsafe_access_bt, Compile::AliasIdxRaw, MemNode::unordered);
4170
4171 // Call it. Note that the length argument is not scaled.
4172 make_runtime_call(RC_LEAF|RC_NO_FP,
4173 OptoRuntime::fast_arraycopy_Type(),
4174 StubRoutines::unsafe_arraycopy(),
4175 "unsafe_arraycopy",
4176 TypeRawPtr::BOTTOM,
4177 src, dst, size XTOP);
4178
4179 store_to_memory(control(), doing_unsafe_access_addr, intcon(0), doing_unsafe_access_bt, Compile::AliasIdxRaw, MemNode::unordered);
4180
4181 // Do not let reads of the copy destination float above the copy.
4182 insert_mem_bar(Op_MemBarCPUOrder);
4183
4184 return true;
4185 }
4186
4187 //------------------------copy_to_clone-----------------------------------
4188 // Helper function for inline_native_clone.
4189 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array) {
4190 assert(obj_size != NULL, "");
4191 Node* raw_obj = alloc_obj->in(1);
4192 assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
4193
4194 AllocateNode* alloc = NULL;
4195 if (ReduceBulkZeroing) {
4196 // We will be completely responsible for initializing this object -
4197 // mark Initialize node as complete.
4198 alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
4199 // The object was just allocated - there should not be any stores!
4200 guarantee(alloc != NULL && alloc->maybe_set_complete(&_gvn), "");
4201 // Mark as complete_with_arraycopy so that on AllocateNode
4202 // expansion, we know this AllocateNode is initialized by an array
4203 // copy and a StoreStore barrier exists after the array copy.
4204 alloc->initialization()->set_complete_with_arraycopy();
4205 }
4206
4207 Node* size = _gvn.transform(obj_size);
4208 access_clone(obj, alloc_obj, size, is_array);
4209
4210 // Do not let reads from the cloned object float above the arraycopy.
4211 if (alloc != NULL) {
4212 // Do not let stores that initialize this object be reordered with
4213 // a subsequent store that would make this object accessible by
4214 // other threads.
4215 // Record what AllocateNode this StoreStore protects so that
4216 // escape analysis can go from the MemBarStoreStoreNode to the
4217 // AllocateNode and eliminate the MemBarStoreStoreNode if possible
4218 // based on the escape status of the AllocateNode.
4219 insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
4220 } else {
4221 insert_mem_bar(Op_MemBarCPUOrder);
4222 }
4223 }
4224
4225 //------------------------inline_native_clone----------------------------
4226 // protected native Object java.lang.Object.clone();
4227 //
4228 // Here are the simple edge cases:
4229 // null receiver => normal trap
4230 // virtual and clone was overridden => slow path to out-of-line clone
4231 // not cloneable or finalizer => slow path to out-of-line Object.clone
4232 //
4233 // The general case has two steps, allocation and copying.
4234 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
4235 //
4236 // Copying also has two cases, oop arrays and everything else.
4237 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
4238 // Everything else uses the tight inline loop supplied by CopyArrayNode.
4239 //
4240 // These steps fold up nicely if and when the cloned object's klass
4241 // can be sharply typed as an object array, a type array, or an instance.
4242 //
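// In outline, the generated fast paths are (a sketch):
//   if (obj is an oop array)   copy = new T[n]; arrayof_oop_arraycopy(...);
//   else if (obj is an array)  copy = new T[n]; arrayof_long_arraycopy(...);
//   else if (cloneable fast, no finalizer)
//                              copy = new Obj(); bulk copy of fields;
//   else                       copy = obj.clone();  // out-of-line slow call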
4243 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
4244 PhiNode* result_val;
4245
4246 // Set the reexecute bit for the interpreter to reexecute
4247 // the bytecode that invokes Object.clone if deoptimization happens.
4248 { PreserveReexecuteState preexecs(this);
4249 jvms()->set_should_reexecute(true);
4250
4251 Node* obj = null_check_receiver();
4252 if (stopped()) return true;
4253
4254 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
4255
4256 // If we are going to clone an instance, we need its exact type to
4257 // know the number and types of fields to convert the clone to
4258 // loads/stores. Maybe a speculative type can help us.
4259 if (!obj_type->klass_is_exact() &&
4260 obj_type->speculative_type() != NULL &&
4261 obj_type->speculative_type()->is_instance_klass()) {
4262 ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
4263 if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
4264 !spec_ik->has_injected_fields()) {
4265 ciKlass* k = obj_type->klass();
4266 if (!k->is_instance_klass() ||
4267 k->as_instance_klass()->is_interface() ||
4268 k->as_instance_klass()->has_subklass()) {
4269 obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
4270 }
4271 }
4272 }
4273
4274 // Conservatively insert a memory barrier on all memory slices.
4275 // Do not let writes into the original float below the clone.
4276 insert_mem_bar(Op_MemBarCPUOrder);
4277
4278 // paths into result_reg:
4279 enum {
4280 _slow_path = 1, // out-of-line call to clone method (virtual or not)
4281 _objArray_path, // plain array allocation, plus arrayof_oop_arraycopy
4282 _array_path, // plain array allocation, plus arrayof_long_arraycopy
4283 _instance_path, // plain instance allocation, plus arrayof_long_arraycopy
4284 PATH_LIMIT
4285 };
4286 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4287 result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
4288 PhiNode* result_i_o = new PhiNode(result_reg, Type::ABIO);
4289 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4290 record_for_igvn(result_reg);
4291
4292 Node* obj_klass = load_object_klass(obj);
4293 Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
4294 if (array_ctl != NULL) {
4295 // It's an array.
4296 PreserveJVMState pjvms(this);
4297 set_control(array_ctl);
4298 Node* obj_length = load_array_length(obj);
4299 Node* obj_size = NULL;
4300 Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size); // no arguments to push
4301
4302 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4303 if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, BarrierSetC2::Parsing)) {
4304 // If it is an oop array, it requires very special treatment,
4305 // because gc barriers are required when accessing the array.
4306 Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
4307 if (is_obja != NULL) {
4308 PreserveJVMState pjvms2(this);
4309 set_control(is_obja);
4310 // Generate a direct call to the right arraycopy function(s).
4311 Node* alloc = tightly_coupled_allocation(alloc_obj, NULL);
4312 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, alloc != NULL, false);
4313 ac->set_clone_oop_array();
4314 Node* n = _gvn.transform(ac);
4315 assert(n == ac, "cannot disappear");
4316 ac->connect_outputs(this);
4317
4318 result_reg->init_req(_objArray_path, control());
4319 result_val->init_req(_objArray_path, alloc_obj);
4320 result_i_o ->set_req(_objArray_path, i_o());
4321 result_mem ->set_req(_objArray_path, reset_memory());
4322 }
4323 }
4324 // Otherwise, there are no barriers to worry about.
4325 // (We can dispense with card marks if we know the allocation
4326 // comes out of eden (TLAB)... In fact, ReduceInitialCardMarks
4327 // causes the non-eden paths to take compensating steps to
4328 // simulate a fresh allocation, so that no further
4329 // card marks are required in compiled code to initialize
4330 // the object.)
4331
4332 if (!stopped()) {
4333 copy_to_clone(obj, alloc_obj, obj_size, true);
4334
4335 // Present the results of the copy.
4336 result_reg->init_req(_array_path, control());
4337 result_val->init_req(_array_path, alloc_obj);
4338 result_i_o ->set_req(_array_path, i_o());
4339 result_mem ->set_req(_array_path, reset_memory());
4340 }
4341 }
4342
4343 // We only go to the instance fast case code if we pass a number of guards.
4344 // The paths which do not pass are accumulated in the slow_region.
4345 RegionNode* slow_region = new RegionNode(1);
4346 record_for_igvn(slow_region);
4347 if (!stopped()) {
4348 // It's an instance (we did array above). Make the slow-path tests.
4349 // If this is a virtual call, we generate a funny guard. We grab
4350 // the vtable entry corresponding to clone() from the target object.
4351 // If the target method which we are calling happens to be the
4352 // Object clone() method, we pass the guard. We do not need this
4353 // guard for non-virtual calls; the caller is known to be the native
4354 // Object clone().
4355 if (is_virtual) {
4356 generate_virtual_guard(obj_klass, slow_region);
4357 }
4358
4359 // The object must be easily cloneable and must not have a finalizer.
4360 // Both of these conditions may be checked in a single test.
4361 // We could optimize the test further, but we don't care.
4362 generate_access_flags_guard(obj_klass,
4363 // Test both conditions:
4364 JVM_ACC_IS_CLONEABLE_FAST | JVM_ACC_HAS_FINALIZER,
4365 // Must be cloneable but not finalizer:
4366 JVM_ACC_IS_CLONEABLE_FAST,
4367 slow_region);
4368 }
4369
4370 if (!stopped()) {
4371 // It's an instance, and it passed the slow-path tests.
4372 PreserveJVMState pjvms(this);
4373 Node* obj_size = NULL;
4374 // Need to deoptimize on exception from allocation since Object.clone intrinsic
4375 // is reexecuted if deoptimization occurs and there could be problems when merging
4376 // exception state between multiple Object.clone versions (reexecute=true vs reexecute=false).
4377 Node* alloc_obj = new_instance(obj_klass, NULL, &obj_size, /*deoptimize_on_exception=*/true);
4378
4379 copy_to_clone(obj, alloc_obj, obj_size, false);
4380
4381 // Present the results of the slow call.
4382 result_reg->init_req(_instance_path, control());
4383 result_val->init_req(_instance_path, alloc_obj);
4384 result_i_o ->set_req(_instance_path, i_o());
4385 result_mem ->set_req(_instance_path, reset_memory());
4386 }
4387
4388 // Generate code for the slow case. We make a call to clone().
4389 set_control(_gvn.transform(slow_region));
4390 if (!stopped()) {
4391 PreserveJVMState pjvms(this);
4392 CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_clone, is_virtual);
4393 // We need to deoptimize on exception (see comment above)
4394 Node* slow_result = set_results_for_java_call(slow_call, false, /* deoptimize */ true);
4395 // this->control() comes from set_results_for_java_call
4396 result_reg->init_req(_slow_path, control());
4397 result_val->init_req(_slow_path, slow_result);
4398 result_i_o ->set_req(_slow_path, i_o());
4399 result_mem ->set_req(_slow_path, reset_memory());
4400 }
4401
4402 // Return the combined state.
4403 set_control( _gvn.transform(result_reg));
4404 set_i_o( _gvn.transform(result_i_o));
4405 set_all_memory( _gvn.transform(result_mem));
4406 } // original reexecute is set back here
4407
4408 set_result(_gvn.transform(result_val));
4409 return true;
4410 }
4411
4412 // If we have a tightly coupled allocation, the arraycopy may take care
4413 // of the array initialization. If one of the guards we insert between
4414 // the allocation and the arraycopy causes a deoptimization, an
4415 // uninitialized array will escape the compiled method. To prevent that
4416 // we set the JVM state for uncommon traps between the allocation and
4417 // the arraycopy to the state before the allocation so, in case of
4418 // deoptimization, we'll reexecute the allocation and the
4419 // initialization.
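// Concretely (a sketch): for
//   int[] a = new int[n];              // tightly coupled allocation
//   System.arraycopy(src, 0, a, 0, n);
// a guard that deoptimizes between the two statements must replay from the
// 'new int[n]' bytecode, so the saved JVMS points at the allocation with
// the array length re-pushed on the expression stack (see ins_req below).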
4420 JVMState* LibraryCallKit::arraycopy_restore_alloc_state(AllocateArrayNode* alloc, int& saved_reexecute_sp) {
4421 if (alloc != NULL) {
4422 ciMethod* trap_method = alloc->jvms()->method();
4423 int trap_bci = alloc->jvms()->bci();
4424
4425 if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
4426 !C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_null_check)) {
4427 // Make sure there's no store between the allocation and the
4428 // arraycopy; otherwise visible side effects could be reexecuted
4429 // in case of deoptimization and cause incorrect execution.
4430 bool no_interfering_store = true;
4431 Node* mem = alloc->in(TypeFunc::Memory);
4432 if (mem->is_MergeMem()) {
4433 for (MergeMemStream mms(merged_memory(), mem->as_MergeMem()); mms.next_non_empty2(); ) {
4434 Node* n = mms.memory();
4435 if (n != mms.memory2() && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
4436 assert(n->is_Store(), "what else?");
4437 no_interfering_store = false;
4438 break;
4439 }
4440 }
4441 } else {
4442 for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
4443 Node* n = mms.memory();
4444 if (n != mem && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
4445 assert(n->is_Store(), "what else?");
4446 no_interfering_store = false;
4447 break;
4448 }
4449 }
4450 }
4451
4452 if (no_interfering_store) {
4453 JVMState* old_jvms = alloc->jvms()->clone_shallow(C);
4454 uint size = alloc->req();
4455 SafePointNode* sfpt = new SafePointNode(size, old_jvms);
4456 old_jvms->set_map(sfpt);
4457 for (uint i = 0; i < size; i++) {
4458 sfpt->init_req(i, alloc->in(i));
4459 }
4460 // re-push array length for deoptimization
4461 sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), alloc->in(AllocateNode::ALength));
4462 old_jvms->set_sp(old_jvms->sp()+1);
4463 old_jvms->set_monoff(old_jvms->monoff()+1);
4464 old_jvms->set_scloff(old_jvms->scloff()+1);
4465 old_jvms->set_endoff(old_jvms->endoff()+1);
4466 old_jvms->set_should_reexecute(true);
4467
4468 sfpt->set_i_o(map()->i_o());
4469 sfpt->set_memory(map()->memory());
4470 sfpt->set_control(map()->control());
4471
4472 JVMState* saved_jvms = jvms();
4473 saved_reexecute_sp = _reexecute_sp;
4474
4475 set_jvms(sfpt->jvms());
4476 _reexecute_sp = jvms()->sp();
4477
4478 return saved_jvms;
4479 }
4480 }
4481 }
4482 return NULL;
4483 }
4484
4485 // In case of a deoptimization, we restart execution at the
4486 // allocation, allocating a new array. We would leave an uninitialized
4487 // array in the heap that GCs wouldn't expect. Move the allocation
4488 // after the traps so we don't allocate the array if we
4489 // deoptimize. This is possible because tightly_coupled_allocation()
4490 // guarantees there's no observer of the allocated array at this point
4491 // and the control flow is simple enough.
4492 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms,
4493 int saved_reexecute_sp, uint new_idx) {
4494 if (saved_jvms != NULL && !stopped()) {
4495 assert(alloc != NULL, "only with a tightly coupled allocation");
4496 // restore JVM state to the state at the arraycopy
4497 saved_jvms->map()->set_control(map()->control());
4498 assert(saved_jvms->map()->memory() == map()->memory(), "memory state changed?");
4499 assert(saved_jvms->map()->i_o() == map()->i_o(), "IO state changed?");
4500 // If we've improved the types of some nodes (null check) while
4501 // emitting the guards, propagate them to the current state
4502 map()->replaced_nodes().apply(saved_jvms->map(), new_idx);
4503 set_jvms(saved_jvms);
4504 _reexecute_sp = saved_reexecute_sp;
4505
4506 // Remove the allocation from above the guards
4507 CallProjections callprojs;
4508 alloc->extract_projections(&callprojs, true);
4509 InitializeNode* init = alloc->initialization();
4510 Node* alloc_mem = alloc->in(TypeFunc::Memory);
4511 C->gvn_replace_by(callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
4512 C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
4513 C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
4514
4515 // move the allocation here (after the guards)
4516 _gvn.hash_delete(alloc);
4517 alloc->set_req(TypeFunc::Control, control());
4518 alloc->set_req(TypeFunc::I_O, i_o());
4519 Node *mem = reset_memory();
4520 set_all_memory(mem);
4521 alloc->set_req(TypeFunc::Memory, mem);
4522 set_control(init->proj_out_or_null(TypeFunc::Control));
4523 set_i_o(callprojs.fallthrough_ioproj);
4524
4525 // Update memory as done in GraphKit::set_output_for_allocation()
4526 const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
4527 const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
4528 if (ary_type->isa_aryptr() && length_type != NULL) {
4529 ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
4530 }
4531 const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
4532 int elemidx = C->get_alias_index(telemref);
4533 set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
4534 set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
4535
4536 Node* allocx = _gvn.transform(alloc);
4537 assert(allocx == alloc, "where has the allocation gone?");
4538 assert(dest->is_CheckCastPP(), "not an allocation result?");
4539
4540 _gvn.hash_delete(dest);
4541 dest->set_req(0, control());
4542 Node* destx = _gvn.transform(dest);
4543 assert(destx == dest, "where has the allocation result gone?");
4544 }
4545 }
4546
4547
4548 //------------------------------inline_arraycopy-----------------------
4549 // public static native void java.lang.System.arraycopy(Object src, int srcPos,
4550 // Object dest, int destPos,
4551 // int length);
4552 bool LibraryCallKit::inline_arraycopy() {
4553 // Get the arguments.
4554 Node* src = argument(0); // type: oop
4555 Node* src_offset = argument(1); // type: int
4556 Node* dest = argument(2); // type: oop
4557 Node* dest_offset = argument(3); // type: int
4558 Node* length = argument(4); // type: int
4559
4560 uint new_idx = C->unique();
4561
4562 // Check for allocation before we add nodes that would confuse
4563 // tightly_coupled_allocation()
4564 AllocateArrayNode* alloc = tightly_coupled_allocation(dest, NULL);
4565
4566 int saved_reexecute_sp = -1;
4567 JVMState* saved_jvms = arraycopy_restore_alloc_state(alloc, saved_reexecute_sp);
4568 // See arraycopy_restore_alloc_state() comment
4569 // if alloc == NULL we don't have to worry about a tightly coupled allocation so we can emit all needed guards
4570 // if saved_jvms != NULL (then alloc != NULL) then we can handle guards and a tightly coupled allocation
4571 // if saved_jvms == NULL and alloc != NULL, we can't emit any guards
4572 bool can_emit_guards = (alloc == NULL || saved_jvms != NULL);
4573
4574 // The following tests must be performed
4575 // (1) src and dest are arrays.
4576 // (2) src and dest arrays must have elements of the same BasicType
4577 // (3) src and dest must not be null.
4578 // (4) src_offset must not be negative.
4579 // (5) dest_offset must not be negative.
4580 // (6) length must not be negative.
4581 // (7) src_offset + length must not exceed length of src.
4582 // (8) dest_offset + length must not exceed length of dest.
4583 // (9) each element of an oop array must be assignable
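  // Tests (3)-(8) mirror the checks System.arraycopy specifies
  // (a Java-level sketch):
  //   if (src == null || dest == null) throw new NullPointerException();
  //   if (srcPos < 0 || destPos < 0 || length < 0 ||
  //       srcPos + length > src.length || destPos + length > dest.length)
  //     throw new IndexOutOfBoundsException();
  // (9) is the per-element assignability check behind ArrayStoreException.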
4584
4585 // (3) src and dest must not be null.
4586 // always do this here because we need the JVM state for uncommon traps
4587 Node* null_ctl = top();
4588 src = saved_jvms != NULL ? null_check_oop(src, &null_ctl, true, true) : null_check(src, T_ARRAY);
4589 assert(null_ctl->is_top(), "no null control here");
4590 dest = null_check(dest, T_ARRAY);
4591
4592 if (!can_emit_guards) {
4593 // if saved_jvms == NULL and alloc != NULL, we don't emit any
4594 // guards but the arraycopy node could still take advantage of a
4595 // tightly coupled allocation. tightly_coupled_allocation() is
4596 // called again to make sure it takes the null check above into
4597 // account: the null check is mandatory and if it caused an
4598 // uncommon trap to be emitted then the allocation can't be
4599 // considered tightly coupled in this context.
4600 alloc = tightly_coupled_allocation(dest, NULL);
4601 }
4602
4603 bool validated = false;
4604
4605 const Type* src_type = _gvn.type(src);
4606 const Type* dest_type = _gvn.type(dest);
4607 const TypeAryPtr* top_src = src_type->isa_aryptr();
4608 const TypeAryPtr* top_dest = dest_type->isa_aryptr();
4609
4610 // Do we have the type of src?
4611 bool has_src = (top_src != NULL && top_src->klass() != NULL);
4612 // Do we have the type of dest?
4613 bool has_dest = (top_dest != NULL && top_dest->klass() != NULL);
4614 // Is the type for src from speculation?
4615 bool src_spec = false;
4616 // Is the type for dest from speculation?
4617 bool dest_spec = false;
4618
4619 if ((!has_src || !has_dest) && can_emit_guards) {
4620 // We don't have sufficient type information, let's see if
4621 // speculative types can help. We need to have types for both src
4622 // and dest so that it pays off.
4623
4624 // Do we already have or could we have type information for src
4625 bool could_have_src = has_src;
4626 // Do we already have or could we have type information for dest
4627 bool could_have_dest = has_dest;
4628
4629 ciKlass* src_k = NULL;
4630 if (!has_src) {
4631 src_k = src_type->speculative_type_not_null();
4632 if (src_k != NULL && src_k->is_array_klass()) {
4633 could_have_src = true;
4634 }
4635 }
4636
4637 ciKlass* dest_k = NULL;
4638 if (!has_dest) {
4639 dest_k = dest_type->speculative_type_not_null();
4640 if (dest_k != NULL && dest_k->is_array_klass()) {
4641 could_have_dest = true;
4642 }
4643 }
4644
4645 if (could_have_src && could_have_dest) {
4646 // This is going to pay off so emit the required guards
4647 if (!has_src) {
4648 src = maybe_cast_profiled_obj(src, src_k, true);
4649 src_type = _gvn.type(src);
4650 top_src = src_type->isa_aryptr();
4651 has_src = (top_src != NULL && top_src->klass() != NULL);
4652 src_spec = true;
4653 }
4654 if (!has_dest) {
4655 dest = maybe_cast_profiled_obj(dest, dest_k, true);
4656 dest_type = _gvn.type(dest);
4657 top_dest = dest_type->isa_aryptr();
4658 has_dest = (top_dest != NULL && top_dest->klass() != NULL);
4659 dest_spec = true;
4660 }
4661 }
4662 }
4663
4664 if (has_src && has_dest && can_emit_guards) {
4665 BasicType src_elem = top_src->klass()->as_array_klass()->element_type()->basic_type();
4666 BasicType dest_elem = top_dest->klass()->as_array_klass()->element_type()->basic_type();
4667 if (is_reference_type(src_elem)) src_elem = T_OBJECT;
4668 if (is_reference_type(dest_elem)) dest_elem = T_OBJECT;
4669
4670 if (src_elem == dest_elem && src_elem == T_OBJECT) {
4671 // If both arrays are object arrays then having the exact types
4672 // for both will remove the need for a subtype check at runtime
4673 // before the call and may make it possible to pick a faster copy
4674 // routine (without a subtype check on every element)
4675 // Do we have the exact type of src?
4676 bool could_have_src = src_spec;
4677 // Do we have the exact type of dest?
4678 bool could_have_dest = dest_spec;
4679 ciKlass* src_k = top_src->klass();
4680 ciKlass* dest_k = top_dest->klass();
4681 if (!src_spec) {
4682 src_k = src_type->speculative_type_not_null();
4683 if (src_k != NULL && src_k->is_array_klass()) {
4684 could_have_src = true;
4685 }
4686 }
4687 if (!dest_spec) {
4688 dest_k = dest_type->speculative_type_not_null();
4689 if (dest_k != NULL && dest_k->is_array_klass()) {
4690 could_have_dest = true;
4691 }
4692 }
4693 if (could_have_src && could_have_dest) {
4694 // If we can have both exact types, emit the missing guards
4695 if (could_have_src && !src_spec) {
4696 src = maybe_cast_profiled_obj(src, src_k, true);
4697 }
4698 if (could_have_dest && !dest_spec) {
4699 dest = maybe_cast_profiled_obj(dest, dest_k, true);
4700 }
4701 }
4702 }
4703 }
4704
4705 ciMethod* trap_method = method();
4706 int trap_bci = bci();
4707 if (saved_jvms != NULL) {
4708 trap_method = alloc->jvms()->method();
4709 trap_bci = alloc->jvms()->bci();
4710 }
4711
4712 bool negative_length_guard_generated = false;
4713
4714 if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
4715 can_emit_guards &&
4716 !src->is_top() && !dest->is_top()) {
4717 // validate arguments: enables transformation of the ArrayCopyNode
4718 validated = true;
4719
4720 RegionNode* slow_region = new RegionNode(1);
4721 record_for_igvn(slow_region);
4722
4723 // (1) src and dest are arrays.
4724 generate_non_array_guard(load_object_klass(src), slow_region);
4725 generate_non_array_guard(load_object_klass(dest), slow_region);
4726
4727 // (2) src and dest arrays must have elements of the same BasicType
4728 // done at macro expansion or at Ideal transformation time
4729
4730 // (4) src_offset must not be negative.
4731 generate_negative_guard(src_offset, slow_region);
4732
4733 // (5) dest_offset must not be negative.
4734 generate_negative_guard(dest_offset, slow_region);
4735
4736 // (7) src_offset + length must not exceed length of src.
4737 generate_limit_guard(src_offset, length,
4738 load_array_length(src),
4739 slow_region);
4740
4741 // (8) dest_offset + length must not exceed length of dest.
4742 generate_limit_guard(dest_offset, length,
4743 load_array_length(dest),
4744 slow_region);
4745
4746 // (6) length must not be negative.
4747 // This is also checked in generate_arraycopy() during macro expansion, but
4748 // we also have to check it here for the case where the ArrayCopyNode will
4749 // be eliminated by Escape Analysis.
4750 if (EliminateAllocations) {
4751 generate_negative_guard(length, slow_region);
4752 negative_length_guard_generated = true;
4753 }
4754
4755 // (9) each element of an oop array must be assignable
4756 Node* src_klass = load_object_klass(src);
4757 Node* dest_klass = load_object_klass(dest);
4758 Node* not_subtype_ctrl = gen_subtype_check(src_klass, dest_klass);
4759
4760 if (not_subtype_ctrl != top()) {
4761 PreserveJVMState pjvms(this);
4762 set_control(not_subtype_ctrl);
4763 uncommon_trap(Deoptimization::Reason_intrinsic,
4764 Deoptimization::Action_make_not_entrant);
4765 assert(stopped(), "Should be stopped");
4766 }
4767 {
4768 PreserveJVMState pjvms(this);
4769 set_control(_gvn.transform(slow_region));
4770 uncommon_trap(Deoptimization::Reason_intrinsic,
4771 Deoptimization::Action_make_not_entrant);
4772 assert(stopped(), "Should be stopped");
4773 }
4774
4775 const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
4776 const Type *toop = TypeOopPtr::make_from_klass(dest_klass_t->klass());
4777 src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
4778 }
4779
4780 arraycopy_move_allocation_here(alloc, dest, saved_jvms, saved_reexecute_sp, new_idx);
4781
4782 if (stopped()) {
4783 return true;
4784 }
4785
4786 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != NULL, negative_length_guard_generated,
4787 // Create LoadRange and LoadKlass nodes for use during macro expansion here
4788 // so the compiler has a chance to eliminate them: during macro expansion,
4789 // we have to set their control (CastPP nodes are eliminated).
4790 load_object_klass(src), load_object_klass(dest),
4791 load_array_length(src), load_array_length(dest));
4792
4793 ac->set_arraycopy(validated);
4794
4795 Node* n = _gvn.transform(ac);
4796 if (n == ac) {
4797 ac->connect_outputs(this);
4798 } else {
4799 assert(validated, "shouldn't transform if all arguments not validated");
4800 set_all_memory(n);
4801 }
4802 clear_upper_avx();
4803
4804
4805 return true;
4806 }
4807
4808
4809 // Helper function which determines if an arraycopy immediately follows
4810 // an allocation, with no intervening tests or other escapes for the object.
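// Example of the pattern recognized here (a sketch):
//   byte[] b = new byte[n];           // AllocateArrayNode
//   System.arraycopy(a, 0, b, 0, n);  // immediately follows, no escapes
// In that case the arraycopy can take over zeroing/initializing b.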
4811 AllocateArrayNode*
4812 LibraryCallKit::tightly_coupled_allocation(Node* ptr,
4813 RegionNode* slow_region) {
4814 if (stopped()) return NULL; // no fast path
4815 if (C->AliasLevel() == 0) return NULL; // no MergeMems around
4816
4817 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr, &_gvn);
4818 if (alloc == NULL) return NULL;
4819
4820 Node* rawmem = memory(Compile::AliasIdxRaw);
4821 // Is the allocation's memory state untouched?
4822 if (!(rawmem->is_Proj() && rawmem->in(0)->is_Initialize())) {
4823 // Bail out if there have been raw-memory effects since the allocation.
4824 // (Example: There might have been a call or safepoint.)
4825 return NULL;
4826 }
4827 rawmem = rawmem->in(0)->as_Initialize()->memory(Compile::AliasIdxRaw);
4828 if (!(rawmem->is_Proj() && rawmem->in(0) == alloc)) {
4829 return NULL;
4830 }
4831
4832 // There must be no unexpected observers of this allocation.
4833 for (DUIterator_Fast imax, i = ptr->fast_outs(imax); i < imax; i++) {
4834 Node* obs = ptr->fast_out(i);
4835 if (obs != this->map()) {
4836 return NULL;
4837 }
4838 }
4839
4840 // This arraycopy must unconditionally follow the allocation of the ptr.
4841 Node* alloc_ctl = ptr->in(0);
4842 Node* ctl = control();
4843 while (ctl != alloc_ctl) {
4844 // There may be guards which feed into the slow_region.
4845 // Any other control flow means that we might not get a chance
4846 // to finish initializing the allocated object.
4847 if ((ctl->is_IfFalse() || ctl->is_IfTrue()) && ctl->in(0)->is_If()) {
4848 IfNode* iff = ctl->in(0)->as_If();
4849 Node* not_ctl = iff->proj_out_or_null(1 - ctl->as_Proj()->_con);
4850 assert(not_ctl != NULL && not_ctl != ctl, "found alternate");
4851 if (slow_region != NULL && slow_region->find_edge(not_ctl) >= 1) {
4852 ctl = iff->in(0); // This test feeds the known slow_region.
4853 continue;
4854 }
4855 // One more try: Various low-level checks bottom out in
4856 // uncommon traps. If the debug-info of the trap omits
4857 // any reference to the allocation, as we've already
4858 // observed, then there can be no objection to the trap.
4859 bool found_trap = false;
4860 for (DUIterator_Fast jmax, j = not_ctl->fast_outs(jmax); j < jmax; j++) {
4861 Node* obs = not_ctl->fast_out(j);
4862 if (obs->in(0) == not_ctl && obs->is_Call() &&
4863 (obs->as_Call()->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point())) {
4864 found_trap = true; break;
4865 }
4866 }
4867 if (found_trap) {
4868 ctl = iff->in(0); // This test feeds a harmless uncommon trap.
4869 continue;
4870 }
4871 }
4872 return NULL;
4873 }
4874
4875 // If we get this far, we have an allocation which immediately
4876 // precedes the arraycopy, and we can take over zeroing the new object.
4877 // The arraycopy will finish the initialization, and provide
4878 // a new control state to which we will anchor the destination pointer.
4879
4880 return alloc;
4881 }
4882
4883 //-------------inline_encodeISOArray-----------------------------------
4884 // encode char[] to byte[] in ISO_8859_1
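// Java-level sketch of the operation performed by the EncodeISOArray node
// (it stops at the first char that does not fit in ISO-8859-1 and returns
// the number of characters encoded):
//   int i = 0;
//   for (; i < len && src[sp + i] <= 0xFF; i++)
//     dst[dp + i] = (byte) src[sp + i];
//   return i;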
4885 bool LibraryCallKit::inline_encodeISOArray() {
4886 assert(callee()->signature()->size() == 5, "encodeISOArray has 5 parameters");
4887 // no receiver since it is static method
4888 Node *src = argument(0);
4889 Node *src_offset = argument(1);
4890 Node *dst = argument(2);
4891 Node *dst_offset = argument(3);
4892 Node *length = argument(4);
4893
4894 src = must_be_not_null(src, true);
4895 dst = must_be_not_null(dst, true);
4896
4897 const Type* src_type = src->Value(&_gvn);
4898 const Type* dst_type = dst->Value(&_gvn);
4899 const TypeAryPtr* top_src = src_type->isa_aryptr();
4900 const TypeAryPtr* top_dest = dst_type->isa_aryptr();
4901 if (top_src == NULL || top_src->klass() == NULL ||
4902 top_dest == NULL || top_dest->klass() == NULL) {
4903 // failed array check
4904 return false;
4905 }
4906
4907 // Figure out the size and type of the elements we will be copying.
4908 BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
4909 BasicType dst_elem = dst_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
4910 if (!((src_elem == T_CHAR) || (src_elem == T_BYTE)) || dst_elem != T_BYTE) {
4911 return false;
4912 }
4913
4914 Node* src_start = array_element_address(src, src_offset, T_CHAR);
4915 Node* dst_start = array_element_address(dst, dst_offset, dst_elem);
4916 // 'src_start' points to src array + scaled offset
4917 // 'dst_start' points to dst array + scaled offset
4918
4919 const TypeAryPtr* mtype = TypeAryPtr::BYTES;
4920 Node* enc = new EncodeISOArrayNode(control(), memory(mtype), src_start, dst_start, length);
4921 enc = _gvn.transform(enc);
4922 Node* res_mem = _gvn.transform(new SCMemProjNode(enc));
4923 set_memory(res_mem, mtype);
4924 set_result(enc);
4925 clear_upper_avx();
4926
4927 return true;
4928 }
4929
4930 //-------------inline_multiplyToLen-----------------------------------
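// BigInteger.multiplyToLen computes z = x * y over arrays of 32-bit limbs,
// most significant limb first, with z sized to xlen + ylen. Worked
// single-limb sketch (M = 0xFFFFFFFFL):
//   long p = (x[0] & M) * (y[0] & M);
//   z[0] = (int)(p >>> 32);  z[1] = (int)p;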
4931 bool LibraryCallKit::inline_multiplyToLen() {
4932 assert(UseMultiplyToLenIntrinsic, "not implemented on this platform");
4933
4934 address stubAddr = StubRoutines::multiplyToLen();
4935 if (stubAddr == NULL) {
4936 return false; // Intrinsic's stub is not implemented on this platform
4937 }
4938 const char* stubName = "multiplyToLen";
4939
4940 assert(callee()->signature()->size() == 5, "multiplyToLen has 5 parameters");
4941
4942 // no receiver because it is a static method
4943 Node* x = argument(0);
4944 Node* xlen = argument(1);
4945 Node* y = argument(2);
4946 Node* ylen = argument(3);
4947 Node* z = argument(4);
4948
4949 x = must_be_not_null(x, true);
4950 y = must_be_not_null(y, true);
4951
4952 const Type* x_type = x->Value(&_gvn);
4953 const Type* y_type = y->Value(&_gvn);
4954 const TypeAryPtr* top_x = x_type->isa_aryptr();
4955 const TypeAryPtr* top_y = y_type->isa_aryptr();
4956 if (top_x == NULL || top_x->klass() == NULL ||
4957 top_y == NULL || top_y->klass() == NULL) {
4958 // failed array check
4959 return false;
4960 }
4961
4962 BasicType x_elem = x_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
4963 BasicType y_elem = y_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
4964 if (x_elem != T_INT || y_elem != T_INT) {
4965 return false;
4966 }
4967
4968 // Set the original stack and the reexecute bit for the interpreter to reexecute
4969 // the bytecode that invokes BigInteger.multiplyToLen() if deoptimization happens
4970 // on the return from z array allocation in runtime.
4971 { PreserveReexecuteState preexecs(this);
4972 jvms()->set_should_reexecute(true);
4973
4974 Node* x_start = array_element_address(x, intcon(0), x_elem);
4975 Node* y_start = array_element_address(y, intcon(0), y_elem);
4976 // 'x_start' points to x array + scaled xlen
4977 // 'y_start' points to y array + scaled ylen
4978
4979 // Allocate the result array
4980 Node* zlen = _gvn.transform(new AddINode(xlen, ylen));
4981 ciKlass* klass = ciTypeArrayKlass::make(T_INT);
4982 Node* klass_node = makecon(TypeKlassPtr::make(klass));
4983
4984 IdealKit ideal(this);
4985
4986 #define __ ideal.
4987 Node* one = __ ConI(1);
4988 Node* zero = __ ConI(0);
4989 IdealVariable need_alloc(ideal), z_alloc(ideal); __ declarations_done();
4990 __ set(need_alloc, zero);
4991 __ set(z_alloc, z);
4992 __ if_then(z, BoolTest::eq, null()); {
4993 __ increment (need_alloc, one);
4994 } __ else_(); {
4995 // Update graphKit memory and control from IdealKit.
4996 sync_kit(ideal);
4997 Node *cast = new CastPPNode(z, TypePtr::NOTNULL);
4998 cast->init_req(0, control());
4999 _gvn.set_type(cast, cast->bottom_type());
5000 C->record_for_igvn(cast);
5001
5002 Node* zlen_arg = load_array_length(cast);
5003 // Update IdealKit memory and control from graphKit.
5004 __ sync_kit(this);
5005 __ if_then(zlen_arg, BoolTest::lt, zlen); {
5006 __ increment (need_alloc, one);
5007 } __ end_if();
5008 } __ end_if();
5009
5010 __ if_then(__ value(need_alloc), BoolTest::ne, zero); {
5011 // Update graphKit memory and control from IdealKit.
5012 sync_kit(ideal);
5013 Node * narr = new_array(klass_node, zlen, 1);
5014 // Update IdealKit memory and control from graphKit.
5015 __ sync_kit(this);
5016 __ set(z_alloc, narr);
5017 } __ end_if();
5018
5019 sync_kit(ideal);
5020 z = __ value(z_alloc);
5021 // Can't use TypeAryPtr::INTS which uses Bottom offset.
5022 _gvn.set_type(z, TypeOopPtr::make_from_klass(klass));
5023 // Final sync IdealKit and GraphKit.
5024 final_sync(ideal);
5025 #undef __
5026
5027 Node* z_start = array_element_address(z, intcon(0), T_INT);
5028
5029 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
5030 OptoRuntime::multiplyToLen_Type(),
5031 stubAddr, stubName, TypePtr::BOTTOM,
5032 x_start, xlen, y_start, ylen, z_start, zlen);
5033 } // original reexecute is set back here
5034
5035 C->set_has_split_ifs(true); // Has chance for split-if optimization
5036 set_result(z);
5037 return true;
5038 }
5039
5040 //-------------inline_squareToLen------------------------------------
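// squareToLen is the special case z = x * x with zlen == 2 * len; the stub
// can exploit the symmetry x[i]*x[j] == x[j]*x[i] to halve the partial
// products (a contract note; the argument checks below mirror multiplyToLen).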
5041 bool LibraryCallKit::inline_squareToLen() {
5042 assert(UseSquareToLenIntrinsic, "not implemented on this platform");
5043
5044 address stubAddr = StubRoutines::squareToLen();
5045 if (stubAddr == NULL) {
5046 return false; // Intrinsic's stub is not implemented on this platform
5047 }
5048 const char* stubName = "squareToLen";
5049
5050 assert(callee()->signature()->size() == 4, "implSquareToLen has 4 parameters");
5051
5052 Node* x = argument(0);
5053 Node* len = argument(1);
5054 Node* z = argument(2);
5055 Node* zlen = argument(3);
5056
5057 x = must_be_not_null(x, true);
5058 z = must_be_not_null(z, true);
5059
5060 const Type* x_type = x->Value(&_gvn);
5061 const Type* z_type = z->Value(&_gvn);
5062 const TypeAryPtr* top_x = x_type->isa_aryptr();
5063 const TypeAryPtr* top_z = z_type->isa_aryptr();
5064 if (top_x == NULL || top_x->klass() == NULL ||
5065 top_z == NULL || top_z->klass() == NULL) {
5066 // failed array check
5067 return false;
5068 }
5069
5070 BasicType x_elem = x_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5071 BasicType z_elem = z_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5072 if (x_elem != T_INT || z_elem != T_INT) {
5073 return false;
5074 }
5075
5076
5077 Node* x_start = array_element_address(x, intcon(0), x_elem);
5078 Node* z_start = array_element_address(z, intcon(0), z_elem);
5079
5080 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
5081 OptoRuntime::squareToLen_Type(),
5082 stubAddr, stubName, TypePtr::BOTTOM,
5083 x_start, len, z_start, zlen);
5084
5085 set_result(z);
5086 return true;
5087 }
5088
5089 //-------------inline_mulAdd------------------------------------------
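// Java-level sketch of BigInteger.mulAdd (offset counts from the end of
// out, which is why new_offset below is computed as outlen - offset):
//   long kL = k & 0xFFFFFFFFL, carry = 0;
//   int  j  = out.length - offset - 1;
//   for (int i = len - 1; i >= 0; i--, j--) {
//     long p = (in[i] & 0xFFFFFFFFL) * kL + (out[j] & 0xFFFFFFFFL) + carry;
//     out[j] = (int) p;  carry = p >>> 32;
//   }
//   return (int) carry;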
5090 bool LibraryCallKit::inline_mulAdd() {
5091 assert(UseMulAddIntrinsic, "not implemented on this platform");
5092
5093 address stubAddr = StubRoutines::mulAdd();
5094 if (stubAddr == NULL) {
5095 return false; // Intrinsic's stub is not implemented on this platform
5096 }
5097 const char* stubName = "mulAdd";
5098
5099 assert(callee()->signature()->size() == 5, "mulAdd has 5 parameters");
5100
5101 Node* out = argument(0);
5102 Node* in = argument(1);
5103 Node* offset = argument(2);
5104 Node* len = argument(3);
5105 Node* k = argument(4);
5106
5107 out = must_be_not_null(out, true);
5108
5109 const Type* out_type = out->Value(&_gvn);
5110 const Type* in_type = in->Value(&_gvn);
5111 const TypeAryPtr* top_out = out_type->isa_aryptr();
5112 const TypeAryPtr* top_in = in_type->isa_aryptr();
5113 if (top_out == NULL || top_out->klass() == NULL ||
5114 top_in == NULL || top_in->klass() == NULL) {
5115 // failed array check
5116 return false;
5117 }
5118
5119 BasicType out_elem = out_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5120 BasicType in_elem = in_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5121 if (out_elem != T_INT || in_elem != T_INT) {
5122 return false;
5123 }
5124
5125 Node* outlen = load_array_length(out);
5126 Node* new_offset = _gvn.transform(new SubINode(outlen, offset));
5127 Node* out_start = array_element_address(out, intcon(0), out_elem);
5128 Node* in_start = array_element_address(in, intcon(0), in_elem);
5129
5130 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
5131 OptoRuntime::mulAdd_Type(),
5132 stubAddr, stubName, TypePtr::BOTTOM,
5133 out_start, in_start, new_offset, len, k);
5134 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5135 set_result(result);
5136 return true;
5137 }
5138
5139 //-------------inline_montgomeryMultiply-----------------------------------
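// Contract sketch: computes the Montgomery product a * b * R^-1 mod n,
// with R = 2^(32*len) and inv the negated modular inverse of n mod 2^64,
// writing the result into m (which the intrinsic below also returns).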
5140 bool LibraryCallKit::inline_montgomeryMultiply() {
5141 address stubAddr = StubRoutines::montgomeryMultiply();
5142 if (stubAddr == NULL) {
5143 return false; // Intrinsic's stub is not implemented on this platform
5144 }
5145
5146 assert(UseMontgomeryMultiplyIntrinsic, "not implemented on this platform");
5147 const char* stubName = "montgomery_multiply";
5148
5149 assert(callee()->signature()->size() == 7, "montgomeryMultiply has 7 parameters");
5150
5151 Node* a = argument(0);
5152 Node* b = argument(1);
5153 Node* n = argument(2);
5154 Node* len = argument(3);
5155 Node* inv = argument(4);
5156 Node* m = argument(6);
5157
5158 const Type* a_type = a->Value(&_gvn);
5159 const TypeAryPtr* top_a = a_type->isa_aryptr();
5160 const Type* b_type = b->Value(&_gvn);
5161 const TypeAryPtr* top_b = b_type->isa_aryptr();
5162 const Type* n_type = n->Value(&_gvn);
5163 const TypeAryPtr* top_n = n_type->isa_aryptr();
5164 const Type* m_type = m->Value(&_gvn);
5165 const TypeAryPtr* top_m = m_type->isa_aryptr();
5166 if (top_a == NULL || top_a->klass() == NULL ||
5167 top_b == NULL || top_b->klass() == NULL ||
5168 top_n == NULL || top_n->klass() == NULL ||
5169 top_m == NULL || top_m->klass() == NULL) {
5170 // failed array check
5171 return false;
5172 }
5173
5174 BasicType a_elem = a_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5175 BasicType b_elem = b_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5176 BasicType n_elem = n_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5177 BasicType m_elem = m_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5178 if (a_elem != T_INT || b_elem != T_INT || n_elem != T_INT || m_elem != T_INT) {
5179 return false;
5180 }
5181
5182 // Make the call
5183 {
5184 Node* a_start = array_element_address(a, intcon(0), a_elem);
5185 Node* b_start = array_element_address(b, intcon(0), b_elem);
5186 Node* n_start = array_element_address(n, intcon(0), n_elem);
5187 Node* m_start = array_element_address(m, intcon(0), m_elem);
5188
5189 Node* call = make_runtime_call(RC_LEAF,
5190 OptoRuntime::montgomeryMultiply_Type(),
5191 stubAddr, stubName, TypePtr::BOTTOM,
5192 a_start, b_start, n_start, len, inv, top(),
5193 m_start);
5194 set_result(m);
5195 }
5196
5197 return true;
5198 }
5199
5200 bool LibraryCallKit::inline_montgomerySquare() {
5201 address stubAddr = StubRoutines::montgomerySquare();
5202 if (stubAddr == NULL) {
5203 return false; // Intrinsic's stub is not implemented on this platform
5204 }
5205
5206 assert(UseMontgomerySquareIntrinsic, "not implemented on this platform");
5207 const char* stubName = "montgomery_square";
5208
5209 assert(callee()->signature()->size() == 6, "montgomerySquare has 5 parameters and one is long");
5210
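// Java argument slots: 'inv' is a long and occupies slots 3 and 4, so the
// product array 'm' is fetched from slot 5; top() again pads the long in
// the runtime call below.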
5211 Node* a = argument(0);
5212 Node* n = argument(1);
5213 Node* len = argument(2);
5214 Node* inv = argument(3);
5215 Node* m = argument(5);
5216
5217 const Type* a_type = a->Value(&_gvn);
5218 const TypeAryPtr* top_a = a_type->isa_aryptr();
5219 const Type* n_type = n->Value(&_gvn);
5220 const TypeAryPtr* top_n = n_type->isa_aryptr();
5221 const Type* m_type = m->Value(&_gvn);
5222 const TypeAryPtr* top_m = m_type->isa_aryptr();
5223 if (top_a == NULL || top_a->klass() == NULL ||
5224 top_n == NULL || top_n->klass() == NULL ||
5225 top_m == NULL || top_m->klass() == NULL) {
5226 // failed array check
5227 return false;
5228 }
5229
5230 BasicType a_elem = a_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5231 BasicType n_elem = n_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5232 BasicType m_elem = m_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5233 if (a_elem != T_INT || n_elem != T_INT || m_elem != T_INT) {
5234 return false;
5235 }
5236
5237 // Make the call
5238 {
5239 Node* a_start = array_element_address(a, intcon(0), a_elem);
5240 Node* n_start = array_element_address(n, intcon(0), n_elem);
5241 Node* m_start = array_element_address(m, intcon(0), m_elem);
5242
5243 Node* call = make_runtime_call(RC_LEAF,
5244 OptoRuntime::montgomerySquare_Type(),
5245 stubAddr, stubName, TypePtr::BOTTOM,
5246 a_start, n_start, len, inv, top(),
5247 m_start);
5248 set_result(m);
5249 }
5250
5251 return true;
5252 }
5253
5254 //-------------inline_vectorizedMismatch------------------------------
5255 bool LibraryCallKit::inline_vectorizedMismatch() {
5256 assert(UseVectorizedMismatchIntrinsic, "not implemented on this platform");
5257
5258 address stubAddr = StubRoutines::vectorizedMismatch();
5259 if (stubAddr == NULL) {
5260 return false; // Intrinsic's stub is not implemented on this platform
5261 }
5262 const char* stubName = "vectorizedMismatch";
5263 assert(callee()->signature()->size() == 8, "vectorizedMismatch has 6 parameters and two are long");
5265
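// Java argument slots: the two long offsets each occupy two slots, giving
// obja(0), aoffset(1,2), objb(3), boffset(4,5), length(6), scale(7).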
5266 Node* obja = argument(0);
5267 Node* aoffset = argument(1);
5268 Node* objb = argument(3);
5269 Node* boffset = argument(4);
5270 Node* length = argument(6);
5271 Node* scale = argument(7);
5272
5273 const Type* a_type = obja->Value(&_gvn);
5274 const Type* b_type = objb->Value(&_gvn);
5275 const TypeAryPtr* top_a = a_type->isa_aryptr();
5276 const TypeAryPtr* top_b = b_type->isa_aryptr();
5277 if (top_a == NULL || top_a->klass() == NULL ||
5278 top_b == NULL || top_b->klass() == NULL) {
5279 // failed array check
5280 return false;
5281 }
5282
5283 Node* call;
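// Mark this bytecode for re-execution: if we deoptimize from here, the
// whole mismatch operation is redone rather than resumed, which is safe
// because the leaf call has no side effects.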
5284 jvms()->set_should_reexecute(true);
5285
5286 Node* obja_adr = make_unsafe_address(obja, aoffset, ACCESS_READ);
5287 Node* objb_adr = make_unsafe_address(objb, boffset, ACCESS_READ);
5288
5289 call = make_runtime_call(RC_LEAF,
5290 OptoRuntime::vectorizedMismatch_Type(),
5291 stubAddr, stubName, TypePtr::BOTTOM,
5292 obja_adr, objb_adr, length, scale);
5293
5294 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5295 set_result(result);
5296 return true;
5297 }
5298
5299 /**
5300 * Calculate CRC32 for byte.
5301 * int java.util.zip.CRC32.update(int crc, int b)
5302 */
5303 bool LibraryCallKit::inline_updateCRC32() {
5304 assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
5305 assert(callee()->signature()->size() == 2, "update has 2 parameters");
5306 // no receiver since it is static method
5307 Node* crc = argument(0); // type: int
5308 Node* b = argument(1); // type: int
5309
5310 /*
5311 * int c = ~ crc;
5312 * b = timesXtoThe32[(b ^ c) & 0xFF];
5313 * b = b ^ (c >>> 8);
5314 * crc = ~b;
5315 */
5316
5317 Node* M1 = intcon(-1);
5318 crc = _gvn.transform(new XorINode(crc, M1));
5319 Node* result = _gvn.transform(new XorINode(crc, b));
5320 result = _gvn.transform(new AndINode(result, intcon(0xFF)));
5321
5322 Node* base = makecon(TypeRawPtr::make(StubRoutines::crc_table_addr()));
5323 Node* offset = _gvn.transform(new LShiftINode(result, intcon(0x2)));
5324 Node* adr = basic_plus_adr(top(), base, ConvI2X(offset));
5325 result = make_load(control(), adr, TypeInt::INT, T_INT, MemNode::unordered);
5326
5327 crc = _gvn.transform(new URShiftINode(crc, intcon(8)));
5328 result = _gvn.transform(new XorINode(crc, result));
5329 result = _gvn.transform(new XorINode(result, M1));
5330 set_result(result);
5331 return true;
5332 }
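/*
 * For reference, a minimal scalar sketch of the per-byte update encoded by
 * the graph above, extended over a buffer (assuming the standard reflected
 * CRC-32 table that StubRoutines::crc_table_addr() points at):
 *
 *   uint32_t c = ~crc;
 *   for (int i = 0; i < len; i++) {
 *     c = table[(c ^ buf[i]) & 0xFF] ^ (c >> 8);
 *   }
 *   return ~c;
 */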
5333
5334 /**
5335 * Calculate CRC32 for byte[] array.
5336 * int java.util.zip.CRC32.updateBytes(int crc, byte[] buf, int off, int len)
5337 */
5338 bool LibraryCallKit::inline_updateBytesCRC32() {
5339 assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
5340 assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters");
5341 // no receiver since it is static method
5342 Node* crc = argument(0); // type: int
5343 Node* src = argument(1); // type: oop
5344 Node* offset = argument(2); // type: int
5345 Node* length = argument(3); // type: int
5346
5347 const Type* src_type = src->Value(&_gvn);
5348 const TypeAryPtr* top_src = src_type->isa_aryptr();
5349 if (top_src == NULL || top_src->klass() == NULL) {
5350 // failed array check
5351 return false;
5352 }
5353
5354 // Figure out the size and type of the elements we will be copying.
5355 BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5356 if (src_elem != T_BYTE) {
5357 return false;
5358 }
5359
5360 // 'src_start' points to src array + scaled offset
5361 src = must_be_not_null(src, true);
5362 Node* src_start = array_element_address(src, offset, src_elem);
5363
5364 // We assume that range check is done by caller.
5365 // TODO: generate range check (offset+length < src.length) in debug VM.
5366
5367 // Call the stub.
5368 address stubAddr = StubRoutines::updateBytesCRC32();
5369 const char *stubName = "updateBytesCRC32";
5370
5371 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
5372 stubAddr, stubName, TypePtr::BOTTOM,
5373 crc, src_start, length);
5374 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5375 set_result(result);
5376 return true;
5377 }
5378
5379 /**
5380 * Calculate CRC32 for ByteBuffer.
5381 * int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
5382 */
5383 bool LibraryCallKit::inline_updateByteBufferCRC32() {
5384 assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
5385 assert(callee()->signature()->size() == 5, "updateByteBuffer has 4 parameters and one is long");
5386 // no receiver since it is static method
5387 Node* crc = argument(0); // type: int
5388 Node* src = argument(1); // type: long
5389 Node* offset = argument(3); // type: int
5390 Node* length = argument(4); // type: int
5391
5392 src = ConvL2X(src); // adjust Java long to machine word
5393 Node* base = _gvn.transform(new CastX2PNode(src));
5394 offset = ConvI2X(offset);
5395
5396 // 'src_start' points to src array + scaled offset
5397 Node* src_start = basic_plus_adr(top(), base, offset);
5398
5399 // Call the stub.
5400 address stubAddr = StubRoutines::updateBytesCRC32();
5401 const char *stubName = "updateBytesCRC32";
5402
5403 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
5404 stubAddr, stubName, TypePtr::BOTTOM,
5405 crc, src_start, length);
5406 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5407 set_result(result);
5408 return true;
5409 }
5410
5411 //------------------------------get_table_from_crc32c_class-----------------------
5412 Node * LibraryCallKit::get_table_from_crc32c_class(ciInstanceKlass *crc32c_class) {
5413 Node* table = load_field_from_object(NULL, "byteTable", "[I", /*is_exact*/ false, /*is_static*/ true, crc32c_class);
5414 assert (table != NULL, "wrong version of java.util.zip.CRC32C");
5415
5416 return table;
5417 }
5418
5419 //------------------------------inline_updateBytesCRC32C-----------------------
5420 //
5421 // Calculate CRC32C for byte[] array.
5422 // int java.util.zip.CRC32C.updateBytes(int crc, byte[] buf, int off, int end)
5423 //
5424 bool LibraryCallKit::inline_updateBytesCRC32C() {
5425 assert(UseCRC32CIntrinsics, "need CRC32C instruction support");
5426 assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters");
5427 assert(callee()->holder()->is_loaded(), "CRC32C class must be loaded");
5428 // no receiver since it is a static method
5429 Node* crc = argument(0); // type: int
5430 Node* src = argument(1); // type: oop
5431 Node* offset = argument(2); // type: int
5432 Node* end = argument(3); // type: int
5433
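// CRC32C.updateBytes receives an end index rather than a length, so derive
// the length here.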
5434 Node* length = _gvn.transform(new SubINode(end, offset));
5435
5436 const Type* src_type = src->Value(&_gvn);
5437 const TypeAryPtr* top_src = src_type->isa_aryptr();
5438 if (top_src == NULL || top_src->klass() == NULL) {
5439 // failed array check
5440 return false;
5441 }
5442
5443 // Figure out the size and type of the elements we will be copying.
5444 BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5445 if (src_elem != T_BYTE) {
5446 return false;
5447 }
5448
5449 // 'src_start' points to src array + scaled offset
5450 src = must_be_not_null(src, true);
5451 Node* src_start = array_element_address(src, offset, src_elem);
5452
5453 // static final int[] byteTable in class CRC32C
5454 Node* table = get_table_from_crc32c_class(callee()->holder());
5455 table = must_be_not_null(table, true);
5456 Node* table_start = array_element_address(table, intcon(0), T_INT);
5457
5458 // We assume that range check is done by caller.
5459 // TODO: generate range check (offset+length < src.length) in debug VM.
5460
5461 // Call the stub.
5462 address stubAddr = StubRoutines::updateBytesCRC32C();
5463 const char *stubName = "updateBytesCRC32C";
5464
5465 Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesCRC32C_Type(),
5466 stubAddr, stubName, TypePtr::BOTTOM,
5467 crc, src_start, length, table_start);
5468 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5469 set_result(result);
5470 return true;
5471 }
5472
5473 //------------------------------inline_updateDirectByteBufferCRC32C-----------------------
5474 //
5475 // Calculate CRC32C for DirectByteBuffer.
5476 // int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long buf, int off, int end)
5477 //
5478 bool LibraryCallKit::inline_updateDirectByteBufferCRC32C() {
5479 assert(UseCRC32CIntrinsics, "need CRC32C instruction support");
5480 assert(callee()->signature()->size() == 5, "updateDirectByteBuffer has 4 parameters and one is long");
5481 assert(callee()->holder()->is_loaded(), "CRC32C class must be loaded");
5482 // no receiver since it is a static method
5483 Node* crc = argument(0); // type: int
5484 Node* src = argument(1); // type: long
5485 Node* offset = argument(3); // type: int
5486 Node* end = argument(4); // type: int
5487
5488 Node* length = _gvn.transform(new SubINode(end, offset));
5489
5490 src = ConvL2X(src); // adjust Java long to machine word
5491 Node* base = _gvn.transform(new CastX2PNode(src));
5492 offset = ConvI2X(offset);
5493
5494 // 'src_start' points to src array + scaled offset
5495 Node* src_start = basic_plus_adr(top(), base, offset);
5496
5497 // static final int[] byteTable in class CRC32C
5498 Node* table = get_table_from_crc32c_class(callee()->holder());
5499 table = must_be_not_null(table, true);
5500 Node* table_start = array_element_address(table, intcon(0), T_INT);
5501
5502 // Call the stub.
5503 address stubAddr = StubRoutines::updateBytesCRC32C();
5504 const char *stubName = "updateBytesCRC32C";
5505
5506 Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesCRC32C_Type(),
5507 stubAddr, stubName, TypePtr::BOTTOM,
5508 crc, src_start, length, table_start);
5509 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5510 set_result(result);
5511 return true;
5512 }
5513
5514 //------------------------------inline_updateBytesAdler32----------------------
5515 //
5516 // Calculate Adler32 checksum for byte[] array.
5517 // int java.util.zip.Adler32.updateBytes(int crc, byte[] buf, int off, int len)
5518 //
5519 bool LibraryCallKit::inline_updateBytesAdler32() {
5520 assert(UseAdler32Intrinsics, "need Adler32 intrinsics support"); // check if we actually need to check this flag or check a different one
5521 assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters");
5522 assert(callee()->holder()->is_loaded(), "Adler32 class must be loaded");
5523 // no receiver since it is static method
5524 Node* crc = argument(0); // type: int
5525 Node* src = argument(1); // type: oop
5526 Node* offset = argument(2); // type: int
5527 Node* length = argument(3); // type: int
5528
5529 const Type* src_type = src->Value(&_gvn);
5530 const TypeAryPtr* top_src = src_type->isa_aryptr();
5531 if (top_src == NULL || top_src->klass() == NULL) {
5532 // failed array check
5533 return false;
5534 }
5535
5536 // Figure out the size and type of the elements we will be copying.
5537 BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5538 if (src_elem != T_BYTE) {
5539 return false;
5540 }
5541
5542 // 'src_start' points to src array + scaled offset
5543 Node* src_start = array_element_address(src, offset, src_elem);
5544
5545 // We assume that range check is done by caller.
5546 // TODO: generate range check (offset+length < src.length) in debug VM.
5547
5548 // Call the stub.
5549 address stubAddr = StubRoutines::updateBytesAdler32();
5550 const char *stubName = "updateBytesAdler32";
5551
5552 Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesAdler32_Type(),
5553 stubAddr, stubName, TypePtr::BOTTOM,
5554 crc, src_start, length);
5555 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5556 set_result(result);
5557 return true;
5558 }
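/*
 * For reference, a scalar sketch of the Adler-32 recurrence (RFC 1950) the
 * stub is expected to compute, with MOD_ADLER = 65521:
 *
 *   uint32_t s1 = crc & 0xFFFF, s2 = (crc >> 16) & 0xFFFF;
 *   for (int i = 0; i < len; i++) {
 *     s1 = (s1 + (buf[i] & 0xFF)) % 65521;
 *     s2 = (s2 + s1) % 65521;
 *   }
 *   return (s2 << 16) | s1;
 */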
5559
5560 //------------------------------inline_updateByteBufferAdler32---------------
5561 //
5562 // Calculate Adler32 checksum for DirectByteBuffer.
5563 // int java.util.zip.Adler32.updateByteBuffer(int crc, long buf, int off, int len)
5564 //
5565 bool LibraryCallKit::inline_updateByteBufferAdler32() {
5566 assert(UseAdler32Intrinsics, "need Adler32 intrinsics support"); // check if we actually need to check this flag or check a different one
5567 assert(callee()->signature()->size() == 5, "updateByteBuffer has 4 parameters and one is long");
5568 assert(callee()->holder()->is_loaded(), "Adler32 class must be loaded");
5569 // no receiver since it is static method
5570 Node* crc = argument(0); // type: int
5571 Node* src = argument(1); // type: long
5572 Node* offset = argument(3); // type: int
5573 Node* length = argument(4); // type: int
5574
5575 src = ConvL2X(src); // adjust Java long to machine word
5576 Node* base = _gvn.transform(new CastX2PNode(src));
5577 offset = ConvI2X(offset);
5578
5579 // 'src_start' points to src array + scaled offset
5580 Node* src_start = basic_plus_adr(top(), base, offset);
5581
5582 // Call the stub.
5583 address stubAddr = StubRoutines::updateBytesAdler32();
5584 const char *stubName = "updateBytesAdler32";
5585
5586 Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesAdler32_Type(),
5587 stubAddr, stubName, TypePtr::BOTTOM,
5588 crc, src_start, length);
5589
5590 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5591 set_result(result);
5592 return true;
5593 }
5594
5595 //----------------------------inline_reference_get----------------------------
5596 // public T java.lang.ref.Reference.get();
5597 bool LibraryCallKit::inline_reference_get() {
5598 const int referent_offset = java_lang_ref_Reference::referent_offset;
5599 guarantee(referent_offset > 0, "should have already been set");
5600
5601 // Get the argument:
5602 Node* reference_obj = null_check_receiver();
5603 if (stopped()) return true;
5604
5605 const TypeInstPtr* tinst = _gvn.type(reference_obj)->isa_instptr();
5606 assert(tinst != NULL, "obj is null");
5607 assert(tinst->klass()->is_loaded(), "obj is not loaded");
5608 ciInstanceKlass* referenceKlass = tinst->klass()->as_instance_klass();
5609 ciField* field = referenceKlass->get_field_by_name(ciSymbol::make("referent"),
5610 ciSymbol::make("Ljava/lang/Object;"),
5611 false);
5612 assert (field != NULL, "undefined field");
5613
5614 Node* adr = basic_plus_adr(reference_obj, reference_obj, referent_offset);
5615 const TypePtr* adr_type = C->alias_type(field)->adr_type();
5616
5617 ciInstanceKlass* klass = env()->Object_klass();
5618 const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);
5619
5620 DecoratorSet decorators = IN_HEAP | ON_WEAK_OOP_REF;
5621 Node* result = access_load_at(reference_obj, adr, adr_type, object_type, T_OBJECT, decorators);
5622 // Add memory barrier to prevent commoning reads from this field
5623 // across safepoint since GC can change its value.
5624 insert_mem_bar(Op_MemBarCPUOrder);
5625
5626 set_result(result);
5627 return true;
5628 }
5629
5630
5631 Node * LibraryCallKit::load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
5632 bool is_exact=true, bool is_static=false,
5633 ciInstanceKlass * fromKls=NULL) {
5634 if (fromKls == NULL) {
5635 const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
5636 assert(tinst != NULL, "obj is null");
5637 assert(tinst->klass()->is_loaded(), "obj is not loaded");
5638 assert(!is_exact || tinst->klass_is_exact(), "klass not exact");
5639 fromKls = tinst->klass()->as_instance_klass();
5640 } else {
5641 assert(is_static, "only for static field access");
5642 }
5643 ciField* field = fromKls->get_field_by_name(ciSymbol::make(fieldName),
5644 ciSymbol::make(fieldTypeString),
5645 is_static);
5646
5647 assert (field != NULL, "undefined field");
5648 if (field == NULL) return (Node *) NULL;
5649
5650 if (is_static) {
5651 const TypeInstPtr* tip = TypeInstPtr::make(fromKls->java_mirror());
5652 fromObj = makecon(tip);
5653 }
5654
5655 // Next code copied from Parse::do_get_xxx():
5656
5657 // Compute address and memory type.
5658 int offset = field->offset_in_bytes();
5659 bool is_vol = field->is_volatile();
5660 ciType* field_klass = field->type();
5661 assert(field_klass->is_loaded(), "should be loaded");
5662 const TypePtr* adr_type = C->alias_type(field)->adr_type();
5663 Node *adr = basic_plus_adr(fromObj, fromObj, offset);
5664 BasicType bt = field->layout_type();
5665
5666 // Build the resultant type of the load
5667 const Type *type;
5668 if (bt == T_OBJECT) {
5669 type = TypeOopPtr::make_from_klass(field_klass->as_klass());
5670 } else {
5671 type = Type::get_const_basic_type(bt);
5672 }
5673
5674 DecoratorSet decorators = IN_HEAP;
5675
5676 if (is_vol) {
5677 decorators |= MO_SEQ_CST;
5678 }
5679
5680 return access_load_at(fromObj, adr, adr_type, type, bt, decorators);
5681 }
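// Illustrative use, mirroring the crypto intrinsics below:
//   Node* key = load_field_from_object(aescrypt_object, "K", "[I",
//                                      /*is_exact*/ false);
// loads the int[] expanded-key field of an AESCrypt instance and returns
// NULL if the field cannot be resolved.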
5682
5683 Node * LibraryCallKit::field_address_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
5684 bool is_exact = true, bool is_static = false,
5685 ciInstanceKlass * fromKls = NULL) {
5686 if (fromKls == NULL) {
5687 const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
5688 assert(tinst != NULL, "obj is null");
5689 assert(tinst->klass()->is_loaded(), "obj is not loaded");
5690 assert(!is_exact || tinst->klass_is_exact(), "klass not exact");
5691 fromKls = tinst->klass()->as_instance_klass();
5692 }
5693 else {
5694 assert(is_static, "only for static field access");
5695 }
5696 ciField* field = fromKls->get_field_by_name(ciSymbol::make(fieldName),
5697 ciSymbol::make(fieldTypeString),
5698 is_static);
5699
5700 assert(field != NULL, "undefined field");
5701 assert(!field->is_volatile(), "not defined for volatile fields");
5702
5703 if (is_static) {
5704 const TypeInstPtr* tip = TypeInstPtr::make(fromKls->java_mirror());
5705 fromObj = makecon(tip);
5706 }
5707
5708 // Next code copied from Parse::do_get_xxx():
5709
5710 // Compute address and memory type.
5711 int offset = field->offset_in_bytes();
5712 Node *adr = basic_plus_adr(fromObj, fromObj, offset);
5713
5714 return adr;
5715 }
5716
5717 //------------------------------inline_aescrypt_Block-----------------------
5718 bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) {
5719 address stubAddr = NULL;
5720 const char *stubName;
5721 assert(UseAES, "need AES instruction support");
5722
5723 switch(id) {
5724 case vmIntrinsics::_aescrypt_encryptBlock:
5725 stubAddr = StubRoutines::aescrypt_encryptBlock();
5726 stubName = "aescrypt_encryptBlock";
5727 break;
5728 case vmIntrinsics::_aescrypt_decryptBlock:
5729 stubAddr = StubRoutines::aescrypt_decryptBlock();
5730 stubName = "aescrypt_decryptBlock";
5731 break;
5732 default:
5733 break;
5734 }
5735 if (stubAddr == NULL) return false;
5736
5737 Node* aescrypt_object = argument(0);
5738 Node* src = argument(1);
5739 Node* src_offset = argument(2);
5740 Node* dest = argument(3);
5741 Node* dest_offset = argument(4);
5742
5743 src = must_be_not_null(src, true);
5744 dest = must_be_not_null(dest, true);
5745
5746 // (1) src and dest are arrays.
5747 const Type* src_type = src->Value(&_gvn);
5748 const Type* dest_type = dest->Value(&_gvn);
5749 const TypeAryPtr* top_src = src_type->isa_aryptr();
5750 const TypeAryPtr* top_dest = dest_type->isa_aryptr();
5751 assert (top_src != NULL && top_src->klass() != NULL && top_dest != NULL && top_dest->klass() != NULL, "args are strange");
5752
5753 // for the quick and dirty code we will skip all the checks.
5754 // we are just trying to get the call to be generated.
5755 Node* src_start = src;
5756 Node* dest_start = dest;
5757 if (src_offset != NULL || dest_offset != NULL) {
5758 assert(src_offset != NULL && dest_offset != NULL, "");
5759 src_start = array_element_address(src, src_offset, T_BYTE);
5760 dest_start = array_element_address(dest, dest_offset, T_BYTE);
5761 }
5762
5763 // now need to get the start of its expanded key array
5764 // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
5765 Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
5766 if (k_start == NULL) return false;
5767
5768 if (Matcher::pass_original_key_for_aes()) {
5769 // on SPARC we need to pass the original key since key expansion needs to happen in intrinsics due to
5770 // compatibility issues between Java key expansion and SPARC crypto instructions
5771 Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object);
5772 if (original_k_start == NULL) return false;
5773
5774 // Call the stub.
5775 make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
5776 stubAddr, stubName, TypePtr::BOTTOM,
5777 src_start, dest_start, k_start, original_k_start);
5778 } else {
5779 // Call the stub.
5780 make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
5781 stubAddr, stubName, TypePtr::BOTTOM,
5782 src_start, dest_start, k_start);
5783 }
5784
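// The Java block en/decrypt methods return void, so the call above is kept
// only for its side effect on dest and no result projection is needed.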
5785 return true;
5786 }
5787
5788 //------------------------------inline_cipherBlockChaining_AESCrypt-----------------------
5789 bool LibraryCallKit::inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id) {
5790 address stubAddr = NULL;
5791 const char *stubName = NULL;
5792
5793 assert(UseAES, "need AES instruction support");
5794
5795 switch(id) {
5796 case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
5797 stubAddr = StubRoutines::cipherBlockChaining_encryptAESCrypt();
5798 stubName = "cipherBlockChaining_encryptAESCrypt";
5799 break;
5800 case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
5801 stubAddr = StubRoutines::cipherBlockChaining_decryptAESCrypt();
5802 stubName = "cipherBlockChaining_decryptAESCrypt";
5803 break;
5804 default:
5805 break;
5806 }
5807 if (stubAddr == NULL) return false;
5808
5809 Node* cipherBlockChaining_object = argument(0);
5810 Node* src = argument(1);
5811 Node* src_offset = argument(2);
5812 Node* len = argument(3);
5813 Node* dest = argument(4);
5814 Node* dest_offset = argument(5);
5815
5816 src = must_be_not_null(src, false);
5817 dest = must_be_not_null(dest, false);
5818
5819 // (1) src and dest are arrays.
5820 const Type* src_type = src->Value(&_gvn);
5821 const Type* dest_type = dest->Value(&_gvn);
5822 const TypeAryPtr* top_src = src_type->isa_aryptr();
5823 const TypeAryPtr* top_dest = dest_type->isa_aryptr();
5824 assert (top_src != NULL && top_src->klass() != NULL
5825 && top_dest != NULL && top_dest->klass() != NULL, "args are strange");
5826
5827 // checks are the responsibility of the caller
5828 Node* src_start = src;
5829 Node* dest_start = dest;
5830 if (src_offset != NULL || dest_offset != NULL) {
5831 assert(src_offset != NULL && dest_offset != NULL, "");
5832 src_start = array_element_address(src, src_offset, T_BYTE);
5833 dest_start = array_element_address(dest, dest_offset, T_BYTE);
5834 }
5835
5836 // if we are in this set of code, we "know" the embeddedCipher is an AESCrypt object
5837 // (because of the predicated logic executed earlier).
5838 // so we cast it here safely.
5839 // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
5840
5841 Node* embeddedCipherObj = load_field_from_object(cipherBlockChaining_object, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false);
5842 if (embeddedCipherObj == NULL) return false;
5843
5844 // cast it to what we know it will be at runtime
5845 const TypeInstPtr* tinst = _gvn.type(cipherBlockChaining_object)->isa_instptr();
5846 assert(tinst != NULL, "CBC obj is null");
5847 assert(tinst->klass()->is_loaded(), "CBC obj is not loaded");
5848 ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
5849 assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
5850
5851 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
5852 const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
5853 const TypeOopPtr* xtype = aklass->as_instance_type();
5854 Node* aescrypt_object = new CheckCastPPNode(control(), embeddedCipherObj, xtype);
5855 aescrypt_object = _gvn.transform(aescrypt_object);
5856
5857 // we need to get the start of the aescrypt_object's expanded key array
5858 Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
5859 if (k_start == NULL) return false;
5860
5861 // similarly, get the start address of the r vector
5862 Node* objRvec = load_field_from_object(cipherBlockChaining_object, "r", "[B", /*is_exact*/ false);
5863 if (objRvec == NULL) return false;
5864 Node* r_start = array_element_address(objRvec, intcon(0), T_BYTE);
5865
5866 Node* cbcCrypt;
5867 if (Matcher::pass_original_key_for_aes()) {
5868 // on SPARC we need to pass the original key since key expansion needs to happen in intrinsics due to
5869 // compatibility issues between Java key expansion and SPARC crypto instructions
5870 Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object);
5871 if (original_k_start == NULL) return false;
5872
5873 // Call the stub, passing src_start, dest_start, k_start, r_start, src_len and original_k_start
5874 cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
5875 OptoRuntime::cipherBlockChaining_aescrypt_Type(),
5876 stubAddr, stubName, TypePtr::BOTTOM,
5877 src_start, dest_start, k_start, r_start, len, original_k_start);
5878 } else {
5879 // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
5880 cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
5881 OptoRuntime::cipherBlockChaining_aescrypt_Type(),
5882 stubAddr, stubName, TypePtr::BOTTOM,
5883 src_start, dest_start, k_start, r_start, len);
5884 }
5885
5886 // return cipher length (int)
5887 Node* retvalue = _gvn.transform(new ProjNode(cbcCrypt, TypeFunc::Parms));
5888 set_result(retvalue);
5889 return true;
5890 }
5891
5892 //------------------------------inline_electronicCodeBook_AESCrypt-----------------------
5893 bool LibraryCallKit::inline_electronicCodeBook_AESCrypt(vmIntrinsics::ID id) {
5894 address stubAddr = NULL;
5895 const char *stubName = NULL;
5896
5897 assert(UseAES, "need AES instruction support");
5898
5899 switch (id) {
5900 case vmIntrinsics::_electronicCodeBook_encryptAESCrypt:
5901 stubAddr = StubRoutines::electronicCodeBook_encryptAESCrypt();
5902 stubName = "electronicCodeBook_encryptAESCrypt";
5903 break;
5904 case vmIntrinsics::_electronicCodeBook_decryptAESCrypt:
5905 stubAddr = StubRoutines::electronicCodeBook_decryptAESCrypt();
5906 stubName = "electronicCodeBook_decryptAESCrypt";
5907 break;
5908 default:
5909 break;
5910 }
5911
5912 if (stubAddr == NULL) return false;
5913
5914 Node* electronicCodeBook_object = argument(0);
5915 Node* src = argument(1);
5916 Node* src_offset = argument(2);
5917 Node* len = argument(3);
5918 Node* dest = argument(4);
5919 Node* dest_offset = argument(5);
5920
5921 // (1) src and dest are arrays.
5922 const Type* src_type = src->Value(&_gvn);
5923 const Type* dest_type = dest->Value(&_gvn);
5924 const TypeAryPtr* top_src = src_type->isa_aryptr();
5925 const TypeAryPtr* top_dest = dest_type->isa_aryptr();
5926 assert(top_src != NULL && top_src->klass() != NULL
5927 && top_dest != NULL && top_dest->klass() != NULL, "args are strange");
5928
5929 // checks are the responsibility of the caller
5930 Node* src_start = src;
5931 Node* dest_start = dest;
5932 if (src_offset != NULL || dest_offset != NULL) {
5933 assert(src_offset != NULL && dest_offset != NULL, "");
5934 src_start = array_element_address(src, src_offset, T_BYTE);
5935 dest_start = array_element_address(dest, dest_offset, T_BYTE);
5936 }
5937
5938 // if we are in this set of code, we "know" the embeddedCipher is an AESCrypt object
5939 // (because of the predicated logic executed earlier).
5940 // so we cast it here safely.
5941 // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
5942
5943 Node* embeddedCipherObj = load_field_from_object(electronicCodeBook_object, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false);
5944 if (embeddedCipherObj == NULL) return false;
5945
5946 // cast it to what we know it will be at runtime
5947 const TypeInstPtr* tinst = _gvn.type(electronicCodeBook_object)->isa_instptr();
5948 assert(tinst != NULL, "ECB obj is null");
5949 assert(tinst->klass()->is_loaded(), "ECB obj is not loaded");
5950 ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
5951 assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
5952
5953 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
5954 const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
5955 const TypeOopPtr* xtype = aklass->as_instance_type();
5956 Node* aescrypt_object = new CheckCastPPNode(control(), embeddedCipherObj, xtype);
5957 aescrypt_object = _gvn.transform(aescrypt_object);
5958
5959 // we need to get the start of the aescrypt_object's expanded key array
5960 Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
5961 if (k_start == NULL) return false;
5962
5963 Node* ecbCrypt;
5964 if (Matcher::pass_original_key_for_aes()) {
5965 // no SPARC version for AES/ECB intrinsics now.
5966 return false;
5967 }
5968 // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
5969 ecbCrypt = make_runtime_call(RC_LEAF | RC_NO_FP,
5970 OptoRuntime::electronicCodeBook_aescrypt_Type(),
5971 stubAddr, stubName, TypePtr::BOTTOM,
5972 src_start, dest_start, k_start, len);
5973
5974 // return cipher length (int)
5975 Node* retvalue = _gvn.transform(new ProjNode(ecbCrypt, TypeFunc::Parms));
5976 set_result(retvalue);
5977 return true;
5978 }
5979
5980 //------------------------------inline_counterMode_AESCrypt-----------------------
5981 bool LibraryCallKit::inline_counterMode_AESCrypt(vmIntrinsics::ID id) {
5982 assert(UseAES, "need AES instruction support");
5983 if (!UseAESCTRIntrinsics) return false;
5984
5985 address stubAddr = NULL;
5986 const char *stubName = NULL;
5987 if (id == vmIntrinsics::_counterMode_AESCrypt) {
5988 stubAddr = StubRoutines::counterMode_AESCrypt();
5989 stubName = "counterMode_AESCrypt";
5990 }
5991 if (stubAddr == NULL) return false;
5992
5993 Node* counterMode_object = argument(0);
5994 Node* src = argument(1);
5995 Node* src_offset = argument(2);
5996 Node* len = argument(3);
5997 Node* dest = argument(4);
5998 Node* dest_offset = argument(5);
5999
6000 // (1) src and dest are arrays.
6001 const Type* src_type = src->Value(&_gvn);
6002 const Type* dest_type = dest->Value(&_gvn);
6003 const TypeAryPtr* top_src = src_type->isa_aryptr();
6004 const TypeAryPtr* top_dest = dest_type->isa_aryptr();
6005 assert(top_src != NULL && top_src->klass() != NULL &&
6006 top_dest != NULL && top_dest->klass() != NULL, "args are strange");
6007
6008 // checks are the responsibility of the caller
6009 Node* src_start = src;
6010 Node* dest_start = dest;
6011 if (src_offset != NULL || dest_offset != NULL) {
6012 assert(src_offset != NULL && dest_offset != NULL, "");
6013 src_start = array_element_address(src, src_offset, T_BYTE);
6014 dest_start = array_element_address(dest, dest_offset, T_BYTE);
6015 }
6016
6017 // if we are in this set of code, we "know" the embeddedCipher is an AESCrypt object
6018 // (because of the predicated logic executed earlier).
6019 // so we cast it here safely.
6020 // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
6021 Node* embeddedCipherObj = load_field_from_object(counterMode_object, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false);
6022 if (embeddedCipherObj == NULL) return false;
6023 // cast it to what we know it will be at runtime
6024 const TypeInstPtr* tinst = _gvn.type(counterMode_object)->isa_instptr();
6025 assert(tinst != NULL, "CTR obj is null");
6026 assert(tinst->klass()->is_loaded(), "CTR obj is not loaded");
6027 ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
6028 assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
6029 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
6030 const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
6031 const TypeOopPtr* xtype = aklass->as_instance_type();
6032 Node* aescrypt_object = new CheckCastPPNode(control(), embeddedCipherObj, xtype);
6033 aescrypt_object = _gvn.transform(aescrypt_object);
6034 // we need to get the start of the aescrypt_object's expanded key array
6035 Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
6036 if (k_start == NULL) return false;
6037 // similarly, get the start address of the r vector
6038 Node* obj_counter = load_field_from_object(counterMode_object, "counter", "[B", /*is_exact*/ false);
6039 if (obj_counter == NULL) return false;
6040 Node* cnt_start = array_element_address(obj_counter, intcon(0), T_BYTE);
6041
6042 Node* saved_encCounter = load_field_from_object(counterMode_object, "encryptedCounter", "[B", /*is_exact*/ false);
6043 if (saved_encCounter == NULL) return false;
6044 Node* saved_encCounter_start = array_element_address(saved_encCounter, intcon(0), T_BYTE);
6045 Node* used = field_address_from_object(counterMode_object, "used", "I", /*is_exact*/ false);
6046
6047 Node* ctrCrypt;
6048 if (Matcher::pass_original_key_for_aes()) {
6049 // no SPARC version for AES/CTR intrinsics now.
6050 return false;
6051 }
6052 // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
6053 ctrCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
6054 OptoRuntime::counterMode_aescrypt_Type(),
6055 stubAddr, stubName, TypePtr::BOTTOM,
6056 src_start, dest_start, k_start, cnt_start, len, saved_encCounter_start, used);
6057
6058 // return cipher length (int)
6059 Node* retvalue = _gvn.transform(new ProjNode(ctrCrypt, TypeFunc::Parms));
6060 set_result(retvalue);
6061 return true;
6062 }
6063
6064 //------------------------------get_key_start_from_aescrypt_object-----------------------
6065 Node * LibraryCallKit::get_key_start_from_aescrypt_object(Node *aescrypt_object) {
6066 #if defined(PPC64) || defined(S390)
6067 // MixColumns for decryption can be reduced by preprocessing MixColumns with round keys.
6068 // Intel's extension is based on this optimization and AESCrypt generates round keys by preprocessing MixColumns.
6069 // However, ppc64 vncipher processes MixColumns and requires the same round keys as encryption.
6070 // The ppc64 stubs of encryption and decryption use the same round keys (sessionK[0]).
6071 Node* objSessionK = load_field_from_object(aescrypt_object, "sessionK", "[[I", /*is_exact*/ false);
6072 assert (objSessionK != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
6073 if (objSessionK == NULL) {
6074 return (Node *) NULL;
6075 }
6076 Node* objAESCryptKey = load_array_element(control(), objSessionK, intcon(0), TypeAryPtr::OOPS);
6077 #else
6078 Node* objAESCryptKey = load_field_from_object(aescrypt_object, "K", "[I", /*is_exact*/ false);
6079 #endif // PPC64 || S390
6080 assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
6081 if (objAESCryptKey == NULL) return (Node *) NULL;
6082
6083 // now have the array, need to get the start address of the K array
6084 Node* k_start = array_element_address(objAESCryptKey, intcon(0), T_INT);
6085 return k_start;
6086 }
6087
6088 //------------------------------get_original_key_start_from_aescrypt_object-----------------------
6089 Node * LibraryCallKit::get_original_key_start_from_aescrypt_object(Node *aescrypt_object) {
6090 Node* objAESCryptKey = load_field_from_object(aescrypt_object, "lastKey", "[B", /*is_exact*/ false);
6091 assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
6092 if (objAESCryptKey == NULL) return (Node *) NULL;
6093
6094 // now have the array, need to get the start address of the lastKey array
6095 Node* original_k_start = array_element_address(objAESCryptKey, intcon(0), T_BYTE);
6096 return original_k_start;
6097 }
6098
6099 //----------------------------inline_cipherBlockChaining_AESCrypt_predicate----------------------------
6100 // Return node representing slow path of predicate check.
6101 // the pseudo code we want to emulate with this predicate is:
6102 // for encryption:
6103 // if (embeddedCipherObj instanceof AESCrypt) do_intrinsic, else do_javapath
6104 // for decryption:
6105 // if ((embeddedCipherObj instanceof AESCrypt) && (cipher!=plain)) do_intrinsic, else do_javapath
6106 // note cipher==plain is more conservative than the original java code but that's OK
6107 //
6108 Node* LibraryCallKit::inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting) {
6109 // The receiver was checked for NULL already.
6110 Node* objCBC = argument(0);
6111
6112 Node* src = argument(1);
6113 Node* dest = argument(4);
6114
6115 // Load embeddedCipher field of CipherBlockChaining object.
6116 Node* embeddedCipherObj = load_field_from_object(objCBC, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false);
6117
6118 // get AESCrypt klass for instanceOf check
6119 // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point
6120 // will have same classloader as CipherBlockChaining object
6121 const TypeInstPtr* tinst = _gvn.type(objCBC)->isa_instptr();
6122 assert(tinst != NULL, "CBCobj is null");
6123 assert(tinst->klass()->is_loaded(), "CBCobj is not loaded");
6124
6125 // we want to do an instanceof comparison against the AESCrypt class
6126 ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
6127 if (!klass_AESCrypt->is_loaded()) {
6128 // if AESCrypt is not even loaded, we never take the intrinsic fast path
6129 Node* ctrl = control();
6130 set_control(top()); // no regular fast path
6131 return ctrl;
6132 }
6133
6134 src = must_be_not_null(src, true);
6135 dest = must_be_not_null(dest, true);
6136
6137 // Resolve oops to stable for CmpP below.
6138 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
6139
6140 Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
6141 Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1)));
6142 Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
6143
6144 Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN);
6145
6146 // for encryption, we are done
6147 if (!decrypting)
6148 return instof_false; // even if it is NULL
6149
6150 // for decryption, we need to add a further check to avoid
6151 // taking the intrinsic path when cipher and plain are the same
6152 // see the original java code for why.
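// Merge the two reasons for taking the slow path: the embedded cipher is
// not an AESCrypt, or src and dest alias.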
6153 RegionNode* region = new RegionNode(3);
6154 region->init_req(1, instof_false);
6155
6156 Node* cmp_src_dest = _gvn.transform(new CmpPNode(src, dest));
6157 Node* bool_src_dest = _gvn.transform(new BoolNode(cmp_src_dest, BoolTest::eq));
6158 Node* src_dest_conjoint = generate_guard(bool_src_dest, NULL, PROB_MIN);
6159 region->init_req(2, src_dest_conjoint);
6160
6161 record_for_igvn(region);
6162 return _gvn.transform(region);
6163 }
6164
6165 //----------------------------inline_electronicCodeBook_AESCrypt_predicate----------------------------
6166 // Return node representing slow path of predicate check.
6167 // the pseudo code we want to emulate with this predicate is:
6168 // for encryption:
6169 // if (embeddedCipherObj instanceof AESCrypt) do_intrinsic, else do_javapath
6170 // for decryption:
6171 // if ((embeddedCipherObj instanceof AESCrypt) && (cipher!=plain)) do_intrinsic, else do_javapath
6172 // note cipher==plain is more conservative than the original java code but that's OK
6173 //
6174 Node* LibraryCallKit::inline_electronicCodeBook_AESCrypt_predicate(bool decrypting) {
6175 // The receiver was checked for NULL already.
6176 Node* objECB = argument(0);
6177
6178 // Load embeddedCipher field of ElectronicCodeBook object.
6179 Node* embeddedCipherObj = load_field_from_object(objECB, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false);
6180
6181 // get AESCrypt klass for instanceOf check
6182 // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point
6183 // will have same classloader as ElectronicCodeBook object
6184 const TypeInstPtr* tinst = _gvn.type(objECB)->isa_instptr();
6185 assert(tinst != NULL, "ECBobj is null");
6186 assert(tinst->klass()->is_loaded(), "ECBobj is not loaded");
6187
6188 // we want to do an instanceof comparison against the AESCrypt class
6189 ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
6190 if (!klass_AESCrypt->is_loaded()) {
6191 // if AESCrypt is not even loaded, we never take the intrinsic fast path
6192 Node* ctrl = control();
6193 set_control(top()); // no regular fast path
6194 return ctrl;
6195 }
6196 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
6197
6198 Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
6199 Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1)));
6200 Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
6201
6202 Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN);
6203
6204 // for encryption, we are done
6205 if (!decrypting)
6206 return instof_false; // even if it is NULL
6207
6208 // for decryption, we need to add a further check to avoid
6209 // taking the intrinsic path when cipher and plain are the same
6210 // see the original java code for why.
6211 RegionNode* region = new RegionNode(3);
6212 region->init_req(1, instof_false);
6213 Node* src = argument(1);
6214 Node* dest = argument(4);
6215 Node* cmp_src_dest = _gvn.transform(new CmpPNode(src, dest));
6216 Node* bool_src_dest = _gvn.transform(new BoolNode(cmp_src_dest, BoolTest::eq));
6217 Node* src_dest_conjoint = generate_guard(bool_src_dest, NULL, PROB_MIN);
6218 region->init_req(2, src_dest_conjoint);
6219
6220 record_for_igvn(region);
6221 return _gvn.transform(region);
6222 }
6223
6224 //----------------------------inline_counterMode_AESCrypt_predicate----------------------------
6225 // Return node representing slow path of predicate check.
6226 // the pseudo code we want to emulate with this predicate is:
6227 // if (embeddedCipherObj instanceof AESCrypt) do_intrinsic, else do_javapath
6228 // CTR mode uses the same transformation for encryption and decryption, so
6229 // unlike CBC there is no additional cipher != plain check for decryption.
6232 //
6233
6234 Node* LibraryCallKit::inline_counterMode_AESCrypt_predicate() {
6235 // The receiver was checked for NULL already.
6236 Node* objCTR = argument(0);
6237
6238 // Load embeddedCipher field of CipherBlockChaining object.
6239 Node* embeddedCipherObj = load_field_from_object(objCTR, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false);
6240
6241 // get AESCrypt klass for instanceOf check
6242 // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point
6243 // will have same classloader as CipherBlockChaining object
6244 const TypeInstPtr* tinst = _gvn.type(objCTR)->isa_instptr();
6245 assert(tinst != NULL, "CTRobj is null");
6246 assert(tinst->klass()->is_loaded(), "CTRobj is not loaded");
6247
6248 // we want to do an instanceof comparison against the AESCrypt class
6249 ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
6250 if (!klass_AESCrypt->is_loaded()) {
6251 // if AESCrypt is not even loaded, we never take the intrinsic fast path
6252 Node* ctrl = control();
6253 set_control(top()); // no regular fast path
6254 return ctrl;
6255 }
6256
6257 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
6258 Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
6259 Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1)));
6260 Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
6261 Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN);
6262
6263 return instof_false; // even if it is NULL
6264 }
6265
6266 //------------------------------inline_ghash_processBlocks
6267 bool LibraryCallKit::inline_ghash_processBlocks() {
6268 address stubAddr;
6269 const char *stubName;
6270 assert(UseGHASHIntrinsics, "need GHASH intrinsics support");
6271
6272 stubAddr = StubRoutines::ghash_processBlocks();
6273 stubName = "ghash_processBlocks";
6274
6275 Node* data = argument(0);
6276 Node* offset = argument(1);
6277 Node* len = argument(2);
6278 Node* state = argument(3);
6279 Node* subkeyH = argument(4);
6280
6281 state = must_be_not_null(state, true);
6282 subkeyH = must_be_not_null(subkeyH, true);
6283 data = must_be_not_null(data, true);
6284
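// state and subkeyH are long[] arrays holding the 128-bit GHASH state and
// hash subkey H (an assumption based on the T_LONG element accesses below).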
6285 Node* state_start = array_element_address(state, intcon(0), T_LONG);
6286 assert(state_start, "state is NULL");
6287 Node* subkeyH_start = array_element_address(subkeyH, intcon(0), T_LONG);
6288 assert(subkeyH_start, "subkeyH is NULL");
6289 Node* data_start = array_element_address(data, offset, T_BYTE);
6290 assert(data_start, "data is NULL");
6291
6292 Node* ghash = make_runtime_call(RC_LEAF|RC_NO_FP,
6293 OptoRuntime::ghash_processBlocks_Type(),
6294 stubAddr, stubName, TypePtr::BOTTOM,
6295 state_start, subkeyH_start, data_start, len);
6296 return true;
6297 }
6298
6299 bool LibraryCallKit::inline_base64_encodeBlock() {
6300 address stubAddr;
6301 const char *stubName;
6302 assert(UseBASE64Intrinsics, "need Base64 intrinsics support");
6303 assert(callee()->signature()->size() == 6, "base64_encodeBlock has 6 parameters");
6304 stubAddr = StubRoutines::base64_encodeBlock();
6305 stubName = "encodeBlock";
6306
6307 if (stubAddr == NULL) return false;
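// encodeBlock is an instance method: slot 0 holds the Base64.Encoder
// receiver and the six declared parameters occupy slots 1..6.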
6308 Node* base64obj = argument(0);
6309 Node* src = argument(1);
6310 Node* offset = argument(2);
6311 Node* len = argument(3);
6312 Node* dest = argument(4);
6313 Node* dp = argument(5);
6314 Node* isURL = argument(6);
6315
6316 src = must_be_not_null(src, true);
6317 dest = must_be_not_null(dest, true);
6318
6319 Node* src_start = array_element_address(src, intcon(0), T_BYTE);
6320 assert(src_start, "source array is NULL");
6321 Node* dest_start = array_element_address(dest, intcon(0), T_BYTE);
6322 assert(dest_start, "destination array is NULL");
6323
6324 Node* base64 = make_runtime_call(RC_LEAF,
6325 OptoRuntime::base64_encodeBlock_Type(),
6326 stubAddr, stubName, TypePtr::BOTTOM,
6327 src_start, offset, len, dest_start, dp, isURL);
6328 return true;
6329 }
6330
6331 //------------------------------inline_sha_implCompress-----------------------
6332 //
6333 // Calculate SHA (i.e., SHA-1) for single-block byte[] array.
6334 // void com.sun.security.provider.SHA.implCompress(byte[] buf, int ofs)
6335 //
6336 // Calculate SHA2 (i.e., SHA-224 or SHA-256) for single-block byte[] array.
6337 // void com.sun.security.provider.SHA2.implCompress(byte[] buf, int ofs)
6338 //
6339 // Calculate SHA5 (i.e., SHA-384 or SHA-512) for single-block byte[] array.
6340 // void com.sun.security.provider.SHA5.implCompress(byte[] buf, int ofs)
6341 //
6342 bool LibraryCallKit::inline_sha_implCompress(vmIntrinsics::ID id) {
6343 assert(callee()->signature()->size() == 2, "sha_implCompress has 2 parameters");
6344
6345 Node* sha_obj = argument(0);
6346 Node* src = argument(1); // type oop
6347 Node* ofs = argument(2); // type int
6348
6349 const Type* src_type = src->Value(&_gvn);
6350 const TypeAryPtr* top_src = src_type->isa_aryptr();
6351 if (top_src == NULL || top_src->klass() == NULL) {
6352 // failed array check
6353 return false;
6354 }
6355 // Figure out the size and type of the elements we will be copying.
6356 BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
6357 if (src_elem != T_BYTE) {
6358 return false;
6359 }
6360 // 'src_start' points to src array + offset
6361 src = must_be_not_null(src, true);
6362 Node* src_start = array_element_address(src, ofs, src_elem);
6363 Node* state = NULL;
6364 address stubAddr;
6365 const char *stubName;
6366
6367 switch(id) {
6368 case vmIntrinsics::_sha_implCompress:
6369 assert(UseSHA1Intrinsics, "need SHA1 instruction support");
6370 state = get_state_from_sha_object(sha_obj);
6371 stubAddr = StubRoutines::sha1_implCompress();
6372 stubName = "sha1_implCompress";
6373 break;
6374 case vmIntrinsics::_sha2_implCompress:
6375 assert(UseSHA256Intrinsics, "need SHA256 instruction support");
6376 state = get_state_from_sha_object(sha_obj);
6377 stubAddr = StubRoutines::sha256_implCompress();
6378 stubName = "sha256_implCompress";
6379 break;
6380 case vmIntrinsics::_sha5_implCompress:
6381 assert(UseSHA512Intrinsics, "need SHA512 instruction support");
6382 state = get_state_from_sha5_object(sha_obj);
6383 stubAddr = StubRoutines::sha512_implCompress();
6384 stubName = "sha512_implCompress";
6385 break;
6386 default:
6387 fatal_unexpected_iid(id);
6388 return false;
6389 }
6390 if (state == NULL) return false;
6391
6392 assert(stubAddr != NULL, "Stub is generated");
6393 if (stubAddr == NULL) return false;
6394
6395 // Call the stub.
6396 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::sha_implCompress_Type(),
6397 stubAddr, stubName, TypePtr::BOTTOM,
6398 src_start, state);
6399
6400 return true;
6401 }
6402
6403 //------------------------------inline_digestBase_implCompressMB-----------------------
6404 //
6405 // Calculate SHA/SHA2/SHA5 for multi-block byte[] array.
6406 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
6407 //
6408 bool LibraryCallKit::inline_digestBase_implCompressMB(int predicate) {
6409 assert(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics,
6410 "need SHA1/SHA256/SHA512 instruction support");
6411 assert((uint)predicate < 3, "sanity");
6412 assert(callee()->signature()->size() == 3, "digestBase_implCompressMB has 3 parameters");
6413
6414 Node* digestBase_obj = argument(0); // The receiver was checked for NULL already.
6415 Node* src = argument(1); // byte[] array
6416 Node* ofs = argument(2); // type int
6417 Node* limit = argument(3); // type int
6418
6419 const Type* src_type = src->Value(&_gvn);
6420 const TypeAryPtr* top_src = src_type->isa_aryptr();
6421 if (top_src == NULL || top_src->klass() == NULL) {
6422 // failed array check
6423 return false;
6424 }
6425 // Figure out the size and type of the elements we will be copying.
6426 BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
6427 if (src_elem != T_BYTE) {
6428 return false;
6429 }
6430 // 'src_start' points to src array + offset
6431 src = must_be_not_null(src, false);
6432 Node* src_start = array_element_address(src, ofs, src_elem);
6433
6434 const char* klass_SHA_name = NULL;
6435 const char* stub_name = NULL;
6436 address stub_addr = NULL;
6437 bool long_state = false;
6438
6439 switch (predicate) {
6440 case 0:
6441 if (UseSHA1Intrinsics) {
6442 klass_SHA_name = "sun/security/provider/SHA";
6443 stub_name = "sha1_implCompressMB";
6444 stub_addr = StubRoutines::sha1_implCompressMB();
6445 }
6446 break;
6447 case 1:
6448 if (UseSHA256Intrinsics) {
6449 klass_SHA_name = "sun/security/provider/SHA2";
6450 stub_name = "sha256_implCompressMB";
6451 stub_addr = StubRoutines::sha256_implCompressMB();
6452 }
6453 break;
6454 case 2:
6455 if (UseSHA512Intrinsics) {
6456 klass_SHA_name = "sun/security/provider/SHA5";
6457 stub_name = "sha512_implCompressMB";
6458 stub_addr = StubRoutines::sha512_implCompressMB();
6459 long_state = true;
6460 }
6461 break;
6462 default:
6463 fatal("unknown SHA intrinsic predicate: %d", predicate);
6464 }
6465 if (klass_SHA_name != NULL) {
6466 assert(stub_addr != NULL, "Stub is generated");
6467 if (stub_addr == NULL) return false;
6468
6469 // get DigestBase klass to lookup for SHA klass
6470 const TypeInstPtr* tinst = _gvn.type(digestBase_obj)->isa_instptr();
6471 assert(tinst != NULL, "digestBase_obj is not instance???");
6472 assert(tinst->klass()->is_loaded(), "DigestBase is not loaded");
6473
6474 ciKlass* klass_SHA = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make(klass_SHA_name));
6475 assert(klass_SHA->is_loaded(), "predicate checks that this class is loaded");
6476 ciInstanceKlass* instklass_SHA = klass_SHA->as_instance_klass();
6477 return inline_sha_implCompressMB(digestBase_obj, instklass_SHA, long_state, stub_addr, stub_name, src_start, ofs, limit);
6478 }
6479 return false;
6480 }

//------------------------------inline_sha_implCompressMB-----------------------
bool LibraryCallKit::inline_sha_implCompressMB(Node* digestBase_obj, ciInstanceKlass* instklass_SHA,
                                               bool long_state, address stubAddr, const char* stubName,
                                               Node* src_start, Node* ofs, Node* limit) {
  const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_SHA);
  const TypeOopPtr* xtype = aklass->as_instance_type();
  Node* sha_obj = new CheckCastPPNode(control(), digestBase_obj, xtype);
  sha_obj = _gvn.transform(sha_obj);

  Node* state;
  if (long_state) {
    state = get_state_from_sha5_object(sha_obj);
  } else {
    state = get_state_from_sha_object(sha_obj);
  }
  if (state == NULL) return false;

  // Call the stub.
  Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
                                 OptoRuntime::digestBase_implCompressMB_Type(),
                                 stubAddr, stubName, TypePtr::BOTTOM,
                                 src_start, state, ofs, limit);
  // return ofs (int)
  Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
  set_result(result);

  return true;
}

//------------------------------get_state_from_sha_object-----------------------
Node* LibraryCallKit::get_state_from_sha_object(Node* sha_object) {
  Node* sha_state = load_field_from_object(sha_object, "state", "[I", /*is_exact*/ false);
  assert(sha_state != NULL, "wrong version of sun.security.provider.SHA/SHA2");
  if (sha_state == NULL) return (Node*) NULL;

  // now have the array, need to get the start address of the state array
  Node* state = array_element_address(sha_state, intcon(0), T_INT);
  return state;
}

//------------------------------get_state_from_sha5_object-----------------------
Node* LibraryCallKit::get_state_from_sha5_object(Node* sha_object) {
  Node* sha_state = load_field_from_object(sha_object, "state", "[J", /*is_exact*/ false);
  assert(sha_state != NULL, "wrong version of sun.security.provider.SHA5");
  if (sha_state == NULL) return (Node*) NULL;

  // now have the array, need to get the start address of the state array
  Node* state = array_element_address(sha_state, intcon(0), T_LONG);
  return state;
}
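
// Both helpers above rely on the field layout of the JDK digest classes
// (a sketch; only the field looked up here is shown):
//
//   class SHA / SHA2 extends DigestBase { int[]  state; ... }  // "[I"
//   class SHA5       extends DigestBase { long[] state; ... }  // "[J"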

//----------------------------inline_digestBase_implCompressMB_predicate----------------------------
// Return node representing slow path of predicate check.
// The pseudo code we want to emulate with this predicate is:
// if (digestBaseObj instanceof SHA/SHA2/SHA5) do_intrinsic, else do_javapath
//
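// A sketch of the dispatch this predicate models (predicate index ->
// expected receiver class, taken from the switch below):
//   0 -> sun.security.provider.SHA   (SHA-1,   guarded by UseSHA1Intrinsics)
//   1 -> sun.security.provider.SHA2  (SHA-256, guarded by UseSHA256Intrinsics)
//   2 -> sun.security.provider.SHA5  (SHA-512, guarded by UseSHA512Intrinsics)
//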
Node* LibraryCallKit::inline_digestBase_implCompressMB_predicate(int predicate) {
  assert(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics,
         "need SHA1/SHA256/SHA512 instruction support");
  assert((uint)predicate < 3, "sanity");

  // The receiver was checked for NULL already.
  Node* digestBaseObj = argument(0);

  // get DigestBase klass for instanceOf check
  const TypeInstPtr* tinst = _gvn.type(digestBaseObj)->isa_instptr();
  assert(tinst != NULL, "digestBaseObj is not an instance pointer");
  assert(tinst->klass()->is_loaded(), "DigestBase is not loaded");

  const char* klass_SHA_name = NULL;
  switch (predicate) {
  case 0:
    if (UseSHA1Intrinsics) {
      // we want to do an instanceof comparison against the SHA class
      klass_SHA_name = "sun/security/provider/SHA";
    }
    break;
  case 1:
    if (UseSHA256Intrinsics) {
      // we want to do an instanceof comparison against the SHA2 class
      klass_SHA_name = "sun/security/provider/SHA2";
    }
    break;
  case 2:
    if (UseSHA512Intrinsics) {
      // we want to do an instanceof comparison against the SHA5 class
      klass_SHA_name = "sun/security/provider/SHA5";
    }
    break;
  default:
    fatal("unknown SHA intrinsic predicate: %d", predicate);
  }

  ciKlass* klass_SHA = NULL;
  if (klass_SHA_name != NULL) {
    klass_SHA = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make(klass_SHA_name));
  }
  if ((klass_SHA == NULL) || !klass_SHA->is_loaded()) {
    // if none of SHA/SHA2/SHA5 is loaded, we never take the intrinsic fast path
    Node* ctrl = control();
    set_control(top()); // no intrinsic path
    return ctrl;
  }
  ciInstanceKlass* instklass_SHA = klass_SHA->as_instance_klass();

  Node* instofSHA = gen_instanceof(digestBaseObj, makecon(TypeKlassPtr::make(instklass_SHA)));
  Node* cmp_instof = _gvn.transform(new CmpINode(instofSHA, intcon(1)));
  Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
  Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN);

  return instof_false; // even if it is NULL
}

//-------------inline_fma-----------------------------------
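// Math.fma(a, b, c) computes a * b + c with a single rounding step (a fused
// multiply-add), which is not in general equal to the doubly-rounded
// expression (a * b) + c. The FmaD/FmaF nodes below carry those semantics
// into the IR.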
bool LibraryCallKit::inline_fma(vmIntrinsics::ID id) {
  Node* a = NULL;
  Node* b = NULL;
  Node* c = NULL;
  Node* result = NULL;
  switch (id) {
  case vmIntrinsics::_fmaD:
    assert(callee()->signature()->size() == 6, "fma has 3 parameters of size 2 each.");
    // no receiver since it is a static method
    a = round_double_node(argument(0));
    b = round_double_node(argument(2));
    c = round_double_node(argument(4));
    result = _gvn.transform(new FmaDNode(control(), a, b, c));
    break;
  case vmIntrinsics::_fmaF:
    assert(callee()->signature()->size() == 3, "fma has 3 parameters of size 1 each.");
    a = argument(0);
    b = argument(1);
    c = argument(2);
    result = _gvn.transform(new FmaFNode(control(), a, b, c));
    break;
  default:
    fatal_unexpected_iid(id);
    break;
  }
  set_result(result);
  return true;
}

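// These intrinsics back the code-point classification methods on
// java.lang.CharacterDataLatin1 (isDigit, isLowerCase, isUpperCase,
// isWhitespace); each one maps to a dedicated IR node that tests the
// single code point passed as argument(1).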
bool LibraryCallKit::inline_character_compare(vmIntrinsics::ID id) {
  // argument(0) is receiver
  Node* codePoint = argument(1);
  Node* n = NULL;

  switch (id) {
  case vmIntrinsics::_isDigit:
    n = new DigitNode(control(), codePoint);
    break;
  case vmIntrinsics::_isLowerCase:
    n = new LowerCaseNode(control(), codePoint);
    break;
  case vmIntrinsics::_isUpperCase:
    n = new UpperCaseNode(control(), codePoint);
    break;
  case vmIntrinsics::_isWhitespace:
    n = new WhitespaceNode(control(), codePoint);
    break;
  default:
    fatal_unexpected_iid(id);
  }

  set_result(_gvn.transform(n));
  return true;
}

//------------------------------inline_fp_min_max------------------------------
bool LibraryCallKit::inline_fp_min_max(vmIntrinsics::ID id) {
/* DISABLED BECAUSE METHOD DATA ISN'T COLLECTED PER CALL-SITE, SEE JDK-8015416.

  // The intrinsic should be used only when the API branches aren't predictable;
  // the last branch performs the most important comparison. The following
  // heuristic uses the branch statistics to bail out if necessary.

  ciMethodData *md = callee()->method_data();

  if (md != NULL && md->is_mature() && md->invocation_count() > 0) {
    ciCallProfile cp = caller()->call_profile_at_bci(bci());

    if (((double)cp.count()) / ((double)md->invocation_count()) < 0.8) {
      // Bail out if the call-site didn't contribute enough to the statistics.
      return false;
    }

    uint taken = 0, not_taken = 0;

    for (ciProfileData *p = md->first_data(); md->is_valid(p); p = md->next_data(p)) {
      if (p->is_BranchData()) {
        taken = ((ciBranchData*)p)->taken();
        not_taken = ((ciBranchData*)p)->not_taken();
      }
    }

    double balance = (((double)taken) - ((double)not_taken)) / ((double)md->invocation_count());
    balance = balance < 0 ? -balance : balance;
    if (balance > 0.2) {
      // Bail out if the most important branch is predictable enough.
      return false;
    }
  }
*/

  Node* a = NULL;
  Node* b = NULL;
  Node* n = NULL;
  switch (id) {
  case vmIntrinsics::_maxF:
  case vmIntrinsics::_minF:
    assert(callee()->signature()->size() == 2, "minF/maxF has 2 parameters of size 1 each.");
    a = argument(0);
    b = argument(1);
    break;
  case vmIntrinsics::_maxD:
  case vmIntrinsics::_minD:
    assert(callee()->signature()->size() == 4, "minD/maxD has 2 parameters of size 2 each.");
    a = round_double_node(argument(0));
    b = round_double_node(argument(2));
    break;
  default:
    fatal_unexpected_iid(id);
    break;
  }
  switch (id) {
  case vmIntrinsics::_maxF:  n = new MaxFNode(a, b);  break;
  case vmIntrinsics::_minF:  n = new MinFNode(a, b);  break;
  case vmIntrinsics::_maxD:  n = new MaxDNode(a, b);  break;
  case vmIntrinsics::_minD:  n = new MinDNode(a, b);  break;
  default:  fatal_unexpected_iid(id);  break;
  }
  set_result(_gvn.transform(n));
  return true;
}

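// Intrinsic for MethodHandleImpl.profileBoolean. A hedged sketch of the Java
// method being replaced (simplified; the real code also protects the counters
// against overflow):
//
//   static boolean profileBoolean(boolean result, int[] counters) {
//     counters[result ? 1 : 0]++;  // [0] counts false, [1] counts true
//     return result;
//   }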
bool LibraryCallKit::inline_profileBoolean() {
  Node* counts = argument(1);
  const TypeAryPtr* ary = NULL;
  ciArray* aobj = NULL;
  if (counts->is_Con()
      && (ary = counts->bottom_type()->isa_aryptr()) != NULL
      && (aobj = ary->const_oop()->as_array()) != NULL
      && (aobj->length() == 2)) {
    // Profile is int[2] where [0] and [1] correspond to false and true value occurrences respectively.
    jint false_cnt = aobj->element_value(0).as_int();
    jint true_cnt  = aobj->element_value(1).as_int();

    if (C->log() != NULL) {
      C->log()->elem("observe source='profileBoolean' false='%d' true='%d'",
                     false_cnt, true_cnt);
    }

    if (false_cnt + true_cnt == 0) {
      // According to profile, never executed.
      uncommon_trap_exact(Deoptimization::Reason_intrinsic,
                          Deoptimization::Action_reinterpret);
      return true;
    }

    // result is a boolean (0 or 1) and its profile (false_cnt & true_cnt)
    // records how many times each value occurred.
    Node* result = argument(0);
    if (false_cnt == 0 || true_cnt == 0) {
      // According to profile, one value has never been seen.
      int expected_val = (false_cnt == 0) ? 1 : 0;

      Node* cmp  = _gvn.transform(new CmpINode(result, intcon(expected_val)));
      Node* test = _gvn.transform(new BoolNode(cmp, BoolTest::eq));

      IfNode* check = create_and_map_if(control(), test, PROB_ALWAYS, COUNT_UNKNOWN);
      Node* fast_path = _gvn.transform(new IfTrueNode(check));
      Node* slow_path = _gvn.transform(new IfFalseNode(check));

      { // Slow path: uncommon trap for the never-seen value, then reexecute
        // MethodHandleImpl::profileBoolean() to bump the count, so the JIT
        // knows the value has been seen at least once.
        PreserveJVMState pjvms(this);
        PreserveReexecuteState preexecs(this);
        jvms()->set_should_reexecute(true);

        set_control(slow_path);
        set_i_o(i_o());

        uncommon_trap_exact(Deoptimization::Reason_intrinsic,
                            Deoptimization::Action_reinterpret);
      }
      // The guard for the never-seen value enables sharpening of the result and
      // returning a constant. This makes it possible to eliminate branches on
      // the same value later on.
      set_control(fast_path);
      result = intcon(expected_val);
    }
    // Stop profiling.
    // MethodHandleImpl::profileBoolean() has profiling logic in its bytecode.
    // By replacing the method body with profile data (represented as a
    // ProfileBooleanNode on the IR level) we effectively disable profiling.
    // This enables full-speed execution once optimized code is generated.
    Node* profile = _gvn.transform(new ProfileBooleanNode(result, false_cnt, true_cnt));
    C->record_for_igvn(profile);
    set_result(profile);
    return true;
  } else {
    // Continue profiling.
    // Profile data isn't available at the moment, so execute the bytecode
    // version of the method. Usually, when GWT LambdaForms are profiled it
    // means that a stand-alone nmethod is compiled and the counters aren't
    // available since the corresponding MethodHandle isn't a compile-time
    // constant.
    return false;
  }
}

bool LibraryCallKit::inline_isCompileConstant() {
  Node* n = argument(0);
  set_result(n->is_Con() ? intcon(1) : intcon(0));
  return true;
}
