/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedOops.hpp"
#include "opto/ad.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/movenode.hpp"
#include "opto/opcodes.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/type.hpp"
#include "opto/vectornode.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"

OptoReg::Name OptoReg::c_frame_pointer;

const RegMask *Matcher::idealreg2regmask[_last_machine_leaf];
RegMask Matcher::mreg2regmask[_last_Mach_Reg];
RegMask Matcher::STACK_ONLY_mask;
RegMask Matcher::c_frame_ptr_mask;
const uint Matcher::_begin_rematerialize = _BEGIN_REMATERIALIZE;
const uint Matcher::_end_rematerialize   = _END_REMATERIALIZE;

//---------------------------Matcher-------------------------------------------
Matcher::Matcher()
: PhaseTransform( Phase::Ins_Select ),
  _states_arena(Chunk::medium_size, mtCompiler),
  _visited(&_states_arena),
  _shared(&_states_arena),
  _dontcare(&_states_arena),
  _reduceOp(reduceOp), _leftOp(leftOp), _rightOp(rightOp),
  _swallowed(swallowed),
  _begin_inst_chain_rule(_BEGIN_INST_CHAIN_RULE),
  _end_inst_chain_rule(_END_INST_CHAIN_RULE),
  _must_clone(must_clone),
  _shared_nodes(C->comp_arena()),
#ifdef ASSERT
  _old2new_map(C->comp_arena()),
  _new2old_map(C->comp_arena()),
#endif
  _allocation_started(false),
  _ruleName(ruleName),
  _register_save_policy(register_save_policy),
  _c_reg_save_policy(c_reg_save_policy),
  _register_save_type(register_save_type) {
  C->set_matcher(this);

  idealreg2spillmask  [Op_RegI] = NULL;
  idealreg2spillmask  [Op_RegN] = NULL;
  idealreg2spillmask  [Op_RegL] = NULL;
  idealreg2spillmask  [Op_RegF] = NULL;
  idealreg2spillmask  [Op_RegD] = NULL;
  idealreg2spillmask  [Op_RegP] = NULL;
  idealreg2spillmask  [Op_VecS] = NULL;
  idealreg2spillmask  [Op_VecD] = NULL;
  idealreg2spillmask  [Op_VecX] = NULL;
  idealreg2spillmask  [Op_VecY] = NULL;
  idealreg2spillmask  [Op_VecZ] = NULL;
  idealreg2spillmask  [Op_RegFlags] = NULL;

  idealreg2debugmask  [Op_RegI] = NULL;
  idealreg2debugmask  [Op_RegN] = NULL;
  idealreg2debugmask  [Op_RegL] = NULL;
  idealreg2debugmask  [Op_RegF] = NULL;
  idealreg2debugmask  [Op_RegD] = NULL;
  idealreg2debugmask  [Op_RegP] = NULL;
  idealreg2debugmask  [Op_VecS] = NULL;
  idealreg2debugmask  [Op_VecD] = NULL;
  idealreg2debugmask  [Op_VecX] = NULL;
  idealreg2debugmask  [Op_VecY] = NULL;
  idealreg2debugmask  [Op_VecZ] = NULL;
  idealreg2debugmask  [Op_RegFlags] = NULL;

  idealreg2mhdebugmask[Op_RegI] = NULL;
  idealreg2mhdebugmask[Op_RegN] = NULL;
  idealreg2mhdebugmask[Op_RegL] = NULL;
  idealreg2mhdebugmask[Op_RegF] = NULL;
  idealreg2mhdebugmask[Op_RegD] = NULL;
  idealreg2mhdebugmask[Op_RegP] = NULL;
  idealreg2mhdebugmask[Op_VecS] = NULL;
  idealreg2mhdebugmask[Op_VecD] = NULL;
  idealreg2mhdebugmask[Op_VecX] = NULL;
  idealreg2mhdebugmask[Op_VecY] = NULL;
  idealreg2mhdebugmask[Op_VecZ] = NULL;
  idealreg2mhdebugmask[Op_RegFlags] = NULL;

  debug_only(_mem_node = NULL;)   // Ideal memory node consumed by mach node
}

//------------------------------warp_incoming_stk_arg------------------------
// This warps a VMReg into an OptoReg::Name
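// Worked example with illustrative numbers: a stack argument at
// reg2stack() slot 3, with 2 out-preserve slots, warps to
// OptoReg::add(_old_SP, 3 + 2), i.e. five slots above the _old_SP bias.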
OptoReg::Name Matcher::warp_incoming_stk_arg( VMReg reg ) {
  OptoReg::Name warped;
  if( reg->is_stack() ) {                // Stack slot argument?
    warped = OptoReg::add(_old_SP, reg->reg2stack() );
    warped = OptoReg::add(warped, C->out_preserve_stack_slots());
    if( warped >= _in_arg_limit )
      _in_arg_limit = OptoReg::add(warped, 1); // Bump max stack slot seen
    if (!RegMask::can_represent_arg(warped)) {
      // the compiler cannot represent this method's calling sequence
      C->record_method_not_compilable("unsupported incoming calling sequence");
      return OptoReg::Bad;
    }
    return warped;
  }
  return OptoReg::as_OptoReg(reg);
}

//---------------------------compute_old_SP------------------------------------
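// Worked example with illustrative numbers: fixed_slots() == 3, two
// in-preserve slots and a 2-slot stack alignment yield
// stack2reg(align_up(3 + 2, 2)) == stack2reg(6).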
OptoReg::Name Compile::compute_old_SP() {
  int fixed    = fixed_slots();
  int preserve = in_preserve_stack_slots();
  return OptoReg::stack2reg(align_up(fixed + preserve, (int)Matcher::stack_alignment_in_slots()));
}


#ifdef ASSERT
void Matcher::verify_new_nodes_only(Node* xroot) {
  // Make sure that the new graph only references new nodes
  ResourceMark rm;
  Unique_Node_List worklist;
  VectorSet visited(Thread::current()->resource_area());
  worklist.push(xroot);
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    visited.set(n->_idx);
    assert(C->node_arena()->contains(n), "dead node");
    for (uint j = 0; j < n->req(); j++) {
      Node* in = n->in(j);
      if (in != NULL) {
        assert(C->node_arena()->contains(in), "dead node");
        if (!visited.test(in->_idx)) {
          worklist.push(in);
        }
      }
    }
  }
}
#endif


//---------------------------match---------------------------------------------
void Matcher::match( ) {
  if( MaxLabelRootDepth < 100 ) { // Too small?
    assert(false, "invalid MaxLabelRootDepth, increase it to 100 minimum");
    MaxLabelRootDepth = 100;
  }
  // One-time initialization of some register masks.
  init_spill_mask( C->root()->in(1) );
  _return_addr_mask = return_addr();
#ifdef _LP64
  // Pointers take 2 slots in 64-bit land
  _return_addr_mask.Insert(OptoReg::add(return_addr(),1));
#endif

  // Map a Java-signature return type into return register-value
  // machine registers for 0, 1 and 2 returned values.
  const TypeTuple *range = C->tf()->range();
  if( range->cnt() > TypeFunc::Parms ) { // If not a void function
    // Get ideal-register return type
    uint ireg = range->field_at(TypeFunc::Parms)->ideal_reg();
    // Get machine return register
    uint sop = C->start()->Opcode();
    OptoRegPair regs = return_value(ireg, false);

    // And mask for same
    _return_value_mask = RegMask(regs.first());
    if( OptoReg::is_valid(regs.second()) )
      _return_value_mask.Insert(regs.second());
  }

  // ---------------
  // Frame Layout

  // Need the method signature to determine the incoming argument types,
  // because the types determine which registers the incoming arguments are
  // in, and this affects the matched code.
  const TypeTuple *domain = C->tf()->domain();
  uint argcnt = domain->cnt() - TypeFunc::Parms;
  BasicType *sig_bt        = NEW_RESOURCE_ARRAY( BasicType, argcnt );
  VMRegPair *vm_parm_regs  = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
  _parm_regs               = NEW_RESOURCE_ARRAY( OptoRegPair, argcnt );
  _calling_convention_mask = NEW_RESOURCE_ARRAY( RegMask, argcnt );
  uint i;
  for( i = 0; i<argcnt; i++ ) {
    sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type();
  }

  // Pass array of ideal registers and length to USER code (from the AD file)
  // that will convert this to an array of register numbers.
  const StartNode *start = C->start();
  start->calling_convention( sig_bt, vm_parm_regs, argcnt );
#ifdef ASSERT
  // Sanity check users' calling convention. Real handy while trying to
  // get the initial port correct.
  { for (uint i = 0; i<argcnt; i++) {
      if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
        assert(domain->field_at(i+TypeFunc::Parms)==Type::HALF, "only allowed on halves");
        _parm_regs[i].set_bad();
        continue;
      }
      VMReg parm_reg = vm_parm_regs[i].first();
      assert(parm_reg->is_valid(), "invalid arg?");
      if (parm_reg->is_reg()) {
        OptoReg::Name opto_parm_reg = OptoReg::as_OptoReg(parm_reg);
        assert(can_be_java_arg(opto_parm_reg) ||
               C->stub_function() == CAST_FROM_FN_PTR(address, OptoRuntime::rethrow_C) ||
               opto_parm_reg == inline_cache_reg(),
               "parameters in register must be preserved by runtime stubs");
      }
      for (uint j = 0; j < i; j++) {
        assert(parm_reg != vm_parm_regs[j].first(),
               "calling conv. must produce distinct regs");
      }
    }
  }
#endif

  // Do some initial frame layout.

  // Compute the old incoming SP (may be called FP) as
  //   OptoReg::stack0() + locks + in_preserve_stack_slots + pad2.
  _old_SP = C->compute_old_SP();
  assert( is_even(_old_SP), "must be even" );

  // Compute highest incoming stack argument as
  //   _old_SP + out_preserve_stack_slots + incoming argument size.
  _in_arg_limit = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
  assert( is_even(_in_arg_limit), "out_preserve must be even" );
  for( i = 0; i < argcnt; i++ ) {
    // Permit args to have no register
    _calling_convention_mask[i].Clear();
    if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
      continue;
    }
    // calling_convention returns stack arguments as a count of
    // slots beyond OptoReg::stack0()/VMRegImpl::stack0. We need to convert this to
    // the allocator's point of view, taking into account all the
    // preserve area, locks & pad2.

    OptoReg::Name reg1 = warp_incoming_stk_arg(vm_parm_regs[i].first());
    if( OptoReg::is_valid(reg1))
      _calling_convention_mask[i].Insert(reg1);

    OptoReg::Name reg2 = warp_incoming_stk_arg(vm_parm_regs[i].second());
    if( OptoReg::is_valid(reg2))
      _calling_convention_mask[i].Insert(reg2);

    // Saved biased stack-slot register number
    _parm_regs[i].set_pair(reg2, reg1);
  }

  // Finally, make sure the incoming arguments take up an even number of
  // words, in case the arguments or locals need to contain doubleword stack
  // slots. The rest of the system assumes that stack slot pairs (in
  // particular, in the spill area) which look aligned will in fact be
  // aligned relative to the stack pointer in the target machine. Double
  // stack slots will always be allocated aligned.
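  // For example (illustrative numbers): with _in_arg_limit at slot 7 and
  // RegMask::SlotsPerLong == 2, _new_SP is rounded up to slot 8.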
  _new_SP = OptoReg::Name(align_up(_in_arg_limit, (int)RegMask::SlotsPerLong));

  // Compute highest outgoing stack argument as
  //   _new_SP + out_preserve_stack_slots + max(outgoing argument size).
  _out_arg_limit = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
  assert( is_even(_out_arg_limit), "out_preserve must be even" );

  if (!RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1))) {
    // the compiler cannot represent this method's calling sequence
    C->record_method_not_compilable("must be able to represent all call arguments in reg mask");
  }

  if (C->failing())  return;  // bailed out on incoming arg failure

  // ---------------
  // Collect roots of matcher trees. Every node for which
  // _shared[_idx] is cleared is guaranteed to not be shared, and thus
  // can be a valid interior of some tree.
  find_shared( C->root() );
  find_shared( C->top() );

  C->print_method(PHASE_BEFORE_MATCHING);

  // Create new ideal node ConP #NULL even if it does exist in old space
  // to avoid false sharing if the corresponding mach node is not used.
  // The corresponding mach node is only used in rare cases for derived
  // pointers.
  Node* new_ideal_null = ConNode::make(TypePtr::NULL_PTR);

  // Swap out to old-space; emptying new-space
  Arena *old = C->node_arena()->move_contents(C->old_arena());

  // Save debug and profile information for nodes in old space:
  _old_node_note_array = C->node_note_array();
  if (_old_node_note_array != NULL) {
    C->set_node_note_array(new(C->comp_arena()) GrowableArray<Node_Notes*>
                           (C->comp_arena(), _old_node_note_array->length(),
                            0, NULL));
  }

  // Pre-size the new_node table to avoid the need for range checks.
  grow_new_node_array(C->unique());

  // Reset node counter so MachNodes start with _idx at 0
  int live_nodes = C->live_nodes();
  C->set_unique(0);
  C->reset_dead_node_list();

  // Recursively match trees from old space into new space.
  // Correct leaves of new-space Nodes; they point to old-space.
  _visited.clear();
  C->set_cached_top_node(xform( C->top(), live_nodes ));
  if (!C->failing()) {
    Node* xroot = xform( C->root(), 1 );
    if (xroot == NULL) {
      Matcher::soft_match_failure();  // recursive matching process failed
      C->record_method_not_compilable("instruction match failed");
    } else {
      // During matching shared constants were attached to C->root()
      // because xroot wasn't available yet, so transfer the uses to
      // the xroot.
      for( DUIterator_Fast jmax, j = C->root()->fast_outs(jmax); j < jmax; j++ ) {
        Node* n = C->root()->fast_out(j);
        if (C->node_arena()->contains(n)) {
          assert(n->in(0) == C->root(), "should be control user");
          n->set_req(0, xroot);
          --j;
          --jmax;
        }
      }

      // Generate new mach node for ConP #NULL
      assert(new_ideal_null != NULL, "sanity");
      _mach_null = match_tree(new_ideal_null);
      // Don't set control, it will confuse GCM since there are no uses.
      // The control will be set when this node is used first time
      // in find_base_for_derived().
      assert(_mach_null != NULL, "");

      C->set_root(xroot->is_Root() ? xroot->as_Root() : NULL);

#ifdef ASSERT
      verify_new_nodes_only(xroot);
#endif
    }
  }
  if (C->top() == NULL || C->root() == NULL) {
    C->record_method_not_compilable("graph lost"); // %%% cannot happen?
  }
  if (C->failing()) {
    // delete old;
    old->destruct_contents();
    return;
  }
  assert( C->top(), "" );
  assert( C->root(), "" );
  validate_null_checks();

  // Now smoke old-space
  NOT_DEBUG( old->destruct_contents() );

  // ------------------------
  // Set up save-on-entry registers.
  Fixup_Save_On_Entry( );

  { // Cleanup mach IR after selection phase is over.
    Compile::TracePhase tp("postselect_cleanup", &timers[_t_postselect_cleanup]);
    do_postselect_cleanup();
    if (C->failing()) return;
    assert(verify_after_postselect_cleanup(), "");
  }
}

//------------------------------Fixup_Save_On_Entry----------------------------
// The stated purpose of this routine is to take care of save-on-entry
// registers. However, the overall goal of the Match phase is to convert into
// machine-specific instructions which have RegMasks to guide allocation.
// So what this procedure really does is put a valid RegMask on each input
// to the machine-specific variations of all Return, TailCall and Halt
// instructions. It also adds edges to define the save-on-entry values (and of
// course gives them a mask).

static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) {
  RegMask *rms = NEW_RESOURCE_ARRAY( RegMask, size );
  // Do all the pre-defined register masks
  rms[TypeFunc::Control  ] = RegMask::Empty;
  rms[TypeFunc::I_O      ] = RegMask::Empty;
  rms[TypeFunc::Memory   ] = RegMask::Empty;
  rms[TypeFunc::ReturnAdr] = ret_adr;
  rms[TypeFunc::FramePtr ] = fp;
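  // Entries from TypeFunc::Parms onward are left for the caller to fill
  // in (return values, save-on-entry registers, etc.).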
  return rms;
}

#define NOF_STACK_MASKS (3*6+5)
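// 3*6+5 == 23 masks: spill, debug and mhdebug masks for each of the six
// scalar ideal register classes (RegN/I/L/F/D/P), plus spill masks for
// the five vector classes (VecS..VecZ); see the index assignments below.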

// Create the initial stack mask used by values spilling to the stack.
// Disallow any debug info in outgoing argument areas by setting the
// initial mask accordingly.
void Matcher::init_first_stack_mask() {

  // Allocate storage for spill masks as masks for the appropriate load type.
  RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask) * NOF_STACK_MASKS);

  // Initialize empty placeholder masks into the newly allocated arena
  for (int i = 0; i < NOF_STACK_MASKS; i++) {
    new (rms + i) RegMask();
  }

  idealreg2spillmask  [Op_RegN] = &rms[0];
  idealreg2spillmask  [Op_RegI] = &rms[1];
  idealreg2spillmask  [Op_RegL] = &rms[2];
  idealreg2spillmask  [Op_RegF] = &rms[3];
  idealreg2spillmask  [Op_RegD] = &rms[4];
  idealreg2spillmask  [Op_RegP] = &rms[5];

  idealreg2debugmask  [Op_RegN] = &rms[6];
  idealreg2debugmask  [Op_RegI] = &rms[7];
  idealreg2debugmask  [Op_RegL] = &rms[8];
  idealreg2debugmask  [Op_RegF] = &rms[9];
  idealreg2debugmask  [Op_RegD] = &rms[10];
  idealreg2debugmask  [Op_RegP] = &rms[11];

  idealreg2mhdebugmask[Op_RegN] = &rms[12];
  idealreg2mhdebugmask[Op_RegI] = &rms[13];
  idealreg2mhdebugmask[Op_RegL] = &rms[14];
  idealreg2mhdebugmask[Op_RegF] = &rms[15];
  idealreg2mhdebugmask[Op_RegD] = &rms[16];
  idealreg2mhdebugmask[Op_RegP] = &rms[17];

  idealreg2spillmask  [Op_VecS] = &rms[18];
  idealreg2spillmask  [Op_VecD] = &rms[19];
  idealreg2spillmask  [Op_VecX] = &rms[20];
  idealreg2spillmask  [Op_VecY] = &rms[21];
  idealreg2spillmask  [Op_VecZ] = &rms[22];

  OptoReg::Name i;

  // At first, start with the empty mask
  C->FIRST_STACK_mask().Clear();

  // Add in the incoming argument area
  OptoReg::Name init_in = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
  for (i = init_in; i < _in_arg_limit; i = OptoReg::add(i,1)) {
    C->FIRST_STACK_mask().Insert(i);
  }
  // Add in all bits past the outgoing argument area
  guarantee(RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1)),
            "must be able to represent all call arguments in reg mask");
  OptoReg::Name init = _out_arg_limit;
  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1)) {
    C->FIRST_STACK_mask().Insert(i);
  }
  // Finally, set the "infinite stack" bit.
  C->FIRST_STACK_mask().set_AllStack();

  // Make spill masks. Registers for their class, plus FIRST_STACK_mask.
  RegMask aligned_stack_mask = C->FIRST_STACK_mask();
  // Keep spill masks aligned.
  aligned_stack_mask.clear_to_pairs();
  assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");

  *idealreg2spillmask[Op_RegP] = *idealreg2regmask[Op_RegP];
#ifdef _LP64
  *idealreg2spillmask[Op_RegN] = *idealreg2regmask[Op_RegN];
   idealreg2spillmask[Op_RegN]->OR(C->FIRST_STACK_mask());
   idealreg2spillmask[Op_RegP]->OR(aligned_stack_mask);
#else
   idealreg2spillmask[Op_RegP]->OR(C->FIRST_STACK_mask());
#endif
  *idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI];
   idealreg2spillmask[Op_RegI]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[Op_RegL] = *idealreg2regmask[Op_RegL];
   idealreg2spillmask[Op_RegL]->OR(aligned_stack_mask);
  *idealreg2spillmask[Op_RegF] = *idealreg2regmask[Op_RegF];
   idealreg2spillmask[Op_RegF]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[Op_RegD] = *idealreg2regmask[Op_RegD];
   idealreg2spillmask[Op_RegD]->OR(aligned_stack_mask);

  if (Matcher::vector_size_supported(T_BYTE,4)) {
    *idealreg2spillmask[Op_VecS] = *idealreg2regmask[Op_VecS];
     idealreg2spillmask[Op_VecS]->OR(C->FIRST_STACK_mask());
  }
  if (Matcher::vector_size_supported(T_FLOAT,2)) {
    // For VecD we need dual alignment and 8 bytes (2 slots) for spills.
    // RA guarantees such alignment since it is needed for Double and Long values.
    *idealreg2spillmask[Op_VecD] = *idealreg2regmask[Op_VecD];
     idealreg2spillmask[Op_VecD]->OR(aligned_stack_mask);
  }
  if (Matcher::vector_size_supported(T_FLOAT,4)) {
    // For VecX we need quad alignment and 16 bytes (4 slots) for spills.
    //
    // RA can use input arguments stack slots for spills but until RA
    // we don't know frame size and offset of input arg stack slots.
    //
    // Exclude last input arg stack slots to avoid spilling vectors there
    // otherwise vector spills could stomp over stack slots in caller frame.
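    // For example, with RegMask::SlotsPerVecX == 4 the loop below strips
    // the last three incoming-arg slots, so a 16-byte vector spill can
    // never straddle into the caller's frame.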
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecX); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
    aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX);
    assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecX] = *idealreg2regmask[Op_VecX];
     idealreg2spillmask[Op_VecX]->OR(aligned_stack_mask);
  }
  if (Matcher::vector_size_supported(T_FLOAT,8)) {
    // For VecY we need octo alignment and 32 bytes (8 slots) for spills.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecY); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
    aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY);
    assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecY] = *idealreg2regmask[Op_VecY];
     idealreg2spillmask[Op_VecY]->OR(aligned_stack_mask);
  }
  if (Matcher::vector_size_supported(T_FLOAT,16)) {
    // For VecZ we need enough alignment and 64 bytes (16 slots) for spills.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecZ); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
    aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecZ);
    assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecZ] = *idealreg2regmask[Op_VecZ];
     idealreg2spillmask[Op_VecZ]->OR(aligned_stack_mask);
  }
  if (UseFPUForSpilling) {
    // This mask logic assumes that the spill operations are
    // symmetric and that the registers involved are the same size.
    // On sparc for instance we may have to use 64 bit moves that will
    // kill 2 registers when used with F0-F31.
    idealreg2spillmask[Op_RegI]->OR(*idealreg2regmask[Op_RegF]);
    idealreg2spillmask[Op_RegF]->OR(*idealreg2regmask[Op_RegI]);
#ifdef _LP64
    idealreg2spillmask[Op_RegN]->OR(*idealreg2regmask[Op_RegF]);
    idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
    idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
    idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegD]);
#else
    idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegF]);
#ifdef ARM
    // ARM has support for moving 64bit values between a pair of
    // integer registers and a double register
    idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
    idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
#endif
#endif
  }

  // Make up debug masks. Any spill slot plus callee-save registers.
  // Caller-save registers are assumed to be trashable by the various
  // inline-cache fixup routines.
  *idealreg2debugmask  [Op_RegN] = *idealreg2spillmask[Op_RegN];
  *idealreg2debugmask  [Op_RegI] = *idealreg2spillmask[Op_RegI];
  *idealreg2debugmask  [Op_RegL] = *idealreg2spillmask[Op_RegL];
  *idealreg2debugmask  [Op_RegF] = *idealreg2spillmask[Op_RegF];
  *idealreg2debugmask  [Op_RegD] = *idealreg2spillmask[Op_RegD];
  *idealreg2debugmask  [Op_RegP] = *idealreg2spillmask[Op_RegP];

  *idealreg2mhdebugmask[Op_RegN] = *idealreg2spillmask[Op_RegN];
  *idealreg2mhdebugmask[Op_RegI] = *idealreg2spillmask[Op_RegI];
  *idealreg2mhdebugmask[Op_RegL] = *idealreg2spillmask[Op_RegL];
  *idealreg2mhdebugmask[Op_RegF] = *idealreg2spillmask[Op_RegF];
  *idealreg2mhdebugmask[Op_RegD] = *idealreg2spillmask[Op_RegD];
  *idealreg2mhdebugmask[Op_RegP] = *idealreg2spillmask[Op_RegP];

  // Prevent stub compilations from attempting to reference
  // callee-saved registers from debug info
  bool exclude_soe = !Compile::current()->is_method_compilation();

  for( i=OptoReg::Name(0); i<OptoReg::Name(_last_Mach_Reg); i = OptoReg::add(i,1) ) {
    // registers the caller has to save do not work
    if( _register_save_policy[i] == 'C' ||
        _register_save_policy[i] == 'A' ||
        (_register_save_policy[i] == 'E' && exclude_soe) ) {
      idealreg2debugmask  [Op_RegN]->Remove(i);
      idealreg2debugmask  [Op_RegI]->Remove(i); // Exclude save-on-call
      idealreg2debugmask  [Op_RegL]->Remove(i); // registers from debug
      idealreg2debugmask  [Op_RegF]->Remove(i); // masks
      idealreg2debugmask  [Op_RegD]->Remove(i);
      idealreg2debugmask  [Op_RegP]->Remove(i);

      idealreg2mhdebugmask[Op_RegN]->Remove(i);
      idealreg2mhdebugmask[Op_RegI]->Remove(i);
      idealreg2mhdebugmask[Op_RegL]->Remove(i);
      idealreg2mhdebugmask[Op_RegF]->Remove(i);
      idealreg2mhdebugmask[Op_RegD]->Remove(i);
      idealreg2mhdebugmask[Op_RegP]->Remove(i);
    }
  }

  // Subtract the register we use to save the SP for MethodHandle
  // invokes from the debug mask.
  const RegMask save_mask = method_handle_invoke_SP_save_mask();
  idealreg2mhdebugmask[Op_RegN]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegI]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegL]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegF]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegD]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegP]->SUBTRACT(save_mask);
}

//---------------------------is_save_on_entry----------------------------------
bool Matcher::is_save_on_entry( int reg ) {
  return
    _register_save_policy[reg] == 'E' ||
    _register_save_policy[reg] == 'A' || // Save-on-entry register?
    // Also save argument registers in the trampolining stubs
    (C->save_argument_registers() && is_spillable_arg(reg));
}

//---------------------------Fixup_Save_On_Entry-------------------------------
void Matcher::Fixup_Save_On_Entry( ) {
  init_first_stack_mask();

  Node *root = C->root();       // Short name for root
  // Count number of save-on-entry registers.
  uint soe_cnt = number_of_saved_registers();
  uint i;

  // Find the procedure Start Node
  StartNode *start = C->start();
  assert( start, "Expect a start node" );

  // Save argument registers in the trampolining stubs
  if( C->save_argument_registers() )
    for( i = 0; i < _last_Mach_Reg; i++ )
      if( is_spillable_arg(i) )
        soe_cnt++;

  // Input RegMask array shared by all Returns.
  // The type for doubles and longs has a count of 2, but
  // there is only 1 returned value
  uint ret_edge_cnt = TypeFunc::Parms + ((C->tf()->range()->cnt() == TypeFunc::Parms) ? 0 : 1);
  RegMask *ret_rms  = init_input_masks( ret_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  // Returns have 0 or 1 returned values depending on call signature.
  // Return register is specified by return_value in the AD file.
  if (ret_edge_cnt > TypeFunc::Parms)
    ret_rms[TypeFunc::Parms+0] = _return_value_mask;

  // Input RegMask array shared by all Rethrows.
  uint reth_edge_cnt = TypeFunc::Parms+1;
  RegMask *reth_rms  = init_input_masks( reth_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  // Rethrow takes exception oop only, but in the argument 0 slot.
  OptoReg::Name reg = find_receiver(false);
  if (reg >= 0) {
    reth_rms[TypeFunc::Parms] = mreg2regmask[reg];
#ifdef _LP64
    // Need two slots for ptrs in 64-bit land
    reth_rms[TypeFunc::Parms].Insert(OptoReg::add(OptoReg::Name(reg), 1));
#endif
  }

  // Input RegMask array shared by all TailCalls
  uint tail_call_edge_cnt = TypeFunc::Parms+2;
  RegMask *tail_call_rms = init_input_masks( tail_call_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // Input RegMask array shared by all TailJumps
  uint tail_jump_edge_cnt = TypeFunc::Parms+2;
  RegMask *tail_jump_rms = init_input_masks( tail_jump_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // TailCalls have 2 returned values (target & moop), whose masks come
  // from the usual MachNode/MachOper mechanism. Find a sample
  // TailCall to extract these masks and put the correct masks into
  // the tail_call_rms array.
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *m = root->in(i)->as_MachReturn();
    if( m->ideal_Opcode() == Op_TailCall ) {
      tail_call_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_call_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
      break;
    }
  }

  // TailJumps have 2 returned values (target & ex_oop), whose masks come
  // from the usual MachNode/MachOper mechanism. Find a sample
  // TailJump to extract these masks and put the correct masks into
  // the tail_jump_rms array.
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *m = root->in(i)->as_MachReturn();
    if( m->ideal_Opcode() == Op_TailJump ) {
      tail_jump_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_jump_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
      break;
    }
  }

  // Input RegMask array shared by all Halts
  uint halt_edge_cnt = TypeFunc::Parms;
  RegMask *halt_rms = init_input_masks( halt_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // Capture the return input masks into each exit flavor
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *exit = root->in(i)->as_MachReturn();
    switch( exit->ideal_Opcode() ) {
      case Op_Return   : exit->_in_rms = ret_rms;  break;
      case Op_Rethrow  : exit->_in_rms = reth_rms; break;
      case Op_TailCall : exit->_in_rms = tail_call_rms; break;
      case Op_TailJump : exit->_in_rms = tail_jump_rms; break;
      case Op_Halt     : exit->_in_rms = halt_rms; break;
      default          : ShouldNotReachHere();
    }
  }

  // Next unused projection number from Start.
  int proj_cnt = C->tf()->domain()->cnt();

  // Do all the save-on-entry registers. Make projections from Start for
  // them, and give them a use at the exit points. To the allocator, they
  // look like incoming register arguments.
  for( i = 0; i < _last_Mach_Reg; i++ ) {
    if( is_save_on_entry(i) ) {

      // Add the save-on-entry to the mask array
      ret_rms      [      ret_edge_cnt] = mreg2regmask[i];
      reth_rms     [     reth_edge_cnt] = mreg2regmask[i];
      tail_call_rms[tail_call_edge_cnt] = mreg2regmask[i];
      tail_jump_rms[tail_jump_edge_cnt] = mreg2regmask[i];
      // Halts need the SOE registers, but only in the stack as debug info.
      // A just-prior uncommon-trap or deoptimization will use the SOE regs.
      halt_rms     [     halt_edge_cnt] = *idealreg2spillmask[_register_save_type[i]];

      Node *mproj;

      // Is this a RegF low half of a RegD? Double up 2 adjacent RegF's
      // into a single RegD.
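      // (For example, an even float register whose odd neighbor is also a
      // save-on-entry RegF yields one RegD projection covering both halves.)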
      if( (i&1) == 0 &&
          _register_save_type[i  ] == Op_RegF &&
          _register_save_type[i+1] == Op_RegF &&
          is_save_on_entry(i+1) ) {
        // Add other bit for double
        ret_rms      [      ret_edge_cnt].Insert(OptoReg::Name(i+1));
        reth_rms     [     reth_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
        halt_rms     [     halt_edge_cnt].Insert(OptoReg::Name(i+1));
        mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegD );
        proj_cnt += 2;          // Skip 2 for doubles
      }
      else if( (i&1) == 1 &&    // Else check for high half of double
               _register_save_type[i-1] == Op_RegF &&
               _register_save_type[i  ] == Op_RegF &&
               is_save_on_entry(i-1) ) {
        ret_rms      [      ret_edge_cnt] = RegMask::Empty;
        reth_rms     [     reth_edge_cnt] = RegMask::Empty;
        tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
        tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
        halt_rms     [     halt_edge_cnt] = RegMask::Empty;
        mproj = C->top();
      }
      // Is this a RegI low half of a RegL? Double up 2 adjacent RegI's
      // into a single RegL.
      else if( (i&1) == 0 &&
               _register_save_type[i  ] == Op_RegI &&
               _register_save_type[i+1] == Op_RegI &&
               is_save_on_entry(i+1) ) {
        // Add other bit for long
        ret_rms      [      ret_edge_cnt].Insert(OptoReg::Name(i+1));
        reth_rms     [     reth_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
        halt_rms     [     halt_edge_cnt].Insert(OptoReg::Name(i+1));
        mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegL );
        proj_cnt += 2;          // Skip 2 for longs
      }
      else if( (i&1) == 1 &&    // Else check for high half of long
               _register_save_type[i-1] == Op_RegI &&
               _register_save_type[i  ] == Op_RegI &&
               is_save_on_entry(i-1) ) {
        ret_rms      [      ret_edge_cnt] = RegMask::Empty;
        reth_rms     [     reth_edge_cnt] = RegMask::Empty;
        tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
        tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
        halt_rms     [     halt_edge_cnt] = RegMask::Empty;
        mproj = C->top();
      } else {
        // Make a projection for it off the Start
        mproj = new MachProjNode( start, proj_cnt++, ret_rms[ret_edge_cnt], _register_save_type[i] );
      }

      ret_edge_cnt ++;
      reth_edge_cnt ++;
      tail_call_edge_cnt ++;
      tail_jump_edge_cnt ++;
      halt_edge_cnt ++;

      // Add a use of the SOE register to all exit paths
      for( uint j=1; j < root->req(); j++ )
        root->in(j)->add_req(mproj);
    } // End of if a save-on-entry register
  } // End of for all machine registers
}

//------------------------------init_spill_mask--------------------------------
void Matcher::init_spill_mask( Node *ret ) {
  if( idealreg2regmask[Op_RegI] ) return; // One time only init

  OptoReg::c_frame_pointer = c_frame_pointer();
  c_frame_ptr_mask = c_frame_pointer();
#ifdef _LP64
  // pointers are twice as big
  c_frame_ptr_mask.Insert(OptoReg::add(c_frame_pointer(),1));
#endif

  // Start at OptoReg::stack0()
  STACK_ONLY_mask.Clear();
  OptoReg::Name init = OptoReg::stack2reg(0);
  // STACK_ONLY_mask is all stack bits
  OptoReg::Name i;
  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1))
    STACK_ONLY_mask.Insert(i);
  // Also set the "infinite stack" bit.
  STACK_ONLY_mask.set_AllStack();

  // Copy the register names over into the shared world
  for( i=OptoReg::Name(0); i<OptoReg::Name(_last_Mach_Reg); i = OptoReg::add(i,1) ) {
    // SharedInfo::regName[i] = regName[i];
    // Handy RegMasks per machine register
    mreg2regmask[i].Insert(i);
  }

  // Grab the Frame Pointer
  Node *fp = ret->in(TypeFunc::FramePtr);
  // Share frame pointer while making spill ops
  set_shared(fp);

  // Get the ADLC notion of the right regmask, for each basic type.
#ifdef _LP64
  idealreg2regmask[Op_RegN] = regmask_for_ideal_register(Op_RegN, ret);
#endif
  idealreg2regmask[Op_RegI] = regmask_for_ideal_register(Op_RegI, ret);
  idealreg2regmask[Op_RegP] = regmask_for_ideal_register(Op_RegP, ret);
  idealreg2regmask[Op_RegF] = regmask_for_ideal_register(Op_RegF, ret);
  idealreg2regmask[Op_RegD] = regmask_for_ideal_register(Op_RegD, ret);
  idealreg2regmask[Op_RegL] = regmask_for_ideal_register(Op_RegL, ret);
  idealreg2regmask[Op_VecS] = regmask_for_ideal_register(Op_VecS, ret);
  idealreg2regmask[Op_VecD] = regmask_for_ideal_register(Op_VecD, ret);
  idealreg2regmask[Op_VecX] = regmask_for_ideal_register(Op_VecX, ret);
  idealreg2regmask[Op_VecY] = regmask_for_ideal_register(Op_VecY, ret);
  idealreg2regmask[Op_VecZ] = regmask_for_ideal_register(Op_VecZ, ret);
}

#ifdef ASSERT
static void match_alias_type(Compile* C, Node* n, Node* m) {
  if (!VerifyAliases)  return;  // do not go looking for trouble by default
  const TypePtr* nat = n->adr_type();
  const TypePtr* mat = m->adr_type();
  int nidx = C->get_alias_index(nat);
  int midx = C->get_alias_index(mat);
  // Detune the assert for cases like (AndI 0xFF (LoadB p)).
  if (nidx == Compile::AliasIdxTop && midx >= Compile::AliasIdxRaw) {
    for (uint i = 1; i < n->req(); i++) {
      Node* n1 = n->in(i);
      const TypePtr* n1at = n1->adr_type();
      if (n1at != NULL) {
        nat = n1at;
        nidx = C->get_alias_index(n1at);
      }
    }
  }
  // %%% Kludgery. Instead, fix ideal adr_type methods for all these cases:
  if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxRaw) {
    switch (n->Opcode()) {
    case Op_PrefetchAllocation:
      nidx = Compile::AliasIdxRaw;
      nat = TypeRawPtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxRaw && midx == Compile::AliasIdxTop) {
    switch (n->Opcode()) {
    case Op_ClearArray:
      midx = Compile::AliasIdxRaw;
      mat = TypeRawPtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxBot) {
    switch (n->Opcode()) {
    case Op_Return:
    case Op_Rethrow:
    case Op_Halt:
    case Op_TailCall:
    case Op_TailJump:
      nidx = Compile::AliasIdxBot;
      nat = TypePtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxBot && midx == Compile::AliasIdxTop) {
    switch (n->Opcode()) {
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_AryEq:
    case Op_HasNegatives:
    case Op_MemBarVolatile:
    case Op_MemBarCPUOrder: // %%% these ideals should have narrower adr_type?
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_OnSpinWait:
    case Op_EncodeISOArray:
      nidx = Compile::AliasIdxTop;
      nat = NULL;
      break;
    }
  }
  if (nidx != midx) {
    if (PrintOpto || (PrintMiscellaneous && (WizardMode || Verbose))) {
      tty->print_cr("==== Matcher alias shift %d => %d", nidx, midx);
      n->dump();
      m->dump();
    }
    assert(C->subsume_loads() && C->must_alias(nat, midx),
           "must not lose alias info when matching");
  }
}
#endif

//------------------------------xform------------------------------------------
// Given a Node in old-space, Match him (Label/Reduce) to produce a machine
// Node in new-space. Given a new-space Node, recursively walk his children.
Node *Matcher::transform( Node *n ) { ShouldNotCallThis(); return n; }
Node *Matcher::xform( Node *n, int max_stack ) {
  // Use one stack to keep both: child's node/state and parent's node/index
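  // Each stack entry carries (node, visit state, parent, input index);
  // index -1 marks a precedence edge, so the Post_Visit handler below
  // calls add_prec() instead of set_req().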
  MStack mstack(max_stack * 2 * 2); // usually: C->live_nodes() * 2 * 2
  mstack.push(n, Visit, NULL, -1);  // set NULL as parent to indicate root
  while (mstack.is_nonempty()) {
    C->check_node_count(NodeLimitFudgeFactor, "too many nodes matching instructions");
    if (C->failing()) return NULL;
    n = mstack.node();          // Leave node on stack
    Node_State nstate = mstack.state();
    if (nstate == Visit) {
      mstack.set_state(Post_Visit);
      Node *oldn = n;
      // Old-space or new-space check
      if (!C->node_arena()->contains(n)) {
        // Old space!
        Node* m;
        if (has_new_node(n)) {  // Not yet Label/Reduced
          m = new_node(n);
        } else {
          if (!is_dontcare(n)) { // Matcher can match this guy
            // Calls match special. They match alone with no children.
            // Their children, the incoming arguments, match normally.
            m = n->is_SafePoint() ? match_sfpt(n->as_SafePoint()):match_tree(n);
            if (C->failing())  return NULL;
            if (m == NULL) { Matcher::soft_match_failure(); return NULL; }
            if (n->is_MemBar()) {
              m->as_MachMemBar()->set_adr_type(n->adr_type());
            }
          } else {                  // Nothing the matcher cares about
            if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Multi()) {  // Projections?
              // Convert to machine-dependent projection
              m = n->in(0)->as_Multi()->match( n->as_Proj(), this );
#ifdef ASSERT
              _new2old_map.map(m->_idx, n);
#endif
              if (m->in(0) != NULL) // m might be top
                collect_null_checks(m, n);
            } else {                // Else just a regular 'ol guy
              m = n->clone();       // So just clone into new-space
#ifdef ASSERT
              _new2old_map.map(m->_idx, n);
#endif
              // Def-Use edges will be added incrementally as Uses
              // of this node are matched.
              assert(m->outcnt() == 0, "no Uses of this clone yet");
            }
          }

          set_new_node(n, m);       // Map old to new
          if (_old_node_note_array != NULL) {
            Node_Notes* nn = C->locate_node_notes(_old_node_note_array,
                                                  n->_idx);
            C->set_node_notes_at(m->_idx, nn);
          }
          debug_only(match_alias_type(C, n, m));
        }
        n = m;    // n is now a new-space node
        mstack.set_node(n);
      }

      // New space!
      if (_visited.test_set(n->_idx)) continue; // while(mstack.is_nonempty())

      int i;
      // Put precedence edges on stack first (match them last).
      for (i = oldn->req(); (uint)i < oldn->len(); i++) {
        Node *m = oldn->in(i);
        if (m == NULL) break;
        // set -1 to call add_prec() instead of set_req() during Step1
        mstack.push(m, Visit, n, -1);
      }

      // Handle precedence edges for interior nodes
      for (i = n->len()-1; (uint)i >= n->req(); i--) {
        Node *m = n->in(i);
        if (m == NULL || C->node_arena()->contains(m)) continue;
        n->rm_prec(i);
        // set -1 to call add_prec() instead of set_req() during Step1
        mstack.push(m, Visit, n, -1);
      }

      // For constant debug info, I'd rather have unmatched constants.
      int cnt = n->req();
      JVMState* jvms = n->jvms();
      int debug_cnt = jvms ? jvms->debug_start() : cnt;

      // Now do only debug info. Clone constants rather than matching.
      // Constants are represented directly in the debug info without
      // the need for executable machine instructions.
      // Monitor boxes are also represented directly.
      for (i = cnt - 1; i >= debug_cnt; --i) { // For all debug inputs do
        Node *m = n->in(i);          // Get input
        int op = m->Opcode();
        assert((op == Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites");
        if( op == Op_ConI || op == Op_ConP || op == Op_ConN || op == Op_ConNKlass ||
            op == Op_ConF || op == Op_ConD || op == Op_ConL
            // || op == Op_BoxLock  // %%%% enable this and remove (+++) in chaitin.cpp
            ) {
          m = m->clone();
#ifdef ASSERT
          _new2old_map.map(m->_idx, n);
#endif
          mstack.push(m, Post_Visit, n, i); // Don't need to visit
          mstack.push(m->in(0), Visit, m, 0);
        } else {
          mstack.push(m, Visit, n, i);
        }
      }

      // And now walk his children, and convert his inputs to new-space.
      for( ; i >= 0; --i ) { // For all normal inputs do
        Node *m = n->in(i);  // Get input
        if(m != NULL)
          mstack.push(m, Visit, n, i);
      }

    }
    else if (nstate == Post_Visit) {
      // Set xformed input
      Node *p = mstack.parent();
      if (p != NULL) { // root doesn't have parent
        int i = (int)mstack.index();
        if (i >= 0)
          p->set_req(i, n); // required input
        else if (i == -1)
          p->add_prec(n);   // precedence input
        else
          ShouldNotReachHere();
      }
      mstack.pop(); // remove processed node from stack
    }
    else {
      ShouldNotReachHere();
    }
  } // while (mstack.is_nonempty())
  return n; // Return new-space Node
}

//------------------------------warp_outgoing_stk_arg------------------------
OptoReg::Name Matcher::warp_outgoing_stk_arg( VMReg reg, OptoReg::Name begin_out_arg_area, OptoReg::Name &out_arg_limit_per_call ) {
  // Convert outgoing argument location to a pre-biased stack offset
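  // (Mirrors warp_incoming_stk_arg above: e.g. an outgoing slot 2 with
  // begin_out_arg_area at stack0+4 warps to OptoReg stack0+6.)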
  if (reg->is_stack()) {
    OptoReg::Name warped = reg->reg2stack();
    // Adjust the stack slot offset to be the register number used
    // by the allocator.
    warped = OptoReg::add(begin_out_arg_area, warped);
    // Keep track of the largest numbered stack slot used for an arg.
    // Largest used slot per call-site indicates the amount of stack
    // that is killed by the call.
    if( warped >= out_arg_limit_per_call )
      out_arg_limit_per_call = OptoReg::add(warped,1);
    if (!RegMask::can_represent_arg(warped)) {
      C->record_method_not_compilable("unsupported calling sequence");
      return OptoReg::Bad;
    }
    return warped;
  }
  return OptoReg::as_OptoReg(reg);
}


//------------------------------match_sfpt-------------------------------------
// Helper function to match call instructions. Calls match special.
// They match alone with no children. Their children, the incoming
// arguments, match normally.
MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
  MachSafePointNode *msfpt = NULL;
  MachCallNode      *mcall = NULL;
  uint cnt;
  // Split out case for SafePoint vs Call
  CallNode *call;
  const TypeTuple *domain;
  ciMethod* method = NULL;
  bool is_method_handle_invoke = false;  // for special kill effects
  if( sfpt->is_Call() ) {
    call = sfpt->as_Call();
    domain = call->tf()->domain();
    cnt = domain->cnt();

    // Match just the call, nothing else
    MachNode *m = match_tree(call);
    if (C->failing())  return NULL;
    if( m == NULL ) { Matcher::soft_match_failure(); return NULL; }

    // Copy data from the Ideal SafePoint to the machine version
    mcall = m->as_MachCall();

    mcall->set_tf(         call->tf());
    mcall->set_entry_point(call->entry_point());
    mcall->set_cnt(        call->cnt());

    if( mcall->is_MachCallJava() ) {
      MachCallJavaNode *mcall_java  = mcall->as_MachCallJava();
      const CallJavaNode *call_java =  call->as_CallJava();
      assert(call_java->validate_symbolic_info(), "inconsistent info");
      method = call_java->method();
      mcall_java->_method = method;
      mcall_java->_bci = call_java->_bci;
      mcall_java->_optimized_virtual = call_java->is_optimized_virtual();
      is_method_handle_invoke = call_java->is_method_handle_invoke();
      mcall_java->_method_handle_invoke = is_method_handle_invoke;
      mcall_java->_override_symbolic_info = call_java->override_symbolic_info();
      if (is_method_handle_invoke) {
        C->set_has_method_handle_invokes(true);
      }
      if( mcall_java->is_MachCallStaticJava() )
        mcall_java->as_MachCallStaticJava()->_name =
         call_java->as_CallStaticJava()->_name;
      if( mcall_java->is_MachCallDynamicJava() )
        mcall_java->as_MachCallDynamicJava()->_vtable_index =
         call_java->as_CallDynamicJava()->_vtable_index;
    }
    else if( mcall->is_MachCallRuntime() ) {
      mcall->as_MachCallRuntime()->_name = call->as_CallRuntime()->_name;
    }
    msfpt = mcall;
  }
  // This is a non-call safepoint
  else {
    call = NULL;
    domain = NULL;
    MachNode *mn = match_tree(sfpt);
    if (C->failing())  return NULL;
    msfpt = mn->as_MachSafePoint();
    cnt = TypeFunc::Parms;
  }

  // Advertise the correct memory effects (for anti-dependence computation).
  msfpt->set_adr_type(sfpt->adr_type());

  // Allocate a private array of RegMasks. These RegMasks are not shared.
  msfpt->_in_rms = NEW_RESOURCE_ARRAY( RegMask, cnt );
  // Empty them all.
  for (uint i = 0; i < cnt; i++) ::new (&(msfpt->_in_rms[i])) RegMask();

  // Do all the pre-defined non-Empty register masks
  msfpt->_in_rms[TypeFunc::ReturnAdr] = _return_addr_mask;
  msfpt->_in_rms[TypeFunc::FramePtr ] = c_frame_ptr_mask;

  // Place where the first outgoing argument can possibly be put.
  OptoReg::Name begin_out_arg_area = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
  assert( is_even(begin_out_arg_area), "" );
  // Compute max outgoing register number per call site.
  OptoReg::Name out_arg_limit_per_call = begin_out_arg_area;
  // Calls to C may hammer extra stack slots above and beyond any arguments.
  // These are usually backing store for register arguments for varargs.
  if( call != NULL && call->is_CallRuntime() )
    out_arg_limit_per_call = OptoReg::add(out_arg_limit_per_call,C->varargs_C_out_slots_killed());


  // Do the normal argument list (parameters) register masks
  int argcnt = cnt - TypeFunc::Parms;
  if( argcnt > 0 ) {          // Skip it all if we have no args
    BasicType *sig_bt    = NEW_RESOURCE_ARRAY( BasicType, argcnt );
    VMRegPair *parm_regs = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
    int i;
    for( i = 0; i < argcnt; i++ ) {
      sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type();
    }
    // V-call to pick proper calling convention
    call->calling_convention( sig_bt, parm_regs, argcnt );

#ifdef ASSERT
    // Sanity check users' calling convention. Really handy during
    // the initial porting effort. Fairly expensive otherwise.
    { for (int i = 0; i<argcnt; i++) {
        if( !parm_regs[i].first()->is_valid() &&
            !parm_regs[i].second()->is_valid() ) continue;
        VMReg reg1 = parm_regs[i].first();
        VMReg reg2 = parm_regs[i].second();
        for (int j = 0; j < i; j++) {
          if( !parm_regs[j].first()->is_valid() &&
              !parm_regs[j].second()->is_valid() ) continue;
          VMReg reg3 = parm_regs[j].first();
          VMReg reg4 = parm_regs[j].second();
          if( !reg1->is_valid() ) {
            assert( !reg2->is_valid(), "valid halvsies" );
          } else if( !reg3->is_valid() ) {
            assert( !reg4->is_valid(), "valid halvsies" );
          } else {
            assert( reg1 != reg2, "calling conv. must produce distinct regs");
            assert( reg1 != reg3, "calling conv. must produce distinct regs");
            assert( reg1 != reg4, "calling conv. must produce distinct regs");
            assert( reg2 != reg3, "calling conv. must produce distinct regs");
            assert( reg2 != reg4 || !reg2->is_valid(), "calling conv. must produce distinct regs");
            assert( reg3 != reg4, "calling conv. must produce distinct regs");
          }
        }
      }
    }
#endif

    // Visit each argument. Compute its outgoing register mask.
    // Return results now can have 2 bits returned.
    // Compute max over all outgoing arguments both per call-site
    // and over the entire method.
    for( i = 0; i < argcnt; i++ ) {
      // Address of incoming argument mask to fill in
      RegMask *rm = &mcall->_in_rms[i+TypeFunc::Parms];
      if( !parm_regs[i].first()->is_valid() &&
          !parm_regs[i].second()->is_valid() ) {
        continue;               // Avoid Halves
      }
      // Grab first register, adjust stack slots and insert in mask.
      OptoReg::Name reg1 = warp_outgoing_stk_arg(parm_regs[i].first(), begin_out_arg_area, out_arg_limit_per_call );
      if (OptoReg::is_valid(reg1))
        rm->Insert( reg1 );
      // Grab second register (if any), adjust stack slots and insert in mask.
      OptoReg::Name reg2 = warp_outgoing_stk_arg(parm_regs[i].second(), begin_out_arg_area, out_arg_limit_per_call );
      if (OptoReg::is_valid(reg2))
        rm->Insert( reg2 );
    } // End of for all arguments

    // Compute number of stack slots needed to restore stack in case of
    // Pascal-style argument popping.
    mcall->_argsize = out_arg_limit_per_call - begin_out_arg_area;
  }

  // Compute the max stack slot killed by any call. These will not be
  // available for debug info, and will be used to adjust FIRST_STACK_mask
  // after all call sites have been visited.
  if( _out_arg_limit < out_arg_limit_per_call)
    _out_arg_limit = out_arg_limit_per_call;

  if (mcall) {
    // Kill the outgoing argument area, including any non-argument holes and
    // any legacy C-killed slots. Use Fat-Projections to do the killing.
    // Since the max-per-method covers the max-per-call-site and debug info
    // is excluded on the max-per-method basis, debug info cannot land in
    // this killed area.
    uint r_cnt = mcall->tf()->range()->cnt();
    MachProjNode *proj = new MachProjNode( mcall, r_cnt+10000, RegMask::Empty, MachProjNode::fat_proj );
    if (!RegMask::can_represent_arg(OptoReg::Name(out_arg_limit_per_call-1))) {
      C->record_method_not_compilable("unsupported outgoing calling sequence");
    } else {
      for (int i = begin_out_arg_area; i < out_arg_limit_per_call; i++)
        proj->_rout.Insert(OptoReg::Name(i));
    }
    if (proj->_rout.is_NotEmpty()) {
      push_projection(proj);
    }
  }
  // Transfer the safepoint information from the call to the mcall
  // Move the JVMState list
  msfpt->set_jvms(sfpt->jvms());
  for (JVMState* jvms = msfpt->jvms(); jvms; jvms = jvms->caller()) {
    jvms->set_map(sfpt);
  }

  // Debug inputs begin just after the last incoming parameter
  assert((mcall == NULL) || (mcall->jvms() == NULL) ||
         (mcall->jvms()->debug_start() + mcall->_jvmadj == mcall->tf()->domain()->cnt()), "");

  // Move the OopMap
  msfpt->_oop_map = sfpt->_oop_map;

  // Add additional edges.
  if (msfpt->mach_constant_base_node_input() != (uint)-1 && !msfpt->is_MachCallLeaf()) {
    // For these calls we can not add MachConstantBase in expand(), as the
    // ins are not complete then.
    msfpt->ins_req(msfpt->mach_constant_base_node_input(), C->mach_constant_base_node());
    if (msfpt->jvms() &&
        msfpt->mach_constant_base_node_input() <= msfpt->jvms()->debug_start() + msfpt->_jvmadj) {
      // We added an edge before jvms, so we must adapt the position of the ins.
      msfpt->jvms()->adapt_position(+1);
    }
  }

  // Registers killed by the call are set in the local scheduling pass
  // of Global Code Motion.
  return msfpt;
}

//---------------------------match_tree----------------------------------------
// Match an Ideal Node DAG - turn it into a tree; Label & Reduce. Used as part
// of the whole-sale conversion from Ideal to Mach Nodes. Also used for
// making GotoNodes while building the CFG and in init_spill_mask() to identify
// a Load's result RegMask for memoization in idealreg2regmask[]
MachNode *Matcher::match_tree( const Node *n ) {
  assert( n->Opcode() != Op_Phi, "cannot match" );
  assert( !n->is_block_start(), "cannot match" );
  // Set the mark for all locally allocated State objects.
  // When this call returns, the _states_arena arena will be reset
  // freeing all State objects.
  ResourceMark rm( &_states_arena );

  LabelRootDepth = 0;

  // StoreNodes require their Memory input to match any LoadNodes
  Node *mem = n->is_Store() ? n->in(MemNode::Memory) : (Node*)1 ;
#ifdef ASSERT
  Node* save_mem_node = _mem_node;
  _mem_node = n->is_Store() ? (Node*)n : NULL;
#endif
  // State object for root node of match tree
  // Allocate it on _states_arena - stack allocation can cause stack overflow.
  State *s = new (&_states_arena) State;
  s->_kids[0] = NULL;
  s->_kids[1] = NULL;
  s->_leaf = (Node*)n;
  // Label the input tree, allocating labels from top-level arena
  Node* root_mem = mem;
  Label_Root(n, s, n->in(0), root_mem);
  if (C->failing()) return NULL;

  // The minimum cost match for the whole tree is found at the root State
  uint mincost = max_juint;
  uint cost = max_juint;
  uint i;
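  // Rules numbered below NUM_OPERANDS are operand (chain) rules; only a
  // rule at NUM_OPERANDS or above reduces to an actual machine
  // instruction, hence the s->_rule[i] >= NUM_OPERANDS check below.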
1370 for( i = 0; i < NUM_OPERANDS; i++ ) {
1371 if( s->valid(i) && // valid entry and
1372 s->_cost[i] < cost && // low cost and
1373 s->_rule[i] >= NUM_OPERANDS ) // not an operand
1374 cost = s->_cost[mincost=i];
1375 }
1376 if (mincost == max_juint) {
1377 #ifndef PRODUCT
1378 tty->print("No matching rule for:");
1379 s->dump();
1380 #endif
1381 Matcher::soft_match_failure();
1382 return NULL;
1383 }
1384 // Reduce input tree based upon the state labels to machine Nodes
1385 MachNode *m = ReduceInst( s, s->_rule[mincost], mem );
1386 #ifdef ASSERT
1387 _old2new_map.map(n->_idx, m);
1388 _new2old_map.map(m->_idx, (Node*)n);
1389 #endif
1390
1391 // Add any Matcher-ignored edges
1392 uint cnt = n->req();
1393 uint start = 1;
1394 if( mem != (Node*)1 ) start = MemNode::Memory+1;
1395 if( n->is_AddP() ) {
1396 assert( mem == (Node*)1, "" );
1397 start = AddPNode::Base+1;
1398 }
1399 for( i = start; i < cnt; i++ ) {
1400 if( !n->match_edge(i) ) {
1401 if( i < m->req() )
1402 m->ins_req( i, n->in(i) );
1403 else
1404 m->add_req( n->in(i) );
1405 }
1406 }
1407
1408 debug_only( _mem_node = save_mem_node; )
1409 return m;
1410 }
1411
1412
1413 //------------------------------match_into_reg---------------------------------
1414 // Choose to either match this Node in a register or part of the current
1415 // match tree. Return true for requiring a register and false for matching
1416 // as part of the current match tree.
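// E.g., a constant shared by several trees is never forced into a register
// here (it can rematerialize inside each tree), while a non-constant value
// whose control differs from the tree's control is split out and matched
// into a register of its own.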
1417 static bool match_into_reg( const Node *n, Node *m, Node *control, int i, bool shared ) {
1418
1419 const Type *t = m->bottom_type();
1420
1421 if (t->singleton()) {
1422 // Never force constants into registers. Allow them to match as
1423 // constants or registers. Copies of the same value will share
1424 // the same register. See find_shared_node.
1425 return false;
1426 } else { // Not a constant
1427 // Stop recursion if they have different Controls.
1428 Node* m_control = m->in(0);
1429 // Control of a load's memory can post-dominate the load's control.
1430 // So use it, since the load can't float above its memory.
1431 Node* mem_control = (m->is_Load()) ? m->in(MemNode::Memory)->in(0) : NULL;
1432 if (control && m_control && control != m_control && control != mem_control) {
1433
1434 // Actually, we can live with the most conservative control we
1435 // find, if it post-dominates the others. This allows us to
1436 // pick up load/op/store trees where the load can float a little
1437 // above the store.
1438 Node *x = control;
1439 const uint max_scan = 6; // Arbitrary scan cutoff
1440 uint j;
1441 for (j=0; j<max_scan; j++) {
1442 if (x->is_Region()) // Bail out at merge points
1443 return true;
1444 x = x->in(0);
1445 if (x == m_control) // Does 'control' post-dominate
1446 break; // m->in(0)? If so, we can use it
1447 if (x == mem_control) // Does 'control' post-dominate
1448 break; // mem_control? If so, we can use it
1449 }
1450 if (j == max_scan) // No post-domination before scan end?
1451 return true; // Then break the match tree up
1452 }
1453 if ((m->is_DecodeN() && Matcher::narrow_oop_use_complex_address()) ||
1454 (m->is_DecodeNKlass() && Matcher::narrow_klass_use_complex_address())) {
1455 // These are commonly used in address expressions and can
1456 // efficiently fold into them on X64 in some cases.
1457 return false;
1458 }
1459 }
1460
1461 // Cloning is not forced here. If shared, put the value into a register.
1462 return shared;
1463 }
1464
1465
1466 //------------------------------Instruction Selection--------------------------
1467 // Label method walks a "tree" of nodes, using the ADLC generated DFA to match
1468 // ideal nodes to machine instructions. Trees are delimited by shared Nodes,
1469 // things the Matcher does not match (e.g., Memory), and things with different
1470 // Controls (hence forced into different blocks). We pass in the Control
1471 // selected for this entire State tree.
1472
1473 // The Matcher works on Trees, but an Intel add-to-memory requires a DAG: the
1474 // Store and the Load must have identical Memories (as well as identical
1475 // pointers). Since the Matcher does not have anything for Memory (and
1476 // does not handle DAGs), I have to match the Memory input myself. If the
1477 // Tree root is a Store or if there are multiple Loads in the tree, I require
1478 // all Loads to have the identical memory.
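// E.g., (StoreI mem addr (AddI (LoadI mem addr) v)) can be labeled as one
// tree only because the Store and the Load share the same 'mem'; a Load
// with a different memory state would be excluded from this tree and
// matched separately into a register.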
1479 Node* Matcher::Label_Root(const Node* n, State* svec, Node* control, Node*& mem) {
1480 // Since Label_Root is a recursive function, it's possible that we might run
1481 // out of stack space. See bugs 6272980 & 6227033 for more info.
1482 LabelRootDepth++;
1483 if (LabelRootDepth > MaxLabelRootDepth) {
1484 C->record_method_not_compilable("Out of stack space, increase MaxLabelRootDepth");
1485 return NULL;
1486 }
1487 uint care = 0; // Edges matcher cares about
1488 uint cnt = n->req();
1489 uint i = 0;
1490
1491 // Examine children for memory state
1492 // Can only subsume a child into your match-tree if that child's memory state
1493 // is not modified along the path to another input.
1494 // It is unsafe even if the other inputs are separate roots.
1495 Node *input_mem = NULL;
1496 for( i = 1; i < cnt; i++ ) {
1497 if( !n->match_edge(i) ) continue;
1498 Node *m = n->in(i); // Get ith input
1499 assert( m, "expect non-null children" );
1500 if( m->is_Load() ) {
1501 if( input_mem == NULL ) {
1502 input_mem = m->in(MemNode::Memory);
1503 if (mem == (Node*)1) {
1504 // Save this memory to bail out if there's another memory access
1505 // to a different memory location in the same tree.
1506 mem = input_mem;
1507 }
1508 } else if( input_mem != m->in(MemNode::Memory) ) {
1509 input_mem = NodeSentinel;
1510 }
1511 }
1512 }
1513
1514 for( i = 1; i < cnt; i++ ){// For my children
1515 if( !n->match_edge(i) ) continue;
1516 Node *m = n->in(i); // Get ith input
1517 // Allocate states out of a private arena
1518 State *s = new (&_states_arena) State;
1519 svec->_kids[care++] = s;
1520 assert( care <= 2, "binary only for now" );
1521
1522 // Recursively label the State tree.
1523 s->_kids[0] = NULL;
1524 s->_kids[1] = NULL;
1525 s->_leaf = m;
1526
1527 // Check for leaves of the State Tree; things that cannot be a part of
1528 // the current tree. If it finds any, that value is matched as a
1529 // register operand. If not, then the normal matching is used.
1530 if( match_into_reg(n, m, control, i, is_shared(m)) ||
1531 // Stop recursion if this is a LoadNode and there is another memory access
1532 // to a different memory location in the same tree (for example, a StoreNode
1533 // at the root of this tree or another LoadNode in one of the children).
1534 ((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem) ||
1535 // Can NOT include the match of a subtree when its memory state
1536 // is used by any of the other subtrees
1537 (input_mem == NodeSentinel) ) {
1538 // Print when we exclude matching due to different memory states at input-loads
1539 if (PrintOpto && (Verbose && WizardMode) && (input_mem == NodeSentinel)
1540 && !((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem)) {
1541 tty->print_cr("invalid input_mem");
1542 }
1543 // Switch to a register-only opcode; this value must be in a register
1544 // and cannot be subsumed as part of a larger instruction.
1545 s->DFA( m->ideal_reg(), m );
1546
1547 } else {
1548 // If match tree has no control and we do, adopt it for entire tree
1549 if( control == NULL && m->in(0) != NULL && m->req() > 1 )
1550 control = m->in(0); // Pick up control
1551 // Else match as a normal part of the match tree.
1552 control = Label_Root(m, s, control, mem);
1553 if (C->failing()) return NULL;
1554 }
1555 }
1556
1557
1558 // Call DFA to match this node, and return
1559 svec->DFA( n->Opcode(), n );
1560
1561 #ifdef ASSERT
1562 uint x;
1563 for( x = 0; x < _LAST_MACH_OPER; x++ )
1564 if( svec->valid(x) )
1565 break;
1566
1567 if (x >= _LAST_MACH_OPER) {
1568 n->dump();
1569 svec->dump();
1570 assert( false, "bad AD file" );
1571 }
1572 #endif
1573 return control;
1574 }
1575
1576
1577 // Con nodes reduced using the same rule can share their MachNode
1578 // which reduces the number of copies of a constant in the final
1579 // program. The register allocator is free to split uses later to
1580 // split live ranges.
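// E.g., two uses of the same (ConI 5) reduced by the same rule will both
// point at one constant-load MachNode instead of materializing the
// constant twice; the allocator may still split that live range later.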
1581 MachNode* Matcher::find_shared_node(Node* leaf, uint rule) {
1582 if (!leaf->is_Con() && !leaf->is_DecodeNarrowPtr()) return NULL;
1583
1584 // See if this Con has already been reduced using this rule.
1585 if (_shared_nodes.Size() <= leaf->_idx) return NULL;
1586 MachNode* last = (MachNode*)_shared_nodes.at(leaf->_idx);
1587 if (last != NULL && rule == last->rule()) {
1588 // Don't expect control change for DecodeN
1589 if (leaf->is_DecodeNarrowPtr())
1590 return last;
1591 // Get the new space root.
1592 Node* xroot = new_node(C->root());
1593 if (xroot == NULL) {
1594 // This shouldn't happen given the order of matching.
1595 return NULL;
1596 }
1597
1598 // Shared constants need to have their control be root so they
1599 // can be scheduled properly.
1600 Node* control = last->in(0);
1601 if (control != xroot) {
1602 if (control == NULL || control == C->root()) {
1603 last->set_req(0, xroot);
1604 } else {
1605 assert(false, "unexpected control");
1606 return NULL;
1607 }
1608 }
1609 return last;
1610 }
1611 return NULL;
1612 }
1613
1614
1615 //------------------------------ReduceInst-------------------------------------
1616 // Reduce a State tree (with given Control) into a tree of MachNodes.
1617 // This routine (and its cohort ReduceOper) converts Ideal Nodes into
1618 // complicated machine Nodes. Each MachNode covers some tree of Ideal Nodes.
1619 // Each MachNode has a number of complicated MachOper operands; each
1620 // MachOper also covers a further tree of Ideal Nodes.
1621
1622 // The root of the Ideal match tree is always an instruction, so we enter
1623 // the recursion here. After building the MachNode, we need to recurse
1624 // the tree checking for these cases:
1625 // (1) Child is an instruction -
1626 // Build the instruction (recursively), add it as an edge.
1627 // Build a simple operand (register) to hold the result of the instruction.
1628 // (2) Child is an interior part of an instruction -
1629 // Skip over it (do nothing)
1630 // (3) Child is the start of an operand -
1631 // Build the operand, place it inside the instruction
1632 // Call ReduceOper.
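// Illustrative operand layout (operand names are defined by the AD file):
// for a register-with-memory add rule, _opnds[0] holds the result register
// operand, _opnds[1] the register input, and _opnds[2] a complex memory
// operand that itself covers the (AddP base offset) address subtree.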
1633 MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) {
1634 assert( rule >= NUM_OPERANDS, "called with operand rule" );
1635
1636 MachNode* shared_node = find_shared_node(s->_leaf, rule);
1637 if (shared_node != NULL) {
1638 return shared_node;
1639 }
1640
1641 // Build the object to represent this state & prepare for recursive calls
1642 MachNode *mach = s->MachNodeGenerator(rule);
1643 guarantee(mach != NULL, "Missing MachNode");
1644 mach->_opnds[0] = s->MachOperGenerator(_reduceOp[rule]);
1645 assert( mach->_opnds[0] != NULL, "Missing result operand" );
1646 Node *leaf = s->_leaf;
1647 // Check for instruction or instruction chain rule
1648 if( rule >= _END_INST_CHAIN_RULE || rule < _BEGIN_INST_CHAIN_RULE ) {
1649 assert(C->node_arena()->contains(s->_leaf) || !has_new_node(s->_leaf),
1650 "duplicating node that's already been matched");
1651 // Instruction
1652 mach->add_req( leaf->in(0) ); // Set initial control
1653 // Reduce interior of complex instruction
1654 ReduceInst_Interior( s, rule, mem, mach, 1 );
1655 } else {
1656 // Instruction chain rules are data-dependent on their inputs
1657 mach->add_req(0); // Set initial control to none
1658 ReduceInst_Chain_Rule( s, rule, mem, mach );
1659 }
1660
1661 // If a Memory was used, insert a Memory edge
1662 if( mem != (Node*)1 ) {
1663 mach->ins_req(MemNode::Memory,mem);
1664 #ifdef ASSERT
1665 // Verify adr type after matching memory operation
1666 const MachOper* oper = mach->memory_operand();
1667 if (oper != NULL && oper != (MachOper*)-1) {
1668 // It has a unique memory operand. Find corresponding ideal mem node.
1669 Node* m = NULL;
1670 if (leaf->is_Mem()) {
1671 m = leaf;
1672 } else {
1673 m = _mem_node;
1674 assert(m != NULL && m->is_Mem(), "expecting memory node");
1675 }
1676 const Type* mach_at = mach->adr_type();
1677 // DecodeN node consumed by an address may have different type
1678 // than its input. Don't compare types for such case.
1679 if (m->adr_type() != mach_at &&
1680 (m->in(MemNode::Address)->is_DecodeNarrowPtr() ||
1681 (m->in(MemNode::Address)->is_AddP() &&
1682 m->in(MemNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr()) ||
1683 (m->in(MemNode::Address)->is_AddP() &&
1684 m->in(MemNode::Address)->in(AddPNode::Address)->is_AddP() &&
1685 m->in(MemNode::Address)->in(AddPNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr()))) {
1686 mach_at = m->adr_type();
1687 }
1688 if (m->adr_type() != mach_at) {
1689 m->dump();
1690 tty->print_cr("mach:");
1691 mach->dump(1);
1692 }
1693 assert(m->adr_type() == mach_at, "matcher should not change adr type");
1694 }
1695 #endif
1696 }
1697
1698 // If the _leaf is an AddP, insert the base edge
1699 if (leaf->is_AddP()) {
1700 mach->ins_req(AddPNode::Base,leaf->in(AddPNode::Base));
1701 }
1702
1703 uint number_of_projections_prior = number_of_projections();
1704
1705 // Perform any 1-to-many expansions required
1706 MachNode *ex = mach->Expand(s, _projection_list, mem);
1707 if (ex != mach) {
1708 assert(ex->ideal_reg() == mach->ideal_reg(), "ideal types should match");
1709 if( ex->in(1)->is_Con() )
1710 ex->in(1)->set_req(0, C->root());
1711 // Remove old node from the graph
1712 for( uint i=0; i<mach->req(); i++ ) {
1713 mach->set_req(i,NULL);
1714 }
1715 #ifdef ASSERT
1716 _new2old_map.map(ex->_idx, s->_leaf);
1717 #endif
1718 }
1719
1720 // PhaseChaitin::fixup_spills will sometimes generate spill code
1721 // via the matcher. By that time, nodes have been wired into the CFG,
1722 // so any further nodes generated by expand rules would be left hanging
1723 // in space and would not get emitted as output code. Catch this.
1724 // Also, catch any new register allocation constraints ("projections")
1725 // generated belatedly during spill code generation.
1726 if (_allocation_started) {
1727 guarantee(ex == mach, "no expand rules during spill generation");
1728 guarantee(number_of_projections_prior == number_of_projections(), "no allocation during spill generation");
1729 }
1730
1731 if (leaf->is_Con() || leaf->is_DecodeNarrowPtr()) {
1732 // Record the con for sharing
1733 _shared_nodes.map(leaf->_idx, ex);
1734 }
1735
1736 // Have mach nodes inherit GC barrier data
1737 if (leaf->is_LoadStore()) {
1738 mach->set_barrier_data(leaf->as_LoadStore()->barrier_data());
1739 } else if (leaf->is_Mem()) {
1740 mach->set_barrier_data(leaf->as_Mem()->barrier_data());
1741 }
1742
1743 return ex;
1744 }
1745
1746 void Matcher::handle_precedence_edges(Node* n, MachNode *mach) {
1747 for (uint i = n->req(); i < n->len(); i++) {
1748 if (n->in(i) != NULL) {
1749 mach->add_prec(n->in(i));
1750 }
1751 }
1752 }
1753
1754 void Matcher::ReduceInst_Chain_Rule( State *s, int rule, Node *&mem, MachNode *mach ) {
1755 // 'op' is what I am expecting to receive
1756 int op = _leftOp[rule];
1757 // Operand type to catch child's result
1758 // This is what my child will give me.
1759 int opnd_class_instance = s->_rule[op];
1760 // Choose between operand class or not.
1761 // This is what I will receive.
1762 int catch_op = (FIRST_OPERAND_CLASS <= op && op < NUM_OPERANDS) ? opnd_class_instance : op;
1763 // New rule for child. Chase operand classes to get the actual rule.
1764 int newrule = s->_rule[catch_op];
1765
1766 if( newrule < NUM_OPERANDS ) {
1767 // Chain from operand or operand class, may be output of shared node
1768 assert( 0 <= opnd_class_instance && opnd_class_instance < NUM_OPERANDS,
1769 "Bad AD file: Instruction chain rule must chain from operand");
1770 // Insert operand into array of operands for this instruction
1771 mach->_opnds[1] = s->MachOperGenerator(opnd_class_instance);
1772
1773 ReduceOper( s, newrule, mem, mach );
1774 } else {
1775 // Chain from the result of an instruction
1776 assert( newrule >= _LAST_MACH_OPER, "Do NOT chain from internal operand");
1777 mach->_opnds[1] = s->MachOperGenerator(_reduceOp[catch_op]);
1778 Node *mem1 = (Node*)1;
1779 debug_only(Node *save_mem_node = _mem_node;)
1780 mach->add_req( ReduceInst(s, newrule, mem1) );
1781 debug_only(_mem_node = save_mem_node;)
1782 }
1783 return;
1784 }
1785
1786
1787 uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mach, uint num_opnds ) {
1788 handle_precedence_edges(s->_leaf, mach);
1789
1790 if( s->_leaf->is_Load() ) {
1791 Node *mem2 = s->_leaf->in(MemNode::Memory);
1792 assert( mem == (Node*)1 || mem == mem2, "multiple Memories being matched at once?" );
1793 debug_only( if( mem == (Node*)1 ) _mem_node = s->_leaf;)
1794 mem = mem2;
1795 }
1796 if( s->_leaf->in(0) != NULL && s->_leaf->req() > 1) {
1797 if( mach->in(0) == NULL )
1798 mach->set_req(0, s->_leaf->in(0));
1799 }
1800
1801 // Now recursively walk the state tree & add operand list.
1802 for( uint i=0; i<2; i++ ) { // binary tree
1803 State *newstate = s->_kids[i];
1804 if( newstate == NULL ) break; // Might only have 1 child
1805 // 'op' is what I am expecting to receive
1806 int op;
1807 if( i == 0 ) {
1808 op = _leftOp[rule];
1809 } else {
1810 op = _rightOp[rule];
1811 }
1812 // Operand type to catch child's result
1813 // This is what my child will give me.
1814 int opnd_class_instance = newstate->_rule[op];
1815 // Choose between operand class or not.
1816 // This is what I will receive.
1817 int catch_op = (op >= FIRST_OPERAND_CLASS && op < NUM_OPERANDS) ? opnd_class_instance : op;
1818 // New rule for child. Chase operand classes to get the actual rule.
1819 int newrule = newstate->_rule[catch_op];
1820
1821 if( newrule < NUM_OPERANDS ) { // Operand/operandClass or internalOp/instruction?
1822 // Operand/operandClass
1823 // Insert operand into array of operands for this instruction
1824 mach->_opnds[num_opnds++] = newstate->MachOperGenerator(opnd_class_instance);
1825 ReduceOper( newstate, newrule, mem, mach );
1826
1827 } else { // Child is internal operand or new instruction
1828 if( newrule < _LAST_MACH_OPER ) { // internal operand or instruction?
1829 // internal operand --> call ReduceInst_Interior
1830 // Interior of complex instruction. Do nothing but recurse.
1831 num_opnds = ReduceInst_Interior( newstate, newrule, mem, mach, num_opnds );
1832 } else {
1833 // instruction --> call build operand( ) to catch result
1834 // --> ReduceInst( newrule )
1835 mach->_opnds[num_opnds++] = s->MachOperGenerator(_reduceOp[catch_op]);
1836 Node *mem1 = (Node*)1;
1837 debug_only(Node *save_mem_node = _mem_node;)
1838 mach->add_req( ReduceInst( newstate, newrule, mem1 ) );
1839 debug_only(_mem_node = save_mem_node;)
1840 }
1841 }
1842 assert( mach->_opnds[num_opnds-1], "" );
1843 }
1844 return num_opnds;
1845 }
1846
1847 // This routine walks the interior of possible complex operands.
1848 // At each point we check our children in the match tree:
1849 // (1) No children -
1850 // We are a leaf; add _leaf field as an input to the MachNode
1851 // (2) Child is an internal operand -
1852 // Skip over it ( do nothing )
1853 // (3) Child is an instruction -
1854 // Call ReduceInst recursively and add the resulting
1855 // instruction as an input to the MachNode
1856 void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) {
1857 assert( rule < _LAST_MACH_OPER, "called with operand rule" );
1858 State *kid = s->_kids[0];
1859 assert( kid == NULL || s->_leaf->in(0) == NULL, "internal operands have no control" );
1860
1861 // Leaf? And not subsumed?
1862 if( kid == NULL && !_swallowed[rule] ) {
1863 mach->add_req( s->_leaf ); // Add leaf pointer
1864 return; // Bail out
1865 }
1866
1867 if( s->_leaf->is_Load() ) {
1868 assert( mem == (Node*)1, "multiple Memories being matched at once?" );
1869 mem = s->_leaf->in(MemNode::Memory);
1870 debug_only(_mem_node = s->_leaf;)
1871 }
1872
1873 handle_precedence_edges(s->_leaf, mach);
1874
1875 if( s->_leaf->in(0) && s->_leaf->req() > 1) {
1876 if( !mach->in(0) )
1877 mach->set_req(0,s->_leaf->in(0));
1878 else {
1879 assert( s->_leaf->in(0) == mach->in(0), "same instruction, differing controls?" );
1880 }
1881 }
1882
1883 for( uint i=0; kid != NULL && i<2; kid = s->_kids[1], i++ ) { // binary tree
1884 int newrule;
1885 if( i == 0)
1886 newrule = kid->_rule[_leftOp[rule]];
1887 else
1888 newrule = kid->_rule[_rightOp[rule]];
1889
1890 if( newrule < _LAST_MACH_OPER ) { // Operand or instruction?
1891 // Internal operand; recurse but do nothing else
1892 ReduceOper( kid, newrule, mem, mach );
1893
1894 } else { // Child is a new instruction
1895 // Reduce the instruction, and add a direct pointer from this
1896 // machine instruction to the newly reduced one.
1897 Node *mem1 = (Node*)1;
1898 debug_only(Node *save_mem_node = _mem_node;)
1899 mach->add_req( ReduceInst( kid, newrule, mem1 ) );
1900 debug_only(_mem_node = save_mem_node;)
1901 }
1902 }
1903 }
1904
1905
1906 // -------------------------------------------------------------------------
1907 // Java-Java calling convention
1908 // (what you use when Java calls Java)
1909
1910 //------------------------------find_receiver----------------------------------
1911 // For a given signature, return the OptoReg for parameter 0.
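// E.g., on x86_64 this typically names the first Java argument register,
// which carries the receiver for an incoming instance call.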
1912 OptoReg::Name Matcher::find_receiver( bool is_outgoing ) {
1913 VMRegPair regs;
1914 BasicType sig_bt = T_OBJECT;
1915 calling_convention(&sig_bt, &regs, 1, is_outgoing);
1916 // Return argument 0 register. In the LP64 build pointers
1917 // take 2 registers, but the VM wants only the 'main' name.
1918 return OptoReg::as_OptoReg(regs.first());
1919 }
1920
1921 bool Matcher::is_vshift_con_pattern(Node *n, Node *m) {
1922 if (n != NULL && m != NULL) {
1923 return VectorNode::is_vector_shift(n) &&
1924 VectorNode::is_vector_shift_count(m) && m->in(1)->is_Con();
1925 }
1926 return false;
1927 }
1928
1929
1930 bool Matcher::clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
1931 // Must clone all producers of flags, or we will not match correctly.
1932 // Suppose a compare setting int-flags is shared (e.g., a switch-tree)
1933 // then it will match into an ideal Op_RegFlags. Alas, the fp-flags
1934 // are also there, so we may match a float-branch to int-flags and
1935 // expect the allocator to haul the flags from the int-side to the
1936 // fp-side. No can do.
1937 if (_must_clone[m->Opcode()]) {
1938 mstack.push(m, Visit);
1939 return true;
1940 }
1941 return pd_clone_node(n, m, mstack);
1942 }
1943
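// Clone base+offset address expressions at each use so they fold into
// addressing modes. E.g., if (AddP base (ConL 16)) feeds several memory
// ops, each of them can encode [base + 16] directly instead of sharing
// one add result in a register.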
1944 bool Matcher::clone_base_plus_offset_address(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
1945 Node *off = m->in(AddPNode::Offset);
1946 if (off->is_Con()) {
1947 address_visited.test_set(m->_idx); // Flag as address_visited
1948 mstack.push(m->in(AddPNode::Address), Pre_Visit);
1949 // Clone X+offset as it also folds into most addressing expressions
1950 mstack.push(off, Visit);
1951 mstack.push(m->in(AddPNode::Base), Pre_Visit);
1952 return true;
1953 }
1954 return false;
1955 }
1956
1957 // A method-klass-holder may be passed in the inline_cache_reg
1958 // and then expanded into the inline_cache_reg and a method_oop register
1959 // defined in ad_<arch>.cpp
1960
1961 //------------------------------find_shared------------------------------------
1962 // Set bits if Node is shared or otherwise a root
1963 void Matcher::find_shared(Node* n) {
1964 // Allocate stack of size C->live_nodes() * 2 to avoid frequent realloc
1965 MStack mstack(C->live_nodes() * 2);
1966 // Mark nodes as address_visited if they are inputs to an address expression
1967 VectorSet address_visited(Thread::current()->resource_area());
1968 mstack.push(n, Visit); // Don't need to pre-visit root node
1969 while (mstack.is_nonempty()) {
1970 n = mstack.node(); // Leave node on stack
1971 Node_State nstate = mstack.state();
1972 uint nop = n->Opcode();
1973 if (nstate == Pre_Visit) {
1974 if (address_visited.test(n->_idx)) { // Visited in address already?
1975 // Flag as visited and shared now.
1976 set_visited(n);
1977 }
1978 if (is_visited(n)) { // Visited already?
1979 // Node is shared and has no reason to clone. Flag it as shared.
1980 // This causes it to match into a register for the sharing.
1981 set_shared(n); // Flag as shared and
1982 if (n->is_DecodeNarrowPtr()) {
1983 // Oop field/array element loads must be shared but since
1984 // they are shared through a DecodeN they may appear to have
1985 // a single use so force sharing here.
1986 set_shared(n->in(1));
1987 }
1988 mstack.pop(); // remove node from stack
1989 continue;
1990 }
1991 nstate = Visit; // Not already visited; so visit now
1992 }
1993 if (nstate == Visit) {
1994 mstack.set_state(Post_Visit);
1995 set_visited(n); // Flag as visited now
1996 bool mem_op = false;
1997 int mem_addr_idx = MemNode::Address;
1998 if (find_shared_visit(mstack, n, nop, mem_op, mem_addr_idx)) {
1999 continue;
2000 }
2001 for (int i = n->req() - 1; i >= 0; --i) { // For my children
2002 Node* m = n->in(i); // Get ith input
2003 if (m == NULL) {
2004 continue; // Ignore NULLs
2005 }
2006 if (clone_node(n, m, mstack)) {
2007 continue;
2008 }
2009
2010 // Clone addressing expressions as they are "free" in memory access instructions
2011 if (mem_op && i == mem_addr_idx && m->is_AddP() &&
2012 // When there are other uses besides address expressions
2013 // put it on stack and mark as shared.
2014 !is_visited(m)) {
2015 // Some inputs for address expression are not put on stack
2016 // to avoid marking them as shared and forcing them into register
2017 // if they are used only in address expressions.
2018 // But they should be marked as shared if there are other uses
2019 // besides address expressions.
2020
2021 if (pd_clone_address_expressions(m->as_AddP(), mstack, address_visited)) {
2022 continue;
2023 }
2024 } // if( mem_op &&
2025 mstack.push(m, Pre_Visit);
2026 } // for(int i = ...)
2027 }
2028 else if (nstate == Alt_Post_Visit) {
2029 mstack.pop(); // Remove node from stack
2030 // We cannot remove the Cmp input from the Bool here, as the Bool may be
2031 // shared and all users of the Bool need to move the Cmp in parallel.
2032 // This leaves both the Bool and the If pointing at the Cmp. To
2033 // prevent the Matcher from trying to Match the Cmp along both paths
2034 // BoolNode::match_edge always returns a zero.
2035
2036 // We reorder the Op_If in a pre-order manner, so we can visit without
2037 // accidentally sharing the Cmp (the Bool and the If make 2 users).
2038 n->add_req( n->in(1)->in(1) ); // Add the Cmp next to the Bool
2039 }
2040 else if (nstate == Post_Visit) {
2041 mstack.pop(); // Remove node from stack
2042
2043 // Now hack a few special opcodes
2044 uint opcode = n->Opcode();
2045 bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->matcher_find_shared_post_visit(this, n, opcode);
2046 if (!gc_handled) {
2047 find_shared_post_visit(n, opcode);
2048 }
2049 }
2050 else {
2051 ShouldNotReachHere();
2052 }
2053 } // end of while (mstack.is_nonempty())
2054 }
2055
2056 bool Matcher::find_shared_visit(MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) {
2057 switch(opcode) { // Handle some opcodes special
2058 case Op_Phi: // Treat Phis as shared roots
2059 case Op_Parm:
2060 case Op_Proj: // All handled specially during matching
2061 case Op_SafePointScalarObject:
2062 set_shared(n);
2063 set_dontcare(n);
2064 break;
2065 case Op_If:
2066 case Op_CountedLoopEnd:
2067 mstack.set_state(Alt_Post_Visit); // Alternative way
2068 // Convert (If (Bool (CmpX A B))) into (If (Bool) (CmpX A B)). Helps
2069 // with matching cmp/branch in 1 instruction. The Matcher needs the
2070 // Bool and CmpX side-by-side, because it can only get at constants
2071 // that are at the leaves of Match trees, and the Bool's condition acts
2072 // as a constant here.
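// E.g., (If (Bool (CmpI x 0))) can then match as a single
// compare-and-branch MachNode, with the Bool's condition folded in.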
2073 mstack.push(n->in(1), Visit); // Clone the Bool
2074 mstack.push(n->in(0), Pre_Visit); // Visit control input
2075 return true; // while (mstack.is_nonempty())
2076 case Op_ConvI2D: // These forms efficiently match with a prior
2077 case Op_ConvI2F: // Load but not a following Store
2078 if( n->in(1)->is_Load() && // Prior load
2079 n->outcnt() == 1 && // Not already shared
2080 n->unique_out()->is_Store() ) // Following store
2081 set_shared(n); // Force it to be a root
2082 break;
2083 case Op_ReverseBytesI:
2084 case Op_ReverseBytesL:
2085 if( n->in(1)->is_Load() && // Prior load
2086 n->outcnt() == 1 ) // Not already shared
2087 set_shared(n); // Force it to be a root
2088 break;
2089 case Op_BoxLock: // Can't match until we get stack-regs in ADLC
2090 case Op_IfFalse:
2091 case Op_IfTrue:
2092 case Op_MachProj:
2093 case Op_MergeMem:
2094 case Op_Catch:
2095 case Op_CatchProj:
2096 case Op_CProj:
2097 case Op_JumpProj:
2098 case Op_JProj:
2099 case Op_NeverBranch:
2100 set_dontcare(n);
2101 break;
2102 case Op_Jump:
2103 mstack.push(n->in(1), Pre_Visit); // Switch Value (could be shared)
2104 mstack.push(n->in(0), Pre_Visit); // Visit Control input
2105 return true; // while (mstack.is_nonempty())
2106 case Op_StrComp:
2107 case Op_StrEquals:
2108 case Op_StrIndexOf:
2109 case Op_StrIndexOfChar:
2110 case Op_AryEq:
2111 case Op_HasNegatives:
2112 case Op_StrInflatedCopy:
2113 case Op_StrCompressedCopy:
2114 case Op_EncodeISOArray:
2115 case Op_FmaD:
2116 case Op_FmaF:
2117 case Op_FmaVD:
2118 case Op_FmaVF:
2119 case Op_MacroLogicV:
2120 set_shared(n); // Force result into register (it will be anyways)
2121 break;
2122 case Op_ConP: { // Convert pointers above the centerline to NULL
2123 TypeNode *tn = n->as_Type(); // Constants derive from type nodes
2124 const TypePtr* tp = tn->type()->is_ptr();
2125 if (tp->_ptr == TypePtr::AnyNull) {
2126 tn->set_type(TypePtr::NULL_PTR);
2127 }
2128 break;
2129 }
2130 case Op_ConN: { // Convert narrow pointers above the centerline to NULL
2131 TypeNode *tn = n->as_Type(); // Constants derive from type nodes
2132 const TypePtr* tp = tn->type()->make_ptr();
2133 if (tp && tp->_ptr == TypePtr::AnyNull) {
2134 tn->set_type(TypeNarrowOop::NULL_PTR);
2135 }
2136 break;
2137 }
2138 case Op_Binary: // These are introduced in the Post_Visit state.
2139 ShouldNotReachHere();
2140 break;
2141 case Op_ClearArray:
2142 case Op_SafePoint:
2143 mem_op = true;
2144 break;
2145 default:
2146 if( n->is_Store() ) {
2147 // Do match stores, despite no ideal reg
2148 mem_op = true;
2149 break;
2150 }
2151 if( n->is_Mem() ) { // Loads and LoadStores
2152 mem_op = true;
2153 // Loads must be root of match tree due to prior load conflict
2154 if( C->subsume_loads() == false )
2155 set_shared(n);
2156 }
2157 // Fall into default case
2158 if( !n->ideal_reg() )
2159 set_dontcare(n); // Unmatchable Nodes
2160 } // end_switch
2161 return false;
2162 }
2163
2164 void Matcher::find_shared_post_visit(Node* n, uint opcode) {
2165 switch(opcode) { // Handle some opcodes special
2166 case Op_StorePConditional:
2167 case Op_StoreIConditional:
2168 case Op_StoreLConditional:
2169 case Op_CompareAndExchangeB:
2170 case Op_CompareAndExchangeS:
2171 case Op_CompareAndExchangeI:
2172 case Op_CompareAndExchangeL:
2173 case Op_CompareAndExchangeP:
2174 case Op_CompareAndExchangeN:
2175 case Op_WeakCompareAndSwapB:
2176 case Op_WeakCompareAndSwapS:
2177 case Op_WeakCompareAndSwapI:
2178 case Op_WeakCompareAndSwapL:
2179 case Op_WeakCompareAndSwapP:
2180 case Op_WeakCompareAndSwapN:
2181 case Op_CompareAndSwapB:
2182 case Op_CompareAndSwapS:
2183 case Op_CompareAndSwapI:
2184 case Op_CompareAndSwapL:
2185 case Op_CompareAndSwapP:
2186 case Op_CompareAndSwapN: { // Convert trinary to binary-tree
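// E.g., a CompareAndSwapI with separate expected and new value inputs
// becomes CompareAndSwapI(ctrl, mem, adr, Binary(oldval, newval)), so
// both values fit under the single ValueIn edge of a binary match tree.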
2187 Node* newval = n->in(MemNode::ValueIn);
2188 Node* oldval = n->in(LoadStoreConditionalNode::ExpectedIn);
2189 Node* pair = new BinaryNode(oldval, newval);
2190 n->set_req(MemNode::ValueIn, pair);
2191 n->del_req(LoadStoreConditionalNode::ExpectedIn);
2192 break;
2193 }
2194 case Op_CMoveD: // Convert trinary to binary-tree
2195 case Op_CMoveF:
2196 case Op_CMoveI:
2197 case Op_CMoveL:
2198 case Op_CMoveN:
2199 case Op_CMoveP:
2200 case Op_CMoveVF:
2201 case Op_CMoveVD: {
2202 // Restructure into a binary tree for Matching. It's possible that
2203 // we could move this code up next to the graph reshaping for IfNodes
2204 // or vice-versa, but I do not want to debug this for Ladybird.
2205 // 10/2/2000 CNC.
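// E.g., (CMoveI (Bool (CmpI a b)) src1 src2) is rewritten to
// (CMoveI (Binary Bool Cmp) (Binary src1 src2)).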
2206 Node* pair1 = new BinaryNode(n->in(1), n->in(1)->in(1));
2207 n->set_req(1, pair1);
2208 Node* pair2 = new BinaryNode(n->in(2), n->in(3));
2209 n->set_req(2, pair2);
2210 n->del_req(3);
2211 break;
2212 }
2213 case Op_MacroLogicV: {
2214 Node* pair1 = new BinaryNode(n->in(1), n->in(2));
2215 Node* pair2 = new BinaryNode(n->in(3), n->in(4));
2216 n->set_req(1, pair1);
2217 n->set_req(2, pair2);
2218 n->del_req(4);
2219 n->del_req(3);
2220 break;
2221 }
2222 case Op_LoopLimit: {
2223 Node* pair1 = new BinaryNode(n->in(1), n->in(2));
2224 n->set_req(1, pair1);
2225 n->set_req(2, n->in(3));
2226 n->del_req(3);
2227 break;
2228 }
2229 case Op_StrEquals:
2230 case Op_StrIndexOfChar: {
2231 Node* pair1 = new BinaryNode(n->in(2), n->in(3));
2232 n->set_req(2, pair1);
2233 n->set_req(3, n->in(4));
2234 n->del_req(4);
2235 break;
2236 }
2237 case Op_StrComp:
2238 case Op_StrIndexOf: {
2239 Node* pair1 = new BinaryNode(n->in(2), n->in(3));
2240 n->set_req(2, pair1);
2241 Node* pair2 = new BinaryNode(n->in(4),n->in(5));
2242 n->set_req(3, pair2);
2243 n->del_req(5);
2244 n->del_req(4);
2245 break;
2246 }
2247 case Op_StrCompressedCopy:
2248 case Op_StrInflatedCopy:
2249 case Op_EncodeISOArray: {
2250 // Restructure into a binary tree for Matching.
2251 Node* pair = new BinaryNode(n->in(3), n->in(4));
2252 n->set_req(3, pair);
2253 n->del_req(4);
2254 break;
2255 }
2256 case Op_FmaD:
2257 case Op_FmaF:
2258 case Op_FmaVD:
2259 case Op_FmaVF: {
2260 // Restructure into a binary tree for Matching.
2261 Node* pair = new BinaryNode(n->in(1), n->in(2));
2262 n->set_req(2, pair);
2263 n->set_req(1, n->in(3));
2264 n->del_req(3);
2265 break;
2266 }
2267 case Op_MulAddS2I: {
2268 Node* pair1 = new BinaryNode(n->in(1), n->in(2));
2269 Node* pair2 = new BinaryNode(n->in(3), n->in(4));
2270 n->set_req(1, pair1);
2271 n->set_req(2, pair2);
2272 n->del_req(4);
2273 n->del_req(3);
2274 break;
2275 }
2276 default:
2277 break;
2278 }
2279 }
2280
2281 #ifdef ASSERT
2282 // machine-independent root to machine-dependent root
2283 void Matcher::dump_old2new_map() {
2284 _old2new_map.dump();
2285 }
2286 #endif
2287
2288 //---------------------------collect_null_checks-------------------------------
2289 // Find null checks in the ideal graph; write a machine-specific node for
2290 // each. Used by later implicit-null-check handling. Actually collects
2291 // either an IfTrue or IfFalse for the common NOT-null path, AND the ideal
2292 // value being tested.
2293 void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) {
2294 Node *iff = proj->in(0);
2295 if( iff->Opcode() == Op_If ) {
2296 // During matching If's have Bool & Cmp side-by-side
2297 BoolNode *b = iff->in(1)->as_Bool();
2298 Node *cmp = iff->in(2);
2299 int opc = cmp->Opcode();
2300 if (opc != Op_CmpP && opc != Op_CmpN) return;
2301
2302 const Type* ct = cmp->in(2)->bottom_type();
2303 if (ct == TypePtr::NULL_PTR ||
2304 (opc == Op_CmpN && ct == TypeNarrowOop::NULL_PTR)) {
2305
2306 bool push_it = false;
2307 if( proj->Opcode() == Op_IfTrue ) {
2308 #ifndef PRODUCT
2309 extern int all_null_checks_found;
2310 all_null_checks_found++;
2311 #endif
2312 if( b->_test._test == BoolTest::ne ) {
2313 push_it = true;
2314 }
2315 } else {
2316 assert( proj->Opcode() == Op_IfFalse, "" );
2317 if( b->_test._test == BoolTest::eq ) {
2318 push_it = true;
2319 }
2320 }
2321 if( push_it ) {
2322 _null_check_tests.push(proj);
2323 Node* val = cmp->in(1);
2324 #ifdef _LP64
2325 if (val->bottom_type()->isa_narrowoop() &&
2326 !Matcher::narrow_oop_use_complex_address()) {
2327 //
2328 // Look for DecodeN node which should be pinned to orig_proj.
2329 // On platforms (Sparc) which cannot handle 2 adds
2330 // in the addressing mode, we have to keep a DecodeN node and
2331 // use it to do an implicit NULL check in the address.
2332 //
2333 // DecodeN node was pinned to non-null path (orig_proj) during
2334 // CastPP transformation in final_graph_reshaping_impl().
2335 //
2336 uint cnt = orig_proj->outcnt();
2337 for (uint i = 0; i < orig_proj->outcnt(); i++) {
2338 Node* d = orig_proj->raw_out(i);
2339 if (d->is_DecodeN() && d->in(1) == val) {
2340 val = d;
2341 val->set_req(0, NULL); // Unpin now.
2342 // Mark this as special case to distinguish from
2343 // a regular case: CmpP(DecodeN, NULL).
2344 val = (Node*)(((intptr_t)val) | 1);
2345 break;
2346 }
2347 }
2348 }
2349 #endif
2350 _null_check_tests.push(val);
2351 }
2352 }
2353 }
2354 }
2355
2356 //---------------------------validate_null_checks------------------------------
2357 // It's possible that the value being NULL checked is not the root of a match
2358 // tree. If so, I cannot use the value in an implicit null check.
2359 void Matcher::validate_null_checks( ) {
2360 uint cnt = _null_check_tests.size();
2361 for( uint i=0; i < cnt; i+=2 ) {
2362 Node *test = _null_check_tests[i];
2363 Node *val = _null_check_tests[i+1];
2364 bool is_decoden = ((intptr_t)val) & 1;
2365 val = (Node*)(((intptr_t)val) & ~1);
2366 if (has_new_node(val)) {
2367 Node* new_val = new_node(val);
2368 if (is_decoden) {
2369 assert(val->is_DecodeNarrowPtr() && val->in(0) == NULL, "sanity");
2370 // Note: new_val may have a control edge if
2371 // the original ideal node DecodeN was matched before
2372 // it was unpinned in Matcher::collect_null_checks().
2373 // Unpin the mach node and mark it.
2374 new_val->set_req(0, NULL);
2375 new_val = (Node*)(((intptr_t)new_val) | 1);
2376 }
2377 // Is a match-tree root, so replace with the matched value
2378 _null_check_tests.map(i+1, new_val);
2379 } else {
2380 // Yank from candidate list
2381 _null_check_tests.map(i+1,_null_check_tests[--cnt]);
2382 _null_check_tests.map(i,_null_check_tests[--cnt]);
2383 _null_check_tests.pop();
2384 _null_check_tests.pop();
2385 i-=2;
2386 }
2387 }
2388 }
2389
2390 bool Matcher::gen_narrow_oop_implicit_null_checks() {
2391 // Advise the matcher to perform null checks on the narrow oop side.
2392 // Implicit checks are not possible on the uncompressed oop side anyway
2393 // (at least not for read accesses).
2394 // Performs significantly better (especially on Power 6).
2395 if (!os::zero_page_read_protected()) {
2396 return true;
2397 }
2398 return CompressedOops::use_implicit_null_checks() &&
2399 (narrow_oop_use_complex_address() ||
2400 CompressedOops::base() != NULL);
2401 }
2402
2403 // Compute RegMask for an ideal register.
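// This works by matching a dummy spill-like Load anchored at the frame
// pointer: e.g., Op_RegI matches a LoadI, and the resulting MachNode's
// out_RegMask() describes where an int value may live.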
2404 const RegMask* Matcher::regmask_for_ideal_register(uint ideal_reg, Node* ret) {
2405 const Type* t = Type::mreg2type[ideal_reg];
2406 if (t == NULL) {
2407 assert(ideal_reg >= Op_VecS && ideal_reg <= Op_VecZ, "not a vector: %d", ideal_reg);
2408 return NULL; // not supported
2409 }
2410 Node* fp = ret->in(TypeFunc::FramePtr);
2411 Node* mem = ret->in(TypeFunc::Memory);
2412 const TypePtr* atp = TypePtr::BOTTOM;
2413 MemNode::MemOrd mo = MemNode::unordered;
2414
2415 Node* spill;
2416 switch (ideal_reg) {
2417 case Op_RegN: spill = new LoadNNode(NULL, mem, fp, atp, t->is_narrowoop(), mo); break;
2418 case Op_RegI: spill = new LoadINode(NULL, mem, fp, atp, t->is_int(), mo); break;
2419 case Op_RegP: spill = new LoadPNode(NULL, mem, fp, atp, t->is_ptr(), mo); break;
2420 case Op_RegF: spill = new LoadFNode(NULL, mem, fp, atp, t, mo); break;
2421 case Op_RegD: spill = new LoadDNode(NULL, mem, fp, atp, t, mo); break;
2422 case Op_RegL: spill = new LoadLNode(NULL, mem, fp, atp, t->is_long(), mo); break;
2423
2424 case Op_VecS: // fall-through
2425 case Op_VecD: // fall-through
2426 case Op_VecX: // fall-through
2427 case Op_VecY: // fall-through
2428 case Op_VecZ: spill = new LoadVectorNode(NULL, mem, fp, atp, t->is_vect()); break;
2429
2430 default: ShouldNotReachHere();
2431 }
2432 MachNode* mspill = match_tree(spill);
2433 assert(mspill != NULL, "matching failed: %d", ideal_reg);
2434 // Handle generic vector operand case
2435 if (Matcher::supports_generic_vector_operands && t->isa_vect()) {
2436 specialize_mach_node(mspill);
2437 }
2438 return &mspill->out_RegMask();
2439 }
2440
2441 // Process Mach IR right after selection phase is over.
2442 void Matcher::do_postselect_cleanup() {
2443 if (supports_generic_vector_operands) {
2444 specialize_generic_vector_operands();
2445 if (C->failing()) return;
2446 }
2447 }
2448
2449 //----------------------------------------------------------------------
2450 // Generic machine operands elision.
2451 //----------------------------------------------------------------------
2452
2453 // Compute concrete vector operand for a generic TEMP vector mach node based on its user info.
2454 void Matcher::specialize_temp_node(MachTempNode* tmp, MachNode* use, uint idx) {
2455 assert(use->in(idx) == tmp, "not a user");
2456 assert(!Matcher::is_generic_vector(use->_opnds[0]), "use not processed yet");
2457
2458 if ((uint)idx == use->two_adr()) { // DEF_TEMP case
2459 tmp->_opnds[0] = use->_opnds[0]->clone();
2460 } else {
2461 uint ideal_vreg = vector_ideal_reg(C->max_vector_size());
2462 tmp->_opnds[0] = Matcher::pd_specialize_generic_vector_operand(tmp->_opnds[0], ideal_vreg, true /*is_temp*/);
2463 }
2464 }
2465
2466 // Compute concrete vector operand for a generic DEF/USE vector operand (of mach node m at index idx).
2467 MachOper* Matcher::specialize_vector_operand(MachNode* m, uint opnd_idx) {
2468 assert(Matcher::is_generic_vector(m->_opnds[opnd_idx]), "repeated updates");
2469 Node* def = NULL;
2470 if (opnd_idx == 0) { // DEF
2471 def = m; // use mach node itself to compute vector operand type
2472 } else {
2473 int base_idx = m->operand_index(opnd_idx);
2474 def = m->in(base_idx);
2475 if (def->is_Mach()) {
2476 if (def->is_MachTemp() && Matcher::is_generic_vector(def->as_Mach()->_opnds[0])) {
2477 specialize_temp_node(def->as_MachTemp(), m, base_idx); // MachTemp node use site
2478 } else if (is_generic_reg2reg_move(def->as_Mach())) {
2479 def = def->in(1); // skip over generic reg-to-reg moves
2480 }
2481 }
2482 }
2483 assert(def->bottom_type()->isa_vect(), "not a vector");
2484 uint ideal_vreg = def->bottom_type()->ideal_reg();
2485 return Matcher::pd_specialize_generic_vector_operand(m->_opnds[opnd_idx], ideal_vreg, false /*is_temp*/);
2486 }
2487
2488 void Matcher::specialize_mach_node(MachNode* m) {
2489 assert(!m->is_MachTemp(), "processed along with its user");
2490 // For generic use operands pull specific register class operands from
2491 // its def instruction's output operand (def operand).
2492 for (uint i = 0; i < m->num_opnds(); i++) {
2493 if (Matcher::is_generic_vector(m->_opnds[i])) {
2494 m->_opnds[i] = specialize_vector_operand(m, i);
2495 }
2496 }
2497 }
2498
2499 // Replace generic vector operands with concrete vector operands and eliminate generic reg-to-reg moves from the graph.
2500 void Matcher::specialize_generic_vector_operands() {
2501 assert(supports_generic_vector_operands, "sanity");
2502 ResourceMark rm;
2503
2504 if (C->max_vector_size() == 0) {
2505 return; // no vector instructions or operands
2506 }
2507 // Replace generic vector operands (vec/legVec) with concrete ones (vec[SDXYZ]/legVec[SDXYZ])
2508 // and remove reg-to-reg vector moves (MoveVec2Leg and MoveLeg2Vec).
2509 Unique_Node_List live_nodes;
2510 C->identify_useful_nodes(live_nodes);
2511
2512 while (live_nodes.size() > 0) {
2513 MachNode* m = live_nodes.pop()->isa_Mach();
2514 if (m != NULL) {
2515 if (Matcher::is_generic_reg2reg_move(m)) {
2516 // Register allocator properly handles vec <=> leg moves using register masks.
2517 int opnd_idx = m->operand_index(1);
2518 Node* def = m->in(opnd_idx);
2519 m->subsume_by(def, C);
2520 } else if (m->is_MachTemp()) {
2521 // process MachTemp nodes at use site (see Matcher::specialize_vector_operand)
2522 } else {
2523 specialize_mach_node(m);
2524 }
2525 }
2526 }
2527 }
2528
2529 #ifdef ASSERT
2530 bool Matcher::verify_after_postselect_cleanup() {
2531 assert(!C->failing(), "sanity");
2532 if (supports_generic_vector_operands) {
2533 Unique_Node_List useful;
2534 C->identify_useful_nodes(useful);
2535 for (uint i = 0; i < useful.size(); i++) {
2536 MachNode* m = useful.at(i)->isa_Mach();
2537 if (m != NULL) {
2538 assert(!Matcher::is_generic_reg2reg_move(m), "no MoveVec nodes allowed");
2539 for (uint j = 0; j < m->num_opnds(); j++) {
2540 assert(!Matcher::is_generic_vector(m->_opnds[j]), "no generic vector operands allowed");
2541 }
2542 }
2543 }
2544 }
2545 return true;
2546 }
2547 #endif // ASSERT
2548
2549 // Used by the DFA in dfa_xxx.cpp. Check for a following barrier or
2550 // atomic instruction acting as a store-load barrier without any
2551 // intervening volatile load; in that case we don't need a barrier here.
2552 // We retain the Node to act as a compiler ordering barrier.
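// E.g., for 'volatile store; MemBarVolatile; CompareAndSwapI', the membar
// is redundant (this returns true) because the following CAS already
// orders the store against later loads.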
2553 bool Matcher::post_store_load_barrier(const Node* vmb) {
2554 Compile* C = Compile::current();
2555 assert(vmb->is_MemBar(), "");
2556 assert(vmb->Opcode() != Op_MemBarAcquire && vmb->Opcode() != Op_LoadFence, "");
2557 const MemBarNode* membar = vmb->as_MemBar();
2558
2559 // Get the Ideal Proj node, ctrl, that can be used to iterate forward
2560 Node* ctrl = NULL;
2561 for (DUIterator_Fast imax, i = membar->fast_outs(imax); i < imax; i++) {
2562 Node* p = membar->fast_out(i);
2563 assert(p->is_Proj(), "only projections here");
2564 if ((p->as_Proj()->_con == TypeFunc::Control) &&
2565 !C->node_arena()->contains(p)) { // Unmatched old-space only
2566 ctrl = p;
2567 break;
2568 }
2569 }
2570 assert((ctrl != NULL), "missing control projection");
2571
2572 for (DUIterator_Fast jmax, j = ctrl->fast_outs(jmax); j < jmax; j++) {
2573 Node *x = ctrl->fast_out(j);
2574 int xop = x->Opcode();
2575
2576 // We don't need current barrier if we see another or a lock
2577 // before seeing volatile load.
2578 //
2579 // Op_FastUnlock previously appeared in the Op_* list below.
2580 // With the advent of 1-0 lock operations we're no longer guaranteed
2581 // that a monitor exit operation contains a serializing instruction.
2582
2583 if (xop == Op_MemBarVolatile ||
2584 xop == Op_CompareAndExchangeB ||
2585 xop == Op_CompareAndExchangeS ||
2586 xop == Op_CompareAndExchangeI ||
2587 xop == Op_CompareAndExchangeL ||
2588 xop == Op_CompareAndExchangeP ||
2589 xop == Op_CompareAndExchangeN ||
2590 xop == Op_WeakCompareAndSwapB ||
2591 xop == Op_WeakCompareAndSwapS ||
2592 xop == Op_WeakCompareAndSwapL ||
2593 xop == Op_WeakCompareAndSwapP ||
2594 xop == Op_WeakCompareAndSwapN ||
2595 xop == Op_WeakCompareAndSwapI ||
2596 xop == Op_CompareAndSwapB ||
2597 xop == Op_CompareAndSwapS ||
2598 xop == Op_CompareAndSwapL ||
2599 xop == Op_CompareAndSwapP ||
2600 xop == Op_CompareAndSwapN ||
2601 xop == Op_CompareAndSwapI ||
2602 BarrierSet::barrier_set()->barrier_set_c2()->matcher_is_store_load_barrier(x, xop)) {
2603 return true;
2604 }
2605
2606 // Op_FastLock previously appeared in the Op_* list above.
2607 // With biased locking we're no longer guaranteed that a monitor
2608 // enter operation contains a serializing instruction.
2609 if ((xop == Op_FastLock) && !UseBiasedLocking) {
2610 return true;
2611 }
2612
2613 if (x->is_MemBar()) {
2614 // We must retain this membar if there is an upcoming volatile
2615 // load, which will be followed by acquire membar.
2616 if (xop == Op_MemBarAcquire || xop == Op_LoadFence) {
2617 return false;
2618 } else {
2619 // For other kinds of barriers, check by pretending we
2620 // are them, and seeing if we can be removed.
2621 return post_store_load_barrier(x->as_MemBar());
2622 }
2623 }
2624
2625 // probably not necessary to check for these
2626 if (x->is_Call() || x->is_SafePoint() || x->is_block_proj()) {
2627 return false;
2628 }
2629 }
2630 return false;
2631 }
2632
2633 // Check whether node n is a branch to an uncommon trap that we could
2634 // optimize as test with very high branch costs in case of going to
2635 // the uncommon trap. The code must be able to be recompiled to use
2636 // a cheaper test.
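// E.g., an If whose failing path reaches, within a few Regions, a call
// into the uncommon trap blob with a recompiling DeoptAction can be
// treated as an almost-never-taken branch.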
2637 bool Matcher::branches_to_uncommon_trap(const Node *n) {
2638 // Don't do it for natives, adapters, or runtime stubs
2639 Compile *C = Compile::current();
2640 if (!C->is_method_compilation()) return false;
2641
2642 assert(n->is_If(), "You should only call this on if nodes.");
2643 IfNode *ifn = n->as_If();
2644
2645 Node *ifFalse = NULL;
2646 for (DUIterator_Fast imax, i = ifn->fast_outs(imax); i < imax; i++) {
2647 if (ifn->fast_out(i)->is_IfFalse()) {
2648 ifFalse = ifn->fast_out(i);
2649 break;
2650 }
2651 }
2652 assert(ifFalse, "An If should have an ifFalse. Graph is broken.");
2653
2654 Node *reg = ifFalse;
2655 int cnt = 4; // We must protect against cycles. Limit to 4 iterations.
2656 // Alternatively use visited set? Seems too expensive.
2657 while (reg != NULL && cnt > 0) {
2658 CallNode *call = NULL;
2659 RegionNode *nxt_reg = NULL;
2660 for (DUIterator_Fast imax, i = reg->fast_outs(imax); i < imax; i++) {
2661 Node *o = reg->fast_out(i);
2662 if (o->is_Call()) {
2663 call = o->as_Call();
2664 }
2665 if (o->is_Region()) {
2666 nxt_reg = o->as_Region();
2667 }
2668 }
2669
2670 if (call &&
2671 call->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point()) {
2672 const Type* trtype = call->in(TypeFunc::Parms)->bottom_type();
2673 if (trtype->isa_int() && trtype->is_int()->is_con()) {
2674 jint tr_con = trtype->is_int()->get_con();
2675 Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(tr_con);
2676 Deoptimization::DeoptAction action = Deoptimization::trap_request_action(tr_con);
2677 assert((int)reason < (int)BitsPerInt, "recode bit map");
2678
2679 if (is_set_nth_bit(C->allowed_deopt_reasons(), (int)reason)
2680 && action != Deoptimization::Action_none) {
2681 // This uncommon trap is sure to recompile, eventually.
2682 // When that happens, C->too_many_traps will prevent
2683 // this transformation from happening again.
2684 return true;
2685 }
2686 }
2687 }
2688
2689 reg = nxt_reg;
2690 cnt--;
2691 }
2692
2693 return false;
2694 }
2695
2696 //=============================================================================
2697 //---------------------------State---------------------------------------------
2698 State::State(void) {
2699 #ifdef ASSERT
2700 _id = 0;
2701 _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
2702 _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
2703 //memset(_cost, -1, sizeof(_cost));
2704 //memset(_rule, -1, sizeof(_rule));
2705 #endif
2706 memset(_valid, 0, sizeof(_valid));
2707 }
2708
2709 #ifdef ASSERT
2710 State::~State() {
2711 _id = 99;
2712 _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
2713 _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
2714 memset(_cost, -3, sizeof(_cost));
2715 memset(_rule, -3, sizeof(_rule));
2716 }
2717 #endif
2718
2719 #ifndef PRODUCT
2720 //---------------------------dump----------------------------------------------
2721 void State::dump() {
2722 tty->print("\n");
2723 dump(0);
2724 }
2725
2726 void State::dump(int depth) {
2727 for( int j = 0; j < depth; j++ )
2728 tty->print(" ");
2729 tty->print("--N: ");
2730 _leaf->dump();
2731 uint i;
2732 for( i = 0; i < _LAST_MACH_OPER; i++ )
2733 // Check for valid entry
2734 if( valid(i) ) {
2735 for( int j = 0; j < depth; j++ )
2736 tty->print(" ");
2737 assert(_cost[i] != max_juint, "cost must be a valid value");
2738 assert(_rule[i] < _last_Mach_Node, "rule[i] must be valid rule");
2739 tty->print_cr("%s %d %s",
2740 ruleName[i], _cost[i], ruleName[_rule[i]] );
2741 }
2742 tty->cr();
2743
2744 for( i=0; i<2; i++ )
2745 if( _kids[i] )
2746 _kids[i]->dump(depth+1);
2747 }
2748 #endif
2749