/*
 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedOops.hpp"
#include "opto/ad.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/movenode.hpp"
#include "opto/opcodes.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/type.hpp"
#include "opto/vectornode.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"

OptoReg::Name OptoReg::c_frame_pointer;

const RegMask *Matcher::idealreg2regmask[_last_machine_leaf];
RegMask Matcher::mreg2regmask[_last_Mach_Reg];
RegMask Matcher::caller_save_regmask;
RegMask Matcher::caller_save_regmask_exclude_soe;
RegMask Matcher::mh_caller_save_regmask;
RegMask Matcher::mh_caller_save_regmask_exclude_soe;
RegMask Matcher::STACK_ONLY_mask;
RegMask Matcher::c_frame_ptr_mask;
const uint Matcher::_begin_rematerialize = _BEGIN_REMATERIALIZE;
const uint Matcher::_end_rematerialize   = _END_REMATERIALIZE;

//---------------------------Matcher-------------------------------------------
Matcher::Matcher()
  : PhaseTransform( Phase::Ins_Select ),
  _states_arena(Chunk::medium_size, mtCompiler),
  _visited(&_states_arena),
  _shared(&_states_arena),
  _dontcare(&_states_arena),
  _reduceOp(reduceOp), _leftOp(leftOp), _rightOp(rightOp),
  _swallowed(swallowed),
  _begin_inst_chain_rule(_BEGIN_INST_CHAIN_RULE),
  _end_inst_chain_rule(_END_INST_CHAIN_RULE),
  _must_clone(must_clone),
  _shared_nodes(C->comp_arena()),
#ifndef PRODUCT
  _old2new_map(C->comp_arena()),
  _new2old_map(C->comp_arena()),
  _reused(C->comp_arena()),
#endif // !PRODUCT
  _allocation_started(false),
  _ruleName(ruleName),
  _register_save_policy(register_save_policy),
  _c_reg_save_policy(c_reg_save_policy),
  _register_save_type(register_save_type) {
  C->set_matcher(this);

  idealreg2spillmask  [Op_RegI] = NULL;
  idealreg2spillmask  [Op_RegN] = NULL;
  idealreg2spillmask  [Op_RegL] = NULL;
  idealreg2spillmask  [Op_RegF] = NULL;
  idealreg2spillmask  [Op_RegD] = NULL;
  idealreg2spillmask  [Op_RegP] = NULL;
  idealreg2spillmask  [Op_VecA] = NULL;
  idealreg2spillmask  [Op_VecS] = NULL;
  idealreg2spillmask  [Op_VecD] = NULL;
  idealreg2spillmask  [Op_VecX] = NULL;
  idealreg2spillmask  [Op_VecY] = NULL;
  idealreg2spillmask  [Op_VecZ] = NULL;
  idealreg2spillmask  [Op_RegFlags] = NULL;
  idealreg2spillmask  [Op_RegVectMask] = NULL;

  idealreg2debugmask  [Op_RegI] = NULL;
  idealreg2debugmask  [Op_RegN] = NULL;
  idealreg2debugmask  [Op_RegL] = NULL;
  idealreg2debugmask  [Op_RegF] = NULL;
  idealreg2debugmask  [Op_RegD] = NULL;
  idealreg2debugmask  [Op_RegP] = NULL;
  idealreg2debugmask  [Op_VecA] = NULL;
  idealreg2debugmask  [Op_VecS] = NULL;
  idealreg2debugmask  [Op_VecD] = NULL;
  idealreg2debugmask  [Op_VecX] = NULL;
  idealreg2debugmask  [Op_VecY] = NULL;
  idealreg2debugmask  [Op_VecZ] = NULL;
  idealreg2debugmask  [Op_RegFlags] = NULL;
  idealreg2debugmask  [Op_RegVectMask] = NULL;

  idealreg2mhdebugmask[Op_RegI] = NULL;
  idealreg2mhdebugmask[Op_RegN] = NULL;
  idealreg2mhdebugmask[Op_RegL] = NULL;
  idealreg2mhdebugmask[Op_RegF] = NULL;
  idealreg2mhdebugmask[Op_RegD] = NULL;
  idealreg2mhdebugmask[Op_RegP] = NULL;
  idealreg2mhdebugmask[Op_VecA] = NULL;
  idealreg2mhdebugmask[Op_VecS] = NULL;
  idealreg2mhdebugmask[Op_VecD] = NULL;
  idealreg2mhdebugmask[Op_VecX] = NULL;
  idealreg2mhdebugmask[Op_VecY] = NULL;
  idealreg2mhdebugmask[Op_VecZ] = NULL;
  idealreg2mhdebugmask[Op_RegFlags] = NULL;
  idealreg2mhdebugmask[Op_RegVectMask] = NULL;

  debug_only(_mem_node = NULL;)   // Ideal memory node consumed by mach node
}
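
// Note: the spill/debug/mhdebug entries NULLed above are only placeholders;
// they are filled in by init_first_stack_mask() (called from
// Fixup_Save_On_Entry() below) once the frame layout is known.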
133
134 //------------------------------warp_incoming_stk_arg------------------------
135 // This warps a VMReg into an OptoReg::Name
warp_incoming_stk_arg(VMReg reg)136 OptoReg::Name Matcher::warp_incoming_stk_arg( VMReg reg ) {
137 OptoReg::Name warped;
138 if( reg->is_stack() ) { // Stack slot argument?
139 warped = OptoReg::add(_old_SP, reg->reg2stack() );
140 warped = OptoReg::add(warped, C->out_preserve_stack_slots());
141 if( warped >= _in_arg_limit )
142 _in_arg_limit = OptoReg::add(warped, 1); // Bump max stack slot seen
143 if (!RegMask::can_represent_arg(warped)) {
144 // the compiler cannot represent this method's calling sequence
145 C->record_method_not_compilable("unsupported incoming calling sequence");
146 return OptoReg::Bad;
147 }
148 return warped;
149 }
150 return OptoReg::as_OptoReg(reg);
151 }
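
// A worked example (illustrative numbers only, not from any real platform):
// an incoming stack argument in VMReg stack slot 2, with _old_SP == 8 and
// C->out_preserve_stack_slots() == 2, warps to OptoReg 8 + 2 + 2 == 12.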
152
153 //---------------------------compute_old_SP------------------------------------
compute_old_SP()154 OptoReg::Name Compile::compute_old_SP() {
155 int fixed = fixed_slots();
156 int preserve = in_preserve_stack_slots();
157 return OptoReg::stack2reg(align_up(fixed + preserve, (int)Matcher::stack_alignment_in_slots()));
158 }
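
// Another illustrative example: with fixed_slots() == 3,
// in_preserve_stack_slots() == 4 and a 2-slot stack alignment,
// align_up(3 + 4, 2) == 8, so the old SP is OptoReg::stack2reg(8).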


#ifdef ASSERT
void Matcher::verify_new_nodes_only(Node* xroot) {
  // Make sure that the new graph only references new nodes
  ResourceMark rm;
  Unique_Node_List worklist;
  VectorSet visited;
  worklist.push(xroot);
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    visited.set(n->_idx);
    assert(C->node_arena()->contains(n), "dead node");
    for (uint j = 0; j < n->req(); j++) {
      Node* in = n->in(j);
      if (in != NULL) {
        assert(C->node_arena()->contains(in), "dead node");
        if (!visited.test(in->_idx)) {
          worklist.push(in);
        }
      }
    }
  }
}
#endif

//---------------------------match---------------------------------------------
void Matcher::match( ) {
  if( MaxLabelRootDepth < 100 ) { // Too small?
    assert(false, "invalid MaxLabelRootDepth, increase it to 100 minimum");
    MaxLabelRootDepth = 100;
  }
  // One-time initialization of some register masks.
  init_spill_mask( C->root()->in(1) );
  _return_addr_mask = return_addr();
#ifdef _LP64
  // Pointers take 2 slots in 64-bit land
  _return_addr_mask.Insert(OptoReg::add(return_addr(),1));
#endif

  // Map a Java-signature return type into return register-value
  // machine registers for 0, 1 and 2 returned values.
  const TypeTuple *range = C->tf()->range();
  if( range->cnt() > TypeFunc::Parms ) { // If not a void function
    // Get ideal-register return type
    uint ireg = range->field_at(TypeFunc::Parms)->ideal_reg();
    // Get machine return register
    uint sop = C->start()->Opcode();
    OptoRegPair regs = return_value(ireg);

    // And mask for same
    _return_value_mask = RegMask(regs.first());
    if( OptoReg::is_valid(regs.second()) )
      _return_value_mask.Insert(regs.second());
  }

  // ---------------
  // Frame Layout

  // Need the method signature to determine the incoming argument types,
  // because the types determine which registers the incoming arguments are
  // in, and this affects the matched code.
  const TypeTuple *domain = C->tf()->domain();
  uint argcnt = domain->cnt() - TypeFunc::Parms;
  BasicType *sig_bt        = NEW_RESOURCE_ARRAY( BasicType, argcnt );
  VMRegPair *vm_parm_regs  = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
  _parm_regs               = NEW_RESOURCE_ARRAY( OptoRegPair, argcnt );
  _calling_convention_mask = NEW_RESOURCE_ARRAY( RegMask, argcnt );
  uint i;
  for( i = 0; i<argcnt; i++ ) {
    sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type();
  }

  // Pass array of ideal registers and length to USER code (from the AD file)
  // that will convert this to an array of register numbers.
  const StartNode *start = C->start();
  start->calling_convention( sig_bt, vm_parm_regs, argcnt );
#ifdef ASSERT
  // Sanity check users' calling convention.  Real handy while trying to
  // get the initial port correct.
  { for (uint i = 0; i<argcnt; i++) {
      if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
        assert(domain->field_at(i+TypeFunc::Parms)==Type::HALF, "only allowed on halves");
        _parm_regs[i].set_bad();
        continue;
      }
      VMReg parm_reg = vm_parm_regs[i].first();
      assert(parm_reg->is_valid(), "invalid arg?");
      if (parm_reg->is_reg()) {
        OptoReg::Name opto_parm_reg = OptoReg::as_OptoReg(parm_reg);
        assert(can_be_java_arg(opto_parm_reg) ||
               C->stub_function() == CAST_FROM_FN_PTR(address, OptoRuntime::rethrow_C) ||
               opto_parm_reg == inline_cache_reg(),
               "parameters in register must be preserved by runtime stubs");
      }
      for (uint j = 0; j < i; j++) {
        assert(parm_reg != vm_parm_regs[j].first(),
               "calling conv. must produce distinct regs");
      }
    }
  }
#endif

  // Do some initial frame layout.

  // Compute the old incoming SP (may be called FP) as
  //   OptoReg::stack0() + locks + in_preserve_stack_slots + pad2.
  _old_SP = C->compute_old_SP();
  assert( is_even(_old_SP), "must be even" );

  // Compute highest incoming stack argument as
  //   _old_SP + out_preserve_stack_slots + incoming argument size.
  _in_arg_limit = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
  assert( is_even(_in_arg_limit), "out_preserve must be even" );
  for( i = 0; i < argcnt; i++ ) {
    // Permit args to have no register
    _calling_convention_mask[i].Clear();
    if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
      continue;
    }
    // calling_convention returns stack arguments as a count of
    // slots beyond OptoReg::stack0()/VMRegImpl::stack0.  We need to convert this to
    // the allocator's point of view, taking into account all the
    // preserve area, locks & pad2.

    OptoReg::Name reg1 = warp_incoming_stk_arg(vm_parm_regs[i].first());
    if( OptoReg::is_valid(reg1))
      _calling_convention_mask[i].Insert(reg1);

    OptoReg::Name reg2 = warp_incoming_stk_arg(vm_parm_regs[i].second());
    if( OptoReg::is_valid(reg2))
      _calling_convention_mask[i].Insert(reg2);

    // Saved biased stack-slot register number
    _parm_regs[i].set_pair(reg2, reg1);
  }

  // Finally, make sure the incoming arguments take up an even number of
  // words, in case the arguments or locals need to contain doubleword stack
  // slots.  The rest of the system assumes that stack slot pairs (in
  // particular, in the spill area) which look aligned will in fact be
  // aligned relative to the stack pointer in the target machine.  Double
  // stack slots will always be allocated aligned.
  _new_SP = OptoReg::Name(align_up(_in_arg_limit, (int)RegMask::SlotsPerLong));

  // Compute highest outgoing stack argument as
  //   _new_SP + out_preserve_stack_slots + max(outgoing argument size).
  _out_arg_limit = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
  assert( is_even(_out_arg_limit), "out_preserve must be even" );

  if (!RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1))) {
    // the compiler cannot represent this method's calling sequence
    C->record_method_not_compilable("must be able to represent all call arguments in reg mask");
  }

  if (C->failing())  return;  // bailed out on incoming arg failure
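
  // A rough sketch of how the frame watermarks computed above relate
  // (stack slot numbers grow upward from OptoReg::stack0()):
  //
  //   stack0() <= _old_SP <= _in_arg_limit <= _new_SP <= _out_arg_limit
  //
  // Incoming stack args live in [_old_SP + out_preserve, _in_arg_limit);
  // outgoing args of calls matched later live in
  // [_new_SP + out_preserve, _out_arg_limit).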

  // ---------------
  // Collect roots of matcher trees.  Every node for which
  // _shared[_idx] is cleared is guaranteed to not be shared, and thus
  // can be a valid interior of some tree.
  find_shared( C->root() );
  find_shared( C->top() );

  C->print_method(PHASE_BEFORE_MATCHING);

  // Create a new ideal node ConP #NULL even if it already exists in old space
  // to avoid false sharing if the corresponding mach node is not used.
  // The corresponding mach node is only used in rare cases for derived
  // pointers.
  Node* new_ideal_null = ConNode::make(TypePtr::NULL_PTR);

  // Swap out to old-space; emptying new-space
  Arena *old = C->node_arena()->move_contents(C->old_arena());

  // Save debug and profile information for nodes in old space:
  _old_node_note_array = C->node_note_array();
  if (_old_node_note_array != NULL) {
    C->set_node_note_array(new(C->comp_arena()) GrowableArray<Node_Notes*>
                           (C->comp_arena(), _old_node_note_array->length(),
                            0, NULL));
  }

  // Pre-size the new_node table to avoid the need for range checks.
  grow_new_node_array(C->unique());

  // Reset node counter so MachNodes start with _idx at 0
  int live_nodes = C->live_nodes();
  C->set_unique(0);
  C->reset_dead_node_list();

  // Recursively match trees from old space into new space.
  // Correct leaves of new-space Nodes; they point to old-space.
  _visited.clear();
  C->set_cached_top_node(xform( C->top(), live_nodes ));
  if (!C->failing()) {
    Node* xroot = xform( C->root(), 1 );
    if (xroot == NULL) {
      Matcher::soft_match_failure();  // recursive matching process failed
      C->record_method_not_compilable("instruction match failed");
    } else {
      // During matching shared constants were attached to C->root()
      // because xroot wasn't available yet, so transfer the uses to
      // the xroot.
      for( DUIterator_Fast jmax, j = C->root()->fast_outs(jmax); j < jmax; j++ ) {
        Node* n = C->root()->fast_out(j);
        if (C->node_arena()->contains(n)) {
          assert(n->in(0) == C->root(), "should be control user");
          n->set_req(0, xroot);
          --j;
          --jmax;
        }
      }

      // Generate new mach node for ConP #NULL
      assert(new_ideal_null != NULL, "sanity");
      _mach_null = match_tree(new_ideal_null);
      // Don't set control, it will confuse GCM since there are no uses.
      // The control will be set when this node is used first time
      // in find_base_for_derived().
      assert(_mach_null != NULL, "");

      C->set_root(xroot->is_Root() ? xroot->as_Root() : NULL);

#ifdef ASSERT
      verify_new_nodes_only(xroot);
#endif
    }
  }
  if (C->top() == NULL || C->root() == NULL) {
    C->record_method_not_compilable("graph lost"); // %%% cannot happen?
  }
  if (C->failing()) {
    // delete old;
    old->destruct_contents();
    return;
  }
  assert( C->top(), "" );
  assert( C->root(), "" );
  validate_null_checks();

  // Now smoke old-space
  NOT_DEBUG( old->destruct_contents() );

  // ------------------------
  // Set up save-on-entry registers.
  Fixup_Save_On_Entry( );

  { // Cleanup mach IR after selection phase is over.
    Compile::TracePhase tp("postselect_cleanup", &timers[_t_postselect_cleanup]);
    do_postselect_cleanup();
    if (C->failing()) return;
    assert(verify_after_postselect_cleanup(), "");
  }
}

//------------------------------Fixup_Save_On_Entry----------------------------
// The stated purpose of this routine is to take care of save-on-entry
// registers.  However, the overall goal of the Match phase is to convert into
// machine-specific instructions which have RegMasks to guide allocation.
// So what this procedure really does is put a valid RegMask on each input
// to the machine-specific variations of all Return, TailCall and Halt
// instructions.  It also adds edges to define the save-on-entry values (and of
// course gives them a mask).

static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) {
  RegMask *rms = NEW_RESOURCE_ARRAY( RegMask, size );
  // Do all the pre-defined register masks
  rms[TypeFunc::Control  ] = RegMask::Empty;
  rms[TypeFunc::I_O      ] = RegMask::Empty;
  rms[TypeFunc::Memory   ] = RegMask::Empty;
  rms[TypeFunc::ReturnAdr] = ret_adr;
  rms[TypeFunc::FramePtr ] = fp;
  return rms;
}

#define NOF_STACK_MASKS (3*13)
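
// 3*13: three mask tables (idealreg2spillmask, idealreg2debugmask and
// idealreg2mhdebugmask) times the 13 ideal register kinds that get stack
// masks below (RegN/I/L/F/D/P, VecA/S/D/X/Y/Z and RegVectMask);
// Op_RegFlags never spills and so gets no slot here.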

// Create the initial stack mask used by values spilling to the stack.
// Disallow any debug info in outgoing argument areas by setting the
// initial mask accordingly.
void Matcher::init_first_stack_mask() {

  // Allocate storage for spill masks as masks for the appropriate load type.
  RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask) * NOF_STACK_MASKS);

  // Initialize empty placeholder masks into the newly allocated arena
  for (int i = 0; i < NOF_STACK_MASKS; i++) {
    new (rms + i) RegMask();
  }

  idealreg2spillmask  [Op_RegN] = &rms[0];
  idealreg2spillmask  [Op_RegI] = &rms[1];
  idealreg2spillmask  [Op_RegL] = &rms[2];
  idealreg2spillmask  [Op_RegF] = &rms[3];
  idealreg2spillmask  [Op_RegD] = &rms[4];
  idealreg2spillmask  [Op_RegP] = &rms[5];

  idealreg2debugmask  [Op_RegN] = &rms[6];
  idealreg2debugmask  [Op_RegI] = &rms[7];
  idealreg2debugmask  [Op_RegL] = &rms[8];
  idealreg2debugmask  [Op_RegF] = &rms[9];
  idealreg2debugmask  [Op_RegD] = &rms[10];
  idealreg2debugmask  [Op_RegP] = &rms[11];

  idealreg2mhdebugmask[Op_RegN] = &rms[12];
  idealreg2mhdebugmask[Op_RegI] = &rms[13];
  idealreg2mhdebugmask[Op_RegL] = &rms[14];
  idealreg2mhdebugmask[Op_RegF] = &rms[15];
  idealreg2mhdebugmask[Op_RegD] = &rms[16];
  idealreg2mhdebugmask[Op_RegP] = &rms[17];

  idealreg2spillmask  [Op_VecA] = &rms[18];
  idealreg2spillmask  [Op_VecS] = &rms[19];
  idealreg2spillmask  [Op_VecD] = &rms[20];
  idealreg2spillmask  [Op_VecX] = &rms[21];
  idealreg2spillmask  [Op_VecY] = &rms[22];
  idealreg2spillmask  [Op_VecZ] = &rms[23];

  idealreg2debugmask  [Op_VecA] = &rms[24];
  idealreg2debugmask  [Op_VecS] = &rms[25];
  idealreg2debugmask  [Op_VecD] = &rms[26];
  idealreg2debugmask  [Op_VecX] = &rms[27];
  idealreg2debugmask  [Op_VecY] = &rms[28];
  idealreg2debugmask  [Op_VecZ] = &rms[29];

  idealreg2mhdebugmask[Op_VecA] = &rms[30];
  idealreg2mhdebugmask[Op_VecS] = &rms[31];
  idealreg2mhdebugmask[Op_VecD] = &rms[32];
  idealreg2mhdebugmask[Op_VecX] = &rms[33];
  idealreg2mhdebugmask[Op_VecY] = &rms[34];
  idealreg2mhdebugmask[Op_VecZ] = &rms[35];

  idealreg2spillmask  [Op_RegVectMask] = &rms[36];
  idealreg2debugmask  [Op_RegVectMask] = &rms[37];
  idealreg2mhdebugmask[Op_RegVectMask] = &rms[38];

  OptoReg::Name i;

  // At first, start with the empty mask
  C->FIRST_STACK_mask().Clear();

  // Add in the incoming argument area
  OptoReg::Name init_in = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
  for (i = init_in; i < _in_arg_limit; i = OptoReg::add(i,1)) {
    C->FIRST_STACK_mask().Insert(i);
  }
  // Add in all bits past the outgoing argument area
  guarantee(RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1)),
            "must be able to represent all call arguments in reg mask");
  OptoReg::Name init = _out_arg_limit;
  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1)) {
    C->FIRST_STACK_mask().Insert(i);
  }
  // Finally, set the "infinite stack" bit.
  C->FIRST_STACK_mask().set_AllStack();

  // Make spill masks.  Registers for their class, plus FIRST_STACK_mask.
  RegMask aligned_stack_mask = C->FIRST_STACK_mask();
  // Keep spill masks aligned.
  aligned_stack_mask.clear_to_pairs();
  assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
  RegMask scalable_stack_mask = aligned_stack_mask;

  *idealreg2spillmask[Op_RegP] = *idealreg2regmask[Op_RegP];
#ifdef _LP64
  *idealreg2spillmask[Op_RegN] = *idealreg2regmask[Op_RegN];
  idealreg2spillmask[Op_RegN]->OR(C->FIRST_STACK_mask());
  idealreg2spillmask[Op_RegP]->OR(aligned_stack_mask);
#else
  idealreg2spillmask[Op_RegP]->OR(C->FIRST_STACK_mask());
#endif
  *idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI];
  idealreg2spillmask[Op_RegI]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[Op_RegL] = *idealreg2regmask[Op_RegL];
  idealreg2spillmask[Op_RegL]->OR(aligned_stack_mask);
  *idealreg2spillmask[Op_RegF] = *idealreg2regmask[Op_RegF];
  idealreg2spillmask[Op_RegF]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[Op_RegD] = *idealreg2regmask[Op_RegD];
  idealreg2spillmask[Op_RegD]->OR(aligned_stack_mask);

  if (Matcher::has_predicated_vectors()) {
    *idealreg2spillmask[Op_RegVectMask] = *idealreg2regmask[Op_RegVectMask];
    idealreg2spillmask[Op_RegVectMask]->OR(aligned_stack_mask);
  }

  if (Matcher::vector_size_supported(T_BYTE,4)) {
    *idealreg2spillmask[Op_VecS] = *idealreg2regmask[Op_VecS];
    idealreg2spillmask[Op_VecS]->OR(C->FIRST_STACK_mask());
  } else {
    *idealreg2spillmask[Op_VecS] = RegMask::Empty;
  }

  if (Matcher::vector_size_supported(T_FLOAT,2)) {
    // For VecD we need dual alignment and 8 bytes (2 slots) for spills.
    // RA guarantees such alignment since it is needed for Double and Long values.
    *idealreg2spillmask[Op_VecD] = *idealreg2regmask[Op_VecD];
    idealreg2spillmask[Op_VecD]->OR(aligned_stack_mask);
  } else {
    *idealreg2spillmask[Op_VecD] = RegMask::Empty;
  }

  if (Matcher::vector_size_supported(T_FLOAT,4)) {
    // For VecX we need quadro alignment and 16 bytes (4 slots) for spills.
    //
    // RA can use input arguments stack slots for spills but until RA
    // we don't know frame size and offset of input arg stack slots.
    //
    // Exclude last input arg stack slots to avoid spilling vectors there
    // otherwise vector spills could stomp over stack slots in caller frame.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecX); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
    aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX);
    assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecX] = *idealreg2regmask[Op_VecX];
    idealreg2spillmask[Op_VecX]->OR(aligned_stack_mask);
  } else {
    *idealreg2spillmask[Op_VecX] = RegMask::Empty;
  }

  if (Matcher::vector_size_supported(T_FLOAT,8)) {
    // For VecY we need octo alignment and 32 bytes (8 slots) for spills.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecY); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
    aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY);
    assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecY] = *idealreg2regmask[Op_VecY];
    idealreg2spillmask[Op_VecY]->OR(aligned_stack_mask);
  } else {
    *idealreg2spillmask[Op_VecY] = RegMask::Empty;
  }

  if (Matcher::vector_size_supported(T_FLOAT,16)) {
    // For VecZ we need enough alignment and 64 bytes (16 slots) for spills.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecZ); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
    aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecZ);
    assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecZ] = *idealreg2regmask[Op_VecZ];
    idealreg2spillmask[Op_VecZ]->OR(aligned_stack_mask);
  } else {
    *idealreg2spillmask[Op_VecZ] = RegMask::Empty;
  }

  if (Matcher::supports_scalable_vector()) {
    int k = 1;
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    // Exclude last input arg stack slots to avoid spilling vector register there,
    // otherwise vector spills could stomp over stack slots in caller frame.
    for (; (in >= init_in) && (k < scalable_vector_reg_size(T_FLOAT)); k++) {
      scalable_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }

    // For VecA
    scalable_stack_mask.clear_to_sets(RegMask::SlotsPerVecA);
    assert(scalable_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecA] = *idealreg2regmask[Op_VecA];
    idealreg2spillmask[Op_VecA]->OR(scalable_stack_mask);
  } else {
    *idealreg2spillmask[Op_VecA] = RegMask::Empty;
  }

  if (UseFPUForSpilling) {
    // This mask logic assumes that the spill operations are
    // symmetric and that the registers involved are the same size.
    // On SPARC, for instance, we may have to use 64-bit moves that
    // kill 2 registers when used with F0-F31.
    idealreg2spillmask[Op_RegI]->OR(*idealreg2regmask[Op_RegF]);
    idealreg2spillmask[Op_RegF]->OR(*idealreg2regmask[Op_RegI]);
#ifdef _LP64
    idealreg2spillmask[Op_RegN]->OR(*idealreg2regmask[Op_RegF]);
    idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
    idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
    idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegD]);
#else
    idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegF]);
#ifdef ARM
    // ARM has support for moving 64bit values between a pair of
    // integer registers and a double register
    idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
    idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
#endif
#endif
  }

  // Make up debug masks.  Any spill slot plus callee-save (SOE) registers.
  // Caller-save (SOC, AS) registers are assumed to be trashable by the various
  // inline-cache fixup routines.
  *idealreg2debugmask  [Op_RegN] = *idealreg2spillmask[Op_RegN];
  *idealreg2debugmask  [Op_RegI] = *idealreg2spillmask[Op_RegI];
  *idealreg2debugmask  [Op_RegL] = *idealreg2spillmask[Op_RegL];
  *idealreg2debugmask  [Op_RegF] = *idealreg2spillmask[Op_RegF];
  *idealreg2debugmask  [Op_RegD] = *idealreg2spillmask[Op_RegD];
  *idealreg2debugmask  [Op_RegP] = *idealreg2spillmask[Op_RegP];
  *idealreg2debugmask  [Op_RegVectMask] = *idealreg2spillmask[Op_RegVectMask];

  *idealreg2debugmask  [Op_VecA] = *idealreg2spillmask[Op_VecA];
  *idealreg2debugmask  [Op_VecS] = *idealreg2spillmask[Op_VecS];
  *idealreg2debugmask  [Op_VecD] = *idealreg2spillmask[Op_VecD];
  *idealreg2debugmask  [Op_VecX] = *idealreg2spillmask[Op_VecX];
  *idealreg2debugmask  [Op_VecY] = *idealreg2spillmask[Op_VecY];
  *idealreg2debugmask  [Op_VecZ] = *idealreg2spillmask[Op_VecZ];

  *idealreg2mhdebugmask[Op_RegN] = *idealreg2spillmask[Op_RegN];
  *idealreg2mhdebugmask[Op_RegI] = *idealreg2spillmask[Op_RegI];
  *idealreg2mhdebugmask[Op_RegL] = *idealreg2spillmask[Op_RegL];
  *idealreg2mhdebugmask[Op_RegF] = *idealreg2spillmask[Op_RegF];
  *idealreg2mhdebugmask[Op_RegD] = *idealreg2spillmask[Op_RegD];
  *idealreg2mhdebugmask[Op_RegP] = *idealreg2spillmask[Op_RegP];
  *idealreg2mhdebugmask[Op_RegVectMask] = *idealreg2spillmask[Op_RegVectMask];

  *idealreg2mhdebugmask[Op_VecA] = *idealreg2spillmask[Op_VecA];
  *idealreg2mhdebugmask[Op_VecS] = *idealreg2spillmask[Op_VecS];
  *idealreg2mhdebugmask[Op_VecD] = *idealreg2spillmask[Op_VecD];
  *idealreg2mhdebugmask[Op_VecX] = *idealreg2spillmask[Op_VecX];
  *idealreg2mhdebugmask[Op_VecY] = *idealreg2spillmask[Op_VecY];
  *idealreg2mhdebugmask[Op_VecZ] = *idealreg2spillmask[Op_VecZ];

  // Prevent stub compilations from attempting to reference
  // callee-saved (SOE) registers from debug info
  bool exclude_soe = !Compile::current()->is_method_compilation();
  RegMask* caller_save_mask = exclude_soe ? &caller_save_regmask_exclude_soe : &caller_save_regmask;
  RegMask* mh_caller_save_mask = exclude_soe ? &mh_caller_save_regmask_exclude_soe : &mh_caller_save_regmask;

  idealreg2debugmask[Op_RegN]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_RegI]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_RegL]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_RegF]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_RegD]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_RegP]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_RegVectMask]->SUBTRACT(*caller_save_mask);

  idealreg2debugmask[Op_VecA]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_VecS]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_VecD]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_VecX]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_VecY]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_VecZ]->SUBTRACT(*caller_save_mask);

  idealreg2mhdebugmask[Op_RegN]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_RegI]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_RegL]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_RegF]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_RegD]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_RegP]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_RegVectMask]->SUBTRACT(*mh_caller_save_mask);

  idealreg2mhdebugmask[Op_VecA]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_VecS]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_VecD]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_VecX]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_VecY]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_VecZ]->SUBTRACT(*mh_caller_save_mask);
}

//---------------------------is_save_on_entry----------------------------------
bool Matcher::is_save_on_entry(int reg) {
  return
    _register_save_policy[reg] == 'E' ||
    _register_save_policy[reg] == 'A';  // Save-on-entry register?
}
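
// The save-policy letters come from the ADLC-generated tables: by
// convention 'C' is save-on-call (caller-save), 'E' is save-on-entry
// (callee-save), 'A' is always-save and 'N' is no-save; init_spill_mask()
// below keys the caller-save masks off the same letters.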

//---------------------------Fixup_Save_On_Entry-------------------------------
void Matcher::Fixup_Save_On_Entry( ) {
  init_first_stack_mask();

  Node *root = C->root();       // Short name for root
  // Count number of save-on-entry registers.
  uint soe_cnt = number_of_saved_registers();
  uint i;

  // Find the procedure Start Node
  StartNode *start = C->start();
  assert( start, "Expect a start node" );

  // Input RegMask array shared by all Returns.
  // The type for doubles and longs has a count of 2, but
  // there is only 1 returned value
  uint ret_edge_cnt = TypeFunc::Parms + ((C->tf()->range()->cnt() == TypeFunc::Parms) ? 0 : 1);
  RegMask *ret_rms  = init_input_masks( ret_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  // Returns have 0 or 1 returned values depending on call signature.
  // Return register is specified by return_value in the AD file.
  if (ret_edge_cnt > TypeFunc::Parms)
    ret_rms[TypeFunc::Parms+0] = _return_value_mask;

  // Input RegMask array shared by all Rethrows.
  uint reth_edge_cnt = TypeFunc::Parms+1;
  RegMask *reth_rms  = init_input_masks( reth_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  // Rethrow takes exception oop only, but in the argument 0 slot.
  OptoReg::Name reg = find_receiver();
  if (reg >= 0) {
    reth_rms[TypeFunc::Parms] = mreg2regmask[reg];
#ifdef _LP64
    // Need two slots for ptrs in 64-bit land
    reth_rms[TypeFunc::Parms].Insert(OptoReg::add(OptoReg::Name(reg), 1));
#endif
  }

  // Input RegMask array shared by all TailCalls
  uint tail_call_edge_cnt = TypeFunc::Parms+2;
  RegMask *tail_call_rms = init_input_masks( tail_call_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // Input RegMask array shared by all TailJumps
  uint tail_jump_edge_cnt = TypeFunc::Parms+2;
  RegMask *tail_jump_rms = init_input_masks( tail_jump_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // TailCalls have 2 returned values (target & moop), whose masks come
  // from the usual MachNode/MachOper mechanism.  Find a sample
  // TailCall to extract these masks and put the correct masks into
  // the tail_call_rms array.
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *m = root->in(i)->as_MachReturn();
    if( m->ideal_Opcode() == Op_TailCall ) {
      tail_call_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_call_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
      break;
    }
  }

  // TailJumps have 2 returned values (target & ex_oop), whose masks come
  // from the usual MachNode/MachOper mechanism.  Find a sample
  // TailJump to extract these masks and put the correct masks into
  // the tail_jump_rms array.
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *m = root->in(i)->as_MachReturn();
    if( m->ideal_Opcode() == Op_TailJump ) {
      tail_jump_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_jump_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
      break;
    }
  }

  // Input RegMask array shared by all Halts
  uint halt_edge_cnt = TypeFunc::Parms;
  RegMask *halt_rms = init_input_masks( halt_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // Capture the return input masks into each exit flavor
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *exit = root->in(i)->as_MachReturn();
    switch( exit->ideal_Opcode() ) {
      case Op_Return   : exit->_in_rms = ret_rms;  break;
      case Op_Rethrow  : exit->_in_rms = reth_rms; break;
      case Op_TailCall : exit->_in_rms = tail_call_rms; break;
      case Op_TailJump : exit->_in_rms = tail_jump_rms; break;
      case Op_Halt     : exit->_in_rms = halt_rms; break;
      default          : ShouldNotReachHere();
    }
  }

  // Next unused projection number from Start.
  int proj_cnt = C->tf()->domain()->cnt();

  // Do all the save-on-entry registers.  Make projections from Start for
  // them, and give them a use at the exit points.  To the allocator, they
  // look like incoming register arguments.
  for( i = 0; i < _last_Mach_Reg; i++ ) {
    if( is_save_on_entry(i) ) {

      // Add the save-on-entry to the mask array
      ret_rms      [      ret_edge_cnt] = mreg2regmask[i];
      reth_rms     [     reth_edge_cnt] = mreg2regmask[i];
      tail_call_rms[tail_call_edge_cnt] = mreg2regmask[i];
      tail_jump_rms[tail_jump_edge_cnt] = mreg2regmask[i];
      // Halts need the SOE registers, but only in the stack as debug info.
      // A just-prior uncommon-trap or deoptimization will use the SOE regs.
      halt_rms     [     halt_edge_cnt] = *idealreg2spillmask[_register_save_type[i]];

      Node *mproj;

      // Is this a RegF low half of a RegD?  Double up 2 adjacent RegF's
      // into a single RegD.
      if( (i&1) == 0 &&
          _register_save_type[i  ] == Op_RegF &&
          _register_save_type[i+1] == Op_RegF &&
          is_save_on_entry(i+1) ) {
        // Add other bit for double
        ret_rms      [      ret_edge_cnt].Insert(OptoReg::Name(i+1));
        reth_rms     [     reth_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
        halt_rms     [     halt_edge_cnt].Insert(OptoReg::Name(i+1));
        mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegD );
        proj_cnt += 2;          // Skip 2 for doubles
      }
      else if( (i&1) == 1 &&    // Else check for high half of double
               _register_save_type[i-1] == Op_RegF &&
               _register_save_type[i  ] == Op_RegF &&
               is_save_on_entry(i-1) ) {
        ret_rms      [      ret_edge_cnt] = RegMask::Empty;
        reth_rms     [     reth_edge_cnt] = RegMask::Empty;
        tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
        tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
        halt_rms     [     halt_edge_cnt] = RegMask::Empty;
        mproj = C->top();
      }
      // Is this a RegI low half of a RegL?  Double up 2 adjacent RegI's
      // into a single RegL.
      else if( (i&1) == 0 &&
               _register_save_type[i  ] == Op_RegI &&
               _register_save_type[i+1] == Op_RegI &&
               is_save_on_entry(i+1) ) {
        // Add other bit for long
        ret_rms      [      ret_edge_cnt].Insert(OptoReg::Name(i+1));
        reth_rms     [     reth_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
        halt_rms     [     halt_edge_cnt].Insert(OptoReg::Name(i+1));
        mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegL );
        proj_cnt += 2;          // Skip 2 for longs
      }
      else if( (i&1) == 1 &&    // Else check for high half of long
               _register_save_type[i-1] == Op_RegI &&
               _register_save_type[i  ] == Op_RegI &&
               is_save_on_entry(i-1) ) {
        ret_rms      [      ret_edge_cnt] = RegMask::Empty;
        reth_rms     [     reth_edge_cnt] = RegMask::Empty;
        tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
        tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
        halt_rms     [     halt_edge_cnt] = RegMask::Empty;
        mproj = C->top();
      } else {
        // Make a projection for it off the Start
        mproj = new MachProjNode( start, proj_cnt++, ret_rms[ret_edge_cnt], _register_save_type[i] );
      }

      ret_edge_cnt ++;
      reth_edge_cnt ++;
      tail_call_edge_cnt ++;
      tail_jump_edge_cnt ++;
      halt_edge_cnt ++;

      // Add a use of the SOE register to all exit paths
      for( uint j=1; j < root->req(); j++ )
        root->in(j)->add_req(mproj);
    } // End of if a save-on-entry register
  } // End of for all machine registers
}

//------------------------------init_spill_mask--------------------------------
void Matcher::init_spill_mask( Node *ret ) {
  if( idealreg2regmask[Op_RegI] ) return; // One time only init

  OptoReg::c_frame_pointer = c_frame_pointer();
  c_frame_ptr_mask = c_frame_pointer();
#ifdef _LP64
  // pointers are twice as big
  c_frame_ptr_mask.Insert(OptoReg::add(c_frame_pointer(),1));
#endif

  // Start at OptoReg::stack0()
  STACK_ONLY_mask.Clear();
  OptoReg::Name init = OptoReg::stack2reg(0);
  // STACK_ONLY_mask is all stack bits
  OptoReg::Name i;
  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1))
    STACK_ONLY_mask.Insert(i);
  // Also set the "infinite stack" bit.
  STACK_ONLY_mask.set_AllStack();

  for (i = OptoReg::Name(0); i < OptoReg::Name(_last_Mach_Reg); i = OptoReg::add(i, 1)) {
    // Copy the register names over into the shared world.
    // SharedInfo::regName[i] = regName[i];
    // Handy RegMasks per machine register
    mreg2regmask[i].Insert(i);

    // Set up regmasks used to exclude save-on-call (and always-save) registers from debug masks.
    if (_register_save_policy[i] == 'C' ||
        _register_save_policy[i] == 'A') {
      caller_save_regmask.Insert(i);
      mh_caller_save_regmask.Insert(i);
    }
    // Exclude save-on-entry registers from debug masks for stub compilations.
    if (_register_save_policy[i] == 'C' ||
        _register_save_policy[i] == 'A' ||
        _register_save_policy[i] == 'E') {
      caller_save_regmask_exclude_soe.Insert(i);
      mh_caller_save_regmask_exclude_soe.Insert(i);
    }
  }

  // Also exclude the register we use to save the SP for MethodHandle
  // invokes from the corresponding MH debug masks
  const RegMask sp_save_mask = method_handle_invoke_SP_save_mask();
  mh_caller_save_regmask.OR(sp_save_mask);
  mh_caller_save_regmask_exclude_soe.OR(sp_save_mask);

  // Grab the Frame Pointer
  Node *fp = ret->in(TypeFunc::FramePtr);
  // Share frame pointer while making spill ops
  set_shared(fp);

  // Get the ADLC notion of the right regmask, for each basic type.
#ifdef _LP64
  idealreg2regmask[Op_RegN] = regmask_for_ideal_register(Op_RegN, ret);
#endif
  idealreg2regmask[Op_RegI] = regmask_for_ideal_register(Op_RegI, ret);
  idealreg2regmask[Op_RegP] = regmask_for_ideal_register(Op_RegP, ret);
  idealreg2regmask[Op_RegF] = regmask_for_ideal_register(Op_RegF, ret);
  idealreg2regmask[Op_RegD] = regmask_for_ideal_register(Op_RegD, ret);
  idealreg2regmask[Op_RegL] = regmask_for_ideal_register(Op_RegL, ret);
  idealreg2regmask[Op_VecA] = regmask_for_ideal_register(Op_VecA, ret);
  idealreg2regmask[Op_VecS] = regmask_for_ideal_register(Op_VecS, ret);
  idealreg2regmask[Op_VecD] = regmask_for_ideal_register(Op_VecD, ret);
  idealreg2regmask[Op_VecX] = regmask_for_ideal_register(Op_VecX, ret);
  idealreg2regmask[Op_VecY] = regmask_for_ideal_register(Op_VecY, ret);
  idealreg2regmask[Op_VecZ] = regmask_for_ideal_register(Op_VecZ, ret);
  idealreg2regmask[Op_RegVectMask] = regmask_for_ideal_register(Op_RegVectMask, ret);
}

#ifdef ASSERT
static void match_alias_type(Compile* C, Node* n, Node* m) {
  if (!VerifyAliases)  return;  // do not go looking for trouble by default
  const TypePtr* nat = n->adr_type();
  const TypePtr* mat = m->adr_type();
  int nidx = C->get_alias_index(nat);
  int midx = C->get_alias_index(mat);
  // Detune the assert for cases like (AndI 0xFF (LoadB p)).
  if (nidx == Compile::AliasIdxTop && midx >= Compile::AliasIdxRaw) {
    for (uint i = 1; i < n->req(); i++) {
      Node* n1 = n->in(i);
      const TypePtr* n1at = n1->adr_type();
      if (n1at != NULL) {
        nat = n1at;
        nidx = C->get_alias_index(n1at);
      }
    }
  }
  // %%% Kludgery.  Instead, fix ideal adr_type methods for all these cases:
  if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxRaw) {
    switch (n->Opcode()) {
    case Op_PrefetchAllocation:
      nidx = Compile::AliasIdxRaw;
      nat = TypeRawPtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxRaw && midx == Compile::AliasIdxTop) {
    switch (n->Opcode()) {
    case Op_ClearArray:
      midx = Compile::AliasIdxRaw;
      mat = TypeRawPtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxBot) {
    switch (n->Opcode()) {
    case Op_Return:
    case Op_Rethrow:
    case Op_Halt:
    case Op_TailCall:
    case Op_TailJump:
      nidx = Compile::AliasIdxBot;
      nat = TypePtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxBot && midx == Compile::AliasIdxTop) {
    switch (n->Opcode()) {
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_AryEq:
    case Op_HasNegatives:
    case Op_MemBarVolatile:
    case Op_MemBarCPUOrder: // %%% these ideals should have narrower adr_type?
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_OnSpinWait:
    case Op_EncodeISOArray:
      nidx = Compile::AliasIdxTop;
      nat = NULL;
      break;
    }
  }
  if (nidx != midx) {
    if (PrintOpto || (PrintMiscellaneous && (WizardMode || Verbose))) {
      tty->print_cr("==== Matcher alias shift %d => %d", nidx, midx);
      n->dump();
      m->dump();
    }
    assert(C->subsume_loads() && C->must_alias(nat, midx),
           "must not lose alias info when matching");
  }
}
#endif

//------------------------------xform------------------------------------------
// Given a Node in old-space, Match him (Label/Reduce) to produce a machine
// Node in new-space.  Given a new-space Node, recursively walk his children.
Node *Matcher::transform( Node *n ) { ShouldNotCallThis(); return n; }
Node *Matcher::xform( Node *n, int max_stack ) {
  // Use one stack to keep both: child's node/state and parent's node/index
  MStack mstack(max_stack * 2 * 2); // usually: C->live_nodes() * 2 * 2
  mstack.push(n, Visit, NULL, -1);  // set NULL as parent to indicate root
  while (mstack.is_nonempty()) {
    C->check_node_count(NodeLimitFudgeFactor, "too many nodes matching instructions");
    if (C->failing()) return NULL;
    n = mstack.node();          // Leave node on stack
    Node_State nstate = mstack.state();
    if (nstate == Visit) {
      mstack.set_state(Post_Visit);
      Node *oldn = n;
      // Old-space or new-space check
      if (!C->node_arena()->contains(n)) {
        // Old space!
        Node* m;
        if (has_new_node(n)) {  // Not yet Label/Reduced
          m = new_node(n);
        } else {
          if (!is_dontcare(n)) { // Matcher can match this guy
            // Calls match special.  They match alone with no children.
            // Their children, the incoming arguments, match normally.
            m = n->is_SafePoint() ? match_sfpt(n->as_SafePoint()):match_tree(n);
            if (C->failing())  return NULL;
            if (m == NULL) { Matcher::soft_match_failure(); return NULL; }
            if (n->is_MemBar()) {
              m->as_MachMemBar()->set_adr_type(n->adr_type());
            }
          } else {                  // Nothing the matcher cares about
            if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Multi()) {  // Projections?
              // Convert to machine-dependent projection
              m = n->in(0)->as_Multi()->match( n->as_Proj(), this );
              NOT_PRODUCT(record_new2old(m, n);)
              if (m->in(0) != NULL) // m might be top
                collect_null_checks(m, n);
            } else {                // Else just a regular 'ol guy
              m = n->clone();       // So just clone into new-space
              NOT_PRODUCT(record_new2old(m, n);)
              // Def-Use edges will be added incrementally as Uses
              // of this node are matched.
              assert(m->outcnt() == 0, "no Uses of this clone yet");
            }
          }

          set_new_node(n, m);       // Map old to new
          if (_old_node_note_array != NULL) {
            Node_Notes* nn = C->locate_node_notes(_old_node_note_array,
                                                  n->_idx);
            C->set_node_notes_at(m->_idx, nn);
          }
          debug_only(match_alias_type(C, n, m));
        }
        n = m;    // n is now a new-space node
        mstack.set_node(n);
      }

      // New space!
      if (_visited.test_set(n->_idx)) continue; // while(mstack.is_nonempty())

      int i;
      // Put precedence edges on stack first (match them last).
      for (i = oldn->req(); (uint)i < oldn->len(); i++) {
        Node *m = oldn->in(i);
        if (m == NULL) break;
        // set -1 to call add_prec() instead of set_req() during Step1
        mstack.push(m, Visit, n, -1);
      }

      // Handle precedence edges for interior nodes
      for (i = n->len()-1; (uint)i >= n->req(); i--) {
        Node *m = n->in(i);
        if (m == NULL || C->node_arena()->contains(m)) continue;
        n->rm_prec(i);
        // set -1 to call add_prec() instead of set_req() during Step1
        mstack.push(m, Visit, n, -1);
      }

      // For constant debug info, I'd rather have unmatched constants.
      int cnt = n->req();
      JVMState* jvms = n->jvms();
      int debug_cnt = jvms ? jvms->debug_start() : cnt;

      // Now do only debug info.  Clone constants rather than matching.
      // Constants are represented directly in the debug info without
      // the need for executable machine instructions.
      // Monitor boxes are also represented directly.
      for (i = cnt - 1; i >= debug_cnt; --i) { // For all debug inputs do
        Node *m = n->in(i);          // Get input
        int op = m->Opcode();
        assert((op == Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites");
        if( op == Op_ConI || op == Op_ConP || op == Op_ConN || op == Op_ConNKlass ||
            op == Op_ConF || op == Op_ConD || op == Op_ConL
            // || op == Op_BoxLock  // %%%% enable this and remove (+++) in chaitin.cpp
            ) {
          m = m->clone();
          NOT_PRODUCT(record_new2old(m, n));
          mstack.push(m, Post_Visit, n, i); // Don't need to visit
          mstack.push(m->in(0), Visit, m, 0);
        } else {
          mstack.push(m, Visit, n, i);
        }
      }

      // And now walk his children, and convert his inputs to new-space.
      for( ; i >= 0; --i ) { // For all normal inputs do
        Node *m = n->in(i);  // Get input
        if(m != NULL)
          mstack.push(m, Visit, n, i);
      }

    }
    else if (nstate == Post_Visit) {
      // Set xformed input
      Node *p = mstack.parent();
      if (p != NULL) { // root doesn't have parent
        int i = (int)mstack.index();
        if (i >= 0)
          p->set_req(i, n); // required input
        else if (i == -1)
          p->add_prec(n);   // precedence input
        else
          ShouldNotReachHere();
      }
      mstack.pop(); // remove processed node from stack
    }
    else {
      ShouldNotReachHere();
    }
  } // while (mstack.is_nonempty())
  return n; // Return new-space Node
}

//------------------------------warp_outgoing_stk_arg------------------------
OptoReg::Name Matcher::warp_outgoing_stk_arg( VMReg reg, OptoReg::Name begin_out_arg_area, OptoReg::Name &out_arg_limit_per_call ) {
  // Convert outgoing argument location to a pre-biased stack offset
  if (reg->is_stack()) {
    OptoReg::Name warped = reg->reg2stack();
    // Adjust the stack slot offset to be the register number used
    // by the allocator.
    warped = OptoReg::add(begin_out_arg_area, warped);
    // Keep track of the largest numbered stack slot used for an arg.
    // Largest used slot per call-site indicates the amount of stack
    // that is killed by the call.
    if( warped >= out_arg_limit_per_call )
      out_arg_limit_per_call = OptoReg::add(warped,1);
    if (!RegMask::can_represent_arg(warped)) {
      C->record_method_not_compilable("unsupported calling sequence");
      return OptoReg::Bad;
    }
    return warped;
  }
  return OptoReg::as_OptoReg(reg);
}
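
// Note the asymmetry with warp_incoming_stk_arg() above: outgoing slots
// are biased by begin_out_arg_area (_new_SP plus the out-preserve area)
// rather than _old_SP, and the per-call-site limit bumped here feeds the
// method-wide _out_arg_limit in match_sfpt() below.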
1215
1216
1217 //------------------------------match_sfpt-------------------------------------
1218 // Helper function to match call instructions. Calls match special.
1219 // They match alone with no children. Their children, the incoming
1220 // arguments, match normally.
match_sfpt(SafePointNode * sfpt)1221 MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
1222 MachSafePointNode *msfpt = NULL;
1223 MachCallNode *mcall = NULL;
1224 uint cnt;
1225 // Split out case for SafePoint vs Call
1226 CallNode *call;
1227 const TypeTuple *domain;
1228 ciMethod* method = NULL;
1229 bool is_method_handle_invoke = false; // for special kill effects
1230 if( sfpt->is_Call() ) {
1231 call = sfpt->as_Call();
1232 domain = call->tf()->domain();
1233 cnt = domain->cnt();
1234
1235 // Match just the call, nothing else
1236 MachNode *m = match_tree(call);
1237 if (C->failing()) return NULL;
1238 if( m == NULL ) { Matcher::soft_match_failure(); return NULL; }
1239
1240 // Copy data from the Ideal SafePoint to the machine version
1241 mcall = m->as_MachCall();
1242
1243 mcall->set_tf( call->tf());
1244 mcall->set_entry_point( call->entry_point());
1245 mcall->set_cnt( call->cnt());
1246 mcall->set_guaranteed_safepoint(call->guaranteed_safepoint());
1247
1248 if( mcall->is_MachCallJava() ) {
1249 MachCallJavaNode *mcall_java = mcall->as_MachCallJava();
1250 const CallJavaNode *call_java = call->as_CallJava();
1251 assert(call_java->validate_symbolic_info(), "inconsistent info");
1252 method = call_java->method();
1253 mcall_java->_method = method;
1254 mcall_java->_optimized_virtual = call_java->is_optimized_virtual();
1255 is_method_handle_invoke = call_java->is_method_handle_invoke();
1256 mcall_java->_method_handle_invoke = is_method_handle_invoke;
1257 mcall_java->_override_symbolic_info = call_java->override_symbolic_info();
1258 mcall_java->_arg_escape = call_java->arg_escape();
1259 if (is_method_handle_invoke) {
1260 C->set_has_method_handle_invokes(true);
1261 }
1262 if( mcall_java->is_MachCallStaticJava() )
1263 mcall_java->as_MachCallStaticJava()->_name =
1264 call_java->as_CallStaticJava()->_name;
1265 if( mcall_java->is_MachCallDynamicJava() )
1266 mcall_java->as_MachCallDynamicJava()->_vtable_index =
1267 call_java->as_CallDynamicJava()->_vtable_index;
1268 }
1269 else if( mcall->is_MachCallRuntime() ) {
1270 MachCallRuntimeNode* mach_call_rt = mcall->as_MachCallRuntime();
1271 mach_call_rt->_name = call->as_CallRuntime()->_name;
1272 mach_call_rt->_leaf_no_fp = call->is_CallLeafNoFP();
1273 }
1274 else if( mcall->is_MachCallNative() ) {
1275 MachCallNativeNode* mach_call_native = mcall->as_MachCallNative();
1276 CallNativeNode* call_native = call->as_CallNative();
1277 mach_call_native->_name = call_native->_name;
1278 mach_call_native->_arg_regs = call_native->_arg_regs;
1279 mach_call_native->_ret_regs = call_native->_ret_regs;
1280 }
1281 msfpt = mcall;
1282 }
1283 // This is a non-call safepoint
1284 else {
1285 call = NULL;
1286 domain = NULL;
1287 MachNode *mn = match_tree(sfpt);
1288 if (C->failing()) return NULL;
1289 msfpt = mn->as_MachSafePoint();
1290 cnt = TypeFunc::Parms;
1291 }
1292 msfpt->_has_ea_local_in_scope = sfpt->has_ea_local_in_scope();
1293
1294 // Advertise the correct memory effects (for anti-dependence computation).
1295 msfpt->set_adr_type(sfpt->adr_type());
1296
1297 // Allocate a private array of RegMasks. These RegMasks are not shared.
1298 msfpt->_in_rms = NEW_RESOURCE_ARRAY( RegMask, cnt );
1299 // Empty them all.
1300 for (uint i = 0; i < cnt; i++) ::new (&(msfpt->_in_rms[i])) RegMask();
1301
1302 // Do all the pre-defined non-Empty register masks
1303 msfpt->_in_rms[TypeFunc::ReturnAdr] = _return_addr_mask;
1304 msfpt->_in_rms[TypeFunc::FramePtr ] = c_frame_ptr_mask;
1305
1306 // Place first outgoing argument can possibly be put.
1307 OptoReg::Name begin_out_arg_area = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
1308 assert( is_even(begin_out_arg_area), "" );
1309 // Compute max outgoing register number per call site.
1310 OptoReg::Name out_arg_limit_per_call = begin_out_arg_area;
1311 // Calls to C may hammer extra stack slots above and beyond any arguments.
1312 // These are usually backing store for register arguments for varargs.
1313 if( call != NULL && call->is_CallRuntime() )
1314 out_arg_limit_per_call = OptoReg::add(out_arg_limit_per_call,C->varargs_C_out_slots_killed());
1315 if( call != NULL && call->is_CallNative() )
1316 out_arg_limit_per_call = OptoReg::add(out_arg_limit_per_call, call->as_CallNative()->_shadow_space_bytes);
1317
1318
1319 // Do the normal argument list (parameters) register masks
1320 int argcnt = cnt - TypeFunc::Parms;
1321 if( argcnt > 0 ) { // Skip it all if we have no args
1322 BasicType *sig_bt = NEW_RESOURCE_ARRAY( BasicType, argcnt );
1323 VMRegPair *parm_regs = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
1324 int i;
1325 for( i = 0; i < argcnt; i++ ) {
1326 sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type();
1327 }
1328 // V-call to pick proper calling convention
1329 call->calling_convention( sig_bt, parm_regs, argcnt );
1330
1331 #ifdef ASSERT
1332 // Sanity check users' calling convention. Really handy during
1333 // the initial porting effort. Fairly expensive otherwise.
1334 { for (int i = 0; i<argcnt; i++) {
1335 if( !parm_regs[i].first()->is_valid() &&
1336 !parm_regs[i].second()->is_valid() ) continue;
1337 VMReg reg1 = parm_regs[i].first();
1338 VMReg reg2 = parm_regs[i].second();
1339 for (int j = 0; j < i; j++) {
1340 if( !parm_regs[j].first()->is_valid() &&
1341 !parm_regs[j].second()->is_valid() ) continue;
1342 VMReg reg3 = parm_regs[j].first();
1343 VMReg reg4 = parm_regs[j].second();
1344 if( !reg1->is_valid() ) {
1345 assert( !reg2->is_valid(), "valid halvsies" );
1346 } else if( !reg3->is_valid() ) {
1347 assert( !reg4->is_valid(), "valid halvsies" );
1348 } else {
1349 assert( reg1 != reg2, "calling conv. must produce distinct regs");
1350 assert( reg1 != reg3, "calling conv. must produce distinct regs");
1351 assert( reg1 != reg4, "calling conv. must produce distinct regs");
1352 assert( reg2 != reg3, "calling conv. must produce distinct regs");
1353 assert( reg2 != reg4 || !reg2->is_valid(), "calling conv. must produce distinct regs");
1354 assert( reg3 != reg4, "calling conv. must produce distinct regs");
1355 }
1356 }
1357 }
1358 }
1359 #endif

    // Visit each argument.  Compute its outgoing register mask.
    // Return results can span two registers, so two mask bits may be set.
    // Compute max over all outgoing arguments both per call-site
    // and over the entire method.
    for( i = 0; i < argcnt; i++ ) {
      // Address of incoming argument mask to fill in
      RegMask *rm = &mcall->_in_rms[i+TypeFunc::Parms];
      VMReg first = parm_regs[i].first();
      VMReg second = parm_regs[i].second();
      if(!first->is_valid() &&
         !second->is_valid()) {
        continue;               // Avoid Halves
      }
      // Handle case where arguments are in vector registers.
      if(call->in(TypeFunc::Parms + i)->bottom_type()->isa_vect()) {
        OptoReg::Name reg_fst = OptoReg::as_OptoReg(first);
        OptoReg::Name reg_snd = OptoReg::as_OptoReg(second);
        assert (reg_fst <= reg_snd, "fst=%d snd=%d", reg_fst, reg_snd);
        for (OptoReg::Name r = reg_fst; r <= reg_snd; r++) {
          rm->Insert(r);
        }
      }
      // Grab first register, adjust stack slots and insert in mask.
      OptoReg::Name reg1 = warp_outgoing_stk_arg(first, begin_out_arg_area, out_arg_limit_per_call );
      if (OptoReg::is_valid(reg1))
        rm->Insert( reg1 );
      // Grab second register (if any), adjust stack slots and insert in mask.
      OptoReg::Name reg2 = warp_outgoing_stk_arg(second, begin_out_arg_area, out_arg_limit_per_call );
      if (OptoReg::is_valid(reg2))
        rm->Insert( reg2 );
    } // End of for all arguments
  }

  // Compute the max stack slot killed by any call.  These will not be
  // available for debug info, and will be used to adjust FIRST_STACK_mask
  // after all call sites have been visited.
  if( _out_arg_limit < out_arg_limit_per_call)
    _out_arg_limit = out_arg_limit_per_call;

  if (mcall) {
    // Kill the outgoing argument area, including any non-argument holes and
    // any legacy C-killed slots.  Use Fat-Projections to do the killing.
    // Since the max-per-method covers the max-per-call-site and debug info
    // is excluded on the max-per-method basis, debug info cannot land in
    // this killed area.
    uint r_cnt = mcall->tf()->range()->cnt();
    MachProjNode *proj = new MachProjNode( mcall, r_cnt+10000, RegMask::Empty, MachProjNode::fat_proj );
    if (!RegMask::can_represent_arg(OptoReg::Name(out_arg_limit_per_call-1))) {
      C->record_method_not_compilable("unsupported outgoing calling sequence");
    } else {
      for (int i = begin_out_arg_area; i < out_arg_limit_per_call; i++)
        proj->_rout.Insert(OptoReg::Name(i));
    }
    if (proj->_rout.is_NotEmpty()) {
      push_projection(proj);
    }
  }
  // Transfer the safepoint information from the call to the mcall
  // Move the JVMState list
  msfpt->set_jvms(sfpt->jvms());
  for (JVMState* jvms = msfpt->jvms(); jvms; jvms = jvms->caller()) {
    jvms->set_map(sfpt);
  }

  // Debug inputs begin just after the last incoming parameter
  assert((mcall == NULL) || (mcall->jvms() == NULL) ||
         (mcall->jvms()->debug_start() + mcall->_jvmadj == mcall->tf()->domain()->cnt()), "");

  // Add additional edges.
  if (msfpt->mach_constant_base_node_input() != (uint)-1 && !msfpt->is_MachCallLeaf()) {
    // For these calls we can not add MachConstantBase in expand(), as the
    // ins are not complete then.
    msfpt->ins_req(msfpt->mach_constant_base_node_input(), C->mach_constant_base_node());
    if (msfpt->jvms() &&
        msfpt->mach_constant_base_node_input() <= msfpt->jvms()->debug_start() + msfpt->_jvmadj) {
      // We added an edge before jvms, so we must adapt the position of the ins.
      msfpt->jvms()->adapt_position(+1);
    }
  }

  // Registers killed by the call are set in the local scheduling pass
  // of Global Code Motion.
  return msfpt;
}

//---------------------------match_tree----------------------------------------
// Match an Ideal Node DAG - turn it into a tree; Label & Reduce.  Used as part
// of the wholesale conversion from Ideal to Mach Nodes.  Also used for
// making GotoNodes while building the CFG and in init_spill_mask() to identify
// a Load's result RegMask for memoization in idealreg2regmask[]
MachNode *Matcher::match_tree( const Node *n ) {
  assert( n->Opcode() != Op_Phi, "cannot match" );
  assert( !n->is_block_start(), "cannot match" );
  // Set the mark for all locally allocated State objects.
  // When this call returns, the _states_arena arena will be reset
  // freeing all State objects.
  ResourceMark rm( &_states_arena );

  LabelRootDepth = 0;

  // StoreNodes require their Memory input to match any LoadNodes
  Node *mem = n->is_Store() ? n->in(MemNode::Memory) : (Node*)1 ;
#ifdef ASSERT
  Node* save_mem_node = _mem_node;
  _mem_node = n->is_Store() ? (Node*)n : NULL;
#endif
  // State object for root node of match tree
  // Allocate it on _states_arena - stack allocation can cause stack overflow.
  State *s = new (&_states_arena) State;
  s->_kids[0] = NULL;
  s->_kids[1] = NULL;
  s->_leaf = (Node*)n;
  // Label the input tree, allocating labels from top-level arena
  Node* root_mem = mem;
  Label_Root(n, s, n->in(0), root_mem);
  if (C->failing()) return NULL;

  // The minimum cost match for the whole tree is found at the root State
  uint mincost = max_juint;
  uint cost = max_juint;
  uint i;
  for (i = 0; i < NUM_OPERANDS; i++) {
    if (s->valid(i) &&               // valid entry and
        s->cost(i) < cost &&         // low cost and
        s->rule(i) >= NUM_OPERANDS) {// not an operand
      mincost = i;
      cost = s->cost(i);
    }
  }
  if (mincost == max_juint) {
#ifndef PRODUCT
    tty->print("No matching rule for:");
    s->dump();
#endif
    Matcher::soft_match_failure();
    return NULL;
  }
  // Reduce input tree based upon the state labels to machine Nodes
  MachNode *m = ReduceInst(s, s->rule(mincost), mem);
  // New-to-old mapping is done in ReduceInst, to cover complex instructions.
  NOT_PRODUCT(_old2new_map.map(n->_idx, m);)

  // Add any Matcher-ignored edges
  uint cnt = n->req();
  uint start = 1;
  if( mem != (Node*)1 ) start = MemNode::Memory+1;
  if( n->is_AddP() ) {
    assert( mem == (Node*)1, "" );
    start = AddPNode::Base+1;
  }
  for( i = start; i < cnt; i++ ) {
    if( !n->match_edge(i) ) {
      if( i < m->req() )
        m->ins_req( i, n->in(i) );
      else
        m->add_req( n->in(i) );
    }
  }

  debug_only( _mem_node = save_mem_node; )
  return m;
}
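
// Illustrative sketch (not tied to any particular AD file): given the tree
//   (StoreI mem adr (AddI (LoadI mem adr) con))
// Label_Root labels every sub-tree with all applicable rules and their costs,
// and ReduceInst then picks the cheapest cover.  On a platform whose AD file
// supplies an add-to-memory rule this collapses into a single MachNode;
// otherwise the Load is matched separately and its result lands in a register.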

//------------------------------match_into_reg---------------------------------
// Choose to either match this Node in a register or part of the current
// match tree.  Return true for requiring a register and false for matching
// as part of the current match tree.
static bool match_into_reg( const Node *n, Node *m, Node *control, int i, bool shared ) {

  const Type *t = m->bottom_type();

  if (t->singleton()) {
    // Never force constants into registers.  Allow them to match as
    // constants or registers.  Copies of the same value will share
    // the same register.  See find_shared_node.
    return false;
  } else {                      // Not a constant
    // Stop recursion if they have different Controls.
    Node* m_control = m->in(0);
    // Control of load's memory can post-dominate load's control.
    // So use it since load can't float above its memory.
    Node* mem_control = (m->is_Load()) ? m->in(MemNode::Memory)->in(0) : NULL;
    if (control && m_control && control != m_control && control != mem_control) {

      // Actually, we can live with the most conservative control we
      // find, if it post-dominates the others.  This allows us to
      // pick up load/op/store trees where the load can float a little
      // above the store.
      Node *x = control;
      const uint max_scan = 6;  // Arbitrary scan cutoff
      uint j;
      for (j=0; j<max_scan; j++) {
        if (x->is_Region())     // Bail out at merge points
          return true;
        x = x->in(0);
        if (x == m_control)     // Does 'control' post-dominate
          break;                // m->in(0)?  If so, we can use it
        if (x == mem_control)   // Does 'control' post-dominate
          break;                // mem_control?  If so, we can use it
      }
      if (j == max_scan)        // No post-domination before scan end?
        return true;            // Then break the match tree up
    }
    if ((m->is_DecodeN() && Matcher::narrow_oop_use_complex_address()) ||
        (m->is_DecodeNKlass() && Matcher::narrow_klass_use_complex_address())) {
      // These are commonly used in address expressions and can
      // efficiently fold into them on X64 in some cases.
      return false;
    }
  }

  // Not forceable cloning.  If shared, put it into a register.
  return shared;
}

//------------------------------Instruction Selection--------------------------
// Label method walks a "tree" of nodes, using the ADLC generated DFA to match
// ideal nodes to machine instructions.  Trees are delimited by shared Nodes,
// things the Matcher does not match (e.g., Memory), and things with different
// Controls (hence forced into different blocks).  We pass in the Control
// selected for this entire State tree.

// The Matcher works on Trees, but an Intel add-to-memory requires a DAG: the
// Store and the Load must have identical Memories (as well as identical
// pointers).  Since the Matcher does not have anything for Memory (and
// does not handle DAGs), I have to match the Memory input myself.  If the
// Tree root is a Store or if there are multiple Loads in the tree, I require
// all Loads to have the identical memory.
Node* Matcher::Label_Root(const Node* n, State* svec, Node* control, Node*& mem) {
  // Since Label_Root is a recursive function, it's possible that we might run
  // out of stack space.  See bugs 6272980 & 6227033 for more info.
  LabelRootDepth++;
  if (LabelRootDepth > MaxLabelRootDepth) {
    C->record_method_not_compilable("Out of stack space, increase MaxLabelRootDepth");
    return NULL;
  }
  uint care = 0;                // Edges matcher cares about
  uint cnt = n->req();
  uint i = 0;

  // Examine children for memory state
  // Can only subsume a child into your match-tree if that child's memory state
  // is not modified along the path to another input.
  // It is unsafe even if the other inputs are separate roots.
  Node *input_mem = NULL;
  for( i = 1; i < cnt; i++ ) {
    if( !n->match_edge(i) ) continue;
    Node *m = n->in(i);         // Get ith input
    assert( m, "expect non-null children" );
    if( m->is_Load() ) {
      if( input_mem == NULL ) {
        input_mem = m->in(MemNode::Memory);
        if (mem == (Node*)1) {
          // Save this memory to bail out if there's another memory access
          // to a different memory location in the same tree.
          mem = input_mem;
        }
      } else if( input_mem != m->in(MemNode::Memory) ) {
        input_mem = NodeSentinel;
      }
    }
  }

  for( i = 1; i < cnt; i++ ){   // For my children
    if( !n->match_edge(i) ) continue;
    Node *m = n->in(i);         // Get ith input
    // Allocate states out of a private arena
    State *s = new (&_states_arena) State;
    svec->_kids[care++] = s;
    assert( care <= 2, "binary only for now" );

    // Recursively label the State tree.
    s->_kids[0] = NULL;
    s->_kids[1] = NULL;
    s->_leaf = m;

    // Check for leaves of the State Tree; things that cannot be a part of
    // the current tree.  If it finds any, that value is matched as a
    // register operand.  If not, then the normal matching is used.
    if( match_into_reg(n, m, control, i, is_shared(m)) ||
        // Stop recursion if this is a LoadNode and there is another memory access
        // to a different memory location in the same tree (for example, a StoreNode
        // at the root of this tree or another LoadNode in one of the children).
        ((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem) ||
        // Can NOT include the match of a subtree when its memory state
        // is used by any of the other subtrees
        (input_mem == NodeSentinel) ) {
      // Print when we exclude matching due to different memory states at input-loads
      if (PrintOpto && (Verbose && WizardMode) && (input_mem == NodeSentinel)
          && !((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem)) {
        tty->print_cr("invalid input_mem");
      }
      // Switch to a register-only opcode; this value must be in a register
      // and cannot be subsumed as part of a larger instruction.
      s->DFA( m->ideal_reg(), m );

    } else {
      // If match tree has no control and we do, adopt it for entire tree
      if( control == NULL && m->in(0) != NULL && m->req() > 1 )
        control = m->in(0);     // Pick up control
      // Else match as a normal part of the match tree.
      control = Label_Root(m, s, control, mem);
      if (C->failing()) return NULL;
    }
  }

  // Call DFA to match this node, and return
  svec->DFA( n->Opcode(), n );

#ifdef ASSERT
  uint x;
  for( x = 0; x < _LAST_MACH_OPER; x++ )
    if( svec->valid(x) )
      break;

  if (x >= _LAST_MACH_OPER) {
    n->dump();
    svec->dump();
    assert( false, "bad AD file" );
  }
#endif
  return control;
}

// Con nodes reduced using the same rule can share their MachNode
// which reduces the number of copies of a constant in the final
// program.  The register allocator is free to split uses later to
// split live ranges.
MachNode* Matcher::find_shared_node(Node* leaf, uint rule) {
  if (!leaf->is_Con() && !leaf->is_DecodeNarrowPtr()) return NULL;

  // See if this Con has already been reduced using this rule.
  if (_shared_nodes.Size() <= leaf->_idx) return NULL;
  MachNode* last = (MachNode*)_shared_nodes.at(leaf->_idx);
  if (last != NULL && rule == last->rule()) {
    // Don't expect control change for DecodeN
    if (leaf->is_DecodeNarrowPtr())
      return last;
    // Get the new space root.
    Node* xroot = new_node(C->root());
    if (xroot == NULL) {
      // This shouldn't happen given the order of matching.
      return NULL;
    }

    // Shared constants need to have their control be root so they
    // can be scheduled properly.
    Node* control = last->in(0);
    if (control != xroot) {
      if (control == NULL || control == C->root()) {
        last->set_req(0, xroot);
      } else {
        assert(false, "unexpected control");
        return NULL;
      }
    }
    return last;
  }
  return NULL;
}

//------------------------------ReduceInst-------------------------------------
// Reduce a State tree (with given Control) into a tree of MachNodes.
// This routine (and its cohort ReduceOper) convert Ideal Nodes into
// complicated machine Nodes.  Each MachNode covers some tree of Ideal Nodes.
// Each MachNode has a number of complicated MachOper operands; each
// MachOper also covers a further tree of Ideal Nodes.

// The root of the Ideal match tree is always an instruction, so we enter
// the recursion here.  After building the MachNode, we need to recurse
// the tree checking for these cases:
// (1) Child is an instruction -
//     Build the instruction (recursively), add it as an edge.
//     Build a simple operand (register) to hold the result of the instruction.
// (2) Child is an interior part of an instruction -
//     Skip over it (do nothing)
// (3) Child is the start of an operand -
//     Build the operand, place it inside the instruction
//     Call ReduceOper.
MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) {
  assert( rule >= NUM_OPERANDS, "called with operand rule" );

  MachNode* shared_node = find_shared_node(s->_leaf, rule);
  if (shared_node != NULL) {
    return shared_node;
  }

  // Build the object to represent this state & prepare for recursive calls
  MachNode *mach = s->MachNodeGenerator(rule);
  guarantee(mach != NULL, "Missing MachNode");
  mach->_opnds[0] = s->MachOperGenerator(_reduceOp[rule]);
  assert( mach->_opnds[0] != NULL, "Missing result operand" );
  Node *leaf = s->_leaf;
  NOT_PRODUCT(record_new2old(mach, leaf);)
  // Check for instruction or instruction chain rule
  if( rule >= _END_INST_CHAIN_RULE || rule < _BEGIN_INST_CHAIN_RULE ) {
    assert(C->node_arena()->contains(s->_leaf) || !has_new_node(s->_leaf),
           "duplicating node that's already been matched");
    // Instruction
    mach->add_req( leaf->in(0) ); // Set initial control
    // Reduce interior of complex instruction
    ReduceInst_Interior( s, rule, mem, mach, 1 );
  } else {
    // Instruction chain rules are data-dependent on their inputs
    mach->add_req(0);           // Set initial control to none
    ReduceInst_Chain_Rule( s, rule, mem, mach );
  }

  // If a Memory was used, insert a Memory edge
  if( mem != (Node*)1 ) {
    mach->ins_req(MemNode::Memory,mem);
#ifdef ASSERT
    // Verify adr type after matching memory operation
    const MachOper* oper = mach->memory_operand();
    if (oper != NULL && oper != (MachOper*)-1) {
      // It has a unique memory operand.  Find corresponding ideal mem node.
      Node* m = NULL;
      if (leaf->is_Mem()) {
        m = leaf;
      } else {
        m = _mem_node;
        assert(m != NULL && m->is_Mem(), "expecting memory node");
      }
      const Type* mach_at = mach->adr_type();
      // DecodeN node consumed by an address may have different type
      // than its input.  Don't compare types for such case.
      if (m->adr_type() != mach_at &&
          (m->in(MemNode::Address)->is_DecodeNarrowPtr() ||
           (m->in(MemNode::Address)->is_AddP() &&
            m->in(MemNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr()) ||
           (m->in(MemNode::Address)->is_AddP() &&
            m->in(MemNode::Address)->in(AddPNode::Address)->is_AddP() &&
            m->in(MemNode::Address)->in(AddPNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr()))) {
        mach_at = m->adr_type();
      }
      if (m->adr_type() != mach_at) {
        m->dump();
        tty->print_cr("mach:");
        mach->dump(1);
      }
      assert(m->adr_type() == mach_at, "matcher should not change adr type");
    }
#endif
  }

  // If the _leaf is an AddP, insert the base edge
  if (leaf->is_AddP()) {
    mach->ins_req(AddPNode::Base,leaf->in(AddPNode::Base));
  }

  uint number_of_projections_prior = number_of_projections();

  // Perform any 1-to-many expansions required
  MachNode *ex = mach->Expand(s, _projection_list, mem);
  if (ex != mach) {
    assert(ex->ideal_reg() == mach->ideal_reg(), "ideal types should match");
    if( ex->in(1)->is_Con() )
      ex->in(1)->set_req(0, C->root());
    // Remove old node from the graph
    for( uint i=0; i<mach->req(); i++ ) {
      mach->set_req(i,NULL);
    }
    NOT_PRODUCT(record_new2old(ex, s->_leaf);)
  }

  // PhaseChaitin::fixup_spills will sometimes generate spill code
  // via the matcher.  By that time, nodes have been wired into the CFG,
  // and any further nodes generated by expand rules will be left hanging
  // in space, and will not get emitted as output code.  Catch this.
  // Also, catch any new register allocation constraints ("projections")
  // generated belatedly during spill code generation.
  if (_allocation_started) {
    guarantee(ex == mach, "no expand rules during spill generation");
    guarantee(number_of_projections_prior == number_of_projections(), "no allocation during spill generation");
  }

  if (leaf->is_Con() || leaf->is_DecodeNarrowPtr()) {
    // Record the con for sharing
    _shared_nodes.map(leaf->_idx, ex);
  }

  // Have mach nodes inherit GC barrier data
  if (leaf->is_LoadStore()) {
    mach->set_barrier_data(leaf->as_LoadStore()->barrier_data());
  } else if (leaf->is_Mem()) {
    mach->set_barrier_data(leaf->as_Mem()->barrier_data());
  }

  return ex;
}

void Matcher::handle_precedence_edges(Node* n, MachNode *mach) {
  for (uint i = n->req(); i < n->len(); i++) {
    if (n->in(i) != NULL) {
      mach->add_prec(n->in(i));
    }
  }
}

void Matcher::ReduceInst_Chain_Rule(State* s, int rule, Node* &mem, MachNode* mach) {
  // 'op' is what I am expecting to receive
  int op = _leftOp[rule];
  // Operand type to catch child's result
  // This is what my child will give me.
  unsigned int opnd_class_instance = s->rule(op);
  // Choose between operand class or not.
  // This is what I will receive.
  int catch_op = (FIRST_OPERAND_CLASS <= op && op < NUM_OPERANDS) ? opnd_class_instance : op;
  // New rule for child.  Chase operand classes to get the actual rule.
  unsigned int newrule = s->rule(catch_op);

  if (newrule < NUM_OPERANDS) {
    // Chain from operand or operand class, may be output of shared node
    assert(opnd_class_instance < NUM_OPERANDS, "Bad AD file: Instruction chain rule must chain from operand");
    // Insert operand into array of operands for this instruction
    mach->_opnds[1] = s->MachOperGenerator(opnd_class_instance);

    ReduceOper(s, newrule, mem, mach);
  } else {
    // Chain from the result of an instruction
    assert(newrule >= _LAST_MACH_OPER, "Do NOT chain from internal operand");
    mach->_opnds[1] = s->MachOperGenerator(_reduceOp[catch_op]);
    Node *mem1 = (Node*)1;
    debug_only(Node *save_mem_node = _mem_node;)
    mach->add_req( ReduceInst(s, newrule, mem1) );
    debug_only(_mem_node = save_mem_node;)
  }
  return;
}

uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mach, uint num_opnds ) {
  handle_precedence_edges(s->_leaf, mach);

  if( s->_leaf->is_Load() ) {
    Node *mem2 = s->_leaf->in(MemNode::Memory);
    assert( mem == (Node*)1 || mem == mem2, "multiple Memories being matched at once?" );
    debug_only( if( mem == (Node*)1 ) _mem_node = s->_leaf;)
    mem = mem2;
  }
  if( s->_leaf->in(0) != NULL && s->_leaf->req() > 1) {
    if( mach->in(0) == NULL )
      mach->set_req(0, s->_leaf->in(0));
  }

  // Now recursively walk the state tree & add operand list.
  for( uint i=0; i<2; i++ ) {   // binary tree
    State *newstate = s->_kids[i];
    if( newstate == NULL ) break; // Might only have 1 child
    // 'op' is what I am expecting to receive
    int op;
    if( i == 0 ) {
      op = _leftOp[rule];
    } else {
      op = _rightOp[rule];
    }
    // Operand type to catch child's result
    // This is what my child will give me.
    int opnd_class_instance = newstate->rule(op);
    // Choose between operand class or not.
    // This is what I will receive.
    int catch_op = (op >= FIRST_OPERAND_CLASS && op < NUM_OPERANDS) ? opnd_class_instance : op;
    // New rule for child.  Chase operand classes to get the actual rule.
    int newrule = newstate->rule(catch_op);

    if (newrule < NUM_OPERANDS) { // Operand/operandClass or internalOp/instruction?
      // Operand/operandClass
      // Insert operand into array of operands for this instruction
      mach->_opnds[num_opnds++] = newstate->MachOperGenerator(opnd_class_instance);
      ReduceOper(newstate, newrule, mem, mach);

    } else {                    // Child is internal operand or new instruction
      if (newrule < _LAST_MACH_OPER) { // internal operand or instruction?
        // internal operand --> call ReduceInst_Interior
        // Interior of complex instruction.  Do nothing but recurse.
        num_opnds = ReduceInst_Interior(newstate, newrule, mem, mach, num_opnds);
      } else {
        // instruction --> call build operand( ) to catch result
        //             --> ReduceInst( newrule )
        mach->_opnds[num_opnds++] = s->MachOperGenerator(_reduceOp[catch_op]);
        Node *mem1 = (Node*)1;
        debug_only(Node *save_mem_node = _mem_node;)
        mach->add_req( ReduceInst( newstate, newrule, mem1 ) );
        debug_only(_mem_node = save_mem_node;)
      }
    }
    assert( mach->_opnds[num_opnds-1], "" );
  }
  return num_opnds;
}

// This routine walks the interior of possible complex operands.
// At each point we check our children in the match tree:
// (1) No children -
//     We are a leaf; add _leaf field as an input to the MachNode
// (2) Child is an internal operand -
//     Skip over it ( do nothing )
// (3) Child is an instruction -
//     Call ReduceInst recursively and
//     add the instruction as an input to the MachNode
void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) {
  assert( rule < _LAST_MACH_OPER, "called with operand rule" );
  State *kid = s->_kids[0];
  assert( kid == NULL || s->_leaf->in(0) == NULL, "internal operands have no control" );

  // Leaf?  And not subsumed?
  if( kid == NULL && !_swallowed[rule] ) {
    mach->add_req( s->_leaf );  // Add leaf pointer
    return;                     // Bail out
  }

  if( s->_leaf->is_Load() ) {
    assert( mem == (Node*)1, "multiple Memories being matched at once?" );
    mem = s->_leaf->in(MemNode::Memory);
    debug_only(_mem_node = s->_leaf;)
  }

  handle_precedence_edges(s->_leaf, mach);

  if( s->_leaf->in(0) && s->_leaf->req() > 1) {
    if( !mach->in(0) )
      mach->set_req(0,s->_leaf->in(0));
    else {
      assert( s->_leaf->in(0) == mach->in(0), "same instruction, differing controls?" );
    }
  }

  for (uint i = 0; kid != NULL && i < 2; kid = s->_kids[1], i++) { // binary tree
    int newrule;
    if( i == 0) {
      newrule = kid->rule(_leftOp[rule]);
    } else {
      newrule = kid->rule(_rightOp[rule]);
    }

    if (newrule < _LAST_MACH_OPER) { // Operand or instruction?
      // Internal operand; recurse but do nothing else
      ReduceOper(kid, newrule, mem, mach);

    } else {                    // Child is a new instruction
      // Reduce the instruction, and add a direct pointer from this
      // machine instruction to the newly reduced one.
      Node *mem1 = (Node*)1;
      debug_only(Node *save_mem_node = _mem_node;)
      mach->add_req( ReduceInst( kid, newrule, mem1 ) );
      debug_only(_mem_node = save_mem_node;)
    }
  }
}

// -------------------------------------------------------------------------
// Java-Java calling convention
// (what you use when Java calls Java)

//------------------------------find_receiver----------------------------------
// For a given signature, return the OptoReg for parameter 0.
OptoReg::Name Matcher::find_receiver() {
  VMRegPair regs;
  BasicType sig_bt = T_OBJECT;
  SharedRuntime::java_calling_convention(&sig_bt, &regs, 1);
  // Return argument 0 register.  In the LP64 build pointers
  // take 2 registers, but the VM wants only the 'main' name.
  return OptoReg::as_OptoReg(regs.first());
}
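
// For instance, on a register-rich 64-bit platform the Java calling
// convention typically assigns the receiver ('this') to the first integer
// argument register, so find_receiver() names that register's OptoReg; on a
// stack-based convention it would name a stack slot instead.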

bool Matcher::is_vshift_con_pattern(Node* n, Node* m) {
  if (n != NULL && m != NULL) {
    return VectorNode::is_vector_shift(n) &&
           VectorNode::is_vector_shift_count(m) && m->in(1)->is_Con();
  }
  return false;
}
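
// Illustrative shape (named after the Ideal vector nodes): a pattern such as
//   (LShiftVI src (LShiftCntV (ConI #c)))
// qualifies, letting a backend keep the constant count next to the shift and
// emit an immediate-form vector shift instead of materializing the count.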

bool Matcher::clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
  // Must clone all producers of flags, or we will not match correctly.
  // Suppose a compare setting int-flags is shared (e.g., a switch-tree)
  // then it will match into an ideal Op_RegFlags.  Alas, the fp-flags
  // are also there, so we may match a float-branch to int-flags and
  // expect the allocator to haul the flags from the int-side to the
  // fp-side.  No can do.
  if (_must_clone[m->Opcode()]) {
    mstack.push(m, Visit);
    return true;
  }
  return pd_clone_node(n, m, mstack);
}

bool Matcher::clone_base_plus_offset_address(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  Node *off = m->in(AddPNode::Offset);
  if (off->is_Con()) {
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    // Clone X+offset as it also folds into most addressing expressions
    mstack.push(off, Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
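
// Cloning here means each memory user matches its own copy of the
// base-plus-constant AddP, so the address folds into a [base + offset]
// addressing mode at every use instead of being forced into a register
// once and shared.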

// A method-klass-holder may be passed in the inline_cache_reg
// and then expanded into the inline_cache_reg and a method_ptr register
//   defined in ad_<arch>.cpp

//------------------------------find_shared------------------------------------
// Set bits if Node is shared or otherwise a root
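// The walk below is an explicit-stack DFS with per-node states: Pre_Visit
// (first touch: a revisit here means the node is shared), Visit (push the
// children), Post_Visit (reshape special opcodes once all inputs are seen),
// and Alt_Post_Visit for If/CountedLoopEnd, which re-attach the Cmp beside
// the Bool so cmp/branch can match as one instruction.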
void Matcher::find_shared(Node* n) {
  // Allocate stack of size C->live_nodes() * 2 to avoid frequent realloc
  MStack mstack(C->live_nodes() * 2);
  // Mark nodes as address_visited if they are inputs to an address expression
  VectorSet address_visited;
  mstack.push(n, Visit);        // Don't need to pre-visit root node
  while (mstack.is_nonempty()) {
    n = mstack.node();          // Leave node on stack
    Node_State nstate = mstack.state();
    uint nop = n->Opcode();
    if (nstate == Pre_Visit) {
      if (address_visited.test(n->_idx)) { // Visited in address already?
        // Flag as visited and shared now.
        set_visited(n);
      }
      if (is_visited(n)) {      // Visited already?
        // Node is shared and has no reason to clone.  Flag it as shared.
        // This causes it to match into a register for the sharing.
        set_shared(n);          // Flag as shared and
        if (n->is_DecodeNarrowPtr()) {
          // Oop field/array element loads must be shared but since
          // they are shared through a DecodeN they may appear to have
          // a single use so force sharing here.
          set_shared(n->in(1));
        }
        mstack.pop();           // remove node from stack
        continue;
      }
      nstate = Visit;           // Not already visited; so visit now
    }
    if (nstate == Visit) {
      mstack.set_state(Post_Visit);
      set_visited(n);           // Flag as visited now
      bool mem_op = false;
      int mem_addr_idx = MemNode::Address;
      if (find_shared_visit(mstack, n, nop, mem_op, mem_addr_idx)) {
        continue;
      }
      for (int i = n->req() - 1; i >= 0; --i) { // For my children
        Node* m = n->in(i);     // Get ith input
        if (m == NULL) {
          continue;             // Ignore NULLs
        }
        if (clone_node(n, m, mstack)) {
          continue;
        }

        // Clone addressing expressions as they are "free" in memory access instructions
        if (mem_op && i == mem_addr_idx && m->is_AddP() &&
            // When there are other uses besides address expressions
            // put it on stack and mark as shared.
            !is_visited(m)) {
          // Some inputs for address expression are not put on stack
          // to avoid marking them as shared and forcing them into register
          // if they are used only in address expressions.
          // But they should be marked as shared if there are other uses
          // besides address expressions.

          if (pd_clone_address_expressions(m->as_AddP(), mstack, address_visited)) {
            continue;
          }
        }   // if( mem_op &&
        mstack.push(m, Pre_Visit);
      }     // for(int i = ...)
    }
    else if (nstate == Alt_Post_Visit) {
      mstack.pop();             // Remove node from stack
      // We cannot remove the Cmp input from the Bool here, as the Bool may be
      // shared and all users of the Bool need to move the Cmp in parallel.
      // This leaves both the Bool and the If pointing at the Cmp.  To
      // prevent the Matcher from trying to Match the Cmp along both paths
      // BoolNode::match_edge always returns a zero.

      // We reorder the Op_If in a pre-order manner, so we can visit without
      // accidentally sharing the Cmp (the Bool and the If make 2 users).
      n->add_req( n->in(1)->in(1) ); // Add the Cmp next to the Bool
    }
    else if (nstate == Post_Visit) {
      mstack.pop();             // Remove node from stack

      // Now hack a few special opcodes
      uint opcode = n->Opcode();
      bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->matcher_find_shared_post_visit(this, n, opcode);
      if (!gc_handled) {
        find_shared_post_visit(n, opcode);
      }
    }
    else {
      ShouldNotReachHere();
    }
  } // end of while (mstack.is_nonempty())
}

bool Matcher::find_shared_visit(MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) {
  switch(opcode) {  // Handle some opcodes special
    case Op_Phi:    // Treat Phis as shared roots
    case Op_Parm:
    case Op_Proj:   // All handled specially during matching
    case Op_SafePointScalarObject:
      set_shared(n);
      set_dontcare(n);
      break;
    case Op_If:
    case Op_CountedLoopEnd:
      mstack.set_state(Alt_Post_Visit); // Alternative way
      // Convert (If (Bool (CmpX A B))) into (If (Bool) (CmpX A B)).  Helps
      // with matching cmp/branch in 1 instruction.  The Matcher needs the
      // Bool and CmpX side-by-side, because it can only get at constants
      // that are at the leaves of Match trees, and the Bool's condition acts
      // as a constant here.
      mstack.push(n->in(1), Visit);     // Clone the Bool
      mstack.push(n->in(0), Pre_Visit); // Visit control input
      return true; // while (mstack.is_nonempty())
    case Op_ConvI2D:  // These forms efficiently match with a prior
    case Op_ConvI2F:  //   Load but not a following Store
      if( n->in(1)->is_Load() &&        // Prior load
          n->outcnt() == 1 &&           // Not already shared
          n->unique_out()->is_Store() ) // Following store
        set_shared(n);                  // Force it to be a root
      break;
    case Op_ReverseBytesI:
    case Op_ReverseBytesL:
      if( n->in(1)->is_Load() &&        // Prior load
          n->outcnt() == 1 )            // Not already shared
        set_shared(n);                  // Force it to be a root
      break;
    case Op_BoxLock:     // Can't match until we get stack-regs in ADLC
    case Op_IfFalse:
    case Op_IfTrue:
    case Op_MachProj:
    case Op_MergeMem:
    case Op_Catch:
    case Op_CatchProj:
    case Op_CProj:
    case Op_JumpProj:
    case Op_JProj:
    case Op_NeverBranch:
      set_dontcare(n);
      break;
    case Op_Jump:
      mstack.push(n->in(1), Pre_Visit); // Switch Value (could be shared)
      mstack.push(n->in(0), Pre_Visit); // Visit Control input
      return true;                      // while (mstack.is_nonempty())
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_AryEq:
    case Op_HasNegatives:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_EncodeISOArray:
    case Op_FmaD:
    case Op_FmaF:
    case Op_FmaVD:
    case Op_FmaVF:
    case Op_MacroLogicV:
    case Op_LoadVectorMasked:
    case Op_VectorCmpMasked:
      set_shared(n); // Force result into register (it will be anyways)
      break;
    case Op_ConP: {  // Convert pointers above the centerline to NULL
      TypeNode *tn = n->as_Type(); // Constants derive from type nodes
      const TypePtr* tp = tn->type()->is_ptr();
      if (tp->_ptr == TypePtr::AnyNull) {
        tn->set_type(TypePtr::NULL_PTR);
      }
      break;
    }
    case Op_ConN: {  // Convert narrow pointers above the centerline to NULL
      TypeNode *tn = n->as_Type(); // Constants derive from type nodes
      const TypePtr* tp = tn->type()->make_ptr();
      if (tp && tp->_ptr == TypePtr::AnyNull) {
        tn->set_type(TypeNarrowOop::NULL_PTR);
      }
      break;
    }
    case Op_Binary:  // These are introduced in the Post_Visit state.
      ShouldNotReachHere();
      break;
    case Op_ClearArray:
    case Op_SafePoint:
      mem_op = true;
      break;
    default:
      if( n->is_Store() ) {
        // Do match stores, despite no ideal reg
        mem_op = true;
        break;
      }
      if( n->is_Mem() ) { // Loads and LoadStores
        mem_op = true;
        // Loads must be root of match tree due to prior load conflict
        if( C->subsume_loads() == false )
          set_shared(n);
      }
      // Fall into default case
      if( !n->ideal_reg() )
        set_dontcare(n);  // Unmatchable Nodes
  } // end_switch
  return false;
}

void Matcher::find_shared_post_visit(Node* n, uint opcode) {
  switch(opcode) {       // Handle some opcodes special
    case Op_StorePConditional:
    case Op_StoreIConditional:
    case Op_StoreLConditional:
    case Op_CompareAndExchangeB:
    case Op_CompareAndExchangeS:
    case Op_CompareAndExchangeI:
    case Op_CompareAndExchangeL:
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_WeakCompareAndSwapB:
    case Op_WeakCompareAndSwapS:
    case Op_WeakCompareAndSwapI:
    case Op_WeakCompareAndSwapL:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_CompareAndSwapB:
    case Op_CompareAndSwapS:
    case Op_CompareAndSwapI:
    case Op_CompareAndSwapL:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN: {   // Convert trinary to binary-tree
      Node* newval = n->in(MemNode::ValueIn);
      Node* oldval = n->in(LoadStoreConditionalNode::ExpectedIn);
      Node* pair = new BinaryNode(oldval, newval);
      n->set_req(MemNode::ValueIn, pair);
      n->del_req(LoadStoreConditionalNode::ExpectedIn);
      break;
    }
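    // After this rewrite a CAS has the shape
    //   (CompareAndSwapX mem adr (Binary oldval newval))
    // which fits the matcher's strictly binary match trees.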
    case Op_CMoveD:              // Convert trinary to binary-tree
    case Op_CMoveF:
    case Op_CMoveI:
    case Op_CMoveL:
    case Op_CMoveN:
    case Op_CMoveP:
    case Op_CMoveVF:
    case Op_CMoveVD: {
      // Restructure into a binary tree for Matching.  It's possible that
      // we could move this code up next to the graph reshaping for IfNodes
      // or vice-versa, but I do not want to debug this for Ladybird.
      // 10/2/2000 CNC.
      Node* pair1 = new BinaryNode(n->in(1), n->in(1)->in(1));
      n->set_req(1, pair1);
      Node* pair2 = new BinaryNode(n->in(2), n->in(3));
      n->set_req(2, pair2);
      n->del_req(3);
      break;
    }
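    // The CMove now reads (CMoveX (Binary bol cmp) (Binary src1 src2)):
    // input 1 pairs the Bool with its Cmp, input 2 pairs the two values.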
    case Op_VectorCmpMasked: {
      Node* pair1 = new BinaryNode(n->in(2), n->in(3));
      n->set_req(2, pair1);
      n->del_req(3);
      break;
    }
    case Op_MacroLogicV: {
      Node* pair1 = new BinaryNode(n->in(1), n->in(2));
      Node* pair2 = new BinaryNode(n->in(3), n->in(4));
      n->set_req(1, pair1);
      n->set_req(2, pair2);
      n->del_req(4);
      n->del_req(3);
      break;
    }
    case Op_StoreVectorMasked: {
      Node* pair = new BinaryNode(n->in(3), n->in(4));
      n->set_req(3, pair);
      n->del_req(4);
      break;
    }
    case Op_LoopLimit: {
      Node* pair1 = new BinaryNode(n->in(1), n->in(2));
      n->set_req(1, pair1);
      n->set_req(2, n->in(3));
      n->del_req(3);
      break;
    }
    case Op_StrEquals:
    case Op_StrIndexOfChar: {
      Node* pair1 = new BinaryNode(n->in(2), n->in(3));
      n->set_req(2, pair1);
      n->set_req(3, n->in(4));
      n->del_req(4);
      break;
    }
    case Op_StrComp:
    case Op_StrIndexOf: {
      Node* pair1 = new BinaryNode(n->in(2), n->in(3));
      n->set_req(2, pair1);
      Node* pair2 = new BinaryNode(n->in(4), n->in(5));
      n->set_req(3, pair2);
      n->del_req(5);
      n->del_req(4);
      break;
    }
    case Op_StrCompressedCopy:
    case Op_StrInflatedCopy:
    case Op_EncodeISOArray: {
      // Restructure into a binary tree for Matching.
      Node* pair = new BinaryNode(n->in(3), n->in(4));
      n->set_req(3, pair);
      n->del_req(4);
      break;
    }
    case Op_FmaD:
    case Op_FmaF:
    case Op_FmaVD:
    case Op_FmaVF: {
      // Restructure into a binary tree for Matching.
      Node* pair = new BinaryNode(n->in(1), n->in(2));
      n->set_req(2, pair);
      n->set_req(1, n->in(3));
      n->del_req(3);
      break;
    }
    case Op_MulAddS2I: {
      Node* pair1 = new BinaryNode(n->in(1), n->in(2));
      Node* pair2 = new BinaryNode(n->in(3), n->in(4));
      n->set_req(1, pair1);
      n->set_req(2, pair2);
      n->del_req(4);
      n->del_req(3);
      break;
    }
    case Op_CopySignD:
    case Op_SignumF:
    case Op_SignumD: {
      Node* pair = new BinaryNode(n->in(2), n->in(3));
      n->set_req(2, pair);
      n->del_req(3);
      break;
    }
    case Op_VectorBlend:
    case Op_VectorInsert: {
      Node* pair = new BinaryNode(n->in(1), n->in(2));
      n->set_req(1, pair);
      n->set_req(2, n->in(3));
      n->del_req(3);
      break;
    }
    case Op_StoreVectorScatter: {
      Node* pair = new BinaryNode(n->in(MemNode::ValueIn), n->in(MemNode::ValueIn+1));
      n->set_req(MemNode::ValueIn, pair);
      n->del_req(MemNode::ValueIn+1);
      break;
    }
    case Op_VectorMaskCmp: {
      n->set_req(1, new BinaryNode(n->in(1), n->in(2)));
      n->set_req(2, n->in(3));
      n->del_req(3);
      break;
    }
    default:
      break;
  }
}

#ifndef PRODUCT
void Matcher::record_new2old(Node* newn, Node* old) {
  _new2old_map.map(newn->_idx, old);
  if (!_reused.test_set(old->_igv_idx)) {
    // Reuse the Ideal-level IGV identifier so that the node can be tracked
    // across matching.  If there are multiple machine nodes expanded from the
    // same Ideal node, only one will reuse its IGV identifier.
    newn->_igv_idx = old->_igv_idx;
  }
}

// machine-independent root to machine-dependent root
void Matcher::dump_old2new_map() {
  _old2new_map.dump();
}
#endif // !PRODUCT

//---------------------------collect_null_checks-------------------------------
// Find null checks in the ideal graph; write a machine-specific node for
// it.  Used by later implicit-null-check handling.  Actually collects
// either an IfTrue or IfFalse for the common NOT-null path, AND the ideal
// value being tested.
void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) {
  Node *iff = proj->in(0);
  if( iff->Opcode() == Op_If ) {
    // During matching If's have Bool & Cmp side-by-side
    BoolNode *b = iff->in(1)->as_Bool();
    Node *cmp = iff->in(2);
    int opc = cmp->Opcode();
    if (opc != Op_CmpP && opc != Op_CmpN) return;

    const Type* ct = cmp->in(2)->bottom_type();
    if (ct == TypePtr::NULL_PTR ||
        (opc == Op_CmpN && ct == TypeNarrowOop::NULL_PTR)) {

      bool push_it = false;
      if( proj->Opcode() == Op_IfTrue ) {
#ifndef PRODUCT
        extern int all_null_checks_found;
        all_null_checks_found++;
#endif
        if( b->_test._test == BoolTest::ne ) {
          push_it = true;
        }
      } else {
        assert( proj->Opcode() == Op_IfFalse, "" );
        if( b->_test._test == BoolTest::eq ) {
          push_it = true;
        }
      }
      if( push_it ) {
        _null_check_tests.push(proj);
        Node* val = cmp->in(1);
#ifdef _LP64
        if (val->bottom_type()->isa_narrowoop() &&
            !Matcher::narrow_oop_use_complex_address()) {
          //
          // Look for DecodeN node which should be pinned to orig_proj.
          // On platforms (Sparc) which cannot handle 2 adds
          // in addressing mode we have to keep a DecodeN node and
          // use it to do implicit NULL check in address.
          //
          // DecodeN node was pinned to non-null path (orig_proj) during
          // CastPP transformation in final_graph_reshaping_impl().
          //
          uint cnt = orig_proj->outcnt();
          for (uint i = 0; i < orig_proj->outcnt(); i++) {
            Node* d = orig_proj->raw_out(i);
            if (d->is_DecodeN() && d->in(1) == val) {
              val = d;
              val->set_req(0, NULL); // Unpin now.
              // Mark this as special case to distinguish from
              // a regular case: CmpP(DecodeN, NULL).
              val = (Node*)(((intptr_t)val) | 1);
              break;
            }
          }
        }
#endif
        _null_check_tests.push(val);
      }
    }
  }
}

//---------------------------validate_null_checks------------------------------
// It's possible that the value being NULL checked is not the root of a match
// tree.  If so, I cannot use the value in an implicit null check.
void Matcher::validate_null_checks( ) {
  uint cnt = _null_check_tests.size();
  for( uint i=0; i < cnt; i+=2 ) {
    Node *test = _null_check_tests[i];
    Node *val = _null_check_tests[i+1];
    bool is_decoden = ((intptr_t)val) & 1;
    val = (Node*)(((intptr_t)val) & ~1);
    if (has_new_node(val)) {
      Node* new_val = new_node(val);
      if (is_decoden) {
        assert(val->is_DecodeNarrowPtr() && val->in(0) == NULL, "sanity");
        // Note: new_val may have a control edge if
        // the original ideal node DecodeN was matched before
        // it was unpinned in Matcher::collect_null_checks().
        // Unpin the mach node and mark it.
        new_val->set_req(0, NULL);
        new_val = (Node*)(((intptr_t)new_val) | 1);
      }
      // Is a match-tree root, so replace with the matched value
      _null_check_tests.map(i+1, new_val);
    } else {
      // Yank from candidate list
      _null_check_tests.map(i+1,_null_check_tests[--cnt]);
      _null_check_tests.map(i,_null_check_tests[--cnt]);
      _null_check_tests.pop();
      _null_check_tests.pop();
      i-=2;
    }
  }
}

bool Matcher::gen_narrow_oop_implicit_null_checks() {
  // Advise the matcher to perform null checks on the narrow oop side.
  // Implicit checks are not possible on the uncompressed oop side anyway
  // (at least not for read accesses).
  // Performs significantly better (especially on Power 6).
  if (!os::zero_page_read_protected()) {
    return true;
  }
  return CompressedOops::use_implicit_null_checks() &&
         (narrow_oop_use_complex_address() ||
          CompressedOops::base() != NULL);
}

// Compute RegMask for an ideal register.
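// The trick: synthesize a dummy spill-load of the right type, run it through
// match_tree(), and read the register mask off the resulting MachNode's
// output.  This keeps the answer in sync with whatever the AD file says.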
const RegMask* Matcher::regmask_for_ideal_register(uint ideal_reg, Node* ret) {
  const Type* t = Type::mreg2type[ideal_reg];
  if (t == NULL) {
    assert(ideal_reg >= Op_VecA && ideal_reg <= Op_VecZ, "not a vector: %d", ideal_reg);
    return NULL; // not supported
  }
  Node* fp  = ret->in(TypeFunc::FramePtr);
  Node* mem = ret->in(TypeFunc::Memory);
  const TypePtr* atp = TypePtr::BOTTOM;
  MemNode::MemOrd mo = MemNode::unordered;

  Node* spill;
  switch (ideal_reg) {
    case Op_RegN: spill = new LoadNNode(NULL, mem, fp, atp, t->is_narrowoop(), mo); break;
    case Op_RegI: spill = new LoadINode(NULL, mem, fp, atp, t->is_int(), mo); break;
    case Op_RegP: spill = new LoadPNode(NULL, mem, fp, atp, t->is_ptr(), mo); break;
    case Op_RegF: spill = new LoadFNode(NULL, mem, fp, atp, t, mo); break;
    case Op_RegD: spill = new LoadDNode(NULL, mem, fp, atp, t, mo); break;
    case Op_RegL: spill = new LoadLNode(NULL, mem, fp, atp, t->is_long(), mo); break;

    case Op_VecA: // fall-through
    case Op_VecS: // fall-through
    case Op_VecD: // fall-through
    case Op_VecX: // fall-through
    case Op_VecY: // fall-through
    case Op_VecZ: spill = new LoadVectorNode(NULL, mem, fp, atp, t->is_vect()); break;
    case Op_RegVectMask: return Matcher::predicate_reg_mask();

    default: ShouldNotReachHere();
  }
  MachNode* mspill = match_tree(spill);
  assert(mspill != NULL, "matching failed: %d", ideal_reg);
  // Handle generic vector operand case
  if (Matcher::supports_generic_vector_operands && t->isa_vect()) {
    specialize_mach_node(mspill);
  }
  return &mspill->out_RegMask();
}

// Process Mach IR right after selection phase is over.
void Matcher::do_postselect_cleanup() {
  if (supports_generic_vector_operands) {
    specialize_generic_vector_operands();
    if (C->failing()) return;
  }
}

//----------------------------------------------------------------------
// Generic machine operands elision.
//----------------------------------------------------------------------

// Compute concrete vector operand for a generic TEMP vector mach node based on its user info.
void Matcher::specialize_temp_node(MachTempNode* tmp, MachNode* use, uint idx) {
  assert(use->in(idx) == tmp, "not a user");
  assert(!Matcher::is_generic_vector(use->_opnds[0]), "use not processed yet");

  if ((uint)idx == use->two_adr()) { // DEF_TEMP case
    tmp->_opnds[0] = use->_opnds[0]->clone();
  } else {
    uint ideal_vreg = vector_ideal_reg(C->max_vector_size());
    tmp->_opnds[0] = Matcher::pd_specialize_generic_vector_operand(tmp->_opnds[0], ideal_vreg, true /*is_temp*/);
  }
}

// Compute concrete vector operand for a generic DEF/USE vector operand (of mach node m at index idx).
MachOper* Matcher::specialize_vector_operand(MachNode* m, uint opnd_idx) {
  assert(Matcher::is_generic_vector(m->_opnds[opnd_idx]), "repeated updates");
  Node* def = NULL;
  if (opnd_idx == 0) { // DEF
    def = m; // use mach node itself to compute vector operand type
  } else {
    int base_idx = m->operand_index(opnd_idx);
    def = m->in(base_idx);
    if (def->is_Mach()) {
      if (def->is_MachTemp() && Matcher::is_generic_vector(def->as_Mach()->_opnds[0])) {
        specialize_temp_node(def->as_MachTemp(), m, base_idx); // MachTemp node use site
      } else if (is_generic_reg2reg_move(def->as_Mach())) {
        def = def->in(1); // skip over generic reg-to-reg moves
      }
    }
  }
  assert(def->bottom_type()->isa_vect(), "not a vector");
  uint ideal_vreg = def->bottom_type()->ideal_reg();
  return Matcher::pd_specialize_generic_vector_operand(m->_opnds[opnd_idx], ideal_vreg, false /*is_temp*/);
}

void Matcher::specialize_mach_node(MachNode* m) {
  assert(!m->is_MachTemp(), "processed along with its user");
  // For generic use operands pull specific register class operands from
  // its def instruction's output operand (def operand).
  for (uint i = 0; i < m->num_opnds(); i++) {
    if (Matcher::is_generic_vector(m->_opnds[i])) {
      m->_opnds[i] = specialize_vector_operand(m, i);
    }
  }
}

// Replace generic vector operands with concrete vector operands and eliminate generic reg-to-reg moves from the graph.
void Matcher::specialize_generic_vector_operands() {
  assert(supports_generic_vector_operands, "sanity");
  ResourceMark rm;

  if (C->max_vector_size() == 0) {
    return; // no vector instructions or operands
  }
  // Replace generic vector operands (vec/legVec) with concrete ones (vec[SDXYZ]/legVec[SDXYZ])
  // and remove reg-to-reg vector moves (MoveVec2Leg and MoveLeg2Vec).
  Unique_Node_List live_nodes;
  C->identify_useful_nodes(live_nodes);

  while (live_nodes.size() > 0) {
    MachNode* m = live_nodes.pop()->isa_Mach();
    if (m != NULL) {
      if (Matcher::is_generic_reg2reg_move(m)) {
        // Register allocator properly handles vec <=> leg moves using register masks.
        int opnd_idx = m->operand_index(1);
        Node* def = m->in(opnd_idx);
        m->subsume_by(def, C);
      } else if (m->is_MachTemp()) {
        // process MachTemp nodes at use site (see Matcher::specialize_vector_operand)
      } else {
        specialize_mach_node(m);
      }
    }
  }
}
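
// On x86, for example, a generic 'vec' operand on a node producing a
// 128-bit value is rewritten to the concrete 'vecX' operand here, so the
// register allocator sees the exact register class it needs.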

#ifdef ASSERT
bool Matcher::verify_after_postselect_cleanup() {
  assert(!C->failing(), "sanity");
  if (supports_generic_vector_operands) {
    Unique_Node_List useful;
    C->identify_useful_nodes(useful);
    for (uint i = 0; i < useful.size(); i++) {
      MachNode* m = useful.at(i)->isa_Mach();
      if (m != NULL) {
        assert(!Matcher::is_generic_reg2reg_move(m), "no MoveVec nodes allowed");
        for (uint j = 0; j < m->num_opnds(); j++) {
          assert(!Matcher::is_generic_vector(m->_opnds[j]), "no generic vector operands allowed");
        }
      }
    }
  }
  return true;
}
#endif // ASSERT

// Used by the DFA in dfa_xxx.cpp.  Check for a following barrier or
// atomic instruction acting as a store_load barrier without any
// intervening volatile load; in that case we don't need a barrier here.
// We retain the Node to act as a compiler ordering barrier.
bool Matcher::post_store_load_barrier(const Node* vmb) {
  Compile* C = Compile::current();
  assert(vmb->is_MemBar(), "");
  assert(vmb->Opcode() != Op_MemBarAcquire && vmb->Opcode() != Op_LoadFence, "");
  const MemBarNode* membar = vmb->as_MemBar();

  // Get the Ideal Proj node, ctrl, that can be used to iterate forward
  Node* ctrl = NULL;
  for (DUIterator_Fast imax, i = membar->fast_outs(imax); i < imax; i++) {
    Node* p = membar->fast_out(i);
    assert(p->is_Proj(), "only projections here");
    if ((p->as_Proj()->_con == TypeFunc::Control) &&
        !C->node_arena()->contains(p)) { // Unmatched old-space only
      ctrl = p;
      break;
    }
  }
  assert((ctrl != NULL), "missing control projection");

  for (DUIterator_Fast jmax, j = ctrl->fast_outs(jmax); j < jmax; j++) {
    Node *x = ctrl->fast_out(j);
    int xop = x->Opcode();

    // We don't need current barrier if we see another or a lock
    // before seeing volatile load.
    //
    // Op_Fastunlock previously appeared in the Op_* list below.
    // With the advent of 1-0 lock operations we're no longer guaranteed
    // that a monitor exit operation contains a serializing instruction.

    if (xop == Op_MemBarVolatile ||
        xop == Op_CompareAndExchangeB ||
        xop == Op_CompareAndExchangeS ||
        xop == Op_CompareAndExchangeI ||
        xop == Op_CompareAndExchangeL ||
        xop == Op_CompareAndExchangeP ||
        xop == Op_CompareAndExchangeN ||
        xop == Op_WeakCompareAndSwapB ||
        xop == Op_WeakCompareAndSwapS ||
        xop == Op_WeakCompareAndSwapL ||
        xop == Op_WeakCompareAndSwapP ||
        xop == Op_WeakCompareAndSwapN ||
        xop == Op_WeakCompareAndSwapI ||
        xop == Op_CompareAndSwapB ||
        xop == Op_CompareAndSwapS ||
        xop == Op_CompareAndSwapL ||
        xop == Op_CompareAndSwapP ||
        xop == Op_CompareAndSwapN ||
        xop == Op_CompareAndSwapI ||
        BarrierSet::barrier_set()->barrier_set_c2()->matcher_is_store_load_barrier(x, xop)) {
      return true;
    }

    // Op_FastLock previously appeared in the Op_* list above.
    // With biased locking we're no longer guaranteed that a monitor
    // enter operation contains a serializing instruction.
    if ((xop == Op_FastLock) && !UseBiasedLocking) {
      return true;
    }

    if (x->is_MemBar()) {
      // We must retain this membar if there is an upcoming volatile
      // load, which will be followed by acquire membar.
      if (xop == Op_MemBarAcquire || xop == Op_LoadFence) {
        return false;
      } else {
        // For other kinds of barriers, check by pretending we
        // are them, and seeing if we can be removed.
        return post_store_load_barrier(x->as_MemBar());
      }
    }

    // probably not necessary to check for these
    if (x->is_Call() || x->is_SafePoint() || x->is_block_proj()) {
      return false;
    }
  }
  return false;
}
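
// Typical payoff (illustrative): the MemBarVolatile emitted after a volatile
// store can be dropped when the next operation on the control path is a
// CompareAndSwap, since the atomic already provides the store-load fence.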

// Check whether node n is a branch to an uncommon trap that we could
// optimize as test with very high branch costs in case of going to
// the uncommon trap.  The code must be able to be recompiled to use
// a cheaper test.
bool Matcher::branches_to_uncommon_trap(const Node *n) {
  // Don't do it for natives, adapters, or runtime stubs
  Compile *C = Compile::current();
  if (!C->is_method_compilation()) return false;

  assert(n->is_If(), "You should only call this on if nodes.");
  IfNode *ifn = n->as_If();

  Node *ifFalse = NULL;
  for (DUIterator_Fast imax, i = ifn->fast_outs(imax); i < imax; i++) {
    if (ifn->fast_out(i)->is_IfFalse()) {
      ifFalse = ifn->fast_out(i);
      break;
    }
  }
  assert(ifFalse, "An If should have an ifFalse. Graph is broken.");

  Node *reg = ifFalse;
  int cnt = 4; // We must protect against cycles.  Limit to 4 iterations.
               // Alternatively use visited set?  Seems too expensive.
  while (reg != NULL && cnt > 0) {
    CallNode *call = NULL;
    RegionNode *nxt_reg = NULL;
    for (DUIterator_Fast imax, i = reg->fast_outs(imax); i < imax; i++) {
      Node *o = reg->fast_out(i);
      if (o->is_Call()) {
        call = o->as_Call();
      }
      if (o->is_Region()) {
        nxt_reg = o->as_Region();
      }
    }

    if (call &&
        call->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point()) {
      const Type* trtype = call->in(TypeFunc::Parms)->bottom_type();
      if (trtype->isa_int() && trtype->is_int()->is_con()) {
        jint tr_con = trtype->is_int()->get_con();
        Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(tr_con);
        Deoptimization::DeoptAction action = Deoptimization::trap_request_action(tr_con);
        assert((int)reason < (int)BitsPerInt, "recode bit map");

        if (is_set_nth_bit(C->allowed_deopt_reasons(), (int)reason)
            && action != Deoptimization::Action_none) {
          // This uncommon trap is sure to recompile, eventually.
          // When that happens, C->too_many_traps will prevent
          // this transformation from happening again.
          return true;
        }
      }
    }

    reg = nxt_reg;
    cnt--;
  }

  return false;
}

//=============================================================================
//---------------------------State---------------------------------------------
State::State(void) : _rule() {
#ifdef ASSERT
  _id = 0;
  _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
  _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
#endif
}

#ifdef ASSERT
State::~State() {
  _id = 99;
  _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
  _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
  memset(_cost, -3, sizeof(_cost));
  memset(_rule, -3, sizeof(_rule));
}
#endif

#ifndef PRODUCT
//---------------------------dump----------------------------------------------
void State::dump() {
  tty->print("\n");
  dump(0);
}

void State::dump(int depth) {
  for (int j = 0; j < depth; j++) {
    tty->print(" ");
  }
  tty->print("--N: ");
  _leaf->dump();
  uint i;
  for (i = 0; i < _LAST_MACH_OPER; i++) {
    // Check for valid entry
    if (valid(i)) {
      for (int j = 0; j < depth; j++) {
        tty->print(" ");
      }
      assert(cost(i) != max_juint, "cost must be a valid value");
      assert(rule(i) < _last_Mach_Node, "rule[i] must be valid rule");
      tty->print_cr("%s %d %s",
                    ruleName[i], cost(i), ruleName[rule(i)] );
    }
  }
  tty->cr();

  for (i = 0; i < 2; i++) {
    if (_kids[i]) {
      _kids[i]->dump(depth + 1);
    }
  }
}
#endif