1 /*
2  * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
3  * Copyright (c) 2012, 2019, SAP SE. All rights reserved.
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This code is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License version 2 only, as
8  * published by the Free Software Foundation.
9  *
10  * This code is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
13  * version 2 for more details (a copy is included in the LICENSE file that
14  * accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License version
17  * 2 along with this work; if not, write to the Free Software Foundation,
18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19  *
20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21  * or visit www.oracle.com if you need additional information or have any
22  * questions.
23  *
24  */
25 
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "gc/shared/barrierSet.hpp"
29 #include "gc/shared/barrierSetAssembler.hpp"
30 #include "interpreter/interpreter.hpp"
31 #include "nativeInst_ppc.hpp"
32 #include "oops/instanceOop.hpp"
33 #include "oops/method.hpp"
34 #include "oops/objArrayKlass.hpp"
35 #include "oops/oop.inline.hpp"
36 #include "prims/methodHandles.hpp"
37 #include "runtime/frame.inline.hpp"
38 #include "runtime/handles.inline.hpp"
39 #include "runtime/sharedRuntime.hpp"
40 #include "runtime/stubCodeGenerator.hpp"
41 #include "runtime/stubRoutines.hpp"
42 #include "runtime/thread.inline.hpp"
43 #include "utilities/align.hpp"
44 
45 // Declaration and definition of StubGenerator (no .hpp file).
46 // For a more detailed description of the stub routine structure
47 // see the comment in stubRoutines.hpp.
48 
49 #define __ _masm->
50 
51 #ifdef PRODUCT
52 #define BLOCK_COMMENT(str) // nothing
53 #else
54 #define BLOCK_COMMENT(str) __ block_comment(str)
55 #endif
56 
57 #if defined(ABI_ELFv2)
58 #define STUB_ENTRY(name) StubRoutines::name()
59 #else
60 #define STUB_ENTRY(name) ((FunctionDescriptor*)StubRoutines::name())->entry()
61 #endif
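// Note: on the 64-bit ELFv1 (big-endian) ABI a "function address" actually points to a
// FunctionDescriptor (entry point, TOC pointer, environment pointer), so STUB_ENTRY must
// dereference the descriptor to obtain the real code entry; ELFv2 uses plain entry
// addresses. Illustrative sketch of the ELFv1 view (field layout shown from memory, the
// authoritative type is defined elsewhere in the PPC port):
//
//   // roughly: struct FunctionDescriptor { address _entry; address _toc; address _env; };
//   address code_entry = ((FunctionDescriptor*)StubRoutines::name())->entry();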
62 
63 class StubGenerator: public StubCodeGenerator {
64  private:
65 
66   // Call stubs are used to call Java from C
67   //
68   // Arguments:
69   //
70   //   R3  - call wrapper address     : address
71   //   R4  - result                   : intptr_t*
72   //   R5  - result type              : BasicType
73   //   R6  - method                   : Method
74   //   R7  - frame mgr entry point    : address
75   //   R8  - parameter block          : intptr_t*
76   //   R9  - parameter count in words : int
77   //   R10 - thread                   : Thread*
78   //
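  // Viewed from C++, the generated stub is invoked through a function pointer whose
  // signature corresponds to the register assignment above, roughly (an illustrative
  // sketch; the authoritative typedef lives with the call_stub declaration):
  //
  //   void call_stub(address call_wrapper, intptr_t* result, BasicType result_type,
  //                  Method* method, address entry_point, intptr_t* parameters,
  //                  int parameter_words, Thread* thread);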
79   address generate_call_stub(address& return_address) {
80     // Set up a new C frame, copy the Java arguments, call the frame manager or
81     // native_entry, and process the result.
82 
83     StubCodeMark mark(this, "StubRoutines", "call_stub");
84 
85     address start = __ function_entry();
86 
87     // some sanity checks
88     assert((sizeof(frame::abi_minframe) % 16) == 0,           "unaligned");
89     assert((sizeof(frame::abi_reg_args) % 16) == 0,           "unaligned");
90     assert((sizeof(frame::spill_nonvolatiles) % 16) == 0,     "unaligned");
91     assert((sizeof(frame::parent_ijava_frame_abi) % 16) == 0, "unaligned");
92     assert((sizeof(frame::entry_frame_locals) % 16) == 0,     "unaligned");
93 
94     Register r_arg_call_wrapper_addr        = R3;
95     Register r_arg_result_addr              = R4;
96     Register r_arg_result_type              = R5;
97     Register r_arg_method                   = R6;
98     Register r_arg_entry                    = R7;
99     Register r_arg_thread                   = R10;
100 
101     Register r_temp                         = R24;
102     Register r_top_of_arguments_addr        = R25;
103     Register r_entryframe_fp                = R26;
104 
105     {
106       // Stack on entry to call_stub:
107       //
108       //      F1      [C_FRAME]
109       //              ...
110 
111       Register r_arg_argument_addr          = R8;
112       Register r_arg_argument_count         = R9;
113       Register r_frame_alignment_in_bytes   = R27;
114       Register r_argument_addr              = R28;
115       Register r_argumentcopy_addr          = R29;
116       Register r_argument_size_in_bytes     = R30;
117       Register r_frame_size                 = R23;
118 
119       Label arguments_copied;
120 
121       // Save LR/CR to caller's C_FRAME.
122       __ save_LR_CR(R0);
123 
124       // Zero extend arg_argument_count.
125       __ clrldi(r_arg_argument_count, r_arg_argument_count, 32);
126 
127       // Save non-volatile GPRs to ENTRY_FRAME (not yet pushed, but it's safe).
128       __ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
129 
130       // Keep copy of our frame pointer (caller's SP).
131       __ mr(r_entryframe_fp, R1_SP);
132 
133       BLOCK_COMMENT("Push ENTRY_FRAME including arguments");
134       // Push ENTRY_FRAME including arguments:
135       //
136       //      F0      [TOP_IJAVA_FRAME_ABI]
137       //              alignment (optional)
138       //              [outgoing Java arguments]
139       //              [ENTRY_FRAME_LOCALS]
140       //      F1      [C_FRAME]
141       //              ...
142 
143       // calculate frame size
144 
145       // unaligned size of arguments
146       __ sldi(r_argument_size_in_bytes,
147                   r_arg_argument_count, Interpreter::logStackElementSize);
148       // arguments alignment (max 1 slot)
149       // FIXME: use round_to() here
150       __ andi_(r_frame_alignment_in_bytes, r_arg_argument_count, 1);
151       __ sldi(r_frame_alignment_in_bytes,
152               r_frame_alignment_in_bytes, Interpreter::logStackElementSize);
153 
154       // size = unaligned size of arguments + top abi's size
155       __ addi(r_frame_size, r_argument_size_in_bytes,
156               frame::top_ijava_frame_abi_size);
157       // size += arguments alignment
158       __ add(r_frame_size,
159              r_frame_size, r_frame_alignment_in_bytes);
160       // size += size of call_stub locals
161       __ addi(r_frame_size,
162               r_frame_size, frame::entry_frame_locals_size);
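      // In effect (with n = number of argument words):
      //   frame_size = top_ijava_frame_abi_size + 8*n + 8*(n & 1) + entry_frame_locals_size
      // where the 8*(n & 1) term pads the argument area up to a 16-byte multiple.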
163 
164       // push ENTRY_FRAME
165       __ push_frame(r_frame_size, r_temp);
166 
167       // initialize call_stub locals (step 1)
168       __ std(r_arg_call_wrapper_addr,
169              _entry_frame_locals_neg(call_wrapper_address), r_entryframe_fp);
170       __ std(r_arg_result_addr,
171              _entry_frame_locals_neg(result_address), r_entryframe_fp);
172       __ std(r_arg_result_type,
173              _entry_frame_locals_neg(result_type), r_entryframe_fp);
174       // we will save arguments_tos_address later
175 
176 
177       BLOCK_COMMENT("Copy Java arguments");
178       // copy Java arguments
179 
180       // Calculate top_of_arguments_addr which will be R17_tos (not prepushed) later.
181       // FIXME: why not simply use SP+frame::top_ijava_frame_size?
182       __ addi(r_top_of_arguments_addr,
183               R1_SP, frame::top_ijava_frame_abi_size);
184       __ add(r_top_of_arguments_addr,
185              r_top_of_arguments_addr, r_frame_alignment_in_bytes);
186 
187       // any arguments to copy?
188       __ cmpdi(CCR0, r_arg_argument_count, 0);
189       __ beq(CCR0, arguments_copied);
190 
191       // prepare loop and copy arguments in reverse order
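      // Roughly equivalent C (illustrative): incoming arguments are read from the last
      // one downwards while the outgoing slots are filled upwards, i.e.
      //   for (int i = 0; i < n; i++) outgoing[i] = incoming[n - 1 - i];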
192       {
193         // init CTR with arg_argument_count
194         __ mtctr(r_arg_argument_count);
195 
196         // let r_argumentcopy_addr point to the last outgoing Java argument
197         __ mr(r_argumentcopy_addr, r_top_of_arguments_addr);
198 
199         // let r_argument_addr point to the last incoming Java argument
200         __ add(r_argument_addr,
201                    r_arg_argument_addr, r_argument_size_in_bytes);
202         __ addi(r_argument_addr, r_argument_addr, -BytesPerWord);
203 
204         // now loop while CTR > 0 and copy arguments
205         {
206           Label next_argument;
207           __ bind(next_argument);
208 
209           __ ld(r_temp, 0, r_argument_addr);
210           // argument_addr--;
211           __ addi(r_argument_addr, r_argument_addr, -BytesPerWord);
212           __ std(r_temp, 0, r_argumentcopy_addr);
213           // argumentcopy_addr++;
214           __ addi(r_argumentcopy_addr, r_argumentcopy_addr, BytesPerWord);
215 
216           __ bdnz(next_argument);
217         }
218       }
219 
220       // Arguments copied, continue.
221       __ bind(arguments_copied);
222     }
223 
224     {
225       BLOCK_COMMENT("Call frame manager or native entry.");
226       // Call frame manager or native entry.
227       Register r_new_arg_entry = R14;
228       assert_different_registers(r_new_arg_entry, r_top_of_arguments_addr,
229                                  r_arg_method, r_arg_thread);
230 
231       __ mr(r_new_arg_entry, r_arg_entry);
232 
233       // Register state on entry to frame manager / native entry:
234       //
235       //   tos         -  intptr_t*    sender tos (prepushed) Lesp = (SP) + copied_arguments_offset - 8
236       //   R19_method  -  Method
237       //   R16_thread  -  JavaThread*
238 
239       // Tos must point to last argument - element_size.
240       const Register tos = R15_esp;
241 
242       __ addi(tos, r_top_of_arguments_addr, -Interpreter::stackElementSize);
243 
244       // initialize call_stub locals (step 2)
245       // now save tos as arguments_tos_address
246       __ std(tos, _entry_frame_locals_neg(arguments_tos_address), r_entryframe_fp);
247 
248       // load argument registers for call
249       __ mr(R19_method, r_arg_method);
250       __ mr(R16_thread, r_arg_thread);
251       assert(tos != r_arg_method, "trashed r_arg_method");
252       assert(tos != r_arg_thread && R19_method != r_arg_thread, "trashed r_arg_thread");
253 
254       // Load R25_templateTableBase (interpreter dispatch table) to simplify checks in the callee.
255       __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
256       // Stack on entry to frame manager / native entry:
257       //
258       //      F0      [TOP_IJAVA_FRAME_ABI]
259       //              alignment (optional)
260       //              [outgoing Java arguments]
261       //              [ENTRY_FRAME_LOCALS]
262       //      F1      [C_FRAME]
263       //              ...
264       //
265 
266       // global toc register
267       __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R11_scratch1);
268       // Remember the senderSP so the interpreter can pop c2i arguments off the stack
269       // when called via a c2i adapter.
270 
271       // Pass initial_caller_sp to framemanager.
272       __ mr(R21_sender_SP, R1_SP);
273 
274       // Do a light-weight C-call here, r_new_arg_entry holds the address
275       // of the interpreter entry point (frame manager or native entry)
276       // and save runtime-value of LR in return_address.
277       assert(r_new_arg_entry != tos && r_new_arg_entry != R19_method && r_new_arg_entry != R16_thread,
278              "trashed r_new_arg_entry");
279       return_address = __ call_stub(r_new_arg_entry);
280     }
281 
282     {
283       BLOCK_COMMENT("Returned from frame manager or native entry.");
284       // Returned from frame manager or native entry.
285       // Now pop frame, process result, and return to caller.
286 
287       // Stack on exit from frame manager / native entry:
288       //
289       //      F0      [ABI]
290       //              ...
291       //              [ENTRY_FRAME_LOCALS]
292       //      F1      [C_FRAME]
293       //              ...
294       //
295       // Just pop the topmost frame ...
296       //
297 
298       Label ret_is_object;
299       Label ret_is_long;
300       Label ret_is_float;
301       Label ret_is_double;
302 
303       Register r_entryframe_fp = R30;
304       Register r_lr            = R7_ARG5;
305       Register r_cr            = R8_ARG6;
306 
307       // Reload some volatile registers which we've spilled before the call
308       // to frame manager / native entry.
309       // Access all locals via frame pointer, because we know nothing about
310       // the topmost frame's size.
311       __ ld(r_entryframe_fp, _abi(callers_sp), R1_SP);
312       assert_different_registers(r_entryframe_fp, R3_RET, r_arg_result_addr, r_arg_result_type, r_cr, r_lr);
313       __ ld(r_arg_result_addr,
314             _entry_frame_locals_neg(result_address), r_entryframe_fp);
315       __ ld(r_arg_result_type,
316             _entry_frame_locals_neg(result_type), r_entryframe_fp);
317       __ ld(r_cr, _abi(cr), r_entryframe_fp);
318       __ ld(r_lr, _abi(lr), r_entryframe_fp);
319 
320       // pop frame and restore non-volatiles, LR and CR
321       __ mr(R1_SP, r_entryframe_fp);
322       __ mtcr(r_cr);
323       __ mtlr(r_lr);
324 
325       // Store result depending on type. Everything that is not
326       // T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE is treated as T_INT.
327       __ cmpwi(CCR0, r_arg_result_type, T_OBJECT);
328       __ cmpwi(CCR1, r_arg_result_type, T_LONG);
329       __ cmpwi(CCR5, r_arg_result_type, T_FLOAT);
330       __ cmpwi(CCR6, r_arg_result_type, T_DOUBLE);
331 
332       // restore non-volatile registers
333       __ restore_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
334 
335 
336       // Stack on exit from call_stub:
337       //
338       //      0       [C_FRAME]
339       //              ...
340       //
341       //  no call_stub frames left.
342 
343       // All non-volatiles have been restored at this point!!
344       assert(R3_RET == R3, "R3_RET should be R3");
345 
346       __ beq(CCR0, ret_is_object);
347       __ beq(CCR1, ret_is_long);
348       __ beq(CCR5, ret_is_float);
349       __ beq(CCR6, ret_is_double);
350 
351       // default:
352       __ stw(R3_RET, 0, r_arg_result_addr);
353       __ blr(); // return to caller
354 
355       // case T_OBJECT:
356       __ bind(ret_is_object);
357       __ std(R3_RET, 0, r_arg_result_addr);
358       __ blr(); // return to caller
359 
360       // case T_LONG:
361       __ bind(ret_is_long);
362       __ std(R3_RET, 0, r_arg_result_addr);
363       __ blr(); // return to caller
364 
365       // case T_FLOAT:
366       __ bind(ret_is_float);
367       __ stfs(F1_RET, 0, r_arg_result_addr);
368       __ blr(); // return to caller
369 
370       // case T_DOUBLE:
371       __ bind(ret_is_double);
372       __ stfd(F1_RET, 0, r_arg_result_addr);
373       __ blr(); // return to caller
374     }
375 
376     return start;
377   }
378 
379   // Return point for a Java call if there's an exception thrown in
380   // Java code.  The exception is caught and transformed into a
381   // pending exception stored in JavaThread that can be tested from
382   // within the VM.
383   //
384   address generate_catch_exception() {
385     StubCodeMark mark(this, "StubRoutines", "catch_exception");
386 
387     address start = __ pc();
388 
389     // Registers alive
390     //
391     //  R16_thread
392     //  R3_ARG1 - address of pending exception
393     //  R4_ARG2 - return address in call stub
394 
395     const Register exception_file = R21_tmp1;
396     const Register exception_line = R22_tmp2;
397 
398     __ load_const(exception_file, (void*)__FILE__);
399     __ load_const(exception_line, (void*)__LINE__);
400 
401     __ std(R3_ARG1, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
402     // store into `char *'
403     __ std(exception_file, in_bytes(JavaThread::exception_file_offset()), R16_thread);
404     // store into `int'
405     __ stw(exception_line, in_bytes(JavaThread::exception_line_offset()), R16_thread);
406 
407     // complete return to VM
408     assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");
409 
410     __ mtlr(R4_ARG2);
411     // continue in call stub
412     __ blr();
413 
414     return start;
415   }
416 
417   // Continuation point for runtime calls returning with a pending
418   // exception.  The pending exception check happened in the runtime
419   // or native call stub.  The pending exception in Thread is
420   // converted into a Java-level exception.
421   //
422   // Read:
423   //
424   //   LR:     The pc the runtime library callee wants to return to.
425   //           Since the exception occurred in the callee, the return pc
426   //           from the point of view of Java is the exception pc.
427   //   thread: Needed for method handles.
428   //
429   // Invalidate:
430   //
431   //   volatile registers (except below).
432   //
433   // Update:
434   //
435   //   R4_ARG2: exception
436   //
437   // (LR is unchanged and is live out).
438   //
439   address generate_forward_exception() {
440     StubCodeMark mark(this, "StubRoutines", "forward_exception");
441     address start = __ pc();
442 
443 #if !defined(PRODUCT)
444     if (VerifyOops) {
445       // Get pending exception oop.
446       __ ld(R3_ARG1,
447                 in_bytes(Thread::pending_exception_offset()),
448                 R16_thread);
449       // Make sure that this code is only executed if there is a pending exception.
450       {
451         Label L;
452         __ cmpdi(CCR0, R3_ARG1, 0);
453         __ bne(CCR0, L);
454         __ stop("StubRoutines::forward exception: no pending exception (1)");
455         __ bind(L);
456       }
457       __ verify_oop(R3_ARG1, "StubRoutines::forward exception: not an oop");
458     }
459 #endif
460 
461     // Save LR/CR and copy exception pc (LR) into R4_ARG2.
462     __ save_LR_CR(R4_ARG2);
463     __ push_frame_reg_args(0, R0);
464     // Find exception handler.
465     __ call_VM_leaf(CAST_FROM_FN_PTR(address,
466                      SharedRuntime::exception_handler_for_return_address),
467                     R16_thread,
468                     R4_ARG2);
469     // Copy handler's address.
470     __ mtctr(R3_RET);
471     __ pop_frame();
472     __ restore_LR_CR(R0);
473 
474     // Set up the arguments for the exception handler:
475     //  - R3_ARG1: exception oop
476     //  - R4_ARG2: exception pc.
477 
478     // Load pending exception oop.
479     __ ld(R3_ARG1,
480               in_bytes(Thread::pending_exception_offset()),
481               R16_thread);
482 
483     // The exception pc is the return address in the caller.
484     // Must load it into R4_ARG2.
485     __ mflr(R4_ARG2);
486 
487 #ifdef ASSERT
488     // Make sure exception is set.
489     {
490       Label L;
491       __ cmpdi(CCR0, R3_ARG1, 0);
492       __ bne(CCR0, L);
493       __ stop("StubRoutines::forward exception: no pending exception (2)");
494       __ bind(L);
495     }
496 #endif
497 
498     // Clear the pending exception.
499     __ li(R0, 0);
500     __ std(R0,
501                in_bytes(Thread::pending_exception_offset()),
502                R16_thread);
503     // Jump to exception handler.
504     __ bctr();
505 
506     return start;
507   }
508 
509 #undef __
510 #define __ masm->
511   // Continuation point for throwing of implicit exceptions that are
512   // not handled in the current activation. Fabricates an exception
513   // oop and initiates normal exception dispatching in this
514   // frame. Only callee-saved registers are preserved (through the
515   // normal register window / RegisterMap handling).  If the compiler
516   // needs all registers to be preserved between the fault point and
517   // the exception handler then it must assume responsibility for that
518   // in AbstractCompiler::continuation_for_implicit_null_exception or
519   // continuation_for_implicit_division_by_zero_exception. All other
520   // implicit exceptions (e.g., NullPointerException or
521   // AbstractMethodError on entry) are either at call sites or
522   // otherwise assume that stack unwinding will be initiated, so
523   // caller saved registers were assumed volatile in the compiler.
524   //
525   // Note that we generate only this stub into a RuntimeStub, because
526   // it needs to be properly traversed and ignored during GC, so we
527   // change the meaning of the "__" macro within this method.
528   //
529   // Note: the routine set_pc_not_at_call_for_caller in
530   // SharedRuntime.cpp requires that this code be generated into a
531   // RuntimeStub.
532   address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc,
533                                    Register arg1 = noreg, Register arg2 = noreg) {
534     CodeBuffer code(name, 1024 DEBUG_ONLY(+ 512), 0);
535     MacroAssembler* masm = new MacroAssembler(&code);
536 
537     OopMapSet* oop_maps  = new OopMapSet();
538     int frame_size_in_bytes = frame::abi_reg_args_size;
539     OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
540 
541     address start = __ pc();
542 
543     __ save_LR_CR(R11_scratch1);
544 
545     // Push a frame.
546     __ push_frame_reg_args(0, R11_scratch1);
547 
548     address frame_complete_pc = __ pc();
549 
550     if (restore_saved_exception_pc) {
551       __ unimplemented("StubGenerator::throw_exception with restore_saved_exception_pc", 74);
552     }
553 
554     // Note that we always have a runtime stub frame on the top of
555     // stack by this point. Remember the offset of the instruction
556     // whose address will be moved to R11_scratch1.
557     address gc_map_pc = __ get_PC_trash_LR(R11_scratch1);
558 
559     __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);
560 
561     __ mr(R3_ARG1, R16_thread);
562     if (arg1 != noreg) {
563       __ mr(R4_ARG2, arg1);
564     }
565     if (arg2 != noreg) {
566       __ mr(R5_ARG3, arg2);
567     }
568 #if defined(ABI_ELFv2)
569     __ call_c(runtime_entry, relocInfo::none);
570 #else
571     __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, runtime_entry), relocInfo::none);
572 #endif
573 
574     // Set an oopmap for the call site.
575     oop_maps->add_gc_map((int)(gc_map_pc - start), map);
576 
577     __ reset_last_Java_frame();
578 
579 #ifdef ASSERT
580     // Make sure that this code is only executed if there is a pending
581     // exception.
582     {
583       Label L;
584       __ ld(R0,
585                 in_bytes(Thread::pending_exception_offset()),
586                 R16_thread);
587       __ cmpdi(CCR0, R0, 0);
588       __ bne(CCR0, L);
589       __ stop("StubRoutines::throw_exception: no pending exception");
590       __ bind(L);
591     }
592 #endif
593 
594     // Pop frame.
595     __ pop_frame();
596 
597     __ restore_LR_CR(R11_scratch1);
598 
599     __ load_const(R11_scratch1, StubRoutines::forward_exception_entry());
600     __ mtctr(R11_scratch1);
601     __ bctr();
602 
603     // Create runtime stub with OopMap.
604     RuntimeStub* stub =
605       RuntimeStub::new_runtime_stub(name, &code,
606                                     /*frame_complete=*/ (int)(frame_complete_pc - start),
607                                     frame_size_in_bytes/wordSize,
608                                     oop_maps,
609                                     false);
610     return stub->entry_point();
611   }
612 #undef __
613 #define __ _masm->
614 
615 
616   // Support for void zero_words_aligned8(HeapWord* to, size_t count)
617   //
618   // Arguments:
619   //   to:    R3_ARG1, start address (must be 8-byte aligned)
620   //   count: R4_ARG2, number of dwords (8-byte words) to clear
621   //
622   // Destroys: R3_ARG1, R4_ARG2, R5_ARG3-R7_ARG5, CTR, CCR0, CCR1
623   //
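  // Roughly equivalent C (illustrative sketch; 'count' is in 8-byte words):
  //
  //   void zero_words_aligned8(HeapWord* to, size_t count) {
  //     for (size_t i = 0; i < count; i++) ((julong*)to)[i] = 0;
  //   }
  //
  // The generated code additionally uses dcbz to clear whole data-cache lines at once.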
624   address generate_zero_words_aligned8() {
625     StubCodeMark mark(this, "StubRoutines", "zero_words_aligned8");
626 
627     // Implemented as in ClearArray.
628     address start = __ function_entry();
629 
630     Register base_ptr_reg   = R3_ARG1; // tohw (needs to be 8b aligned)
631     Register cnt_dwords_reg = R4_ARG2; // count (in dwords)
632     Register tmp1_reg       = R5_ARG3;
633     Register tmp2_reg       = R6_ARG4;
634     Register zero_reg       = R7_ARG5;
635 
636     // Procedure for large arrays (uses data cache block zero instruction).
637     Label dwloop, fast, fastloop, restloop, lastdword, done;
638     int cl_size = VM_Version::L1_data_cache_line_size();
639     int cl_dwords = cl_size >> 3;
640     int cl_dwordaddr_bits = exact_log2(cl_dwords);
641     int min_dcbz = 2; // Needs to be positive, apply dcbz only to at least min_dcbz cache lines.
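    // dcbz clears an entire data-cache line (cl_size bytes = cl_dwords dwords) with a single
    // instruction. The code below therefore clears dword-by-dword up to the next cache-line
    // boundary, clears whole lines with dcbz, and finally clears the remaining dwords.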
642 
643     // Clear up to 128byte boundary if long enough, dword_cnt=(16-(base>>3))%16.
644     __ dcbtst(base_ptr_reg);                    // Indicate write access to first cache line ...
645     __ andi(tmp2_reg, cnt_dwords_reg, 1);       // to check if number of dwords is even.
646     __ srdi_(tmp1_reg, cnt_dwords_reg, 1);      // number of double dwords
647     __ load_const_optimized(zero_reg, 0L);      // Use as zero register.
648 
649     __ cmpdi(CCR1, tmp2_reg, 0);                // cnt_dwords even?
650     __ beq(CCR0, lastdword);                    // size <= 1
651     __ mtctr(tmp1_reg);                         // Speculatively preload counter for rest loop (>0).
652     __ cmpdi(CCR0, cnt_dwords_reg, (min_dcbz+1)*cl_dwords-1); // Big enough to ensure >=min_dcbz cache lines are included?
653     __ neg(tmp1_reg, base_ptr_reg);             // bit 0..58: bogus, bit 57..60: (16-(base>>3))%16, bit 61..63: 000
654 
655     __ blt(CCR0, restloop);                     // Too small. (<31=(2*cl_dwords)-1 is sufficient, but bigger performs better.)
656     __ rldicl_(tmp1_reg, tmp1_reg, 64-3, 64-cl_dwordaddr_bits); // Extract number of dwords to 128byte boundary=(16-(base>>3))%16.
657 
658     __ beq(CCR0, fast);                         // already 128byte aligned
659     __ mtctr(tmp1_reg);                         // Set ctr to hit 128byte boundary (0<ctr<cnt).
660     __ subf(cnt_dwords_reg, tmp1_reg, cnt_dwords_reg); // rest (>0 since size>=256-8)
661 
662     // Clear in first cache line dword-by-dword if not already 128byte aligned.
663     __ bind(dwloop);
664       __ std(zero_reg, 0, base_ptr_reg);        // Clear 8byte aligned block.
665       __ addi(base_ptr_reg, base_ptr_reg, 8);
666     __ bdnz(dwloop);
667 
668     // clear 128byte blocks
669     __ bind(fast);
670     __ srdi(tmp1_reg, cnt_dwords_reg, cl_dwordaddr_bits); // loop count for 128byte loop (>0 since size>=256-8)
671     __ andi(tmp2_reg, cnt_dwords_reg, 1);       // to check if rest even
672 
673     __ mtctr(tmp1_reg);                         // load counter
674     __ cmpdi(CCR1, tmp2_reg, 0);                // rest even?
675     __ rldicl_(tmp1_reg, cnt_dwords_reg, 63, 65-cl_dwordaddr_bits); // rest in double dwords
676 
677     __ bind(fastloop);
678       __ dcbz(base_ptr_reg);                    // Clear 128byte aligned block.
679       __ addi(base_ptr_reg, base_ptr_reg, cl_size);
680     __ bdnz(fastloop);
681 
682     //__ dcbtst(base_ptr_reg);                  // Indicate write access to last cache line.
683     __ beq(CCR0, lastdword);                    // rest<=1
684     __ mtctr(tmp1_reg);                         // load counter
685 
686     // Clear rest.
687     __ bind(restloop);
688       __ std(zero_reg, 0, base_ptr_reg);        // Clear 8byte aligned block.
689       __ std(zero_reg, 8, base_ptr_reg);        // Clear 8byte aligned block.
690       __ addi(base_ptr_reg, base_ptr_reg, 16);
691     __ bdnz(restloop);
692 
693     __ bind(lastdword);
694     __ beq(CCR1, done);
695     __ std(zero_reg, 0, base_ptr_reg);
696     __ bind(done);
697     __ blr();                                   // return
698 
699     return start;
700   }
701 
702 #if !defined(PRODUCT)
703   // Wrapper which calls oopDesc::is_oop_or_null()
704   // Only called by MacroAssembler::verify_oop
705   static void verify_oop_helper(const char* message, oop o) {
706     if (!oopDesc::is_oop_or_null(o)) {
707       fatal("%s", message);
708     }
709     ++ StubRoutines::_verify_oop_count;
710   }
711 #endif
712 
713   // Return address of code to be called from code generated by
714   // MacroAssembler::verify_oop.
715   //
716   // Don't generate, rather use C++ code.
717   address generate_verify_oop() {
718     // this is actually a `FunctionDescriptor*'.
719     address start = 0;
720 
721 #if !defined(PRODUCT)
722     start = CAST_FROM_FN_PTR(address, verify_oop_helper);
723 #endif
724 
725     return start;
726   }
727 
728 
729   // -XX:+OptimizeFill : convert fill/copy loops into intrinsic
730   //
731   // The code is implemented (ported from SPARC) because we believe it benefits JVM98; however,
732   // tracing (-XX:+TraceOptimizeFill) shows that the intrinsic replacement doesn't happen at all!
733   //
734   // The source of is_range_check_if() shows that OptimizeFill relaxes the condition for
735   // turning on the loop predication optimization, and hence the behavior of the "array range
736   // check" and "loop invariant check" could be influenced, which potentially boosted JVM98.
737   //
738   // Generate stub for disjoint short fill. If "aligned" is true, the
739   // "to" address is assumed to be heapword aligned.
740   //
741   // Arguments for generated stub:
742   //   to:    R3_ARG1
743   //   value: R4_ARG2
744   //   count: R5_ARG3 treated as signed
745   //
746   address generate_fill(BasicType t, bool aligned, const char* name) {
747     StubCodeMark mark(this, "StubRoutines", name);
748     address start = __ function_entry();
749 
750     const Register to    = R3_ARG1;   // destination array address
751     const Register value = R4_ARG2;   // fill value
752     const Register count = R5_ARG3;   // elements count
753     const Register temp  = R6_ARG4;   // temp register
754 
755     //assert_clean_int(count, O3);    // Make sure 'count' is clean int.
756 
757     Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
758     Label L_fill_2_bytes, L_fill_4_bytes, L_fill_elements, L_fill_32_bytes;
759 
760     int shift = -1;
761     switch (t) {
762        case T_BYTE:
763         shift = 2;
764         // Clone bytes (zero extend not needed because store instructions below ignore high order bytes).
765         __ rldimi(value, value, 8, 48);     // 8 bit -> 16 bit
766         __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
767         __ blt(CCR0, L_fill_elements);
768         __ rldimi(value, value, 16, 32);    // 16 bit -> 32 bit
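        // E.g. value = 0x000000AB -> 0x0000ABAB -> 0xABABABAB; the rldimi(value, value, 32, 0)
        // further below replicates this into all 64 bits before the 8-byte stores.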
769         break;
770        case T_SHORT:
771         shift = 1;
772         // Clone bytes (zero extend not needed because store instructions below ignore high order bytes).
773         __ rldimi(value, value, 16, 32);    // 16 bit -> 32 bit
774         __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
775         __ blt(CCR0, L_fill_elements);
776         break;
777       case T_INT:
778         shift = 0;
779         __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
780         __ blt(CCR0, L_fill_4_bytes);
781         break;
782       default: ShouldNotReachHere();
783     }
784 
785     if (!aligned && (t == T_BYTE || t == T_SHORT)) {
786       // Align source address at 4 bytes address boundary.
787       if (t == T_BYTE) {
788         // One byte misalignment happens only for byte arrays.
789         __ andi_(temp, to, 1);
790         __ beq(CCR0, L_skip_align1);
791         __ stb(value, 0, to);
792         __ addi(to, to, 1);
793         __ addi(count, count, -1);
794         __ bind(L_skip_align1);
795       }
796       // Two bytes misalignment happens only for byte and short (char) arrays.
797       __ andi_(temp, to, 2);
798       __ beq(CCR0, L_skip_align2);
799       __ sth(value, 0, to);
800       __ addi(to, to, 2);
801       __ addi(count, count, -(1 << (shift - 1)));
802       __ bind(L_skip_align2);
803     }
804 
805     if (!aligned) {
806       // Align to 8 bytes, we know we are 4 byte aligned to start.
807       __ andi_(temp, to, 7);
808       __ beq(CCR0, L_fill_32_bytes);
809       __ stw(value, 0, to);
810       __ addi(to, to, 4);
811       __ addi(count, count, -(1 << shift));
812       __ bind(L_fill_32_bytes);
813     }
814 
815     __ li(temp, 8<<shift);                  // Prepare for 32 byte loop.
816     // Clone bytes int->long as above.
817     __ rldimi(value, value, 32, 0);         // 32 bit -> 64 bit
818 
819     Label L_check_fill_8_bytes;
820     // Fill 32-byte chunks.
821     __ subf_(count, temp, count);
822     __ blt(CCR0, L_check_fill_8_bytes);
823 
824     Label L_fill_32_bytes_loop;
825     __ align(32);
826     __ bind(L_fill_32_bytes_loop);
827 
828     __ std(value, 0, to);
829     __ std(value, 8, to);
830     __ subf_(count, temp, count);           // Update count.
831     __ std(value, 16, to);
832     __ std(value, 24, to);
833 
834     __ addi(to, to, 32);
835     __ bge(CCR0, L_fill_32_bytes_loop);
836 
837     __ bind(L_check_fill_8_bytes);
838     __ add_(count, temp, count);
839     __ beq(CCR0, L_exit);
840     __ addic_(count, count, -(2 << shift));
841     __ blt(CCR0, L_fill_4_bytes);
842 
843     //
844     // Length is too short, just fill 8 bytes at a time.
845     //
846     Label L_fill_8_bytes_loop;
847     __ bind(L_fill_8_bytes_loop);
848     __ std(value, 0, to);
849     __ addic_(count, count, -(2 << shift));
850     __ addi(to, to, 8);
851     __ bge(CCR0, L_fill_8_bytes_loop);
852 
853     // Fill trailing 4 bytes.
854     __ bind(L_fill_4_bytes);
855     __ andi_(temp, count, 1<<shift);
856     __ beq(CCR0, L_fill_2_bytes);
857 
858     __ stw(value, 0, to);
859     if (t == T_BYTE || t == T_SHORT) {
860       __ addi(to, to, 4);
861       // Fill trailing 2 bytes.
862       __ bind(L_fill_2_bytes);
863       __ andi_(temp, count, 1<<(shift-1));
864       __ beq(CCR0, L_fill_byte);
865       __ sth(value, 0, to);
866       if (t == T_BYTE) {
867         __ addi(to, to, 2);
868         // Fill trailing byte.
869         __ bind(L_fill_byte);
870         __ andi_(count, count, 1);
871         __ beq(CCR0, L_exit);
872         __ stb(value, 0, to);
873       } else {
874         __ bind(L_fill_byte);
875       }
876     } else {
877       __ bind(L_fill_2_bytes);
878     }
879     __ bind(L_exit);
880     __ blr();
881 
882     // Handle fills of less than 8 bytes. Int is handled elsewhere.
883     if (t == T_BYTE) {
884       __ bind(L_fill_elements);
885       Label L_fill_2, L_fill_4;
886       __ andi_(temp, count, 1);
887       __ beq(CCR0, L_fill_2);
888       __ stb(value, 0, to);
889       __ addi(to, to, 1);
890       __ bind(L_fill_2);
891       __ andi_(temp, count, 2);
892       __ beq(CCR0, L_fill_4);
893       __ stb(value, 0, to);
894       __ stb(value, 1, to);
895       __ addi(to, to, 2);
896       __ bind(L_fill_4);
897       __ andi_(temp, count, 4);
898       __ beq(CCR0, L_exit);
899       __ stb(value, 0, to);
900       __ stb(value, 1, to);
901       __ stb(value, 2, to);
902       __ stb(value, 3, to);
903       __ blr();
904     }
905 
906     if (t == T_SHORT) {
907       Label L_fill_2;
908       __ bind(L_fill_elements);
909       __ andi_(temp, count, 1);
910       __ beq(CCR0, L_fill_2);
911       __ sth(value, 0, to);
912       __ addi(to, to, 2);
913       __ bind(L_fill_2);
914       __ andi_(temp, count, 2);
915       __ beq(CCR0, L_exit);
916       __ sth(value, 0, to);
917       __ sth(value, 2, to);
918       __ blr();
919     }
920     return start;
921   }
922 
923   inline void assert_positive_int(Register count) {
924 #ifdef ASSERT
925     __ srdi_(R0, count, 31);
926     __ asm_assert_eq("missing zero extend", 0xAFFE);
927 #endif
928   }
929 
930   // Generate overlap test for array copy stubs.
931   //
932   // Input:
933   //   R3_ARG1    -  from
934   //   R4_ARG2    -  to
935   //   R5_ARG3    -  element count
936   //
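  // Falls through to the backward-copy code only if the regions overlap in the forward
  // direction, i.e. if (from < to) && ((to - from) < (count << log2_elem_size));
  // otherwise it branches to no_overlap_target (the forward/disjoint copy routine).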
937   void array_overlap_test(address no_overlap_target, int log2_elem_size) {
938     Register tmp1 = R6_ARG4;
939     Register tmp2 = R7_ARG5;
940 
941     assert_positive_int(R5_ARG3);
942 
943     __ subf(tmp1, R3_ARG1, R4_ARG2); // distance in bytes
944     __ sldi(tmp2, R5_ARG3, log2_elem_size); // size in bytes
945     __ cmpld(CCR0, R3_ARG1, R4_ARG2); // Use unsigned comparison!
946     __ cmpld(CCR1, tmp1, tmp2);
947     __ crnand(CCR0, Assembler::less, CCR1, Assembler::less);
948     // Overlaps if src is before dst and the distance is smaller than the size.
949     // Branch to forward copy routine otherwise (within range of 32kB).
950     __ bc(Assembler::bcondCRbiIs1, Assembler::bi0(CCR0, Assembler::less), no_overlap_target);
951 
952     // need to copy backwards
953   }
954 
955   // The guideline in the implementations of generate_disjoint_xxx_copy
956   // (xxx=byte,short,int,long,oop) is to copy as many elements as possible with
957   // single instructions, but to avoid alignment interrupts (see subsequent
958   // comment). Furthermore, we try to minimize misaligned accesses, even
959   // though they cause no alignment interrupt.
960   //
961   // In Big-Endian mode, the PowerPC architecture requires implementations to
962   // handle automatically misaligned integer halfword and word accesses,
963   // word-aligned integer doubleword accesses, and word-aligned floating-point
964   // accesses. Other accesses may or may not generate an Alignment interrupt
965   // depending on the implementation.
966   // Alignment interrupt handling may require on the order of hundreds of cycles,
967   // so every effort should be made to avoid misaligned memory accesses.
968   //
969   //
970   // Generate stub for disjoint byte copy.  If "aligned" is true, the
971   // "from" and "to" addresses are assumed to be heapword aligned.
972   //
973   // Arguments for generated stub:
974   //      from:  R3_ARG1
975   //      to:    R4_ARG2
976   //      count: R5_ARG3 treated as signed
977   //
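  // Conceptually just (illustrative):
  //   for (int i = 0; i < count; i++) to[i] = from[i];
  // The code below only selects the widest access size the actual alignment allows.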
978   address generate_disjoint_byte_copy(bool aligned, const char * name) {
979     StubCodeMark mark(this, "StubRoutines", name);
980     address start = __ function_entry();
981     assert_positive_int(R5_ARG3);
982 
983     Register tmp1 = R6_ARG4;
984     Register tmp2 = R7_ARG5;
985     Register tmp3 = R8_ARG6;
986     Register tmp4 = R9_ARG7;
987 
988     VectorSRegister tmp_vsr1  = VSR1;
989     VectorSRegister tmp_vsr2  = VSR2;
990 
991     Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8, l_9, l_10;
992 
993     // Don't try anything fancy if arrays don't have many elements.
994     __ li(tmp3, 0);
995     __ cmpwi(CCR0, R5_ARG3, 17);
996     __ ble(CCR0, l_6); // copy 4 at a time
997 
998     if (!aligned) {
999       __ xorr(tmp1, R3_ARG1, R4_ARG2);
1000       __ andi_(tmp1, tmp1, 3);
1001       __ bne(CCR0, l_6); // If arrays don't have the same alignment mod 4, do 4 element copy.
1002 
1003       // Copy elements if necessary to align to 4 bytes.
1004       __ neg(tmp1, R3_ARG1); // Compute distance to alignment boundary.
1005       __ andi_(tmp1, tmp1, 3);
1006       __ beq(CCR0, l_2);
1007 
1008       __ subf(R5_ARG3, tmp1, R5_ARG3);
1009       __ bind(l_9);
1010       __ lbz(tmp2, 0, R3_ARG1);
1011       __ addic_(tmp1, tmp1, -1);
1012       __ stb(tmp2, 0, R4_ARG2);
1013       __ addi(R3_ARG1, R3_ARG1, 1);
1014       __ addi(R4_ARG2, R4_ARG2, 1);
1015       __ bne(CCR0, l_9);
1016 
1017       __ bind(l_2);
1018     }
1019 
1020     // copy 8 elements at a time
1021     __ xorr(tmp2, R3_ARG1, R4_ARG2); // skip if src & dest have differing alignment mod 8
1022     __ andi_(tmp1, tmp2, 7);
1023     __ bne(CCR0, l_7); // not same alignment -> to or from is aligned -> copy 8
1024 
1025     // copy a 2-element word if necessary to align to 8 bytes
1026     __ andi_(R0, R3_ARG1, 7);
1027     __ beq(CCR0, l_7);
1028 
1029     __ lwzx(tmp2, R3_ARG1, tmp3);
1030     __ addi(R5_ARG3, R5_ARG3, -4);
1031     __ stwx(tmp2, R4_ARG2, tmp3);
1032     { // FasterArrayCopy
1033       __ addi(R3_ARG1, R3_ARG1, 4);
1034       __ addi(R4_ARG2, R4_ARG2, 4);
1035     }
1036     __ bind(l_7);
1037 
1038     { // FasterArrayCopy
1039       __ cmpwi(CCR0, R5_ARG3, 31);
1040       __ ble(CCR0, l_6); // copy 4 at a time if less than 32 elements remain
1041 
1042       __ srdi(tmp1, R5_ARG3, 5);
1043       __ andi_(R5_ARG3, R5_ARG3, 31);
1044       __ mtctr(tmp1);
1045 
1046      if (!VM_Version::has_vsx()) {
1047 
1048       __ bind(l_8);
1049       // Use unrolled version for mass copying (copy 32 elements a time)
1050       // Load feeding store gets zero latency on Power6, however not on Power5.
1051       // Therefore, the following sequence is made for the good of both.
1052       __ ld(tmp1, 0, R3_ARG1);
1053       __ ld(tmp2, 8, R3_ARG1);
1054       __ ld(tmp3, 16, R3_ARG1);
1055       __ ld(tmp4, 24, R3_ARG1);
1056       __ std(tmp1, 0, R4_ARG2);
1057       __ std(tmp2, 8, R4_ARG2);
1058       __ std(tmp3, 16, R4_ARG2);
1059       __ std(tmp4, 24, R4_ARG2);
1060       __ addi(R3_ARG1, R3_ARG1, 32);
1061       __ addi(R4_ARG2, R4_ARG2, 32);
1062       __ bdnz(l_8);
1063 
1064     } else { // Processor supports VSX, so use it to mass copy.
1065 
1066       // Prefetch the data into the L2 cache.
1067       __ dcbt(R3_ARG1, 0);
1068 
1069       // If supported set DSCR pre-fetch to deepest.
1070       if (VM_Version::has_mfdscr()) {
1071         __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
1072         __ mtdscr(tmp2);
1073       }
1074 
1075       __ li(tmp1, 16);
1076 
1077       // Backbranch target aligned to 32 bytes. 16-byte alignment is not
1078       // used because the loop contains < 8 instructions, which fit inside
1079       // a single i-cache sector.
1080       __ align(32);
1081 
1082       __ bind(l_10);
1083       // Use loop with VSX load/store instructions to
1084       // copy 32 elements a time.
1085       __ lxvd2x(tmp_vsr1, R3_ARG1);        // Load src
1086       __ stxvd2x(tmp_vsr1, R4_ARG2);       // Store to dst
1087       __ lxvd2x(tmp_vsr2, tmp1, R3_ARG1);  // Load src + 16
1088       __ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst + 16
1089       __ addi(R3_ARG1, R3_ARG1, 32);       // Update src+=32
1090       __ addi(R4_ARG2, R4_ARG2, 32);       // Update dst+=32
1091       __ bdnz(l_10);                       // Dec CTR and loop if not zero.
1092 
1093       // Restore DSCR pre-fetch value.
1094       if (VM_Version::has_mfdscr()) {
1095         __ load_const_optimized(tmp2, VM_Version::_dscr_val);
1096         __ mtdscr(tmp2);
1097       }
1098 
1099     } // VSX
1100    } // FasterArrayCopy
1101 
1102     __ bind(l_6);
1103 
1104     // copy 4 elements at a time
1105     __ cmpwi(CCR0, R5_ARG3, 4);
1106     __ blt(CCR0, l_1);
1107     __ srdi(tmp1, R5_ARG3, 2);
1108     __ mtctr(tmp1); // is > 0
1109     __ andi_(R5_ARG3, R5_ARG3, 3);
1110 
1111     { // FasterArrayCopy
1112       __ addi(R3_ARG1, R3_ARG1, -4);
1113       __ addi(R4_ARG2, R4_ARG2, -4);
1114       __ bind(l_3);
1115       __ lwzu(tmp2, 4, R3_ARG1);
1116       __ stwu(tmp2, 4, R4_ARG2);
1117       __ bdnz(l_3);
1118       __ addi(R3_ARG1, R3_ARG1, 4);
1119       __ addi(R4_ARG2, R4_ARG2, 4);
1120     }
1121 
1122     // do single element copy
1123     __ bind(l_1);
1124     __ cmpwi(CCR0, R5_ARG3, 0);
1125     __ beq(CCR0, l_4);
1126 
1127     { // FasterArrayCopy
1128       __ mtctr(R5_ARG3);
1129       __ addi(R3_ARG1, R3_ARG1, -1);
1130       __ addi(R4_ARG2, R4_ARG2, -1);
1131 
1132       __ bind(l_5);
1133       __ lbzu(tmp2, 1, R3_ARG1);
1134       __ stbu(tmp2, 1, R4_ARG2);
1135       __ bdnz(l_5);
1136     }
1137 
1138     __ bind(l_4);
1139     __ li(R3_RET, 0); // return 0
1140     __ blr();
1141 
1142     return start;
1143   }
1144 
1145   // Generate stub for conjoint byte copy.  If "aligned" is true, the
1146   // "from" and "to" addresses are assumed to be heapword aligned.
1147   //
1148   // Arguments for generated stub:
1149   //      from:  R3_ARG1
1150   //      to:    R4_ARG2
1151   //      count: R5_ARG3 treated as signed
1152   //
1153   address generate_conjoint_byte_copy(bool aligned, const char * name) {
1154     StubCodeMark mark(this, "StubRoutines", name);
1155     address start = __ function_entry();
1156     assert_positive_int(R5_ARG3);
1157 
1158     Register tmp1 = R6_ARG4;
1159     Register tmp2 = R7_ARG5;
1160     Register tmp3 = R8_ARG6;
1161 
1162     address nooverlap_target = aligned ?
1163       STUB_ENTRY(arrayof_jbyte_disjoint_arraycopy) :
1164       STUB_ENTRY(jbyte_disjoint_arraycopy);
1165 
1166     array_overlap_test(nooverlap_target, 0);
1167     // Do reverse copy. We assume the case of actual overlap is rare enough
1168     // that we don't have to optimize it.
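    // Roughly equivalent C (illustrative):
    //   for (int i = count - 1; i >= 0; i--) to[i] = from[i];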
1169     Label l_1, l_2;
1170 
1171     __ b(l_2);
1172     __ bind(l_1);
1173     __ stbx(tmp1, R4_ARG2, R5_ARG3);
1174     __ bind(l_2);
1175     __ addic_(R5_ARG3, R5_ARG3, -1);
1176     __ lbzx(tmp1, R3_ARG1, R5_ARG3);
1177     __ bge(CCR0, l_1);
1178 
1179     __ li(R3_RET, 0); // return 0
1180     __ blr();
1181 
1182     return start;
1183   }
1184 
1185   // Generate stub for disjoint short copy.  If "aligned" is true, the
1186   // "from" and "to" addresses are assumed to be heapword aligned.
1187   //
1188   // Arguments for generated stub:
1189   //      from:  R3_ARG1
1190   //      to:    R4_ARG2
1191   //  elm.count: R5_ARG3 treated as signed
1192   //
1193   // Strategy for aligned==true:
1194   //
1195   //  If length <= 9:
1196   //     1. copy 2 elements at a time (l_6)
1197   //     2. copy last element if original element count was odd (l_1)
1198   //
1199   //  If length > 9:
1200   //     1. copy 4 elements at a time until less than 4 elements are left (l_7)
1201   //     2. copy 2 elements at a time until less than 2 elements are left (l_6)
1202   //     3. copy last element if one was left in step 2. (l_1)
1203   //
1204   //
1205   // Strategy for aligned==false:
1206   //
1207   //  If length <= 9: same as aligned==true case, but NOTE: load/stores
1208   //                  can be unaligned (see comment below)
1209   //
1210   //  If length > 9:
1211   //     1. continue with step 6. if the alignment of from and to mod 4
1212   //        is different.
1213   //     2. align from and to to 4 bytes by copying 1 element if necessary
1214   //     3. at l_2 from and to are 4 byte aligned; continue with
1215   //        5. if they cannot be aligned to 8 bytes because they have
1216   //        got different alignment mod 8.
1217   //     4. at this point we know that both, from and to, have the same
1218   //        alignment mod 8, now copy one element if necessary to get
1219   //        8 byte alignment of from and to.
1220   //     5. copy 4 elements at a time until less than 4 elements are
1221   //        left; depending on step 3. all load/stores are aligned or
1222   //        either all loads or all stores are unaligned.
1223   //     6. copy 2 elements at a time until less than 2 elements are
1224   //        left (l_6); arriving here from step 1., there is a chance
1225   //        that all accesses are unaligned.
1226   //     7. copy last element if one was left in step 6. (l_1)
1227   //
1228   //  There are unaligned data accesses using integer load/store
1229   //  instructions in this stub. POWER allows such accesses.
1230   //
1231   //  According to the manuals (PowerISA_V2.06_PUBLIC, Book II,
1232   //  Chapter 2: Effect of Operand Placement on Performance) unaligned
1233   //  integer load/stores have good performance. Only unaligned
1234   //  floating point load/stores can have poor performance.
1235   //
1236   //  TODO:
1237   //
1238   //  1. check if aligning the backbranch target of loops is beneficial
1239   //
1240   address generate_disjoint_short_copy(bool aligned, const char * name) {
1241     StubCodeMark mark(this, "StubRoutines", name);
1242 
1243     Register tmp1 = R6_ARG4;
1244     Register tmp2 = R7_ARG5;
1245     Register tmp3 = R8_ARG6;
1246     Register tmp4 = R9_ARG7;
1247 
1248     VectorSRegister tmp_vsr1  = VSR1;
1249     VectorSRegister tmp_vsr2  = VSR2;
1250 
1251     address start = __ function_entry();
1252     assert_positive_int(R5_ARG3);
1253 
1254     Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8, l_9;
1255 
1256     // don't try anything fancy if arrays don't have many elements
1257     __ li(tmp3, 0);
1258     __ cmpwi(CCR0, R5_ARG3, 9);
1259     __ ble(CCR0, l_6); // copy 2 at a time
1260 
1261     if (!aligned) {
1262       __ xorr(tmp1, R3_ARG1, R4_ARG2);
1263       __ andi_(tmp1, tmp1, 3);
1264       __ bne(CCR0, l_6); // if arrays don't have the same alignment mod 4, do 2 element copy
1265 
1266       // At this point it is guaranteed that both, from and to have the same alignment mod 4.
1267 
1268       // Copy 1 element if necessary to align to 4 bytes.
1269       __ andi_(tmp1, R3_ARG1, 3);
1270       __ beq(CCR0, l_2);
1271 
1272       __ lhz(tmp2, 0, R3_ARG1);
1273       __ addi(R3_ARG1, R3_ARG1, 2);
1274       __ sth(tmp2, 0, R4_ARG2);
1275       __ addi(R4_ARG2, R4_ARG2, 2);
1276       __ addi(R5_ARG3, R5_ARG3, -1);
1277       __ bind(l_2);
1278 
1279       // At this point the positions of both, from and to, are at least 4 byte aligned.
1280 
1281       // Copy 4 elements at a time.
1282       // Align to 8 bytes, but only if both, from and to, have same alignment mod 8.
1283       __ xorr(tmp2, R3_ARG1, R4_ARG2);
1284       __ andi_(tmp1, tmp2, 7);
1285       __ bne(CCR0, l_7); // not same alignment mod 8 -> copy 4, either from or to will be unaligned
1286 
1287       // Copy a 2-element word if necessary to align to 8 bytes.
1288       __ andi_(R0, R3_ARG1, 7);
1289       __ beq(CCR0, l_7);
1290 
1291       __ lwzx(tmp2, R3_ARG1, tmp3);
1292       __ addi(R5_ARG3, R5_ARG3, -2);
1293       __ stwx(tmp2, R4_ARG2, tmp3);
1294       { // FasterArrayCopy
1295         __ addi(R3_ARG1, R3_ARG1, 4);
1296         __ addi(R4_ARG2, R4_ARG2, 4);
1297       }
1298     }
1299 
1300     __ bind(l_7);
1301 
1302     // Copy 4 elements at a time; either the loads or the stores can
1303     // be unaligned if aligned == false.
1304 
1305     { // FasterArrayCopy
1306       __ cmpwi(CCR0, R5_ARG3, 15);
1307       __ ble(CCR0, l_6); // copy 2 at a time if less than 16 elements remain
1308 
1309       __ srdi(tmp1, R5_ARG3, 4);
1310       __ andi_(R5_ARG3, R5_ARG3, 15);
1311       __ mtctr(tmp1);
1312 
1313       if (!VM_Version::has_vsx()) {
1314 
1315         __ bind(l_8);
1316         // Use unrolled version for mass copying (copy 16 elements a time).
1317         // Load feeding store gets zero latency on Power6, however not on Power5.
1318         // Therefore, the following sequence is made for the good of both.
1319         __ ld(tmp1, 0, R3_ARG1);
1320         __ ld(tmp2, 8, R3_ARG1);
1321         __ ld(tmp3, 16, R3_ARG1);
1322         __ ld(tmp4, 24, R3_ARG1);
1323         __ std(tmp1, 0, R4_ARG2);
1324         __ std(tmp2, 8, R4_ARG2);
1325         __ std(tmp3, 16, R4_ARG2);
1326         __ std(tmp4, 24, R4_ARG2);
1327         __ addi(R3_ARG1, R3_ARG1, 32);
1328         __ addi(R4_ARG2, R4_ARG2, 32);
1329         __ bdnz(l_8);
1330 
1331       } else { // Processor supports VSX, so use it to mass copy.
1332 
1333         // Prefetch src data into L2 cache.
1334         __ dcbt(R3_ARG1, 0);
1335 
1336         // If supported set DSCR pre-fetch to deepest.
1337         if (VM_Version::has_mfdscr()) {
1338           __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
1339           __ mtdscr(tmp2);
1340         }
1341         __ li(tmp1, 16);
1342 
1343         // Backbranch target aligned to 32 bytes. 16-byte alignment is not
1344         // used because the loop contains < 8 instructions, which fit inside
1345         // a single i-cache sector.
1346         __ align(32);
1347 
1348         __ bind(l_9);
1349         // Use loop with VSX load/store instructions to
1350         // copy 16 elements a time.
1351         __ lxvd2x(tmp_vsr1, R3_ARG1);        // Load from src.
1352         __ stxvd2x(tmp_vsr1, R4_ARG2);       // Store to dst.
1353         __ lxvd2x(tmp_vsr2, R3_ARG1, tmp1);  // Load from src + 16.
1354         __ stxvd2x(tmp_vsr2, R4_ARG2, tmp1); // Store to dst + 16.
1355         __ addi(R3_ARG1, R3_ARG1, 32);       // Update src+=32.
1356         __ addi(R4_ARG2, R4_ARG2, 32);       // Update dst+=32.
1357         __ bdnz(l_9);                        // Dec CTR and loop if not zero.
1358 
1359         // Restore DSCR pre-fetch value.
1360         if (VM_Version::has_mfdscr()) {
1361           __ load_const_optimized(tmp2, VM_Version::_dscr_val);
1362           __ mtdscr(tmp2);
1363         }
1364 
1365       }
1366     } // FasterArrayCopy
1367     __ bind(l_6);
1368 
1369     // copy 2 elements at a time
1370     { // FasterArrayCopy
1371       __ cmpwi(CCR0, R5_ARG3, 2);
1372       __ blt(CCR0, l_1);
1373       __ srdi(tmp1, R5_ARG3, 1);
1374       __ andi_(R5_ARG3, R5_ARG3, 1);
1375 
1376       __ addi(R3_ARG1, R3_ARG1, -4);
1377       __ addi(R4_ARG2, R4_ARG2, -4);
1378       __ mtctr(tmp1);
1379 
1380       __ bind(l_3);
1381       __ lwzu(tmp2, 4, R3_ARG1);
1382       __ stwu(tmp2, 4, R4_ARG2);
1383       __ bdnz(l_3);
1384 
1385       __ addi(R3_ARG1, R3_ARG1, 4);
1386       __ addi(R4_ARG2, R4_ARG2, 4);
1387     }
1388 
1389     // do single element copy
1390     __ bind(l_1);
1391     __ cmpwi(CCR0, R5_ARG3, 0);
1392     __ beq(CCR0, l_4);
1393 
1394     { // FasterArrayCopy
1395       __ mtctr(R5_ARG3);
1396       __ addi(R3_ARG1, R3_ARG1, -2);
1397       __ addi(R4_ARG2, R4_ARG2, -2);
1398 
1399       __ bind(l_5);
1400       __ lhzu(tmp2, 2, R3_ARG1);
1401       __ sthu(tmp2, 2, R4_ARG2);
1402       __ bdnz(l_5);
1403     }
1404     __ bind(l_4);
1405     __ li(R3_RET, 0); // return 0
1406     __ blr();
1407 
1408     return start;
1409   }
1410 
1411   // Generate stub for conjoint short copy.  If "aligned" is true, the
1412   // "from" and "to" addresses are assumed to be heapword aligned.
1413   //
1414   // Arguments for generated stub:
1415   //      from:  R3_ARG1
1416   //      to:    R4_ARG2
1417   //      count: R5_ARG3 treated as signed
1418   //
1419   address generate_conjoint_short_copy(bool aligned, const char * name) {
1420     StubCodeMark mark(this, "StubRoutines", name);
1421     address start = __ function_entry();
1422     assert_positive_int(R5_ARG3);
1423 
1424     Register tmp1 = R6_ARG4;
1425     Register tmp2 = R7_ARG5;
1426     Register tmp3 = R8_ARG6;
1427 
1428     address nooverlap_target = aligned ?
1429       STUB_ENTRY(arrayof_jshort_disjoint_arraycopy) :
1430       STUB_ENTRY(jshort_disjoint_arraycopy);
1431 
1432     array_overlap_test(nooverlap_target, 1);
1433 
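    // Byte-indexed backward copy, roughly (illustrative):
    //   for (long off = 2*(long)count - 2; off >= 0; off -= 2)
    //     *(jshort*)((char*)to + off) = *(jshort*)((char*)from + off);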
1434     Label l_1, l_2;
1435     __ sldi(tmp1, R5_ARG3, 1);
1436     __ b(l_2);
1437     __ bind(l_1);
1438     __ sthx(tmp2, R4_ARG2, tmp1);
1439     __ bind(l_2);
1440     __ addic_(tmp1, tmp1, -2);
1441     __ lhzx(tmp2, R3_ARG1, tmp1);
1442     __ bge(CCR0, l_1);
1443 
1444     __ li(R3_RET, 0); // return 0
1445     __ blr();
1446 
1447     return start;
1448   }
1449 
1450   // Generate core code for disjoint int copy (and oop copy on 32-bit).  If "aligned"
1451   // is true, the "from" and "to" addresses are assumed to be heapword aligned.
1452   //
1453   // Arguments:
1454   //      from:  R3_ARG1
1455   //      to:    R4_ARG2
1456   //      count: R5_ARG3 treated as signed
1457   //
1458   void generate_disjoint_int_copy_core(bool aligned) {
1459     Register tmp1 = R6_ARG4;
1460     Register tmp2 = R7_ARG5;
1461     Register tmp3 = R8_ARG6;
1462     Register tmp4 = R0;
1463 
1464     VectorSRegister tmp_vsr1  = VSR1;
1465     VectorSRegister tmp_vsr2  = VSR2;
1466 
1467     Label l_1, l_2, l_3, l_4, l_5, l_6, l_7;
1468 
1469     // for short arrays, just do single element copy
1470     __ li(tmp3, 0);
1471     __ cmpwi(CCR0, R5_ARG3, 5);
1472     __ ble(CCR0, l_2);
1473 
1474     if (!aligned) {
1475         // check if arrays have same alignment mod 8.
1476         __ xorr(tmp1, R3_ARG1, R4_ARG2);
1477         __ andi_(R0, tmp1, 7);
1478         // Not the same alignment, but ld and std just need to be 4 byte aligned.
1479         __ bne(CCR0, l_4); // to OR from is 8 byte aligned -> copy 2 at a time
1480 
1481         // copy 1 element to align to and from on an 8 byte boundary
1482         __ andi_(R0, R3_ARG1, 7);
1483         __ beq(CCR0, l_4);
1484 
1485         __ lwzx(tmp2, R3_ARG1, tmp3);
1486         __ addi(R5_ARG3, R5_ARG3, -1);
1487         __ stwx(tmp2, R4_ARG2, tmp3);
1488         { // FasterArrayCopy
1489           __ addi(R3_ARG1, R3_ARG1, 4);
1490           __ addi(R4_ARG2, R4_ARG2, 4);
1491         }
1492         __ bind(l_4);
1493       }
1494 
1495     { // FasterArrayCopy
1496       __ cmpwi(CCR0, R5_ARG3, 7);
1497       __ ble(CCR0, l_2); // copy 1 at a time if less than 8 elements remain
1498 
1499       __ srdi(tmp1, R5_ARG3, 3);
1500       __ andi_(R5_ARG3, R5_ARG3, 7);
1501       __ mtctr(tmp1);
1502 
1503      if (!VM_Version::has_vsx()) {
1504 
1505       __ bind(l_6);
1506       // Use unrolled version for mass copying (copy 8 elements at a time).
1507       // Load feeding store gets zero latency on Power6, however not on Power5.
1508       // Therefore, the following sequence is made for the good of both.
1509       __ ld(tmp1, 0, R3_ARG1);
1510       __ ld(tmp2, 8, R3_ARG1);
1511       __ ld(tmp3, 16, R3_ARG1);
1512       __ ld(tmp4, 24, R3_ARG1);
1513       __ std(tmp1, 0, R4_ARG2);
1514       __ std(tmp2, 8, R4_ARG2);
1515       __ std(tmp3, 16, R4_ARG2);
1516       __ std(tmp4, 24, R4_ARG2);
1517       __ addi(R3_ARG1, R3_ARG1, 32);
1518       __ addi(R4_ARG2, R4_ARG2, 32);
1519       __ bdnz(l_6);
1520 
1521     } else { // Processor supports VSX, so use it to mass copy.
1522 
1523       // Prefetch the data into the L2 cache.
1524       __ dcbt(R3_ARG1, 0);
1525 
1526       // If supported set DSCR pre-fetch to deepest.
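           // (The low three bits of the DSCR form the default prefetch depth
           // field; OR-ing in 7 selects the deepest prefetch setting.)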
1527       if (VM_Version::has_mfdscr()) {
1528         __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
1529         __ mtdscr(tmp2);
1530       }
1531 
1532       __ li(tmp1, 16);
1533 
1534       // Align the backbranch target to 32 bytes rather than 16: the loop
1535       // below has fewer than 8 instructions, so it fits completely within
1536       // a single 32-byte i-cache sector.
1537       __ align(32);
1538 
1539       __ bind(l_7);
1540       // Use loop with VSX load/store instructions to
1541       // copy 8 elements at a time.
1542       __ lxvd2x(tmp_vsr1, R3_ARG1);        // Load src
1543       __ stxvd2x(tmp_vsr1, R4_ARG2);       // Store to dst
1544       __ lxvd2x(tmp_vsr2, tmp1, R3_ARG1);  // Load src + 16
1545       __ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst + 16
1546       __ addi(R3_ARG1, R3_ARG1, 32);       // Update src+=32
1547       __ addi(R4_ARG2, R4_ARG2, 32);       // Update dst+=32
1548       __ bdnz(l_7);                        // Dec CTR and loop if not zero.
1549 
1550       // Restore DSCR pre-fetch value.
1551       if (VM_Version::has_mfdscr()) {
1552         __ load_const_optimized(tmp2, VM_Version::_dscr_val);
1553         __ mtdscr(tmp2);
1554       }
1555 
1556     } // VSX
1557    } // FasterArrayCopy
1558 
1559     // copy 1 element at a time
1560     __ bind(l_2);
1561     __ cmpwi(CCR0, R5_ARG3, 0);
1562     __ beq(CCR0, l_1);
1563 
1564     { // FasterArrayCopy
1565       __ mtctr(R5_ARG3);
1566       __ addi(R3_ARG1, R3_ARG1, -4);
1567       __ addi(R4_ARG2, R4_ARG2, -4);
1568 
1569       __ bind(l_3);
1570       __ lwzu(tmp2, 4, R3_ARG1);
1571       __ stwu(tmp2, 4, R4_ARG2);
1572       __ bdnz(l_3);
1573     }
1574 
1575     __ bind(l_1);
1576     return;
1577   }
1578 
1579   // Generate stub for disjoint int copy.  If "aligned" is true, the
1580   // "from" and "to" addresses are assumed to be heapword aligned.
1581   //
1582   // Arguments for generated stub:
1583   //      from:  R3_ARG1
1584   //      to:    R4_ARG2
1585   //      count: R5_ARG3 treated as signed
1586   //
1587   address generate_disjoint_int_copy(bool aligned, const char * name) {
1588     StubCodeMark mark(this, "StubRoutines", name);
1589     address start = __ function_entry();
1590     assert_positive_int(R5_ARG3);
1591     generate_disjoint_int_copy_core(aligned);
1592     __ li(R3_RET, 0); // return 0
1593     __ blr();
1594     return start;
1595   }
1596 
1597   // Generate core code for conjoint int copy (and oop copy on
1598   // 32-bit).  If "aligned" is true, the "from" and "to" addresses
1599   // are assumed to be heapword aligned.
1600   //
1601   // Arguments:
1602   //      from:  R3_ARG1
1603   //      to:    R4_ARG2
1604   //      count: R5_ARG3 treated as signed
1605   //
1606   void generate_conjoint_int_copy_core(bool aligned) {
1607     // Do reverse copy.  We assume the case of actual overlap is rare enough
1608     // that we don't have to optimize it.
1609 
1610     Label l_1, l_2, l_3, l_4, l_5, l_6, l_7;
1611 
1612     Register tmp1 = R6_ARG4;
1613     Register tmp2 = R7_ARG5;
1614     Register tmp3 = R8_ARG6;
1615     Register tmp4 = R0;
1616 
1617     VectorSRegister tmp_vsr1  = VSR1;
1618     VectorSRegister tmp_vsr2  = VSR2;
1619 
1620     { // FasterArrayCopy
1621       __ cmpwi(CCR0, R5_ARG3, 0);
1622       __ beq(CCR0, l_6);
1623 
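           // The copy runs backwards: convert the count to bytes, point from/to
           // just past the last element, then restore the element count.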
1624       __ sldi(R5_ARG3, R5_ARG3, 2);
1625       __ add(R3_ARG1, R3_ARG1, R5_ARG3);
1626       __ add(R4_ARG2, R4_ARG2, R5_ARG3);
1627       __ srdi(R5_ARG3, R5_ARG3, 2);
1628 
1629       if (!aligned) {
1630         // check if arrays have same alignment mod 8.
1631         __ xorr(tmp1, R3_ARG1, R4_ARG2);
1632         __ andi_(R0, tmp1, 7);
1633         // Not the same alignment, but ld and std just need to be 4 byte aligned.
1634         __ bne(CCR0, l_7); // to OR from is 8 byte aligned -> copy 2 at a time
1635 
1636         // copy 1 element to align to and from on an 8 byte boundary
1637         __ andi_(R0, R3_ARG1, 7);
1638         __ beq(CCR0, l_7);
1639 
1640         __ addi(R3_ARG1, R3_ARG1, -4);
1641         __ addi(R4_ARG2, R4_ARG2, -4);
1642         __ addi(R5_ARG3, R5_ARG3, -1);
1643         __ lwzx(tmp2, R3_ARG1);
1644         __ stwx(tmp2, R4_ARG2);
1645         __ bind(l_7);
1646       }
1647 
1648       __ cmpwi(CCR0, R5_ARG3, 7);
1649       __ ble(CCR0, l_5); // copy 1 at a time if less than 8 elements remain
1650 
1651       __ srdi(tmp1, R5_ARG3, 3);
1652       __ andi(R5_ARG3, R5_ARG3, 7);
1653       __ mtctr(tmp1);
1654 
1655      if (!VM_Version::has_vsx()) {
1656       __ bind(l_4);
1657       // Use unrolled version for mass copying (copy 8 elements at a time).
1658       // Load feeding store gets zero latency on Power6, however not on Power5.
1659       // Therefore, the following sequence is made for the good of both.
1660       __ addi(R3_ARG1, R3_ARG1, -32);
1661       __ addi(R4_ARG2, R4_ARG2, -32);
1662       __ ld(tmp4, 24, R3_ARG1);
1663       __ ld(tmp3, 16, R3_ARG1);
1664       __ ld(tmp2, 8, R3_ARG1);
1665       __ ld(tmp1, 0, R3_ARG1);
1666       __ std(tmp4, 24, R4_ARG2);
1667       __ std(tmp3, 16, R4_ARG2);
1668       __ std(tmp2, 8, R4_ARG2);
1669       __ std(tmp1, 0, R4_ARG2);
1670       __ bdnz(l_4);
1671      } else {  // Processor supports VSX, so use it to mass copy.
1672       // Prefetch the data into the L2 cache.
1673       __ dcbt(R3_ARG1, 0);
1674 
1675       // If supported set DSCR pre-fetch to deepest.
1676       if (VM_Version::has_mfdscr()) {
1677         __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
1678         __ mtdscr(tmp2);
1679       }
1680 
1681       __ li(tmp1, 16);
1682 
1683       // Align the backbranch target to 32 bytes rather than 16: the loop
1684       // below has fewer than 8 instructions, so it fits completely within
1685       // a single 32-byte i-cache sector.
1686       __ align(32);
1687 
1688       __ bind(l_4);
1689       // Use loop with VSX load/store instructions to
1690       // copy 8 elements at a time.
1691       __ addi(R3_ARG1, R3_ARG1, -32);      // Update src-=32
1692       __ addi(R4_ARG2, R4_ARG2, -32);      // Update dst-=32
1693       __ lxvd2x(tmp_vsr2, tmp1, R3_ARG1);  // Load src+16
1694       __ lxvd2x(tmp_vsr1, R3_ARG1);        // Load src
1695       __ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst+16
1696       __ stxvd2x(tmp_vsr1, R4_ARG2);       // Store to dst
1697       __ bdnz(l_4);
1698 
1699       // Restore DSCR pre-fetch value.
1700       if (VM_Version::has_mfdscr()) {
1701         __ load_const_optimized(tmp2, VM_Version::_dscr_val);
1702         __ mtdscr(tmp2);
1703       }
1704      }
1705 
1706       __ cmpwi(CCR0, R5_ARG3, 0);
1707       __ beq(CCR0, l_6);
1708 
1709       __ bind(l_5);
1710       __ mtctr(R5_ARG3);
1711       __ bind(l_3);
1712       __ lwz(R0, -4, R3_ARG1);
1713       __ stw(R0, -4, R4_ARG2);
1714       __ addi(R3_ARG1, R3_ARG1, -4);
1715       __ addi(R4_ARG2, R4_ARG2, -4);
1716       __ bdnz(l_3);
1717 
1718       __ bind(l_6);
1719     }
1720   }
1721 
1722   // Generate stub for conjoint int copy.  If "aligned" is true, the
1723   // "from" and "to" addresses are assumed to be heapword aligned.
1724   //
1725   // Arguments for generated stub:
1726   //      from:  R3_ARG1
1727   //      to:    R4_ARG2
1728   //      count: R5_ARG3 treated as signed
1729   //
1730   address generate_conjoint_int_copy(bool aligned, const char * name) {
1731     StubCodeMark mark(this, "StubRoutines", name);
1732     address start = __ function_entry();
1733     assert_positive_int(R5_ARG3);
1734     address nooverlap_target = aligned ?
1735       STUB_ENTRY(arrayof_jint_disjoint_arraycopy) :
1736       STUB_ENTRY(jint_disjoint_arraycopy);
1737 
1738     array_overlap_test(nooverlap_target, 2);
1739 
1740     generate_conjoint_int_copy_core(aligned);
1741 
1742     __ li(R3_RET, 0); // return 0
1743     __ blr();
1744 
1745     return start;
1746   }
1747 
1748   // Generate core code for disjoint long copy (and oop copy on
1749   // 64-bit).  If "aligned" is true, the "from" and "to" addresses
1750   // are assumed to be heapword aligned.
1751   //
1752   // Arguments:
1753   //      from:  R3_ARG1
1754   //      to:    R4_ARG2
1755   //      count: R5_ARG3 treated as signed
1756   //
1757   void generate_disjoint_long_copy_core(bool aligned) {
1758     Register tmp1 = R6_ARG4;
1759     Register tmp2 = R7_ARG5;
1760     Register tmp3 = R8_ARG6;
1761     Register tmp4 = R0;
1762 
1763     Label l_1, l_2, l_3, l_4, l_5;
1764 
1765     VectorSRegister tmp_vsr1  = VSR1;
1766     VectorSRegister tmp_vsr2  = VSR2;
1767 
1768     { // FasterArrayCopy
1769       __ cmpwi(CCR0, R5_ARG3, 3);
1770       __ ble(CCR0, l_3); // copy 1 at a time if less than 4 elements remain
1771 
1772       __ srdi(tmp1, R5_ARG3, 2);
1773       __ andi_(R5_ARG3, R5_ARG3, 3);
1774       __ mtctr(tmp1);
1775 
1776     if (!VM_Version::has_vsx()) {
1777       __ bind(l_4);
1778       // Use unrolled version for mass copying (copy 4 elements at a time).
1779       // Load feeding store gets zero latency on Power6, however not on Power5.
1780       // Therefore, the following sequence is made for the good of both.
1781       __ ld(tmp1, 0, R3_ARG1);
1782       __ ld(tmp2, 8, R3_ARG1);
1783       __ ld(tmp3, 16, R3_ARG1);
1784       __ ld(tmp4, 24, R3_ARG1);
1785       __ std(tmp1, 0, R4_ARG2);
1786       __ std(tmp2, 8, R4_ARG2);
1787       __ std(tmp3, 16, R4_ARG2);
1788       __ std(tmp4, 24, R4_ARG2);
1789       __ addi(R3_ARG1, R3_ARG1, 32);
1790       __ addi(R4_ARG2, R4_ARG2, 32);
1791       __ bdnz(l_4);
1792 
1793     } else { // Processor supports VSX, so use it to mass copy.
1794 
1795       // Prefetch the data into the L2 cache.
1796       __ dcbt(R3_ARG1, 0);
1797 
1798       // If supported set DSCR pre-fetch to deepest.
1799       if (VM_Version::has_mfdscr()) {
1800         __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
1801         __ mtdscr(tmp2);
1802       }
1803 
1804       __ li(tmp1, 16);
1805 
1806       // Align the backbranch target to 32 bytes rather than 16: the loop
1807       // below has fewer than 8 instructions, so it fits completely within
1808       // a single 32-byte i-cache sector.
1809       __ align(32);
1810 
1811       __ bind(l_5);
1812       // Use loop with VSX load/store instructions to
1813       // copy 4 elements at a time.
1814       __ lxvd2x(tmp_vsr1, R3_ARG1);        // Load src
1815       __ stxvd2x(tmp_vsr1, R4_ARG2);       // Store to dst
1816       __ lxvd2x(tmp_vsr2, tmp1, R3_ARG1);  // Load src + 16
1817       __ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst + 16
1818       __ addi(R3_ARG1, R3_ARG1, 32);       // Update src+=32
1819       __ addi(R4_ARG2, R4_ARG2, 32);       // Update dst+=32
1820       __ bdnz(l_5);                        // Dec CTR and loop if not zero.
1821 
1822       // Restore DSCR pre-fetch value.
1823       if (VM_Version::has_mfdscr()) {
1824         __ load_const_optimized(tmp2, VM_Version::_dscr_val);
1825         __ mtdscr(tmp2);
1826       }
1827 
1828     } // VSX
1829    } // FasterArrayCopy
1830 
1831     // copy 1 element at a time
1832     __ bind(l_3);
1833     __ cmpwi(CCR0, R5_ARG3, 0);
1834     __ beq(CCR0, l_1);
1835 
1836     { // FasterArrayCopy
1837       __ mtctr(R5_ARG3);
1838       __ addi(R3_ARG1, R3_ARG1, -8);
1839       __ addi(R4_ARG2, R4_ARG2, -8);
1840 
1841       __ bind(l_2);
1842       __ ldu(R0, 8, R3_ARG1);
1843       __ stdu(R0, 8, R4_ARG2);
1844       __ bdnz(l_2);
1845 
1846     }
1847     __ bind(l_1);
1848   }
1849 
1850   // Generate stub for disjoint long copy.  If "aligned" is true, the
1851   // "from" and "to" addresses are assumed to be heapword aligned.
1852   //
1853   // Arguments for generated stub:
1854   //      from:  R3_ARG1
1855   //      to:    R4_ARG2
1856   //      count: R5_ARG3 treated as signed
1857   //
1858   address generate_disjoint_long_copy(bool aligned, const char * name) {
1859     StubCodeMark mark(this, "StubRoutines", name);
1860     address start = __ function_entry();
1861     assert_positive_int(R5_ARG3);
1862     generate_disjoint_long_copy_core(aligned);
1863     __ li(R3_RET, 0); // return 0
1864     __ blr();
1865 
1866     return start;
1867   }
1868 
1869   // Generate core code for conjoint long copy (and oop copy on
1870   // 64-bit).  If "aligned" is true, the "from" and "to" addresses
1871   // are assumed to be heapword aligned.
1872   //
1873   // Arguments:
1874   //      from:  R3_ARG1
1875   //      to:    R4_ARG2
1876   //      count: R5_ARG3 treated as signed
1877   //
1878   void generate_conjoint_long_copy_core(bool aligned) {
1879     Register tmp1 = R6_ARG4;
1880     Register tmp2 = R7_ARG5;
1881     Register tmp3 = R8_ARG6;
1882     Register tmp4 = R0;
1883 
1884     VectorSRegister tmp_vsr1  = VSR1;
1885     VectorSRegister tmp_vsr2  = VSR2;
1886 
1887     Label l_1, l_2, l_3, l_4, l_5;
1888 
1889     __ cmpwi(CCR0, R5_ARG3, 0);
1890     __ beq(CCR0, l_1);
1891 
1892     { // FasterArrayCopy
1893       __ sldi(R5_ARG3, R5_ARG3, 3);
1894       __ add(R3_ARG1, R3_ARG1, R5_ARG3);
1895       __ add(R4_ARG2, R4_ARG2, R5_ARG3);
1896       __ srdi(R5_ARG3, R5_ARG3, 3);
1897 
1898       __ cmpwi(CCR0, R5_ARG3, 3);
1899       __ ble(CCR0, l_5); // copy 1 at a time if less than 4 elements remain
1900 
1901       __ srdi(tmp1, R5_ARG3, 2);
1902       __ andi(R5_ARG3, R5_ARG3, 3);
1903       __ mtctr(tmp1);
1904 
1905      if (!VM_Version::has_vsx()) {
1906       __ bind(l_4);
1907       // Use unrolled version for mass copying (copy 4 elements at a time).
1908       // Load feeding store gets zero latency on Power6, however not on Power5.
1909       // Therefore, the following sequence is made for the good of both.
1910       __ addi(R3_ARG1, R3_ARG1, -32);
1911       __ addi(R4_ARG2, R4_ARG2, -32);
1912       __ ld(tmp4, 24, R3_ARG1);
1913       __ ld(tmp3, 16, R3_ARG1);
1914       __ ld(tmp2, 8, R3_ARG1);
1915       __ ld(tmp1, 0, R3_ARG1);
1916       __ std(tmp4, 24, R4_ARG2);
1917       __ std(tmp3, 16, R4_ARG2);
1918       __ std(tmp2, 8, R4_ARG2);
1919       __ std(tmp1, 0, R4_ARG2);
1920       __ bdnz(l_4);
1921      } else { // Processor supports VSX, so use it to mass copy.
1922       // Prefetch the data into the L2 cache.
1923       __ dcbt(R3_ARG1, 0);
1924 
1925       // If supported set DSCR pre-fetch to deepest.
1926       if (VM_Version::has_mfdscr()) {
1927         __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
1928         __ mtdscr(tmp2);
1929       }
1930 
1931       __ li(tmp1, 16);
1932 
1933       // Align the backbranch target to 32 bytes rather than 16: the loop
1934       // below has fewer than 8 instructions, so it fits completely within
1935       // a single 32-byte i-cache sector.
1936       __ align(32);
1937 
1938       __ bind(l_4);
1939       // Use loop with VSX load/store instructions to
1940       // copy 4 elements at a time.
1941       __ addi(R3_ARG1, R3_ARG1, -32);      // Update src-=32
1942       __ addi(R4_ARG2, R4_ARG2, -32);      // Update dst-=32
1943       __ lxvd2x(tmp_vsr2, tmp1, R3_ARG1);  // Load src+16
1944       __ lxvd2x(tmp_vsr1, R3_ARG1);        // Load src
1945       __ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst+16
1946       __ stxvd2x(tmp_vsr1, R4_ARG2);       // Store to dst
1947       __ bdnz(l_4);
1948 
1949       // Restore DSCR pre-fetch value.
1950       if (VM_Version::has_mfdscr()) {
1951         __ load_const_optimized(tmp2, VM_Version::_dscr_val);
1952         __ mtdscr(tmp2);
1953       }
1954      }
1955 
1956       __ cmpwi(CCR0, R5_ARG3, 0);
1957       __ beq(CCR0, l_1);
1958 
1959       __ bind(l_5);
1960       __ mtctr(R5_ARG3);
1961       __ bind(l_3);
1962       __ ld(R0, -8, R3_ARG1);
1963       __ std(R0, -8, R4_ARG2);
1964       __ addi(R3_ARG1, R3_ARG1, -8);
1965       __ addi(R4_ARG2, R4_ARG2, -8);
1966       __ bdnz(l_3);
1967 
1968     }
1969     __ bind(l_1);
1970   }
1971 
1972   // Generate stub for conjoint long copy.  If "aligned" is true, the
1973   // "from" and "to" addresses are assumed to be heapword aligned.
1974   //
1975   // Arguments for generated stub:
1976   //      from:  R3_ARG1
1977   //      to:    R4_ARG2
1978   //      count: R5_ARG3 treated as signed
1979   //
1980   address generate_conjoint_long_copy(bool aligned, const char * name) {
1981     StubCodeMark mark(this, "StubRoutines", name);
1982     address start = __ function_entry();
1983     assert_positive_int(R5_ARG3);
1984     address nooverlap_target = aligned ?
1985       STUB_ENTRY(arrayof_jlong_disjoint_arraycopy) :
1986       STUB_ENTRY(jlong_disjoint_arraycopy);
1987 
1988     array_overlap_test(nooverlap_target, 3);
1989     generate_conjoint_long_copy_core(aligned);
1990 
1991     __ li(R3_RET, 0); // return 0
1992     __ blr();
1993 
1994     return start;
1995   }
1996 
1997   // Generate stub for conjoint oop copy.  If "aligned" is true, the
1998   // "from" and "to" addresses are assumed to be heapword aligned.
1999   //
2000   // Arguments for generated stub:
2001   //      from:  R3_ARG1
2002   //      to:    R4_ARG2
2003   //      count: R5_ARG3 treated as signed
2004   //      dest_uninitialized: G1 support
2005   //
2006   address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
2007     StubCodeMark mark(this, "StubRoutines", name);
2008 
2009     address start = __ function_entry();
2010     assert_positive_int(R5_ARG3);
2011     address nooverlap_target = aligned ?
2012       STUB_ENTRY(arrayof_oop_disjoint_arraycopy) :
2013       STUB_ENTRY(oop_disjoint_arraycopy);
2014 
2015     DecoratorSet decorators = IN_HEAP | IS_ARRAY;
2016     if (dest_uninitialized) {
2017       decorators |= IS_DEST_UNINITIALIZED;
2018     }
2019     if (aligned) {
2020       decorators |= ARRAYCOPY_ALIGNED;
2021     }
2022 
2023     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
2024     bs->arraycopy_prologue(_masm, decorators, T_OBJECT, R3_ARG1, R4_ARG2, R5_ARG3, noreg, noreg);
2025 
2026     if (UseCompressedOops) {
2027       array_overlap_test(nooverlap_target, 2);
2028       generate_conjoint_int_copy_core(aligned);
2029     } else {
2030       array_overlap_test(nooverlap_target, 3);
2031       generate_conjoint_long_copy_core(aligned);
2032     }
2033 
2034     bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, R4_ARG2, R5_ARG3, noreg);
2035     __ li(R3_RET, 0); // return 0
2036     __ blr();
2037     return start;
2038   }
2039 
2040   // Generate stub for disjoint oop copy.  If "aligned" is true, the
2041   // "from" and "to" addresses are assumed to be heapword aligned.
2042   //
2043   // Arguments for generated stub:
2044   //      from:  R3_ARG1
2045   //      to:    R4_ARG2
2046   //      count: R5_ARG3 treated as signed
2047   //      dest_uninitialized: G1 support
2048   //
2049   address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
2050     StubCodeMark mark(this, "StubRoutines", name);
2051     address start = __ function_entry();
2052     assert_positive_int(R5_ARG3);
2053 
2054     DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT;
2055     if (dest_uninitialized) {
2056       decorators |= IS_DEST_UNINITIALIZED;
2057     }
2058     if (aligned) {
2059       decorators |= ARRAYCOPY_ALIGNED;
2060     }
2061 
2062     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
2063     bs->arraycopy_prologue(_masm, decorators, T_OBJECT, R3_ARG1, R4_ARG2, R5_ARG3, noreg, noreg);
2064 
2065     if (UseCompressedOops) {
2066       generate_disjoint_int_copy_core(aligned);
2067     } else {
2068       generate_disjoint_long_copy_core(aligned);
2069     }
2070 
2071     bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, R4_ARG2, R5_ARG3, noreg);
2072     __ li(R3_RET, 0); // return 0
2073     __ blr();
2074 
2075     return start;
2076   }
2077 
2078 
2079   // Helper for generating a dynamic type check.
2080   // Smashes only the given temp registers.
2081   void generate_type_check(Register sub_klass,
2082                            Register super_check_offset,
2083                            Register super_klass,
2084                            Register temp,
2085                            Label& L_success) {
2086     assert_different_registers(sub_klass, super_check_offset, super_klass);
2087 
2088     BLOCK_COMMENT("type_check:");
2089 
2090     Label L_miss;
2091 
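         // The fast path checks the cached superclass at super_check_offset;
         // on a miss, the slow path scans the secondary supers array.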
2092     __ check_klass_subtype_fast_path(sub_klass, super_klass, temp, R0, &L_success, &L_miss, NULL,
2093                                      super_check_offset);
2094     __ check_klass_subtype_slow_path(sub_klass, super_klass, temp, R0, &L_success, NULL);
2095 
2096     // Fall through on failure!
2097     __ bind(L_miss);
2098   }
2099 
2100 
2101   //  Generate stub for checked oop copy.
2102   //
2103   // Arguments for generated stub:
2104   //      from:  R3
2105   //      to:    R4
2106   //      count: R5 treated as signed
2107   //      ckoff: R6 (super_check_offset)
2108   //      ckval: R7 (super_klass)
2109   //      ret:   R3 zero for success; (-1^K) where K is partial transfer count
2110   //
2111   address generate_checkcast_copy(const char *name, bool dest_uninitialized) {
2112 
2113     const Register R3_from   = R3_ARG1;      // source array address
2114     const Register R4_to     = R4_ARG2;      // destination array address
2115     const Register R5_count  = R5_ARG3;      // elements count
2116     const Register R6_ckoff  = R6_ARG4;      // super_check_offset
2117     const Register R7_ckval  = R7_ARG5;      // super_klass
2118 
2119     const Register R8_offset = R8_ARG6;      // loop var, with stride wordSize
2120     const Register R9_remain = R9_ARG7;      // loop var, with stride -1
2121     const Register R10_oop   = R10_ARG8;     // actual oop copied
2122     const Register R11_klass = R11_scratch1; // oop._klass
2123     const Register R12_tmp   = R12_scratch2;
2124 
2125     const Register R2_minus1 = R2;
2126 
2127     //__ align(CodeEntryAlignment);
2128     StubCodeMark mark(this, "StubRoutines", name);
2129     address start = __ function_entry();
2130 
2131     // Assert that int is 64 bit sign extended and arrays are not conjoint.
2132 #ifdef ASSERT
2133     {
2134     assert_positive_int(R5_ARG3);
2135     const Register tmp1 = R11_scratch1, tmp2 = R12_scratch2;
2136     Label no_overlap;
2137     __ subf(tmp1, R3_ARG1, R4_ARG2); // distance in bytes
2138     __ sldi(tmp2, R5_ARG3, LogBytesPerHeapOop); // size in bytes
2139     __ cmpld(CCR0, R3_ARG1, R4_ARG2); // Use unsigned comparison!
2140     __ cmpld(CCR1, tmp1, tmp2);
2141     __ crnand(CCR0, Assembler::less, CCR1, Assembler::less);
2142     // Overlaps if Src before dst and distance smaller than size.
2143     // Branch to forward copy routine otherwise.
2144     __ blt(CCR0, no_overlap);
2145     __ stop("overlap in checkcast_copy", 0x9543);
2146     __ bind(no_overlap);
2147     }
2148 #endif
2149 
2150     DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_CHECKCAST;
2151     if (dest_uninitialized) {
2152       decorators |= IS_DEST_UNINITIALIZED;
2153     }
2154 
2155     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
2156     bs->arraycopy_prologue(_masm, decorators, T_OBJECT, R3_from, R4_to, R5_count, /* preserve: */ R6_ckoff, R7_ckval);
2157 
2158     //inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, R12_tmp, R3_RET);
2159 
2160     Label load_element, store_element, store_null, success, do_epilogue;
2161     __ or_(R9_remain, R5_count, R5_count); // Initialize loop index, and test it.
2162     __ li(R8_offset, 0);                   // Offset from start of arrays.
2163     __ li(R2_minus1, -1);
2164     __ bne(CCR0, load_element);
2165 
2166     // Empty array: Nothing to do.
2167     __ li(R3_RET, 0);           // Return 0 on (trivial) success.
2168     __ blr();
2169 
2170     // ======== begin loop ========
2171     // (Entry is load_element.)
2172     __ align(OptoLoopAlignment);
2173     __ bind(store_element);
2174     if (UseCompressedOops) {
2175       __ encode_heap_oop_not_null(R10_oop);
2176       __ bind(store_null);
2177       __ stw(R10_oop, R8_offset, R4_to);
2178     } else {
2179       __ bind(store_null);
2180       __ std(R10_oop, R8_offset, R4_to);
2181     }
2182 
2183     __ addi(R8_offset, R8_offset, heapOopSize);   // Step to next offset.
2184     __ add_(R9_remain, R2_minus1, R9_remain);     // Decrement the count.
2185     __ beq(CCR0, success);
2186 
2187     // ======== loop entry is here ========
2188     __ bind(load_element);
2189     __ load_heap_oop(R10_oop, R8_offset, R3_from, R12_tmp, noreg, false, AS_RAW, &store_null);
2190 
2191     __ load_klass(R11_klass, R10_oop); // Query the object klass.
2192 
2193     generate_type_check(R11_klass, R6_ckoff, R7_ckval, R12_tmp,
2194                         // Branch to this on success:
2195                         store_element);
2196     // ======== end loop ========
2197 
2198     // It was a real error; we must depend on the caller to finish the job.
2199     // Register R9_remain has number of *remaining* oops, R5_count number of *total* oops.
2200     // Emit GC store barriers for the oops we have copied (R5_count minus R9_remain),
2201     // and report their number to the caller.
2202     __ subf_(R5_count, R9_remain, R5_count);
2203     __ nand(R3_RET, R5_count, R5_count);   // report (-1^K) to caller
2204     __ bne(CCR0, do_epilogue);
2205     __ blr();
2206 
2207     __ bind(success);
2208     __ li(R3_RET, 0);
2209 
2210     __ bind(do_epilogue);
2211     bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, R4_to, R5_count, /* preserve */ R3_RET);
2212 
2213     __ blr();
2214     return start;
2215   }
2216 
2217 
2218   //  Generate 'unsafe' array copy stub.
2219   //  Though just as safe as the other stubs, it takes an unscaled
2220   //  size_t argument instead of an element count.
2221   //
2222   // Arguments for generated stub:
2223   //      from:  R3
2224   //      to:    R4
2225   //      count: R5 byte count, treated as ssize_t, can be zero
2226   //
2227   // Examines the alignment of the operands and dispatches
2228   // to a long, int, short, or byte copy loop.
2229   //
2230   address generate_unsafe_copy(const char* name,
2231                                address byte_copy_entry,
2232                                address short_copy_entry,
2233                                address int_copy_entry,
2234                                address long_copy_entry) {
2235 
2236     const Register R3_from   = R3_ARG1;      // source array address
2237     const Register R4_to     = R4_ARG2;      // destination array address
2238     const Register R5_count  = R5_ARG3;      // elements count (as long on PPC64)
2239 
2240     const Register R6_bits   = R6_ARG4;      // test copy of low bits
2241     const Register R7_tmp    = R7_ARG5;
2242 
2243     //__ align(CodeEntryAlignment);
2244     StubCodeMark mark(this, "StubRoutines", name);
2245     address start = __ function_entry();
2246 
2247     // Bump this on entry, not on exit:
2248     //inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr, R6_bits, R7_tmp);
2249 
2250     Label short_copy, int_copy, long_copy;
2251 
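         // OR from, to and count together: a set low-order bit in the result
         // means at least one of them is misaligned for that element size.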
2252     __ orr(R6_bits, R3_from, R4_to);
2253     __ orr(R6_bits, R6_bits, R5_count);
2254     __ andi_(R0, R6_bits, (BytesPerLong-1));
2255     __ beq(CCR0, long_copy);
2256 
2257     __ andi_(R0, R6_bits, (BytesPerInt-1));
2258     __ beq(CCR0, int_copy);
2259 
2260     __ andi_(R0, R6_bits, (BytesPerShort-1));
2261     __ beq(CCR0, short_copy);
2262 
2263     // byte_copy:
2264     __ b(byte_copy_entry);
2265 
2266     __ bind(short_copy);
2267     __ srwi(R5_count, R5_count, LogBytesPerShort);
2268     __ b(short_copy_entry);
2269 
2270     __ bind(int_copy);
2271     __ srwi(R5_count, R5_count, LogBytesPerInt);
2272     __ b(int_copy_entry);
2273 
2274     __ bind(long_copy);
2275     __ srwi(R5_count, R5_count, LogBytesPerLong);
2276     __ b(long_copy_entry);
2277 
2278     return start;
2279   }
2280 
2281 
2282   // Perform range checks on the proposed arraycopy.
2283   // Kills the two temps, but nothing else.
2284   // Also, clean the sign bits of src_pos and dst_pos.
2285   void arraycopy_range_checks(Register src,     // source array oop
2286                               Register src_pos, // source position
2287                               Register dst,     // destination array oop
2288                               Register dst_pos, // destination position
2289                               Register length,  // length of copy
2290                               Register temp1, Register temp2,
2291                               Label& L_failed) {
2292     BLOCK_COMMENT("arraycopy_range_checks:");
2293 
2294     const Register array_length = temp1;  // scratch
2295     const Register end_pos      = temp2;  // scratch
2296 
2297     //  if (src_pos + length > arrayOop(src)->length() ) FAIL;
2298     __ lwa(array_length, arrayOopDesc::length_offset_in_bytes(), src);
2299     __ add(end_pos, src_pos, length);  // src_pos + length
2300     __ cmpd(CCR0, end_pos, array_length);
2301     __ bgt(CCR0, L_failed);
2302 
2303     //  if (dst_pos + length > arrayOop(dst)->length() ) FAIL;
2304     __ lwa(array_length, arrayOopDesc::length_offset_in_bytes(), dst);
2305     __ add(end_pos, dst_pos, length);  // dst_pos + length
2306     __ cmpd(CCR0, end_pos, array_length);
2307     __ bgt(CCR0, L_failed);
2308 
2309     BLOCK_COMMENT("arraycopy_range_checks done");
2310   }
2311 
2312 
2313   //
2314   //  Generate generic array copy stubs
2315   //
2316   //  Input:
2317   //    R3    -  src oop
2318   //    R4    -  src_pos
2319   //    R5    -  dst oop
2320   //    R6    -  dst_pos
2321   //    R7    -  element count
2322   //
2323   //  Output:
2324   //    R3 ==  0  -  success
2325   //    R3 == -1  -  need to call System.arraycopy
2326   //
2327   address generate_generic_copy(const char *name,
2328                                 address entry_jbyte_arraycopy,
2329                                 address entry_jshort_arraycopy,
2330                                 address entry_jint_arraycopy,
2331                                 address entry_oop_arraycopy,
2332                                 address entry_disjoint_oop_arraycopy,
2333                                 address entry_jlong_arraycopy,
2334                                 address entry_checkcast_arraycopy) {
2335     Label L_failed, L_objArray;
2336 
2337     // Input registers
2338     const Register src       = R3_ARG1;  // source array oop
2339     const Register src_pos   = R4_ARG2;  // source position
2340     const Register dst       = R5_ARG3;  // destination array oop
2341     const Register dst_pos   = R6_ARG4;  // destination position
2342     const Register length    = R7_ARG5;  // elements count
2343 
2344     // registers used as temp
2345     const Register src_klass = R8_ARG6;  // source array klass
2346     const Register dst_klass = R9_ARG7;  // destination array klass
2347     const Register lh        = R10_ARG8; // layout handler
2348     const Register temp      = R2;
2349 
2350     //__ align(CodeEntryAlignment);
2351     StubCodeMark mark(this, "StubRoutines", name);
2352     address start = __ function_entry();
2353 
2354     // Bump this on entry, not on exit:
2355     //inc_counter_np(SharedRuntime::_generic_array_copy_ctr, lh, temp);
2356 
2357     // In principle, the int arguments could be dirty.
2358 
2359     //-----------------------------------------------------------------------
2360     // Assembler stubs will be used for this call to arraycopy
2361     // if the following conditions are met:
2362     //
2363     // (1) src and dst must not be null.
2364     // (2) src_pos must not be negative.
2365     // (3) dst_pos must not be negative.
2366     // (4) length  must not be negative.
2367     // (5) src klass and dst klass should be the same and not NULL.
2368     // (6) src and dst should be arrays.
2369     // (7) src_pos + length must not exceed length of src.
2370     // (8) dst_pos + length must not exceed length of dst.
2371     BLOCK_COMMENT("arraycopy initial argument checks");
2372 
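         // The extsw_ instructions sign-extend the (possibly dirty) 32-bit int
         // arguments and set CR0; every failure condition is OR-ed into CCR1's
         // EQ bit below so a single branch rejects all bad inputs.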
2373     __ cmpdi(CCR1, src, 0);      // if (src == NULL) return -1;
2374     __ extsw_(src_pos, src_pos); // if (src_pos < 0) return -1;
2375     __ cmpdi(CCR5, dst, 0);      // if (dst == NULL) return -1;
2376     __ cror(CCR1, Assembler::equal, CCR0, Assembler::less);
2377     __ extsw_(dst_pos, dst_pos); // if (dst_pos < 0) return -1;
2378     __ cror(CCR5, Assembler::equal, CCR0, Assembler::less);
2379     __ extsw_(length, length);   // if (length < 0) return -1;
2380     __ cror(CCR1, Assembler::equal, CCR5, Assembler::equal);
2381     __ cror(CCR1, Assembler::equal, CCR0, Assembler::less);
2382     __ beq(CCR1, L_failed);
2383 
2384     BLOCK_COMMENT("arraycopy argument klass checks");
2385     __ load_klass(src_klass, src);
2386     __ load_klass(dst_klass, dst);
2387 
2388     // Load layout helper
2389     //
2390     //  |array_tag|     | header_size | element_type |     |log2_element_size|
2391     // 32        30    24            16              8     2                 0
2392     //
2393     //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2394     //
2395 
2396     int lh_offset = in_bytes(Klass::layout_helper_offset());
2397 
2398     // Load the 32-bit signed layout helper value.
2399     __ lwz(lh, lh_offset, src_klass);
2400 
2401     // Handle objArrays completely differently...
2402     jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2403     __ load_const_optimized(temp, objArray_lh, R0);
2404     __ cmpw(CCR0, lh, temp);
2405     __ beq(CCR0, L_objArray);
2406 
2407     __ cmpd(CCR5, src_klass, dst_klass);          // if (src->klass() != dst->klass()) return -1;
2408     __ cmpwi(CCR6, lh, Klass::_lh_neutral_value); // if (!src->is_Array()) return -1;
2409 
2410     __ crnand(CCR5, Assembler::equal, CCR6, Assembler::less);
2411     __ beq(CCR5, L_failed);
2412 
2413     // At this point, it is known to be a typeArray (array_tag 0x3).
2414 #ifdef ASSERT
2415     { Label L;
2416       jint lh_prim_tag_in_place = (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
2417       __ load_const_optimized(temp, lh_prim_tag_in_place, R0);
2418       __ cmpw(CCR0, lh, temp);
2419       __ bge(CCR0, L);
2420       __ stop("must be a primitive array");
2421       __ bind(L);
2422     }
2423 #endif
2424 
2425     arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2426                            temp, dst_klass, L_failed);
2427 
2428     // TypeArrayKlass
2429     //
2430     // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
2431     // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
2432     //
2433 
2434     const Register offset = dst_klass;    // array offset
2435     const Register elsize = src_klass;    // log2 element size
2436 
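         // rldicl rotates the layout helper's header_size field into the low
         // bits and masks it off; andi extracts the log2 element size field.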
2437     __ rldicl(offset, lh, 64 - Klass::_lh_header_size_shift, 64 - exact_log2(Klass::_lh_header_size_mask + 1));
2438     __ andi(elsize, lh, Klass::_lh_log2_element_size_mask);
2439     __ add(src, offset, src);       // src array offset
2440     __ add(dst, offset, dst);       // dst array offset
2441 
2442     // Next registers should be set before the jump to corresponding stub.
2443     const Register from     = R3_ARG1;  // source array address
2444     const Register to       = R4_ARG2;  // destination array address
2445     const Register count    = R5_ARG3;  // elements count
2446 
2447     // 'from', 'to', 'count' registers should be set in this order
2448     // since they are the same as 'src', 'src_pos', 'dst'.
2449 
2450     BLOCK_COMMENT("scale indexes to element size");
2451     __ sld(src_pos, src_pos, elsize);
2452     __ sld(dst_pos, dst_pos, elsize);
2453     __ add(from, src_pos, src);  // src_addr
2454     __ add(to, dst_pos, dst);    // dst_addr
2455     __ mr(count, length);        // length
2456 
2457     BLOCK_COMMENT("choose copy loop based on element size");
2458     // Using conditional branches with range 32kB.
2459     const int bo = Assembler::bcondCRbiIs1, bi = Assembler::bi0(CCR0, Assembler::equal);
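         // bc(bo, bi, target) branches when the CCR0 EQ bit is set, i.e. when
         // elsize equals the value compared immediately before it.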
2460     __ cmpwi(CCR0, elsize, 0);
2461     __ bc(bo, bi, entry_jbyte_arraycopy);
2462     __ cmpwi(CCR0, elsize, LogBytesPerShort);
2463     __ bc(bo, bi, entry_jshort_arraycopy);
2464     __ cmpwi(CCR0, elsize, LogBytesPerInt);
2465     __ bc(bo, bi, entry_jint_arraycopy);
2466 #ifdef ASSERT
2467     { Label L;
2468       __ cmpwi(CCR0, elsize, LogBytesPerLong);
2469       __ beq(CCR0, L);
2470       __ stop("must be long copy, but elsize is wrong");
2471       __ bind(L);
2472     }
2473 #endif
2474     __ b(entry_jlong_arraycopy);
2475 
2476     // ObjArrayKlass
2477   __ bind(L_objArray);
2478     // live at this point:  src_klass, dst_klass, src[_pos], dst[_pos], length
2479 
2480     Label L_disjoint_plain_copy, L_checkcast_copy;
2481     //  test array classes for subtyping
2482     __ cmpd(CCR0, src_klass, dst_klass);         // usual case is exact equality
2483     __ bne(CCR0, L_checkcast_copy);
2484 
2485     // Identically typed arrays can be copied without element-wise checks.
2486     arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2487                            temp, lh, L_failed);
2488 
2489     __ addi(src, src, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //src offset
2490     __ addi(dst, dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //dst offset
2491     __ sldi(src_pos, src_pos, LogBytesPerHeapOop);
2492     __ sldi(dst_pos, dst_pos, LogBytesPerHeapOop);
2493     __ add(from, src_pos, src);  // src_addr
2494     __ add(to, dst_pos, dst);    // dst_addr
2495     __ mr(count, length);        // length
2496     __ b(entry_oop_arraycopy);
2497 
2498   __ bind(L_checkcast_copy);
2499     // live at this point:  src_klass, dst_klass
2500     {
2501       // Before looking at dst.length, make sure dst is also an objArray.
2502       __ lwz(temp, lh_offset, dst_klass);
2503       __ cmpw(CCR0, lh, temp);
2504       __ bne(CCR0, L_failed);
2505 
2506       // It is safe to examine both src.length and dst.length.
2507       arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2508                              temp, lh, L_failed);
2509 
2510       // Marshal the base address arguments now, freeing registers.
2511       __ addi(src, src, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //src offset
2512       __ addi(dst, dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //dst offset
2513       __ sldi(src_pos, src_pos, LogBytesPerHeapOop);
2514       __ sldi(dst_pos, dst_pos, LogBytesPerHeapOop);
2515       __ add(from, src_pos, src);  // src_addr
2516       __ add(to, dst_pos, dst);    // dst_addr
2517       __ mr(count, length);        // length
2518 
2519       Register sco_temp = R6_ARG4;             // This register is free now.
2520       assert_different_registers(from, to, count, sco_temp,
2521                                  dst_klass, src_klass);
2522 
2523       // Generate the type check.
2524       int sco_offset = in_bytes(Klass::super_check_offset_offset());
2525       __ lwz(sco_temp, sco_offset, dst_klass);
2526       generate_type_check(src_klass, sco_temp, dst_klass,
2527                           temp, L_disjoint_plain_copy);
2528 
2529       // Fetch destination element klass from the ObjArrayKlass header.
2530       int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
2531 
2532       // The checkcast_copy loop needs two extra arguments:
2533       __ ld(R7_ARG5, ek_offset, dst_klass);   // dest elem klass
2534       __ lwz(R6_ARG4, sco_offset, R7_ARG5);   // sco of elem klass
2535       __ b(entry_checkcast_arraycopy);
2536     }
2537 
2538     __ bind(L_disjoint_plain_copy);
2539     __ b(entry_disjoint_oop_arraycopy);
2540 
2541   __ bind(L_failed);
2542     __ li(R3_RET, -1); // return -1
2543     __ blr();
2544     return start;
2545   }
2546 
2547   // Arguments for generated stub:
2548   //   R3_ARG1   - source byte array address
2549   //   R4_ARG2   - destination byte array address
2550   //   R5_ARG3   - round key array
2551   address generate_aescrypt_encryptBlock() {
2552     assert(UseAES, "need AES instructions");
2553     StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
2554 
2555     address start = __ function_entry();
2556 
2557     Label L_doLast, L_error;
2558 
2559     Register from           = R3_ARG1;  // source array address
2560     Register to             = R4_ARG2;  // destination array address
2561     Register key            = R5_ARG3;  // round key array
2562 
2563     Register keylen         = R8;
2564     Register temp           = R9;
2565     Register keypos         = R10;
2566     Register fifteen        = R12;
2567 
2568     VectorRegister vRet     = VR0;
2569 
2570     VectorRegister vKey1    = VR1;
2571     VectorRegister vKey2    = VR2;
2572     VectorRegister vKey3    = VR3;
2573     VectorRegister vKey4    = VR4;
2574 
2575     VectorRegister fromPerm = VR5;
2576     VectorRegister keyPerm  = VR6;
2577     VectorRegister toPerm   = VR7;
2578     VectorRegister fSplt    = VR8;
2579 
2580     VectorRegister vTmp1    = VR9;
2581     VectorRegister vTmp2    = VR10;
2582     VectorRegister vTmp3    = VR11;
2583     VectorRegister vTmp4    = VR12;
2584 
2585     __ li              (fifteen, 15);
2586 
2587     // load unaligned from[0-15] to vRet
2588     __ lvx             (vRet, from);
2589     __ lvx             (vTmp1, fifteen, from);
2590     __ lvsl            (fromPerm, from);
2591 #ifdef VM_LITTLE_ENDIAN
2592     __ vspltisb        (fSplt, 0x0f);
2593     __ vxor            (fromPerm, fromPerm, fSplt);
2594 #endif
2595     __ vperm           (vRet, vRet, vTmp1, fromPerm);
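         // vRet now holds the 16 bytes at 'from', independent of its alignment.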
2596 
2597     // load keylen (44 or 52 or 60)
2598     __ lwz             (keylen, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT), key);
2599 
2600     // to load keys
2601     __ load_perm       (keyPerm, key);
2602 #ifdef VM_LITTLE_ENDIAN
2603     __ vspltisb        (vTmp2, -16);
2604     __ vrld            (keyPerm, keyPerm, vTmp2);
2605     __ vrld            (keyPerm, keyPerm, vTmp2);
2606     __ vsldoi          (keyPerm, keyPerm, keyPerm, 8);
2607 #endif
2608 
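         // The expanded key need not be 16-byte aligned: lvx ignores the low
         // address bits, so each round key below is assembled by merging two
         // consecutive aligned loads with vec_perm through keyPerm.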
2609     // load the 1st round key to vTmp1
2610     __ lvx             (vTmp1, key);
2611     __ li              (keypos, 16);
2612     __ lvx             (vKey1, keypos, key);
2613     __ vec_perm        (vTmp1, vKey1, keyPerm);
2614 
2615     // 1st round
2616     __ vxor            (vRet, vRet, vTmp1);
2617 
2618     // load the 2nd round key to vKey1
2619     __ li              (keypos, 32);
2620     __ lvx             (vKey2, keypos, key);
2621     __ vec_perm        (vKey1, vKey2, keyPerm);
2622 
2623     // load the 3rd round key to vKey2
2624     __ li              (keypos, 48);
2625     __ lvx             (vKey3, keypos, key);
2626     __ vec_perm        (vKey2, vKey3, keyPerm);
2627 
2628     // load the 4th round key to vKey3
2629     __ li              (keypos, 64);
2630     __ lvx             (vKey4, keypos, key);
2631     __ vec_perm        (vKey3, vKey4, keyPerm);
2632 
2633     // load the 5th round key to vKey4
2634     __ li              (keypos, 80);
2635     __ lvx             (vTmp1, keypos, key);
2636     __ vec_perm        (vKey4, vTmp1, keyPerm);
2637 
2638     // 2nd - 5th rounds
2639     __ vcipher         (vRet, vRet, vKey1);
2640     __ vcipher         (vRet, vRet, vKey2);
2641     __ vcipher         (vRet, vRet, vKey3);
2642     __ vcipher         (vRet, vRet, vKey4);
2643 
2644     // load the 6th round key to vKey1
2645     __ li              (keypos, 96);
2646     __ lvx             (vKey2, keypos, key);
2647     __ vec_perm        (vKey1, vTmp1, vKey2, keyPerm);
2648 
2649     // load the 7th round key to vKey2
2650     __ li              (keypos, 112);
2651     __ lvx             (vKey3, keypos, key);
2652     __ vec_perm        (vKey2, vKey3, keyPerm);
2653 
2654     // load the 8th round key to vKey3
2655     __ li              (keypos, 128);
2656     __ lvx             (vKey4, keypos, key);
2657     __ vec_perm        (vKey3, vKey4, keyPerm);
2658 
2659     // load the 9th round key to vKey4
2660     __ li              (keypos, 144);
2661     __ lvx             (vTmp1, keypos, key);
2662     __ vec_perm        (vKey4, vTmp1, keyPerm);
2663 
2664     // 6th - 9th rounds
2665     __ vcipher         (vRet, vRet, vKey1);
2666     __ vcipher         (vRet, vRet, vKey2);
2667     __ vcipher         (vRet, vRet, vKey3);
2668     __ vcipher         (vRet, vRet, vKey4);
2669 
2670     // load the 10th round key to vKey1
2671     __ li              (keypos, 160);
2672     __ lvx             (vKey2, keypos, key);
2673     __ vec_perm        (vKey1, vTmp1, vKey2, keyPerm);
2674 
2675     // load the 11th round key to vKey2
2676     __ li              (keypos, 176);
2677     __ lvx             (vTmp1, keypos, key);
2678     __ vec_perm        (vKey2, vTmp1, keyPerm);
2679 
2680     // if all round keys are loaded, skip next 4 rounds
2681     __ cmpwi           (CCR0, keylen, 44);
2682     __ beq             (CCR0, L_doLast);
2683 
2684     // 10th - 11th rounds
2685     __ vcipher         (vRet, vRet, vKey1);
2686     __ vcipher         (vRet, vRet, vKey2);
2687 
2688     // load the 12th round key to vKey1
2689     __ li              (keypos, 192);
2690     __ lvx             (vKey2, keypos, key);
2691     __ vec_perm        (vKey1, vTmp1, vKey2, keyPerm);
2692 
2693     // load the 13th round key to vKey2
2694     __ li              (keypos, 208);
2695     __ lvx             (vTmp1, keypos, key);
2696     __ vec_perm        (vKey2, vTmp1, keyPerm);
2697 
2698     // if all round keys are loaded, skip next 2 rounds
2699     __ cmpwi           (CCR0, keylen, 52);
2700     __ beq             (CCR0, L_doLast);
2701 
2702 #ifdef ASSERT
2703     __ cmpwi           (CCR0, keylen, 60);
2704     __ bne             (CCR0, L_error);
2705 #endif
2706 
2707     // 12th - 13th rounds
2708     __ vcipher         (vRet, vRet, vKey1);
2709     __ vcipher         (vRet, vRet, vKey2);
2710 
2711     // load the 14th round key to vKey1
2712     __ li              (keypos, 224);
2713     __ lvx             (vKey2, keypos, key);
2714     __ vec_perm        (vKey1, vTmp1, vKey2, keyPerm);
2715 
2716     // load the 15th round key to vKey2
2717     __ li              (keypos, 240);
2718     __ lvx             (vTmp1, keypos, key);
2719     __ vec_perm        (vKey2, vTmp1, keyPerm);
2720 
2721     __ bind(L_doLast);
2722 
2723     // last two rounds
2724     __ vcipher         (vRet, vRet, vKey1);
2725     __ vcipherlast     (vRet, vRet, vKey2);
2726 
2727 #ifdef VM_LITTLE_ENDIAN
2728     // toPerm = 0x0F0E0D0C0B0A09080706050403020100
2729     __ lvsl            (toPerm, keypos); // keypos is a multiple of 16
2730     __ vxor            (toPerm, toPerm, fSplt);
2731 
2732     // Swap Bytes
2733     __ vperm           (vRet, vRet, vRet, toPerm);
2734 #endif
2735 
2736     // store result (unaligned)
2737     // Note: We can't use a read-modify-write sequence which touches additional Bytes.
2738     Register lo = temp, hi = fifteen; // Reuse
2739     __ vsldoi          (vTmp1, vRet, vRet, 8);
2740     __ mfvrd           (hi, vRet);
2741     __ mfvrd           (lo, vTmp1);
2742     __ std             (hi, 0 LITTLE_ENDIAN_ONLY(+ 8), to);
2743     __ std             (lo, 0 BIG_ENDIAN_ONLY(+ 8), to);
2744 
2745     __ blr();
2746 
2747 #ifdef ASSERT
2748     __ bind(L_error);
2749     __ stop("aescrypt_encryptBlock: invalid key length");
2750 #endif
2751      return start;
2752   }
2753 
2754   // Arguments for generated stub:
2755   //   R3_ARG1   - source byte array address
2756   //   R4_ARG2   - destination byte array address
2757   //   R5_ARG3   - K (key) in little endian int array
2758   address generate_aescrypt_decryptBlock() {
2759     assert(UseAES, "need AES instructions");
2760     StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
2761 
2762     address start = __ function_entry();
2763 
2764     Label L_doLast, L_do44, L_do52, L_error;
2765 
2766     Register from           = R3_ARG1;  // source array address
2767     Register to             = R4_ARG2;  // destination array address
2768     Register key            = R5_ARG3;  // round key array
2769 
2770     Register keylen         = R8;
2771     Register temp           = R9;
2772     Register keypos         = R10;
2773     Register fifteen        = R12;
2774 
2775     VectorRegister vRet     = VR0;
2776 
2777     VectorRegister vKey1    = VR1;
2778     VectorRegister vKey2    = VR2;
2779     VectorRegister vKey3    = VR3;
2780     VectorRegister vKey4    = VR4;
2781     VectorRegister vKey5    = VR5;
2782 
2783     VectorRegister fromPerm = VR6;
2784     VectorRegister keyPerm  = VR7;
2785     VectorRegister toPerm   = VR8;
2786     VectorRegister fSplt    = VR9;
2787 
2788     VectorRegister vTmp1    = VR10;
2789     VectorRegister vTmp2    = VR11;
2790     VectorRegister vTmp3    = VR12;
2791     VectorRegister vTmp4    = VR13;
2792 
2793     __ li              (fifteen, 15);
2794 
2795     // load unaligned from[0-15] to vRet
2796     __ lvx             (vRet, from);
2797     __ lvx             (vTmp1, fifteen, from);
2798     __ lvsl            (fromPerm, from);
2799 #ifdef VM_LITTLE_ENDIAN
2800     __ vspltisb        (fSplt, 0x0f);
2801     __ vxor            (fromPerm, fromPerm, fSplt);
2802 #endif
2803     __ vperm           (vRet, vRet, vTmp1, fromPerm); // align [and byte swap in LE]
2804 
2805     // load keylen (44 or 52 or 60)
2806     __ lwz             (keylen, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT), key);
2807 
2808     // to load keys
2809     __ load_perm       (keyPerm, key);
2810 #ifdef VM_LITTLE_ENDIAN
2811     __ vxor            (vTmp2, vTmp2, vTmp2);
2812     __ vspltisb        (vTmp2, -16);
2813     __ vrld            (keyPerm, keyPerm, vTmp2);
2814     __ vrld            (keyPerm, keyPerm, vTmp2);
2815     __ vsldoi          (keyPerm, keyPerm, keyPerm, 8);
2816 #endif
2817 
2818     __ cmpwi           (CCR0, keylen, 44);
2819     __ beq             (CCR0, L_do44);
2820 
2821     __ cmpwi           (CCR0, keylen, 52);
2822     __ beq             (CCR0, L_do52);
2823 
2824 #ifdef ASSERT
2825     __ cmpwi           (CCR0, keylen, 60);
2826     __ bne             (CCR0, L_error);
2827 #endif
2828 
2829     // load the 15th round key to vKey1
2830     __ li              (keypos, 240);
2831     __ lvx             (vKey1, keypos, key);
2832     __ li              (keypos, 224);
2833     __ lvx             (vKey2, keypos, key);
2834     __ vec_perm        (vKey1, vKey2, vKey1, keyPerm);
2835 
2836     // load the 14th round key to vKey2
2837     __ li              (keypos, 208);
2838     __ lvx             (vKey3, keypos, key);
2839     __ vec_perm        (vKey2, vKey3, vKey2, keyPerm);
2840 
2841     // load the 13th round key to vKey3
2842     __ li              (keypos, 192);
2843     __ lvx             (vKey4, keypos, key);
2844     __ vec_perm        (vKey3, vKey4, vKey3, keyPerm);
2845 
2846     // load the 12th round key to vKey4
2847     __ li              (keypos, 176);
2848     __ lvx             (vKey5, keypos, key);
2849     __ vec_perm        (vKey4, vKey5, vKey4, keyPerm);
2850 
2851     // load the 11th round key to vKey5
2852     __ li              (keypos, 160);
2853     __ lvx             (vTmp1, keypos, key);
2854     __ vec_perm        (vKey5, vTmp1, vKey5, keyPerm);
2855 
2856     // 1st - 5th rounds
2857     __ vxor            (vRet, vRet, vKey1);
2858     __ vncipher        (vRet, vRet, vKey2);
2859     __ vncipher        (vRet, vRet, vKey3);
2860     __ vncipher        (vRet, vRet, vKey4);
2861     __ vncipher        (vRet, vRet, vKey5);
2862 
2863     __ b               (L_doLast);
2864 
2865     __ align(32);
2866     __ bind            (L_do52);
2867 
2868     // load the 13th round key to vKey1
2869     __ li              (keypos, 208);
2870     __ lvx             (vKey1, keypos, key);
2871     __ li              (keypos, 192);
2872     __ lvx             (vKey2, keypos, key);
2873     __ vec_perm        (vKey1, vKey2, vKey1, keyPerm);
2874 
2875     // load the 12th round key to vKey2
2876     __ li              (keypos, 176);
2877     __ lvx             (vKey3, keypos, key);
2878     __ vec_perm        (vKey2, vKey3, vKey2, keyPerm);
2879 
2880     // load the 11th round key to vKey3
2881     __ li              (keypos, 160);
2882     __ lvx             (vTmp1, keypos, key);
2883     __ vec_perm        (vKey3, vTmp1, vKey3, keyPerm);
2884 
2885     // 1st - 3rd rounds
2886     __ vxor            (vRet, vRet, vKey1);
2887     __ vncipher        (vRet, vRet, vKey2);
2888     __ vncipher        (vRet, vRet, vKey3);
2889 
2890     __ b               (L_doLast);
2891 
2892     __ align(32);
2893     __ bind            (L_do44);
2894 
2895     // load the 11th round key to vKey1
2896     __ li              (keypos, 176);
2897     __ lvx             (vKey1, keypos, key);
2898     __ li              (keypos, 160);
2899     __ lvx             (vTmp1, keypos, key);
2900     __ vec_perm        (vKey1, vTmp1, vKey1, keyPerm);
2901 
2902     // 1st round
2903     __ vxor            (vRet, vRet, vKey1);
2904 
2905     __ bind            (L_doLast);
2906 
2907     // load the 10th round key to vKey1
2908     __ li              (keypos, 144);
2909     __ lvx             (vKey2, keypos, key);
2910     __ vec_perm        (vKey1, vKey2, vTmp1, keyPerm);
2911 
2912     // load the 9th round key to vKey2
2913     __ li              (keypos, 128);
2914     __ lvx             (vKey3, keypos, key);
2915     __ vec_perm        (vKey2, vKey3, vKey2, keyPerm);
2916 
2917     // load the 8th round key to vKey3
2918     __ li              (keypos, 112);
2919     __ lvx             (vKey4, keypos, key);
2920     __ vec_perm        (vKey3, vKey4, vKey3, keyPerm);
2921 
2922     // load the 7th round key to vKey4
2923     __ li              (keypos, 96);
2924     __ lvx             (vKey5, keypos, key);
2925     __ vec_perm        (vKey4, vKey5, vKey4, keyPerm);
2926 
2927     // load the 6th round key to vKey5
2928     __ li              (keypos, 80);
2929     __ lvx             (vTmp1, keypos, key);
2930     __ vec_perm        (vKey5, vTmp1, vKey5, keyPerm);
2931 
2932     // last 10th - 6th rounds
2933     __ vncipher        (vRet, vRet, vKey1);
2934     __ vncipher        (vRet, vRet, vKey2);
2935     __ vncipher        (vRet, vRet, vKey3);
2936     __ vncipher        (vRet, vRet, vKey4);
2937     __ vncipher        (vRet, vRet, vKey5);
2938 
2939     // load the 5th round key to vKey1
2940     __ li              (keypos, 64);
2941     __ lvx             (vKey2, keypos, key);
2942     __ vec_perm        (vKey1, vKey2, vTmp1, keyPerm);
2943 
2944     // load the 4th round key to vKey2
2945     __ li              (keypos, 48);
2946     __ lvx             (vKey3, keypos, key);
2947     __ vec_perm        (vKey2, vKey3, vKey2, keyPerm);
2948 
2949     // load the 3rd round key to vKey3
2950     __ li              (keypos, 32);
2951     __ lvx             (vKey4, keypos, key);
2952     __ vec_perm        (vKey3, vKey4, vKey3, keyPerm);
2953 
2954     // load the 2nd round key to vKey4
2955     __ li              (keypos, 16);
2956     __ lvx             (vKey5, keypos, key);
2957     __ vec_perm        (vKey4, vKey5, vKey4, keyPerm);
2958 
2959     // load the 1st round key to vKey5
2960     __ lvx             (vTmp1, key);
2961     __ vec_perm        (vKey5, vTmp1, vKey5, keyPerm);
2962 
2963     // last 5th - 1st rounds
2964     __ vncipher        (vRet, vRet, vKey1);
2965     __ vncipher        (vRet, vRet, vKey2);
2966     __ vncipher        (vRet, vRet, vKey3);
2967     __ vncipher        (vRet, vRet, vKey4);
2968     __ vncipherlast    (vRet, vRet, vKey5);
2969 
2970 #ifdef VM_LITTLE_ENDIAN
2971     // toPerm = 0x0F0E0D0C0B0A09080706050403020100
2972     __ lvsl            (toPerm, keypos); // keypos is a multiple of 16
2973     __ vxor            (toPerm, toPerm, fSplt);
2974 
2975     // Swap Bytes
2976     __ vperm           (vRet, vRet, vRet, toPerm);
2977 #endif
2978 
2979     // store result (unaligned)
2980     // Note: We can't use a read-modify-write sequence that touches additional bytes.
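    // Instead, the 16-byte result is split into two 8-byte halves (mfvrd) and
    // written with two std instructions, so exactly 16 bytes are stored even
    // if 'to' is not 16-byte aligned; the endian-dependent +8 picks the right
    // half for each doubleword.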
2981     Register lo = temp, hi = fifteen; // Reuse
2982     __ vsldoi          (vTmp1, vRet, vRet, 8);
2983     __ mfvrd           (hi, vRet);
2984     __ mfvrd           (lo, vTmp1);
2985     __ std             (hi, 0 LITTLE_ENDIAN_ONLY(+ 8), to);
2986     __ std             (lo, 0 BIG_ENDIAN_ONLY(+ 8), to);
2987 
2988     __ blr();
2989 
2990 #ifdef ASSERT
2991     __ bind(L_error);
2992     __ stop("aescrypt_decryptBlock: invalid key length");
2993 #endif
2994     return start;
2995   }
2996 
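  // SHA compression stubs: multi_block distinguishes the single-block
  // implCompress entry from the multi-block implCompressMB entry (see
  // generate_all()); both simply delegate to the MacroAssembler emitters.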
2997   address generate_sha256_implCompress(bool multi_block, const char *name) {
2998     assert(UseSHA, "need SHA instructions");
2999     StubCodeMark mark(this, "StubRoutines", name);
3000     address start = __ function_entry();
3001 
3002     __ sha256 (multi_block);
3003 
3004     __ blr();
3005     return start;
3006   }
3007 
3008   address generate_sha512_implCompress(bool multi_block, const char *name) {
3009     assert(UseSHA, "need SHA instructions");
3010     StubCodeMark mark(this, "StubRoutines", name);
3011     address start = __ function_entry();
3012 
3013     __ sha512 (multi_block);
3014 
3015     __ blr();
3016     return start;
3017   }
3018 
3019   void generate_arraycopy_stubs() {
3020     // Note: the disjoint stubs must be generated first, some of
3021     // the conjoint stubs use them.
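    // (A conjoint stub typically tests whether the regions overlap and, if
    // they do not, branches to the corresponding disjoint entry.)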
3022 
3023     // non-aligned disjoint versions
3024     StubRoutines::_jbyte_disjoint_arraycopy       = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy");
3025     StubRoutines::_jshort_disjoint_arraycopy      = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
3026     StubRoutines::_jint_disjoint_arraycopy        = generate_disjoint_int_copy(false, "jint_disjoint_arraycopy");
3027     StubRoutines::_jlong_disjoint_arraycopy       = generate_disjoint_long_copy(false, "jlong_disjoint_arraycopy");
3028     StubRoutines::_oop_disjoint_arraycopy         = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy", false);
3029     StubRoutines::_oop_disjoint_arraycopy_uninit  = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy_uninit", true);
3030 
3031     // aligned disjoint versions
3032     StubRoutines::_arrayof_jbyte_disjoint_arraycopy      = generate_disjoint_byte_copy(true, "arrayof_jbyte_disjoint_arraycopy");
3033     StubRoutines::_arrayof_jshort_disjoint_arraycopy     = generate_disjoint_short_copy(true, "arrayof_jshort_disjoint_arraycopy");
3034     StubRoutines::_arrayof_jint_disjoint_arraycopy       = generate_disjoint_int_copy(true, "arrayof_jint_disjoint_arraycopy");
3035     StubRoutines::_arrayof_jlong_disjoint_arraycopy      = generate_disjoint_long_copy(true, "arrayof_jlong_disjoint_arraycopy");
3036     StubRoutines::_arrayof_oop_disjoint_arraycopy        = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy", false);
3037     StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy_uninit", true);
3038 
3039     // non-aligned conjoint versions
3040     StubRoutines::_jbyte_arraycopy      = generate_conjoint_byte_copy(false, "jbyte_arraycopy");
3041     StubRoutines::_jshort_arraycopy     = generate_conjoint_short_copy(false, "jshort_arraycopy");
3042     StubRoutines::_jint_arraycopy       = generate_conjoint_int_copy(false, "jint_arraycopy");
3043     StubRoutines::_jlong_arraycopy      = generate_conjoint_long_copy(false, "jlong_arraycopy");
3044     StubRoutines::_oop_arraycopy        = generate_conjoint_oop_copy(false, "oop_arraycopy", false);
3045     StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(false, "oop_arraycopy_uninit", true);
3046 
3047     // aligned conjoint versions
3048     StubRoutines::_arrayof_jbyte_arraycopy      = generate_conjoint_byte_copy(true, "arrayof_jbyte_arraycopy");
3049     StubRoutines::_arrayof_jshort_arraycopy     = generate_conjoint_short_copy(true, "arrayof_jshort_arraycopy");
3050     StubRoutines::_arrayof_jint_arraycopy       = generate_conjoint_int_copy(true, "arrayof_jint_arraycopy");
3051     StubRoutines::_arrayof_jlong_arraycopy      = generate_conjoint_long_copy(true, "arrayof_jlong_arraycopy");
3052     StubRoutines::_arrayof_oop_arraycopy        = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy", false);
3053     StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy_uninit", true);
3054 
3055     // special/generic versions
3056     StubRoutines::_checkcast_arraycopy        = generate_checkcast_copy("checkcast_arraycopy", false);
3057     StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", true);
3058 
3059     StubRoutines::_unsafe_arraycopy  = generate_unsafe_copy("unsafe_arraycopy",
3060                                                             STUB_ENTRY(jbyte_arraycopy),
3061                                                             STUB_ENTRY(jshort_arraycopy),
3062                                                             STUB_ENTRY(jint_arraycopy),
3063                                                             STUB_ENTRY(jlong_arraycopy));
3064     StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy",
3065                                                              STUB_ENTRY(jbyte_arraycopy),
3066                                                              STUB_ENTRY(jshort_arraycopy),
3067                                                              STUB_ENTRY(jint_arraycopy),
3068                                                              STUB_ENTRY(oop_arraycopy),
3069                                                              STUB_ENTRY(oop_disjoint_arraycopy),
3070                                                              STUB_ENTRY(jlong_arraycopy),
3071                                                              STUB_ENTRY(checkcast_arraycopy));
3072 
3073     // fill routines
3074 #ifdef COMPILER2
3075     if (OptimizeFill) {
3076       StubRoutines::_jbyte_fill          = generate_fill(T_BYTE,  false, "jbyte_fill");
3077       StubRoutines::_jshort_fill         = generate_fill(T_SHORT, false, "jshort_fill");
3078       StubRoutines::_jint_fill           = generate_fill(T_INT,   false, "jint_fill");
3079       StubRoutines::_arrayof_jbyte_fill  = generate_fill(T_BYTE,  true, "arrayof_jbyte_fill");
3080       StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
3081       StubRoutines::_arrayof_jint_fill   = generate_fill(T_INT,   true, "arrayof_jint_fill");
3082     }
3083 #endif
3084   }
3085 
3086   // Safefetch stubs.
3087   void generate_safefetch(const char* name, int size, address* entry, address* fault_pc, address* continuation_pc) {
3088     // safefetch signatures:
3089     //   int      SafeFetch32(int*      adr, int      errValue);
3090     //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
3091     //
3092     // arguments:
3093     //   R3_ARG1 = adr
3094     //   R4_ARG2 = errValue
3095     //
3096     // result:
3097     //   R3_RET  = *adr or errValue
3098 
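    // If the load below faults, the fault handler is expected to resume
    // execution at *continuation_pc with errValue still in R4_ARG2, so the
    // error value is returned instead of *adr.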
3099     StubCodeMark mark(this, "StubRoutines", name);
3100 
3101     // Entry point, pc or function descriptor.
3102     *entry = __ function_entry();
3103 
3104     // Load *adr into R4_ARG2, may fault.
3105     *fault_pc = __ pc();
3106     switch (size) {
3107       case 4:
3108         // int32_t, sign-extended
3109         __ lwa(R4_ARG2, 0, R3_ARG1);
3110         break;
3111       case 8:
3112         // int64_t
3113         __ ld(R4_ARG2, 0, R3_ARG1);
3114         break;
3115       default:
3116         ShouldNotReachHere();
3117     }
3118 
3119     // return errValue or *adr
3120     *continuation_pc = __ pc();
3121     __ mr(R3_RET, R4_ARG2);
3122     __ blr();
3123   }
3124 
3125   // Stub for BigInteger::multiplyToLen()
3126   //
3127   //  Arguments:
3128   //
3129   //  Input:
3130   //    R3 - x address
3131   //    R4 - x length
3132   //    R5 - y address
3133   //    R6 - y length
3134   //    R7 - z address
3135   //    R8 - z length
3136   //
3137   address generate_multiplyToLen() {
3138 
3139     StubCodeMark mark(this, "StubRoutines", "multiplyToLen");
3140 
3141     address start = __ function_entry();
3142 
3143     const Register x     = R3;
3144     const Register xlen  = R4;
3145     const Register y     = R5;
3146     const Register ylen  = R6;
3147     const Register z     = R7;
3148     const Register zlen  = R8;
3149 
3150     const Register tmp1  = R2; // TOC not used.
3151     const Register tmp2  = R9;
3152     const Register tmp3  = R10;
3153     const Register tmp4  = R11;
3154     const Register tmp5  = R12;
3155 
3156     // non-volatile regs
3157     const Register tmp6  = R31;
3158     const Register tmp7  = R30;
3159     const Register tmp8  = R29;
3160     const Register tmp9  = R28;
3161     const Register tmp10 = R27;
3162     const Register tmp11 = R26;
3163     const Register tmp12 = R25;
3164     const Register tmp13 = R24;
3165 
3166     BLOCK_COMMENT("Entry:");
3167 
3168     // C2 does not respect int to long conversion for stub calls.
3169     __ clrldi(xlen, xlen, 32);
3170     __ clrldi(ylen, ylen, 32);
3171     __ clrldi(zlen, zlen, 32);
3172 
3173     // Save non-volatile regs (frameless).
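    // 8 registers * 8 bytes are stored at negative offsets from R1_SP, i.e.
    // below the stack pointer; this assumes the 64 bytes fit into the ABI's
    // protected area under SP, so no frame needs to be pushed.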
3174     int current_offs = 8;
3175     __ std(R24, -current_offs, R1_SP); current_offs += 8;
3176     __ std(R25, -current_offs, R1_SP); current_offs += 8;
3177     __ std(R26, -current_offs, R1_SP); current_offs += 8;
3178     __ std(R27, -current_offs, R1_SP); current_offs += 8;
3179     __ std(R28, -current_offs, R1_SP); current_offs += 8;
3180     __ std(R29, -current_offs, R1_SP); current_offs += 8;
3181     __ std(R30, -current_offs, R1_SP); current_offs += 8;
3182     __ std(R31, -current_offs, R1_SP);
3183 
3184     __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5,
3185                        tmp6, tmp7, tmp8, tmp9, tmp10, tmp11, tmp12, tmp13);
3186 
3187     // Restore non-volatile regs.
3188     current_offs = 8;
3189     __ ld(R24, -current_offs, R1_SP); current_offs += 8;
3190     __ ld(R25, -current_offs, R1_SP); current_offs += 8;
3191     __ ld(R26, -current_offs, R1_SP); current_offs += 8;
3192     __ ld(R27, -current_offs, R1_SP); current_offs += 8;
3193     __ ld(R28, -current_offs, R1_SP); current_offs += 8;
3194     __ ld(R29, -current_offs, R1_SP); current_offs += 8;
3195     __ ld(R30, -current_offs, R1_SP); current_offs += 8;
3196     __ ld(R31, -current_offs, R1_SP);
3197 
3198     __ blr();  // Return to caller.
3199 
3200     return start;
3201   }
3202 
3203   /**
3204    *  Arguments:
3205    *
3206    *  Input:
3207    *   R3_ARG1    - out address
3208    *   R4_ARG2    - in address
3209    *   R5_ARG3    - offset
3210    *   R6_ARG4    - len
3211    *   R7_ARG5    - k
3212    *  Output:
3213    *   R3_RET     - carry
3214    */
3215   address generate_mulAdd() {
3216     __ align(CodeEntryAlignment);
3217     StubCodeMark mark(this, "StubRoutines", "mulAdd");
3218 
3219     address start = __ function_entry();
3220 
3221     // C2 does not sign-extend int parameters to full 64-bit registers:
3222     __ rldic (R5_ARG3, R5_ARG3, 2, 32);  // always positive
3223     __ clrldi(R6_ARG4, R6_ARG4, 32);     // force zero bits on higher word
3224     __ clrldi(R7_ARG5, R7_ARG5, 32);     // force zero bits on higher word
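    // The rldic above also converts R5_ARG3 from an (assumed) int-array index
    // into a byte offset by shifting it left by 2 while clearing the upper word.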
3225 
3226     __ muladd(R3_ARG1, R4_ARG2, R5_ARG3, R6_ARG4, R7_ARG5, R8, R9, R10);
3227 
3228     // Moves output carry to return register
3229     __ mr    (R3_RET,  R10);
3230 
3231     __ blr();
3232 
3233     return start;
3234   }
3235 
3236   /**
3237    *  Arguments:
3238    *
3239    *  Input:
3240    *   R3_ARG1    - in address
3241    *   R4_ARG2    - in length
3242    *   R5_ARG3    - out address
3243    *   R6_ARG4    - out length
3244    */
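  // Follows the structure of java.math.BigInteger.squareToLen (note the
  // addOne/primitiveLeftShift markers below): store the squares of all ints
  // shifted right by one bit, add the off-diagonal products via muladd,
  // shift the whole result left by one bit, and finally copy the low bit of
  // the input into the low bit of the result.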
3245   address generate_squareToLen() {
3246     __ align(CodeEntryAlignment);
3247     StubCodeMark mark(this, "StubRoutines", "squareToLen");
3248 
3249     address start = __ function_entry();
3250 
3251     // args - the upper 32 bits are cleared (zero-extended) due to int-to-long casting
3252     const Register in        = R3_ARG1;
3253     const Register in_len    = R4_ARG2;
3254     __ clrldi(in_len, in_len, 32);
3255     const Register out       = R5_ARG3;
3256     const Register out_len   = R6_ARG4;
3257     __ clrldi(out_len, out_len, 32);
3258 
3259     // output
3260     const Register ret       = R3_RET;
3261 
3262     // temporaries
3263     const Register lplw_s    = R7;
3264     const Register in_aux    = R8;
3265     const Register out_aux   = R9;
3266     const Register piece     = R10;
3267     const Register product   = R14;
3268     const Register lplw      = R15;
3269     const Register i_minus1  = R16;
3270     const Register carry     = R17;
3271     const Register offset    = R18;
3272     const Register off_aux   = R19;
3273     const Register t         = R20;
3274     const Register mlen      = R21;
3275     const Register len       = R22;
3276     const Register a         = R23;
3277     const Register b         = R24;
3278     const Register i         = R25;
3279     const Register c         = R26;
3280     const Register cs        = R27;
3281 
3282     // Labels
3283     Label SKIP_LSHIFT, SKIP_DIAGONAL_SUM, SKIP_ADDONE, SKIP_MULADD, SKIP_LOOP_SQUARE;
3284     Label LOOP_LSHIFT, LOOP_DIAGONAL_SUM, LOOP_ADDONE, LOOP_MULADD, LOOP_SQUARE;
3285 
3286     // Save non-volatile regs (frameless).
3287     int current_offs = -8;
3288     __ std(R28, current_offs, R1_SP); current_offs -= 8;
3289     __ std(R27, current_offs, R1_SP); current_offs -= 8;
3290     __ std(R26, current_offs, R1_SP); current_offs -= 8;
3291     __ std(R25, current_offs, R1_SP); current_offs -= 8;
3292     __ std(R24, current_offs, R1_SP); current_offs -= 8;
3293     __ std(R23, current_offs, R1_SP); current_offs -= 8;
3294     __ std(R22, current_offs, R1_SP); current_offs -= 8;
3295     __ std(R21, current_offs, R1_SP); current_offs -= 8;
3296     __ std(R20, current_offs, R1_SP); current_offs -= 8;
3297     __ std(R19, current_offs, R1_SP); current_offs -= 8;
3298     __ std(R18, current_offs, R1_SP); current_offs -= 8;
3299     __ std(R17, current_offs, R1_SP); current_offs -= 8;
3300     __ std(R16, current_offs, R1_SP); current_offs -= 8;
3301     __ std(R15, current_offs, R1_SP); current_offs -= 8;
3302     __ std(R14, current_offs, R1_SP);
3303 
3304     // Store the squares, right shifted one bit (i.e., divided by 2)
3305     __ subi   (out_aux,   out,       8);
3306     __ subi   (in_aux,    in,        4);
3307     __ cmpwi  (CCR0,      in_len,    0);
3308     // Initialize lplw outside of the loop
3309     __ xorr   (lplw,      lplw,      lplw);
3310     __ ble    (CCR0,      SKIP_LOOP_SQUARE);    // in_len <= 0
3311     __ mtctr  (in_len);
3312 
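    // Each iteration emits one 64-bit word: the low bit of the previous
    // square (kept in lplw) becomes the MSB, followed by the current square
    // shifted right by one bit.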
3313     __ bind(LOOP_SQUARE);
3314     __ lwzu   (piece,     4,         in_aux);
3315     __ mulld  (product,   piece,     piece);
3316     // move the low bit of the previous square into the MSB position (shift left 63 bits)
3317     __ rldic  (lplw_s,    lplw,      63, 0);
3318     __ mr     (lplw,      product);
3319     // shift right 1 bit without sign extension
3320     __ srdi   (product,   product,   1);
3321     // join them to the same register and store it
3322     __ orr    (product,   lplw_s,    product);
3323 #ifdef VM_LITTLE_ENDIAN
3324     // Swap low and high words for little endian
3325     __ rldicl (product,   product,   32, 0);
3326 #endif
3327     __ stdu   (product,   8,         out_aux);
3328     __ bdnz   (LOOP_SQUARE);
3329 
3330     __ bind(SKIP_LOOP_SQUARE);
3331 
3332     // Add in off-diagonal sums
3333     __ cmpwi  (CCR0,      in_len,    0);
3334     __ ble    (CCR0,      SKIP_DIAGONAL_SUM);
3335     // Avoid CTR usage here in order to use it at mulAdd
3336     __ subi   (i_minus1,  in_len,    1);
3337     __ li     (offset,    4);
3338 
3339     __ bind(LOOP_DIAGONAL_SUM);
3340 
3341     __ sldi   (off_aux,   out_len,   2);
3342     __ sub    (off_aux,   off_aux,   offset);
3343 
3344     __ mr     (len,       i_minus1);
3345     __ sldi   (mlen,      i_minus1,  2);
3346     __ lwzx   (t,         in,        mlen);
3347 
3348     __ muladd (out, in, off_aux, len, t, a, b, carry);
3349 
3350     // begin<addOne>
3351     // off_aux = out_len*4 - 4 - mlen - offset*4 - 4;
3352     __ addi   (mlen,      mlen,      4);
3353     __ sldi   (a,         out_len,   2);
3354     __ subi   (a,         a,         4);
3355     __ sub    (a,         a,         mlen);
3356     __ subi   (off_aux,   offset,    4);
3357     __ sub    (off_aux,   a,         off_aux);
3358 
3359     __ lwzx   (b,         off_aux,   out);
3360     __ add    (b,         b,         carry);
3361     __ stwx   (b,         off_aux,   out);
3362 
3363     // if (((uint64_t)s >> 32) != 0) {
3364     __ srdi_  (a,         b,         32);
3365     __ beq    (CCR0,      SKIP_ADDONE);
3366 
3367     // while (--mlen >= 0) {
3368     __ bind(LOOP_ADDONE);
3369     __ subi   (mlen,      mlen,      4);
3370     __ cmpwi  (CCR0,      mlen,      0);
3371     __ beq    (CCR0,      SKIP_ADDONE);
3372 
3373     // if (--offset_aux < 0) { // Carry out of number
3374     __ subi   (off_aux,   off_aux,   4);
3375     __ cmpwi  (CCR0,      off_aux,   0);
3376     __ blt    (CCR0,      SKIP_ADDONE);
3377 
3378     // } else {
3379     __ lwzx   (b,         off_aux,   out);
3380     __ addi   (b,         b,         1);
3381     __ stwx   (b,         off_aux,   out);
3382     __ cmpwi  (CCR0,      b,         0);
3383     __ bne    (CCR0,      SKIP_ADDONE);
3384     __ b      (LOOP_ADDONE);
3385 
3386     __ bind(SKIP_ADDONE);
3387     // } } } end<addOne>
3388 
3389     __ addi   (offset,    offset,    8);
3390     __ subi   (i_minus1,  i_minus1,  1);
3391     __ cmpwi  (CCR0,      i_minus1,  0);
3392     __ bge    (CCR0,      LOOP_DIAGONAL_SUM);
3393 
3394     __ bind(SKIP_DIAGONAL_SUM);
3395 
3396     // Shift back up and set low bit
3397     // Shifts 1 bit left up to len positions. Assumes no leading zeros
3398     // begin<primitiveLeftShift>
3399     __ cmpwi  (CCR0,      out_len,   0);
3400     __ ble    (CCR0,      SKIP_LSHIFT);
3401     __ li     (i,         0);
3402     __ lwz    (c,         0,         out);
3403     __ subi   (b,         out_len,   1);
3404     __ mtctr  (b);
3405 
3406     __ bind(LOOP_LSHIFT);
3407     __ mr     (b,         c);
3408     __ addi   (cs,        i,         4);
3409     __ lwzx   (c,         out,       cs);
3410 
3411     __ sldi   (b,         b,         1);
3412     __ srwi   (cs,        c,         31);
3413     __ orr    (b,         b,         cs);
3414     __ stwx   (b,         i,         out);
3415 
3416     __ addi   (i,         i,         4);
3417     __ bdnz   (LOOP_LSHIFT);
3418 
3419     __ sldi   (c,         out_len,   2);
3420     __ subi   (c,         c,         4);
3421     __ lwzx   (b,         out,       c);
3422     __ sldi   (b,         b,         1);
3423     __ stwx   (b,         out,       c);
3424 
3425     __ bind(SKIP_LSHIFT);
3426     // end<primitiveLeftShift>
3427 
3428     // Set low bit
3429     __ sldi   (i,         in_len,    2);
3430     __ subi   (i,         i,         4);
3431     __ lwzx   (i,         in,        i);
3432     __ sldi   (c,         out_len,   2);
3433     __ subi   (c,         c,         4);
3434     __ lwzx   (b,         out,       c);
3435 
3436     __ andi   (i,         i,         1);
3437     __ orr    (i,         b,         i);
3438 
3439     __ stwx   (i,         out,       c);
3440 
3441     // Restore non-volatile regs.
3442     current_offs = -8;
3443     __ ld(R28, current_offs, R1_SP); current_offs -= 8;
3444     __ ld(R27, current_offs, R1_SP); current_offs -= 8;
3445     __ ld(R26, current_offs, R1_SP); current_offs -= 8;
3446     __ ld(R25, current_offs, R1_SP); current_offs -= 8;
3447     __ ld(R24, current_offs, R1_SP); current_offs -= 8;
3448     __ ld(R23, current_offs, R1_SP); current_offs -= 8;
3449     __ ld(R22, current_offs, R1_SP); current_offs -= 8;
3450     __ ld(R21, current_offs, R1_SP); current_offs -= 8;
3451     __ ld(R20, current_offs, R1_SP); current_offs -= 8;
3452     __ ld(R19, current_offs, R1_SP); current_offs -= 8;
3453     __ ld(R18, current_offs, R1_SP); current_offs -= 8;
3454     __ ld(R17, current_offs, R1_SP); current_offs -= 8;
3455     __ ld(R16, current_offs, R1_SP); current_offs -= 8;
3456     __ ld(R15, current_offs, R1_SP); current_offs -= 8;
3457     __ ld(R14, current_offs, R1_SP);
3458 
3459     __ mr(ret, out);
3460     __ blr();
3461 
3462     return start;
3463   }
3464 
3465   /**
3466    * Arguments:
3467    *
3468    * Inputs:
3469    *   R3_ARG1    - int   crc
3470    *   R4_ARG2    - byte* buf
3471    *   R5_ARG3    - int   length (of buffer)
3472    *
3473    * scratch:
3474    *   R2, R6-R12
3475    *
3476    * Output:
3477    *   R3_RET     - int   crc result
3478    */
3479   // Compute CRC32 function.
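  // The same stub body serves both variants; is_crc32c selects the CRC32C
  // variant, whose constants are set up separately in generate_initial().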
3480   address generate_CRC32_updateBytes(bool is_crc32c) {
3481     __ align(CodeEntryAlignment);
3482     StubCodeMark mark(this, "StubRoutines", is_crc32c ? "CRC32C_updateBytes" : "CRC32_updateBytes");
3483     address start = __ function_entry();  // Remember stub start address (is rtn value).
3484     __ crc32(R3_ARG1, R4_ARG2, R5_ARG3, R2, R6, R7, R8, R9, R10, R11, R12, is_crc32c);
3485     __ blr();
3486     return start;
3487   }
3488 
3489   // Initialization
3490   void generate_initial() {
3491     // Generates the initial set of stubs and initializes their entry points.
3492 
3493     // Entry points that exist in all platforms.
3494     // Note: This is code that could be shared among different platforms - however the
3495     // benefit seems to be smaller than the disadvantage of having a
3496     // much more complicated generator structure. See also comment in
3497     // stubRoutines.hpp.
3498 
3499     StubRoutines::_forward_exception_entry          = generate_forward_exception();
3500     StubRoutines::_call_stub_entry                  = generate_call_stub(StubRoutines::_call_stub_return_address);
3501     StubRoutines::_catch_exception_entry            = generate_catch_exception();
3502 
3503     // Build this early so it's available for the interpreter.
3504     StubRoutines::_throw_StackOverflowError_entry   =
3505       generate_throw_exception("StackOverflowError throw_exception",
3506                                CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false);
3507     StubRoutines::_throw_delayed_StackOverflowError_entry =
3508       generate_throw_exception("delayed StackOverflowError throw_exception",
3509                                CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError), false);
3510 
3511     // CRC32 Intrinsics.
3512     if (UseCRC32Intrinsics) {
3513       StubRoutines::_crc_table_adr = StubRoutines::generate_crc_constants(REVERSE_CRC32_POLY);
3514       StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes(false);
3515     }
3516 
3517     // CRC32C Intrinsics.
3518     if (UseCRC32CIntrinsics) {
3519       StubRoutines::_crc32c_table_addr = StubRoutines::generate_crc_constants(REVERSE_CRC32C_POLY);
3520       StubRoutines::_updateBytesCRC32C = generate_CRC32_updateBytes(true);
3521     }
3522   }
3523 
3524   void generate_all() {
3525     // Generates the remaining stubs and initializes their entry points.
3526 
3527     // These entry points require SharedInfo::stack0 to be set up in
3528     // non-core builds
3529     StubRoutines::_throw_AbstractMethodError_entry         = generate_throw_exception("AbstractMethodError throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError),  false);
3530     // Handle IncompatibleClassChangeError in itable stubs.
3531     StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError),  false);
3532     StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
3533 
3534     // support for verify_oop (must happen after universe_init)
3535     StubRoutines::_verify_oop_subroutine_entry             = generate_verify_oop();
3536 
3537     // arraycopy stubs used by compilers
3538     generate_arraycopy_stubs();
3539 
3540     // Safefetch stubs.
3541     generate_safefetch("SafeFetch32", sizeof(int),     &StubRoutines::_safefetch32_entry,
3542                                                        &StubRoutines::_safefetch32_fault_pc,
3543                                                        &StubRoutines::_safefetch32_continuation_pc);
3544     generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
3545                                                        &StubRoutines::_safefetchN_fault_pc,
3546                                                        &StubRoutines::_safefetchN_continuation_pc);
3547 
3548 #ifdef COMPILER2
3549     if (UseMultiplyToLenIntrinsic) {
3550       StubRoutines::_multiplyToLen = generate_multiplyToLen();
3551     }
3552     if (UseSquareToLenIntrinsic) {
3553       StubRoutines::_squareToLen = generate_squareToLen();
3554     }
3555     if (UseMulAddIntrinsic) {
3556       StubRoutines::_mulAdd = generate_mulAdd();
3557     }
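    // The Montgomery intrinsics below are not generated as assembly stubs;
    // they are bound directly to the shared C++ implementations in SharedRuntime.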
3558     if (UseMontgomeryMultiplyIntrinsic) {
3559       StubRoutines::_montgomeryMultiply
3560         = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
3561     }
3562     if (UseMontgomerySquareIntrinsic) {
3563       StubRoutines::_montgomerySquare
3564         = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
3565     }
3566 #endif
3567 
3568     if (UseAESIntrinsics) {
3569       StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
3570       StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
3571     }
3572 
3573     if (UseSHA256Intrinsics) {
3574       StubRoutines::_sha256_implCompress   = generate_sha256_implCompress(false, "sha256_implCompress");
3575       StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true,  "sha256_implCompressMB");
3576     }
3577     if (UseSHA512Intrinsics) {
3578       StubRoutines::_sha512_implCompress   = generate_sha512_implCompress(false, "sha512_implCompress");
3579       StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true, "sha512_implCompressMB");
3580     }
3581   }
3582 
3583  public:
3584   StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
3585     // replace the standard masm with a special one:
3586     _masm = new MacroAssembler(code);
3587     if (all) {
3588       generate_all();
3589     } else {
3590       generate_initial();
3591     }
3592   }
3593 };
3594 
3595 void StubGenerator_generate(CodeBuffer* code, bool all) {
3596   StubGenerator g(code, all);
3597 }
3598