1 /*
2  * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4  *
5  * This code is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 only, as
7  * published by the Free Software Foundation.
8  *
9  * This code is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12  * version 2 for more details (a copy is included in the LICENSE file that
13  * accompanied this code).
14  *
15  * You should have received a copy of the GNU General Public License version
16  * 2 along with this work; if not, write to the Free Software Foundation,
17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18  *
19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20  * or visit www.oracle.com if you need additional information or have any
21  * questions.
22  *
23  */
24 
25 #include "precompiled.hpp"
26 #include "asm/assembler.hpp"
27 #include "assembler_arm.inline.hpp"
28 #include "gc/shared/barrierSet.hpp"
29 #include "gc/shared/barrierSetAssembler.hpp"
30 #include "interpreter/interpreter.hpp"
31 #include "nativeInst_arm.hpp"
32 #include "oops/instanceOop.hpp"
33 #include "oops/method.hpp"
34 #include "oops/objArrayKlass.hpp"
35 #include "oops/oop.inline.hpp"
36 #include "prims/methodHandles.hpp"
37 #include "runtime/frame.inline.hpp"
38 #include "runtime/handles.inline.hpp"
39 #include "runtime/sharedRuntime.hpp"
40 #include "runtime/stubCodeGenerator.hpp"
41 #include "runtime/stubRoutines.hpp"
42 #include "utilities/align.hpp"
43 #ifdef COMPILER2
44 #include "opto/runtime.hpp"
45 #endif
46 
47 // Declaration and definition of StubGenerator (no .hpp file).
48 // For a more detailed description of the stub routine structure
49 // see the comment in stubRoutines.hpp
50 
51 #define __ _masm->
52 
53 #ifdef PRODUCT
54 #define BLOCK_COMMENT(str) /* nothing */
55 #else
56 #define BLOCK_COMMENT(str) __ block_comment(str)
57 #endif
58 
59 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
60 
61 // -------------------------------------------------------------------------------------------------------------------------
62 // Stub Code definitions
63 
64 // Platform dependent parameters for array copy stubs
65 
66 // Note: we have noticed a huge change in behavior on a microbenchmark
67 // from platform to platform depending on the configuration.
68 
69 // Instead of adding a series of command line options (which
70 // unfortunately have to be done in the shared file and cannot appear
71 // only in the ARM port), the tested results are hard-coded here in a set
72 // of options, selected by specifying 'ArmCopyPlatform'
73 
74 // Currently, this 'platform' is hardcoded to a value that is a good
75 // enough trade-off.  However, one can easily modify this file to test
76 // the hard-coded configurations or create new ones. If the gain is
77 // significant, we could decide to either add command line options or
78 // add code to automatically choose a configuration.
79 
80 // see comments below for the various configurations created
81 #define DEFAULT_ARRAYCOPY_CONFIG 0
82 #define TEGRA2_ARRAYCOPY_CONFIG 1
83 #define IMX515_ARRAYCOPY_CONFIG 2
84 
85 // Hard coded choices (XXX: could be changed to a command line option)
86 #define ArmCopyPlatform DEFAULT_ARRAYCOPY_CONFIG
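// For example, to experiment with the Tegra2-tuned settings described below,
// the define above could be changed to:
//   #define ArmCopyPlatform TEGRA2_ARRAYCOPY_CONFIG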
87 
88 #ifdef AARCH64
89 #define ArmCopyCacheLineSize 64
90 #else
91 #define ArmCopyCacheLineSize 32 // not worth optimizing to 64 according to measured gains
92 #endif // AARCH64
93 
94 // TODO-AARCH64: tune and revise AArch64 arraycopy optimizations
95 
96 // configuration for each kind of loop
97 typedef struct {
98   int pld_distance;       // prefetch distance (0 => no prefetch, <0: prefetch_before);
99 #ifndef AARCH64
100   bool split_ldm;         // if true, split each LDM into LDMs with fewer registers
101   bool split_stm;         // if true, split each STM into STMs with fewer registers
102 #endif // !AARCH64
103 } arraycopy_loop_config;
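// Illustration of the pld_distance convention used in the tables below: a
// value of -256 requests prefetching 256 bytes ahead with the "prefetch
// before" scheme, while a positive value such as 96 selects the "prefetch
// after" scheme with a 96-byte distance.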
104 
105 // configuration for all loops
106 typedef struct {
107   // const char *description;
108   arraycopy_loop_config forward_aligned;
109   arraycopy_loop_config backward_aligned;
110   arraycopy_loop_config forward_shifted;
111   arraycopy_loop_config backward_shifted;
112 } arraycopy_platform_config;
113 
114 // configured platforms
115 static arraycopy_platform_config arraycopy_configurations[] = {
116   // configuration parameters for arraycopy loops
117 #ifdef AARCH64
118   {
119     {-256 }, // forward aligned
120     {-128 }, // backward aligned
121     {-256 }, // forward shifted
122     {-128 }  // backward shifted
123   }
124 #else
125 
126   // Configurations were chosen based on manual analysis of benchmark
127   // results, minimizing overhead with respect to best results on the
128   // different test cases.
129 
130   // Prefetch before is always favored since it avoids dirtying the
131   // cache uselessly for small copies. Code for prefetch after has
132   // been kept in case the difference is significant for some
133   // platforms but we might consider dropping it.
134 
135   // distance, ldm, stm
136   {
137     // default: tradeoff tegra2/imx515/nv-tegra2,
138     // Notes on benchmarking:
139     // - not far from optimal configuration on nv-tegra2
140     // - within 5% of optimal configuration except for backward aligned on IMX
141     // - up to 40% from optimal configuration for backward shifted and backward aligned for tegra2
142     //   but still on par with the operating system copy
143     {-256, true,  true  }, // forward aligned
144     {-256, true,  true  }, // backward aligned
145     {-256, false, false }, // forward shifted
146     {-256, true,  true  } // backward shifted
147   },
148   {
149     // configuration tuned on tegra2-4.
150     // Warning: should not be used on nv-tegra2 !
151     // Notes:
152     // - prefetch after gives 40% gain on backward copies on tegra2-4,
153     //   resulting in better numbers than the operating system
154     //   copy. However, this can lead to a 300% loss on nv-tegra and has
155     //   more impact on the cache (fetches further than what is
156     //   copied). Use this configuration with care, in case it improves
157     //   reference benchmarks.
158     {-256, true,  true  }, // forward aligned
159     {96,   false, false }, // backward aligned
160     {-256, false, false }, // forward shifted
161     {96,   false, false } // backward shifted
162   },
163   {
164     // configuration tuned on imx515
165     // Notes:
166     // - a smaller prefetch distance is sufficient to get good results and might be more stable
167     // - refined backward aligned options within 5% of optimal configuration except for
168     //   tests where the arrays fit in the cache
169     {-160, false, false }, // forward aligned
170     {-160, false, false }, // backward aligned
171     {-160, false, false }, // forward shifted
172     {-160, true,  true  } // backward shifted
173   }
174 #endif // AARCH64
175 };
176 
177 class StubGenerator: public StubCodeGenerator {
178 
179 #ifdef PRODUCT
180 #define inc_counter_np(a,b,c) ((void)0)
181 #else
182 #define inc_counter_np(counter, t1, t2) \
183   BLOCK_COMMENT("inc_counter " #counter); \
184   __ inc_counter(&counter, t1, t2);
185 #endif
186 
187  private:
188 
189   address generate_call_stub(address& return_address) {
190     StubCodeMark mark(this, "StubRoutines", "call_stub");
191     address start = __ pc();
192 
193 #ifdef AARCH64
194     const int saved_regs_size = 192;
195 
196     __ stp(FP, LR, Address(SP, -saved_regs_size, pre_indexed));
197     __ mov(FP, SP);
198 
199     int sp_offset = 16;
200     assert(frame::entry_frame_call_wrapper_offset * wordSize == sp_offset, "adjust this code");
201     __ stp(R0,  ZR,  Address(SP, sp_offset)); sp_offset += 16;
202 
203     const int saved_result_and_result_type_offset = sp_offset;
204     __ stp(R1,  R2,  Address(SP, sp_offset)); sp_offset += 16;
205     __ stp(R19, R20, Address(SP, sp_offset)); sp_offset += 16;
206     __ stp(R21, R22, Address(SP, sp_offset)); sp_offset += 16;
207     __ stp(R23, R24, Address(SP, sp_offset)); sp_offset += 16;
208     __ stp(R25, R26, Address(SP, sp_offset)); sp_offset += 16;
209     __ stp(R27, R28, Address(SP, sp_offset)); sp_offset += 16;
210 
211     __ stp_d(V8,  V9,  Address(SP, sp_offset)); sp_offset += 16;
212     __ stp_d(V10, V11, Address(SP, sp_offset)); sp_offset += 16;
213     __ stp_d(V12, V13, Address(SP, sp_offset)); sp_offset += 16;
214     __ stp_d(V14, V15, Address(SP, sp_offset)); sp_offset += 16;
215     assert (sp_offset == saved_regs_size, "adjust this code");
216 
217     __ mov(Rmethod, R3);
218     __ mov(Rthread, R7);
219     __ reinit_heapbase();
220 
221     { // Pass parameters
222       Label done_parameters, pass_parameters;
223 
224       __ mov(Rparams, SP);
225       __ cbz_w(R6, done_parameters);
226 
227       __ sub(Rtemp, SP, R6, ex_uxtw, LogBytesPerWord);
228       __ align_reg(SP, Rtemp, StackAlignmentInBytes);
229       __ add(Rparams, SP, R6, ex_uxtw, LogBytesPerWord);
230 
231       __ bind(pass_parameters);
232       __ subs_w(R6, R6, 1);
233       __ ldr(Rtemp, Address(R5, wordSize, post_indexed));
234       __ str(Rtemp, Address(Rparams, -wordSize, pre_indexed));
235       __ b(pass_parameters, ne);
236 
237       __ bind(done_parameters);
238 
239 #ifdef ASSERT
240       {
241         Label L;
242         __ cmp(SP, Rparams);
243         __ b(L, eq);
244         __ stop("SP does not match Rparams");
245         __ bind(L);
246       }
247 #endif
248     }
249 
250     __ mov(Rsender_sp, SP);
251     __ blr(R4);
252     return_address = __ pc();
253 
254     __ mov(SP, FP);
255 
256     __ ldp(R1, R2, Address(SP, saved_result_and_result_type_offset));
257 
258     { // Handle return value
259       Label cont;
260       __ str(R0, Address(R1));
261 
262       __ cmp_w(R2, T_DOUBLE);
263       __ ccmp_w(R2, T_FLOAT, Assembler::flags_for_condition(eq), ne);
264       __ b(cont, ne);
265 
266       __ str_d(V0, Address(R1));
267       __ bind(cont);
268     }
269 
270     sp_offset = saved_result_and_result_type_offset + 16;
271     __ ldp(R19, R20, Address(SP, sp_offset)); sp_offset += 16;
272     __ ldp(R21, R22, Address(SP, sp_offset)); sp_offset += 16;
273     __ ldp(R23, R24, Address(SP, sp_offset)); sp_offset += 16;
274     __ ldp(R25, R26, Address(SP, sp_offset)); sp_offset += 16;
275     __ ldp(R27, R28, Address(SP, sp_offset)); sp_offset += 16;
276 
277     __ ldp_d(V8,  V9,  Address(SP, sp_offset)); sp_offset += 16;
278     __ ldp_d(V10, V11, Address(SP, sp_offset)); sp_offset += 16;
279     __ ldp_d(V12, V13, Address(SP, sp_offset)); sp_offset += 16;
280     __ ldp_d(V14, V15, Address(SP, sp_offset)); sp_offset += 16;
281     assert (sp_offset == saved_regs_size, "adjust this code");
282 
283     __ ldp(FP, LR, Address(SP, saved_regs_size, post_indexed));
284     __ ret();
285 
286 #else // AARCH64
287 
288     assert(frame::entry_frame_call_wrapper_offset == 0, "adjust this code");
289 
290     __ mov(Rtemp, SP);
291     __ push(RegisterSet(FP) | RegisterSet(LR));
292     __ fpush_hardfp(FloatRegisterSet(D8, 8));
293     __ stmdb(SP, RegisterSet(R0, R2) | RegisterSet(R4, R6) | RegisterSet(R8, R10) | altFP_7_11, writeback);
294     __ mov(Rmethod, R3);
295     __ ldmia(Rtemp, RegisterSet(R1, R3) | Rthread); // stacked arguments
296 
297     // XXX: TODO
298     // Would be better with respect to native tools if the following
299     // setting of FP was changed to conform to the native ABI, with FP
300     // pointing to the saved FP slot (and the corresponding modifications
301     // for entry_frame_call_wrapper_offset and frame::real_fp).
302     __ mov(FP, SP);
303 
304     {
305       Label no_parameters, pass_parameters;
306       __ cmp(R3, 0);
307       __ b(no_parameters, eq);
308 
309       __ bind(pass_parameters);
310       __ ldr(Rtemp, Address(R2, wordSize, post_indexed)); // Rtemp OK, unused and scratchable
311       __ subs(R3, R3, 1);
312       __ push(Rtemp);
313       __ b(pass_parameters, ne);
314       __ bind(no_parameters);
315     }
316 
317     __ mov(Rsender_sp, SP);
318     __ blx(R1);
319     return_address = __ pc();
320 
321     __ add(SP, FP, wordSize); // Skip link to JavaCallWrapper
322     __ pop(RegisterSet(R2, R3));
323 #ifndef __ABI_HARD__
324     __ cmp(R3, T_LONG);
325     __ cmp(R3, T_DOUBLE, ne);
326     __ str(R0, Address(R2));
327     __ str(R1, Address(R2, wordSize), eq);
328 #else
329     Label cont, l_float, l_double;
330 
331     __ cmp(R3, T_DOUBLE);
332     __ b(l_double, eq);
333 
334     __ cmp(R3, T_FLOAT);
335     __ b(l_float, eq);
336 
337     __ cmp(R3, T_LONG);
338     __ str(R0, Address(R2));
339     __ str(R1, Address(R2, wordSize), eq);
340     __ b(cont);
341 
342 
343     __ bind(l_double);
344     __ fstd(D0, Address(R2));
345     __ b(cont);
346 
347     __ bind(l_float);
348     __ fsts(S0, Address(R2));
349 
350     __ bind(cont);
351 #endif
352 
353     __ pop(RegisterSet(R4, R6) | RegisterSet(R8, R10) | altFP_7_11);
354     __ fpop_hardfp(FloatRegisterSet(D8, 8));
355     __ pop(RegisterSet(FP) | RegisterSet(PC));
356 
357 #endif // AARCH64
358     return start;
359   }
360 
361 
362   // (in) Rexception_obj: exception oop
363   address generate_catch_exception() {
364     StubCodeMark mark(this, "StubRoutines", "catch_exception");
365     address start = __ pc();
366 
367     __ str(Rexception_obj, Address(Rthread, Thread::pending_exception_offset()));
368     __ b(StubRoutines::_call_stub_return_address);
369 
370     return start;
371   }
372 
373 
374   // (in) Rexception_pc: return address
375   address generate_forward_exception() {
376     StubCodeMark mark(this, "StubRoutines", "forward exception");
377     address start = __ pc();
378 
379     __ mov(c_rarg0, Rthread);
380     __ mov(c_rarg1, Rexception_pc);
381     __ call_VM_leaf(CAST_FROM_FN_PTR(address,
382                          SharedRuntime::exception_handler_for_return_address),
383                          c_rarg0, c_rarg1);
384     __ ldr(Rexception_obj, Address(Rthread, Thread::pending_exception_offset()));
385     const Register Rzero = __ zero_register(Rtemp); // Rtemp OK (cleared by above call)
386     __ str(Rzero, Address(Rthread, Thread::pending_exception_offset()));
387 
388 #ifdef ASSERT
389     // make sure exception is set
390     { Label L;
391       __ cbnz(Rexception_obj, L);
392       __ stop("StubRoutines::forward exception: no pending exception (2)");
393       __ bind(L);
394     }
395 #endif
396 
397     // Verify that there is really a valid exception in Rexception_obj.
398     __ verify_oop(Rexception_obj);
399 
400     __ jump(R0); // handler is returned in R0 by runtime function
401     return start;
402   }
403 
404 
405 #ifndef AARCH64
406 
407   // Integer division shared routine
408   //   Input:
409   //     R0  - dividend
410   //     R2  - divisor
411   //   Output:
412   //     R0  - remainder
413   //     R1  - quotient
414   //   Destroys:
415   //     R2
416   //     LR
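  //   Worked example (illustrative only): dividend = 7, divisor = 3
  //   gives remainder = 1 in R0 and quotient = 2 in R1.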
417   address generate_idiv_irem() {
418     Label positive_arguments, negative_or_zero, call_slow_path;
419     Register dividend  = R0;
420     Register divisor   = R2;
421     Register remainder = R0;
422     Register quotient  = R1;
423     Register tmp       = LR;
424     assert(dividend == remainder, "must be");
425 
426     address start = __ pc();
427 
428     // Check for special cases: divisor <= 0 or dividend < 0
429     __ cmp(divisor, 0);
430     __ orrs(quotient, dividend, divisor, ne);
431     __ b(negative_or_zero, le);
432 
433     __ bind(positive_arguments);
434     // Save return address on stack to free one extra register
435     __ push(LR);
436     // Approximate the maximum order of the quotient
437     __ clz(tmp, dividend);
438     __ clz(quotient, divisor);
439     __ subs(tmp, quotient, tmp);
440     __ mov(quotient, 0);
441     // Jump to the appropriate place in the unrolled loop below
442     __ ldr(PC, Address(PC, tmp, lsl, 2), pl);
443     // If divisor is greater than dividend, return immediately
444     __ pop(PC);
445 
446     // Offset table
447     Label offset_table[32];
448     int i;
449     for (i = 0; i <= 31; i++) {
450       __ emit_address(offset_table[i]);
451     }
452 
453     // Unrolled loop of 32 division steps
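    // Each step i below is a classic binary long-division step: if
    // (divisor << i) still fits into the current remainder, it is subtracted
    // and bit i of the quotient is set (e.g. 7/3: step i=1 subtracts 6 and
    // sets quotient bit 1, leaving remainder 1 and quotient 2).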
454     for (i = 31; i >= 0; i--) {
455       __ bind(offset_table[i]);
456       __ cmp(remainder, AsmOperand(divisor, lsl, i));
457       __ sub(remainder, remainder, AsmOperand(divisor, lsl, i), hs);
458       __ add(quotient, quotient, 1 << i, hs);
459     }
460     __ pop(PC);
461 
462     __ bind(negative_or_zero);
463     // Find the combination of argument signs and jump to corresponding handler
464     __ andr(quotient, dividend, 0x80000000, ne);
465     __ orr(quotient, quotient, AsmOperand(divisor, lsr, 31), ne);
466     __ add(PC, PC, AsmOperand(quotient, ror, 26), ne);
467     __ str(LR, Address(Rthread, JavaThread::saved_exception_pc_offset()));
468 
469     // The leaf runtime function can destroy R0-R3 and R12 registers which are still alive
470     RegisterSet saved_registers = RegisterSet(R3) | RegisterSet(R12);
471 #if R9_IS_SCRATCHED
472     // Safer to save R9 here since callers may have been written
473     // assuming R9 survives. This is suboptimal but may not be worth
474     // revisiting for this slow case.
475 
476     // save also R10 for alignment
477     saved_registers = saved_registers | RegisterSet(R9, R10);
478 #endif
479     {
480       // divisor == 0
481       FixedSizeCodeBlock zero_divisor(_masm, 8, true);
482       __ push(saved_registers);
483       __ mov(R0, Rthread);
484       __ mov(R1, LR);
485       __ mov(R2, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
486       __ b(call_slow_path);
487     }
488 
489     {
490       // divisor > 0 && dividend < 0
491       FixedSizeCodeBlock positive_divisor_negative_dividend(_masm, 8, true);
492       __ push(LR);
493       __ rsb(dividend, dividend, 0);
494       __ bl(positive_arguments);
495       __ rsb(remainder, remainder, 0);
496       __ rsb(quotient, quotient, 0);
497       __ pop(PC);
498     }
499 
500     {
501       // divisor < 0 && dividend > 0
502       FixedSizeCodeBlock negative_divisor_positive_dividend(_masm, 8, true);
503       __ push(LR);
504       __ rsb(divisor, divisor, 0);
505       __ bl(positive_arguments);
506       __ rsb(quotient, quotient, 0);
507       __ pop(PC);
508     }
509 
510     {
511       // divisor < 0 && dividend < 0
512       FixedSizeCodeBlock negative_divisor_negative_dividend(_masm, 8, true);
513       __ push(LR);
514       __ rsb(dividend, dividend, 0);
515       __ rsb(divisor, divisor, 0);
516       __ bl(positive_arguments);
517       __ rsb(remainder, remainder, 0);
518       __ pop(PC);
519     }
520 
521     __ bind(call_slow_path);
522     __ call(CAST_FROM_FN_PTR(address, SharedRuntime::continuation_for_implicit_exception));
523     __ pop(saved_registers);
524     __ bx(R0);
525 
526     return start;
527   }
528 
529 
530  // As per atomic.hpp the Atomic read-modify-write operations must be logically implemented as:
531  //  <fence>; <op>; <membar StoreLoad|StoreStore>
532  // But for load-linked/store-conditional based systems a fence here simply means
533  // no load/store can be reordered with respect to the initial load-linked, so we have:
534  // <membar storeload|loadload> ; load-linked; <op>; store-conditional; <membar storeload|storestore>
535  // There are no memory actions in <op> so nothing further is needed.
536  //
537  // So we define the following for convenience:
538 #define MEMBAR_ATOMIC_OP_PRE \
539     MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad|MacroAssembler::LoadLoad)
540 #define MEMBAR_ATOMIC_OP_POST \
541     MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad|MacroAssembler::StoreStore)
542 
543   // Note: JDK 9 only supports ARMv7+ so we always have ldrexd available even though the
544   // code below allows for it to be otherwise. The else clause indicates an ARMv5 system
545   // for which we do not support MP and so membars are not necessary. This ARMv5 code will
546   // be removed in the future.
547 
548   // Support for jint Atomic::add(jint add_value, volatile jint *dest)
549   //
550   // Arguments :
551   //
552   //      add_value:      R0
553   //      dest:           R1
554   //
555   // Results:
556   //
557   //     R0: the new value stored in dest
558   //
559   // Overwrites:
560   //
561   //     R1, R2, R3
562   //
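  // Illustration (not generated code): if *dest == 4 and add_value == 3,
  // the stub atomically updates *dest to 7 and returns 7 in R0.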
563   address generate_atomic_add() {
564     address start;
565 
566     StubCodeMark mark(this, "StubRoutines", "atomic_add");
567     Label retry;
568     start = __ pc();
569     Register addval    = R0;
570     Register dest      = R1;
571     Register prev      = R2;
572     Register ok        = R2;
573     Register newval    = R3;
574 
575     if (VM_Version::supports_ldrex()) {
576       __ membar(MEMBAR_ATOMIC_OP_PRE, prev);
577       __ bind(retry);
578       __ ldrex(newval, Address(dest));
579       __ add(newval, addval, newval);
580       __ strex(ok, newval, Address(dest));
581       __ cmp(ok, 0);
582       __ b(retry, ne);
583       __ mov (R0, newval);
584       __ membar(MEMBAR_ATOMIC_OP_POST, prev);
585     } else {
586       __ bind(retry);
587       __ ldr (prev, Address(dest));
588       __ add(newval, addval, prev);
589       __ atomic_cas_bool(prev, newval, dest, 0, noreg/*ignored*/);
590       __ b(retry, ne);
591       __ mov (R0, newval);
592     }
593     __ bx(LR);
594 
595     return start;
596   }
597 
598   // Support for jint Atomic::xchg(jint exchange_value, volatile jint *dest)
599   //
600   // Arguments :
601   //
602   //      exchange_value: R0
603   //      dest:           R1
604   //
605   // Results:
606   //
607   //     R0: the value previously stored in dest
608   //
609   // Overwrites:
610   //
611   //     R1, R2, R3
612   //
613   address generate_atomic_xchg() {
614     address start;
615 
616     StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
617     start = __ pc();
618     Register newval    = R0;
619     Register dest      = R1;
620     Register prev      = R2;
621 
622     Label retry;
623 
624     if (VM_Version::supports_ldrex()) {
625       Register ok=R3;
626       __ membar(MEMBAR_ATOMIC_OP_PRE, prev);
627       __ bind(retry);
628       __ ldrex(prev, Address(dest));
629       __ strex(ok, newval, Address(dest));
630       __ cmp(ok, 0);
631       __ b(retry, ne);
632       __ mov (R0, prev);
633       __ membar(MEMBAR_ATOMIC_OP_POST, prev);
634     } else {
635       __ bind(retry);
636       __ ldr (prev, Address(dest));
637       __ atomic_cas_bool(prev, newval, dest, 0, noreg/*ignored*/);
638       __ b(retry, ne);
639       __ mov (R0, prev);
640     }
641     __ bx(LR);
642 
643     return start;
644   }
645 
646   // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint *dest, jint compare_value)
647   //
648   // Arguments :
649   //
650   //      compare_value:  R0
651   //      exchange_value: R1
652   //      dest:           R2
653   //
654   // Results:
655   //
656   //     R0: the value previously stored in dest
657   //
658   // Overwrites:
659   //
660   //     R0, R1, R2, R3, Rtemp
661   //
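  // Illustration (not generated code): if *dest equals compare_value, the
  // stub atomically stores exchange_value into *dest; either way the previous
  // value of *dest is returned in R0, so the caller can tell whether the
  // swap happened.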
662   address generate_atomic_cmpxchg() {
663     address start;
664 
665     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
666     start = __ pc();
667     Register cmp       = R0;
668     Register newval    = R1;
669     Register dest      = R2;
670     Register temp1     = R3;
671     Register temp2     = Rtemp; // Rtemp free (native ABI)
672 
673     __ membar(MEMBAR_ATOMIC_OP_PRE, temp1);
674 
675     // atomic_cas returns previous value in R0
676     __ atomic_cas(temp1, temp2, cmp, newval, dest, 0);
677 
678     __ membar(MEMBAR_ATOMIC_OP_POST, temp1);
679 
680     __ bx(LR);
681 
682     return start;
683   }
684 
685   // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
686   // with arguments reordered beforehand by a wrapper to (jlong compare_value, jlong exchange_value, volatile jlong *dest)
687   //
688   // Arguments :
689   //
690   //      compare_value:  R1 (High), R0 (Low)
691   //      exchange_value: R3 (High), R2 (Low)
692   //      dest:           SP+0
693   //
694   // Results:
695   //
696   //     R0:R1: the value previously stored in dest
697   //
698   // Overwrites:
699   //
700   address generate_atomic_cmpxchg_long() {
701     address start;
702 
703     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
704     start = __ pc();
705     Register cmp_lo      = R0;
706     Register cmp_hi      = R1;
707     Register newval_lo   = R2;
708     Register newval_hi   = R3;
709     Register addr        = Rtemp;  /* After load from stack */
710     Register temp_lo     = R4;
711     Register temp_hi     = R5;
712     Register temp_result = R8;
713     assert_different_registers(cmp_lo, newval_lo, temp_lo, addr, temp_result, R7);
714     assert_different_registers(cmp_hi, newval_hi, temp_hi, addr, temp_result, R7);
715 
716     __ membar(MEMBAR_ATOMIC_OP_PRE, Rtemp); // Rtemp free (native ABI)
717 
718     // Stack is unaligned; maintain double word alignment by pushing
719     // an odd number of regs.
720     __ push(RegisterSet(temp_result) | RegisterSet(temp_lo, temp_hi));
721     __ ldr(addr, Address(SP, 12));
722 
723     // atomic_cas64 returns previous value in temp_lo, temp_hi
724     __ atomic_cas64(temp_lo, temp_hi, temp_result, cmp_lo, cmp_hi,
725                     newval_lo, newval_hi, addr, 0);
726     __ mov(R0, temp_lo);
727     __ mov(R1, temp_hi);
728 
729     __ pop(RegisterSet(temp_result) | RegisterSet(temp_lo, temp_hi));
730 
731     __ membar(MEMBAR_ATOMIC_OP_POST, Rtemp); // Rtemp free (native ABI)
732     __ bx(LR);
733 
734     return start;
735   }
736 
737   address generate_atomic_load_long() {
738     address start;
739 
740     StubCodeMark mark(this, "StubRoutines", "atomic_load_long");
741     start = __ pc();
742     Register result_lo = R0;
743     Register result_hi = R1;
744     Register src       = R0;
745 
746     if (!os::is_MP()) {
747       __ ldmia(src, RegisterSet(result_lo, result_hi));
748       __ bx(LR);
749     } else if (VM_Version::supports_ldrexd()) {
750       __ ldrexd(result_lo, Address(src));
751       __ clrex(); // FIXME: safe to remove?
752       __ bx(LR);
753     } else {
754       __ stop("Atomic load(jlong) unsupported on this platform");
755       __ bx(LR);
756     }
757 
758     return start;
759   }
760 
761   address generate_atomic_store_long() {
762     address start;
763 
764     StubCodeMark mark(this, "StubRoutines", "atomic_store_long");
765     start = __ pc();
766     Register newval_lo = R0;
767     Register newval_hi = R1;
768     Register dest      = R2;
769     Register scratch_lo    = R2;
770     Register scratch_hi    = R3;  /* After load from stack */
771     Register result    = R3;
772 
773     if (!os::is_MP()) {
774       __ stmia(dest, RegisterSet(newval_lo, newval_hi));
775       __ bx(LR);
776     } else if (VM_Version::supports_ldrexd()) {
777       __ mov(Rtemp, dest);  // get dest to Rtemp
778       Label retry;
779       __ bind(retry);
780       __ ldrexd(scratch_lo, Address(Rtemp));
781       __ strexd(result, R0, Address(Rtemp));
782       __ rsbs(result, result, 1);
783       __ b(retry, eq);
784       __ bx(LR);
785     } else {
786       __ stop("Atomic store(jlong) unsupported on this platform");
787       __ bx(LR);
788     }
789 
790     return start;
791   }
792 
793 
794 #endif // AARCH64
795 
796 #ifdef COMPILER2
797   // Support for uint StubRoutine::Arm::partial_subtype_check( Klass sub, Klass super );
798   // Arguments :
799   //
800   //      ret  : R0, returned
801   //      icc/xcc: set as R0 (depending on wordSize)
802   //      sub  : R1, argument, not changed
803   //      super: R2, argument, not changed
804   //      raddr: LR, blown by call
805   address generate_partial_subtype_check() {
806     __ align(CodeEntryAlignment);
807     StubCodeMark mark(this, "StubRoutines", "partial_subtype_check");
808     address start = __ pc();
809 
810     // based on SPARC check_klass_subtype_[fast|slow]_path (without CompressedOops)
811 
812     // R0 used as tmp_reg (in addition to return reg)
813     Register sub_klass = R1;
814     Register super_klass = R2;
815     Register tmp_reg2 = R3;
816     Register tmp_reg3 = R4;
817 #define saved_set tmp_reg2, tmp_reg3
818 
819     Label L_loop, L_fail;
820 
821     int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
822 
823     // fast check should be redundant
824 
825     // slow check
826     {
827       __ raw_push(saved_set);
828 
829       // a couple of useful fields in sub_klass:
830       int ss_offset = in_bytes(Klass::secondary_supers_offset());
831 
832       // Do a linear scan of the secondary super-klass chain.
833       // This code is rarely used, so simplicity is a virtue here.
834 
835       inc_counter_np(SharedRuntime::_partial_subtype_ctr, tmp_reg2, tmp_reg3);
836 
837       Register scan_temp = tmp_reg2;
838       Register count_temp = tmp_reg3;
839 
840       // We will consult the secondary-super array.
841       __ ldr(scan_temp, Address(sub_klass, ss_offset));
842 
843       Register search_key = super_klass;
844 
845       // Load the array length.
846       __ ldr_s32(count_temp, Address(scan_temp, Array<Klass*>::length_offset_in_bytes()));
847       __ add(scan_temp, scan_temp, Array<Klass*>::base_offset_in_bytes());
848 
849       __ add(count_temp, count_temp, 1);
850 
851       // Top of search loop
852       __ bind(L_loop);
853       // Notes:
854       //  scan_temp starts at the array elements
855       //  count_temp is 1+size
856       __ subs(count_temp, count_temp, 1);
857       __ b(L_fail, eq); // not found in the array
858 
859       // Load next super to check
860       // In the array of super classes, elements are pointer-sized.
861       int element_size = wordSize;
862       __ ldr(R0, Address(scan_temp, element_size, post_indexed));
863 
864       // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list
865       __ subs(R0, R0, search_key); // set R0 to 0 on success (and flags to eq)
866 
867       // A miss means we are NOT a subtype and need to keep looping
868       __ b(L_loop, ne);
869 
870       // Falling out the bottom means we found a hit; we ARE a subtype
871 
872       // Success.  Cache the super we found and proceed in triumph.
873       __ str(super_klass, Address(sub_klass, sc_offset));
874 
875       // Return success
876       // R0 is already 0 and flags are already set to eq
877       __ raw_pop(saved_set);
878       __ ret();
879 
880       // Return failure
881       __ bind(L_fail);
882 #ifdef AARCH64
883       // count_temp is 0, can't use ZR here
884       __ adds(R0, count_temp, 1); // sets the flags
885 #else
886       __ movs(R0, 1); // sets the flags
887 #endif
888       __ raw_pop(saved_set);
889       __ ret();
890     }
891     return start;
892   }
893 #undef saved_set
894 #endif // COMPILER2
895 
896 
897   //----------------------------------------------------------------------------------------------------
898   // Non-destructive plausibility checks for oops
899 
900   address generate_verify_oop() {
901     StubCodeMark mark(this, "StubRoutines", "verify_oop");
902     address start = __ pc();
903 
904     // Incoming arguments:
905     //
906     // R0: error message (char* )
907     // R1: address of register save area
908     // R2: oop to verify
909     //
910     // All registers are saved before calling this stub. However, condition flags should be saved here.
911 
912     const Register oop   = R2;
913     const Register klass = R3;
914     const Register tmp1  = R6;
915     const Register tmp2  = R8;
916 
917     const Register flags     = Rtmp_save0; // R4/R19
918     const Register ret_addr  = Rtmp_save1; // R5/R20
919     assert_different_registers(oop, klass, tmp1, tmp2, flags, ret_addr, R7);
920 
921     Label exit, error;
922     InlinedAddress verify_oop_count((address) StubRoutines::verify_oop_count_addr());
923 
924 #ifdef AARCH64
925     __ mrs(flags, Assembler::SysReg_NZCV);
926 #else
927     __ mrs(Assembler::CPSR, flags);
928 #endif // AARCH64
929 
930     __ ldr_literal(tmp1, verify_oop_count);
931     __ ldr_s32(tmp2, Address(tmp1));
932     __ add(tmp2, tmp2, 1);
933     __ str_32(tmp2, Address(tmp1));
934 
935     // make sure object is 'reasonable'
936     __ cbz(oop, exit);                           // if obj is NULL it is ok
937 
938     // Check if the oop is in the right area of memory
939     // Note: oop_mask and oop_bits must be updated if the code is saved/reused
940     const address oop_mask = (address) Universe::verify_oop_mask();
941     const address oop_bits = (address) Universe::verify_oop_bits();
942     __ mov_address(tmp1, oop_mask, symbolic_Relocation::oop_mask_reference);
943     __ andr(tmp2, oop, tmp1);
944     __ mov_address(tmp1, oop_bits, symbolic_Relocation::oop_bits_reference);
945     __ cmp(tmp2, tmp1);
946     __ b(error, ne);
947 
948     // make sure klass is 'reasonable'
949     __ load_klass(klass, oop);                   // get klass
950     __ cbz(klass, error);                        // if klass is NULL it is broken
951 
952     // return if everything seems ok
953     __ bind(exit);
954 
955 #ifdef AARCH64
956     __ msr(Assembler::SysReg_NZCV, flags);
957 #else
958     __ msr(Assembler::CPSR_f, flags);
959 #endif // AARCH64
960 
961     __ ret();
962 
963     // handle errors
964     __ bind(error);
965 
966     __ mov(ret_addr, LR);                      // save return address
967 
968     // R0: error message
969     // R1: register save area
970     __ call(CAST_FROM_FN_PTR(address, MacroAssembler::debug));
971 
972     __ mov(LR, ret_addr);
973     __ b(exit);
974 
975     __ bind_literal(verify_oop_count);
976 
977     return start;
978   }
979 
980   //----------------------------------------------------------------------------------------------------
981   // Array copy stubs
982 
983   //
984   //  Generate overlap test for array copy stubs
985   //
986   //  Input:
987   //    R0    -  array1
988   //    R1    -  array2
989   //    R2    -  element count, 32-bit int
990   //
991   //  input registers are preserved
992   //
993   void array_overlap_test(address no_overlap_target, int log2_elem_size, Register tmp1, Register tmp2) {
994     assert(no_overlap_target != NULL, "must be generated");
995     array_overlap_test(no_overlap_target, NULL, log2_elem_size, tmp1, tmp2);
996   }
997   void array_overlap_test(Label& L_no_overlap, int log2_elem_size, Register tmp1, Register tmp2) {
998     array_overlap_test(NULL, &L_no_overlap, log2_elem_size, tmp1, tmp2);
999   }
1000   void array_overlap_test(address no_overlap_target, Label* NOLp, int log2_elem_size, Register tmp1, Register tmp2) {
1001     const Register from       = R0;
1002     const Register to         = R1;
1003     const Register count      = R2;
1004     const Register to_from    = tmp1; // to - from
1005 #ifndef AARCH64
1006     const Register byte_count = (log2_elem_size == 0) ? count : tmp2; // count << log2_elem_size
1007 #endif // AARCH64
1008     assert_different_registers(from, to, count, tmp1, tmp2);
1009 
1010     // The no_overlap version works if 'to' is lower (unsigned) than 'from',
1011     // or if 'to' is at least (count*size) above 'from'.
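    // Worked example (illustrative values): from = 0x1000, to = 0x1010,
    // count = 8, log2_elem_size = 2 (i.e. 32 bytes): to - from = 0x10 is
    // neither 'lo' nor >= 0x20, so neither branch below is taken and
    // execution continues into the overlapping (backward) copy code.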
1012 
1013     BLOCK_COMMENT("Array Overlap Test:");
1014     __ subs(to_from, to, from);
1015 #ifndef AARCH64
1016     if (log2_elem_size != 0) {
1017       __ mov(byte_count, AsmOperand(count, lsl, log2_elem_size));
1018     }
1019 #endif // !AARCH64
1020     if (NOLp == NULL)
1021       __ b(no_overlap_target,lo);
1022     else
1023       __ b((*NOLp), lo);
1024 #ifdef AARCH64
1025     __ subs(ZR, to_from, count, ex_sxtw, log2_elem_size);
1026 #else
1027     __ cmp(to_from, byte_count);
1028 #endif // AARCH64
1029     if (NOLp == NULL)
1030       __ b(no_overlap_target, ge);
1031     else
1032       __ b((*NOLp), ge);
1033   }
1034 
1035 #ifdef AARCH64
1036   // TODO-AARCH64: revise usages of bulk_* methods (probably ldp's and stp's should interleave)
1037 
1038   // Loads [from, from + count*wordSize) into regs[0], regs[1], ..., regs[count-1]
1039   // and increases 'from' by count*wordSize.
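  // For example, bulk_load_forward(from, data_regs, 8) as used in the copy
  // loops below emits four LDP instructions covering R3..R10 and advances
  // 'from' by 64 bytes.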
1040   void bulk_load_forward(Register from, const Register regs[], int count) {
1041     assert (count > 0 && count % 2 == 0, "count must be positive even number");
1042     int bytes = count * wordSize;
1043 
1044     int offset = 0;
1045     __ ldp(regs[0], regs[1], Address(from, bytes, post_indexed));
1046     offset += 2*wordSize;
1047 
1048     for (int i = 2; i < count; i += 2) {
1049       __ ldp(regs[i], regs[i+1], Address(from, -bytes + offset));
1050       offset += 2*wordSize;
1051     }
1052 
1053     assert (offset == bytes, "must be");
1054   }
1055 
1056   // Stores regs[0], regs[1], ..., regs[count-1] to [to, to + count*wordSize)
1057   // and increases 'to' by count*wordSize.
1058   void bulk_store_forward(Register to, const Register regs[], int count) {
1059     assert (count > 0 && count % 2 == 0, "count must be positive even number");
1060     int bytes = count * wordSize;
1061 
1062     int offset = 0;
1063     __ stp(regs[0], regs[1], Address(to, bytes, post_indexed));
1064     offset += 2*wordSize;
1065 
1066     for (int i = 2; i < count; i += 2) {
1067       __ stp(regs[i], regs[i+1], Address(to, -bytes + offset));
1068       offset += 2*wordSize;
1069     }
1070 
1071     assert (offset == bytes, "must be");
1072   }
1073 
1074   // Loads [from - count*wordSize, from) into regs[0], regs[1], ..., regs[count-1]
1075   // and decreases 'from' by count*wordSize.
1076   // Note that the word with lowest address goes to regs[0].
1077   void bulk_load_backward(Register from, const Register regs[], int count) {
1078     assert (count > 0 && count % 2 == 0, "count must be positive even number");
1079     int bytes = count * wordSize;
1080 
1081     int offset = 0;
1082 
1083     for (int i = count - 2; i > 0; i -= 2) {
1084       offset += 2*wordSize;
1085       __ ldp(regs[i], regs[i+1], Address(from, -offset));
1086     }
1087 
1088     offset += 2*wordSize;
1089     __ ldp(regs[0], regs[1], Address(from, -bytes, pre_indexed));
1090 
1091     assert (offset == bytes, "must be");
1092   }
1093 
1094   // Stores regs[0], regs[1], ..., regs[count-1] into [to - count*wordSize, to)
1095   // and decreases 'to' by count*wordSize.
1096   // Note that regs[0] value goes into the memory with lowest address.
1097   void bulk_store_backward(Register to, const Register regs[], int count) {
1098     assert (count > 0 && count % 2 == 0, "count must be positive even number");
1099     int bytes = count * wordSize;
1100 
1101     int offset = 0;
1102 
1103     for (int i = count - 2; i > 0; i -= 2) {
1104       offset += 2*wordSize;
1105       __ stp(regs[i], regs[i+1], Address(to, -offset));
1106     }
1107 
1108     offset += 2*wordSize;
1109     __ stp(regs[0], regs[1], Address(to, -bytes, pre_indexed));
1110 
1111     assert (offset == bytes, "must be");
1112   }
1113 #endif // AARCH64
1114 
1115   // TODO-AARCH64: rearrange in-loop prefetches:
1116   //   probably we should choose between "prefetch-store before or after store", not "before or after load".
1117   void prefetch(Register from, Register to, int offset, int to_delta = 0) {
1118     __ prefetch_read(Address(from, offset));
1119 #ifdef AARCH64
1120   // Next line commented out to avoid significant loss of performance in memory copy - JDK-8078120
1121   // __ prfm(pstl1keep, Address(to, offset + to_delta));
1122 #endif // AARCH64
1123   }
1124 
1125   // Generate the inner loop for forward aligned array copy
1126   //
1127   // Arguments
1128   //      from:      src address, 64 bits  aligned
1129   //      to:        dst address, wordSize aligned
1130   //      count:     number of elements (32-bit int)
1131   //      bytes_per_count: number of bytes for each unit of 'count'
1132   //
1133   // Return the minimum initial value for count
1134   //
1135   // Notes:
1136   // - 'from' aligned on 64-bit (recommended for 32-bit ARM in case this speeds up LDMIA, required for AArch64)
1137   // - 'to' aligned on wordSize
1138   // - 'count' must be greater than or equal to the returned value
1139   //
1140   // Increases 'from' and 'to' by count*bytes_per_count.
1141   //
1142   // Scratches 'count', R3.
1143   // On AArch64 also scratches R4-R10; on 32-bit ARM R4-R10 are preserved (saved/restored).
1144   //
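  // For instance (illustrative), on 32-bit ARM with bytes_per_count == 4,
  // bytes_per_loop is 8*wordSize == 32 bytes, so the returned minimum
  // count (count_per_loop) is 8 elements.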
1145   int generate_forward_aligned_copy_loop(Register from, Register to, Register count, int bytes_per_count) {
1146     assert (from == R0 && to == R1 && count == R2, "adjust the implementation below");
1147 
1148     const int bytes_per_loop = 8*wordSize; // 8 registers are read and written on every loop iteration
1149     arraycopy_loop_config *config=&arraycopy_configurations[ArmCopyPlatform].forward_aligned;
1150     int pld_offset = config->pld_distance;
1151     const int count_per_loop = bytes_per_loop / bytes_per_count;
1152 
1153 #ifndef AARCH64
1154     bool split_read= config->split_ldm;
1155     bool split_write= config->split_stm;
1156 
1157     // XXX optim: use VLDM/VSTM when available (Neon) with PLD
1158     //  NEONCopyPLD
1159     //      PLD [r1, #0xC0]
1160     //      VLDM r1!,{d0-d7}
1161     //      VSTM r0!,{d0-d7}
1162     //      SUBS r2,r2,#0x40
1163     //      BGE NEONCopyPLD
1164 
1165     __ push(RegisterSet(R4,R10));
1166 #endif // !AARCH64
1167 
1168     const bool prefetch_before = pld_offset < 0;
1169     const bool prefetch_after = pld_offset > 0;
1170 
1171     Label L_skip_pld;
1172 
1173     // predecrease count so the loop exits when fewer than count_per_loop elements remain
1174     __ sub_32(count, count, count_per_loop);
1175 
1176     if (pld_offset != 0) {
1177       pld_offset = (pld_offset < 0) ? -pld_offset : pld_offset;
1178 
1179       prefetch(from, to, 0);
1180 
1181       if (prefetch_before) {
1182         // If prefetch is done ahead, final PLDs that overflow the
1183         // copied area can be easily avoided. 'count' is predecreased
1184         // by the prefetch distance to optimize the inner loop and the
1185         // outer loop skips the PLD.
1186         __ subs_32(count, count, (bytes_per_loop+pld_offset)/bytes_per_count);
1187 
1188         // skip prefetch for small copies
1189         __ b(L_skip_pld, lt);
1190       }
1191 
1192       int offset = ArmCopyCacheLineSize;
1193       while (offset <= pld_offset) {
1194         prefetch(from, to, offset);
1195         offset += ArmCopyCacheLineSize;
1196       };
1197     }
1198 
1199 #ifdef AARCH64
1200     const Register data_regs[8] = {R3, R4, R5, R6, R7, R8, R9, R10};
1201 #endif // AARCH64
1202     {
1203       // LDM (32-bit ARM) / LDP (AArch64) copy of 'bytes_per_loop' bytes
1204 
1205       // 32-bit ARM note: we have tried implementing loop unrolling to skip one
1206       // PLD with 64 bytes cache line but the gain was not significant.
1207 
1208       Label L_copy_loop;
1209       __ align(OptoLoopAlignment);
1210       __ BIND(L_copy_loop);
1211 
1212       if (prefetch_before) {
1213         prefetch(from, to, bytes_per_loop + pld_offset);
1214         __ BIND(L_skip_pld);
1215       }
1216 
1217 #ifdef AARCH64
1218       bulk_load_forward(from, data_regs, 8);
1219 #else
1220       if (split_read) {
1221         // Split the register set in two sets so that there is less
1222         // latency between LDM and STM (R3-R6 available while R7-R10
1223         // still loading) and less register locking issue when iterating
1224         // on the first LDM.
1225         __ ldmia(from, RegisterSet(R3, R6), writeback);
1226         __ ldmia(from, RegisterSet(R7, R10), writeback);
1227       } else {
1228         __ ldmia(from, RegisterSet(R3, R10), writeback);
1229       }
1230 #endif // AARCH64
1231 
1232       __ subs_32(count, count, count_per_loop);
1233 
1234       if (prefetch_after) {
1235         prefetch(from, to, pld_offset, bytes_per_loop);
1236       }
1237 
1238 #ifdef AARCH64
1239       bulk_store_forward(to, data_regs, 8);
1240 #else
1241       if (split_write) {
1242         __ stmia(to, RegisterSet(R3, R6), writeback);
1243         __ stmia(to, RegisterSet(R7, R10), writeback);
1244       } else {
1245         __ stmia(to, RegisterSet(R3, R10), writeback);
1246       }
1247 #endif // AARCH64
1248 
1249       __ b(L_copy_loop, ge);
1250 
1251       if (prefetch_before) {
1252         // the inner loop may end earlier, allowing us to skip the PLD for the last iterations
1253         __ cmn_32(count, (bytes_per_loop + pld_offset)/bytes_per_count);
1254         __ b(L_skip_pld, ge);
1255       }
1256     }
1257     BLOCK_COMMENT("Remaining bytes:");
1258     // still 0..bytes_per_loop-1 aligned bytes to copy, count already decreased by (at least) bytes_per_loop bytes
1259 
1260     // __ add(count, count, ...); // addition useless for the bit tests
1261     assert (pld_offset % bytes_per_loop == 0, "decreasing count by pld_offset before loop must not change tested bits");
1262 
1263 #ifdef AARCH64
1264     assert (bytes_per_loop == 64, "adjust the code below");
1265     assert (bytes_per_count <= 8, "adjust the code below");
1266 
1267     {
1268       Label L;
1269       __ tbz(count, exact_log2(32/bytes_per_count), L);
1270 
1271       bulk_load_forward(from, data_regs, 4);
1272       bulk_store_forward(to, data_regs, 4);
1273 
1274       __ bind(L);
1275     }
1276 
1277     {
1278       Label L;
1279       __ tbz(count, exact_log2(16/bytes_per_count), L);
1280 
1281       bulk_load_forward(from, data_regs, 2);
1282       bulk_store_forward(to, data_regs, 2);
1283 
1284       __ bind(L);
1285     }
1286 
1287     {
1288       Label L;
1289       __ tbz(count, exact_log2(8/bytes_per_count), L);
1290 
1291       __ ldr(R3, Address(from, 8, post_indexed));
1292       __ str(R3, Address(to,   8, post_indexed));
1293 
1294       __ bind(L);
1295     }
1296 
1297     if (bytes_per_count <= 4) {
1298       Label L;
1299       __ tbz(count, exact_log2(4/bytes_per_count), L);
1300 
1301       __ ldr_w(R3, Address(from, 4, post_indexed));
1302       __ str_w(R3, Address(to,   4, post_indexed));
1303 
1304       __ bind(L);
1305     }
1306 
1307     if (bytes_per_count <= 2) {
1308       Label L;
1309       __ tbz(count, exact_log2(2/bytes_per_count), L);
1310 
1311       __ ldrh(R3, Address(from, 2, post_indexed));
1312       __ strh(R3, Address(to,   2, post_indexed));
1313 
1314       __ bind(L);
1315     }
1316 
1317     if (bytes_per_count <= 1) {
1318       Label L;
1319       __ tbz(count, 0, L);
1320 
1321       __ ldrb(R3, Address(from, 1, post_indexed));
1322       __ strb(R3, Address(to,   1, post_indexed));
1323 
1324       __ bind(L);
1325     }
1326 #else
1327     __ tst(count, 16 / bytes_per_count);
1328     __ ldmia(from, RegisterSet(R3, R6), writeback, ne); // copy 16 bytes
1329     __ stmia(to, RegisterSet(R3, R6), writeback, ne);
1330 
1331     __ tst(count, 8 / bytes_per_count);
1332     __ ldmia(from, RegisterSet(R3, R4), writeback, ne); // copy 8 bytes
1333     __ stmia(to, RegisterSet(R3, R4), writeback, ne);
1334 
1335     if (bytes_per_count <= 4) {
1336       __ tst(count, 4 / bytes_per_count);
1337       __ ldr(R3, Address(from, 4, post_indexed), ne); // copy 4 bytes
1338       __ str(R3, Address(to, 4, post_indexed), ne);
1339     }
1340 
1341     if (bytes_per_count <= 2) {
1342       __ tst(count, 2 / bytes_per_count);
1343       __ ldrh(R3, Address(from, 2, post_indexed), ne); // copy 2 bytes
1344       __ strh(R3, Address(to, 2, post_indexed), ne);
1345     }
1346 
1347     if (bytes_per_count == 1) {
1348       __ tst(count, 1);
1349       __ ldrb(R3, Address(from, 1, post_indexed), ne);
1350       __ strb(R3, Address(to, 1, post_indexed), ne);
1351     }
1352 
1353     __ pop(RegisterSet(R4,R10));
1354 #endif // AARCH64
1355 
1356     return count_per_loop;
1357   }
1358 
1359 
1360   // Generate the inner loop for backward aligned array copy
1361   //
1362   // Arguments
1363   //      end_from:      src end address, 64 bits  aligned
1364   //      end_to:        dst end address, wordSize aligned
1365   //      count:         number of elements (32-bit int)
1366   //      bytes_per_count: number of bytes for each unit of 'count'
1367   //
1368   // Return the minimum initial value for count
1369   //
1370   // Notes:
1371   // - 'end_from' aligned on 64-bit (recommended for 32-bit ARM in case this speeds up LDMIA, required for AArch64)
1372   // - 'end_to' aligned on wordSize
1373   // - 'count' must be greater than or equal to the returned value
1374   //
1375   // Decreases 'end_from' and 'end_to' by count*bytes_per_count.
1376   //
1377   // Scratches 'count', R3.
1378   // On AArch64 also scratches R4-R10; on 32-bit ARM R4-R10 are preserved (saved/restored).
1379   //
1380   int generate_backward_aligned_copy_loop(Register end_from, Register end_to, Register count, int bytes_per_count) {
1381     assert (end_from == R0 && end_to == R1 && count == R2, "adjust the implementation below");
1382 
1383     const int bytes_per_loop = 8*wordSize; // 8 registers are read and written on every loop iteration
1384     const int count_per_loop = bytes_per_loop / bytes_per_count;
1385 
1386     arraycopy_loop_config *config=&arraycopy_configurations[ArmCopyPlatform].backward_aligned;
1387     int pld_offset = config->pld_distance;
1388 
1389 #ifndef AARCH64
1390     bool split_read= config->split_ldm;
1391     bool split_write= config->split_stm;
1392 
1393     // See the forward copy variant for additional comments.
1394 
1395     __ push(RegisterSet(R4,R10));
1396 #endif // !AARCH64
1397 
1398     __ sub_32(count, count, count_per_loop);
1399 
1400     const bool prefetch_before = pld_offset < 0;
1401     const bool prefetch_after = pld_offset > 0;
1402 
1403     Label L_skip_pld;
1404 
1405     if (pld_offset != 0) {
1406       pld_offset = (pld_offset < 0) ? -pld_offset : pld_offset;
1407 
1408       prefetch(end_from, end_to, -wordSize);
1409 
1410       if (prefetch_before) {
1411         __ subs_32(count, count, (bytes_per_loop + pld_offset) / bytes_per_count);
1412         __ b(L_skip_pld, lt);
1413       }
1414 
1415       int offset = ArmCopyCacheLineSize;
1416       while (offset <= pld_offset) {
1417         prefetch(end_from, end_to, -(wordSize + offset));
1418         offset += ArmCopyCacheLineSize;
1419       };
1420     }
1421 
1422 #ifdef AARCH64
1423     const Register data_regs[8] = {R3, R4, R5, R6, R7, R8, R9, R10};
1424 #endif // AARCH64
1425     {
1426       // LDM (32-bit ARM) / LDP (AArch64) copy of 'bytes_per_loop' bytes
1427 
1428       // 32-bit ARM note: we have tried implementing loop unrolling to skip one
1429       // PLD with 64 bytes cache line but the gain was not significant.
1430 
1431       Label L_copy_loop;
1432       __ align(OptoLoopAlignment);
1433       __ BIND(L_copy_loop);
1434 
1435       if (prefetch_before) {
1436         prefetch(end_from, end_to, -(wordSize + bytes_per_loop + pld_offset));
1437         __ BIND(L_skip_pld);
1438       }
1439 
1440 #ifdef AARCH64
1441       bulk_load_backward(end_from, data_regs, 8);
1442 #else
1443       if (split_read) {
1444         __ ldmdb(end_from, RegisterSet(R7, R10), writeback);
1445         __ ldmdb(end_from, RegisterSet(R3, R6), writeback);
1446       } else {
1447         __ ldmdb(end_from, RegisterSet(R3, R10), writeback);
1448       }
1449 #endif // AARCH64
1450 
1451       __ subs_32(count, count, count_per_loop);
1452 
1453       if (prefetch_after) {
1454         prefetch(end_from, end_to, -(wordSize + pld_offset), -bytes_per_loop);
1455       }
1456 
1457 #ifdef AARCH64
1458       bulk_store_backward(end_to, data_regs, 8);
1459 #else
1460       if (split_write) {
1461         __ stmdb(end_to, RegisterSet(R7, R10), writeback);
1462         __ stmdb(end_to, RegisterSet(R3, R6), writeback);
1463       } else {
1464         __ stmdb(end_to, RegisterSet(R3, R10), writeback);
1465       }
1466 #endif // AARCH64
1467 
1468       __ b(L_copy_loop, ge);
1469 
1470       if (prefetch_before) {
1471         __ cmn_32(count, (bytes_per_loop + pld_offset)/bytes_per_count);
1472         __ b(L_skip_pld, ge);
1473       }
1474     }
1475     BLOCK_COMMENT("Remaining bytes:");
1476     // still 0..bytes_per_loop-1 aligned bytes to copy, count already decreased by (at least) bytes_per_loop bytes
1477 
1478     // __ add(count, count, ...); // addition useless for the bit tests
1479     assert (pld_offset % bytes_per_loop == 0, "decreasing count by pld_offset before loop must not change tested bits");
1480 
1481 #ifdef AARCH64
1482     assert (bytes_per_loop == 64, "adjust the code below");
1483     assert (bytes_per_count <= 8, "adjust the code below");
1484 
1485     {
1486       Label L;
1487       __ tbz(count, exact_log2(32/bytes_per_count), L);
1488 
1489       bulk_load_backward(end_from, data_regs, 4);
1490       bulk_store_backward(end_to, data_regs, 4);
1491 
1492       __ bind(L);
1493     }
1494 
1495     {
1496       Label L;
1497       __ tbz(count, exact_log2(16/bytes_per_count), L);
1498 
1499       bulk_load_backward(end_from, data_regs, 2);
1500       bulk_store_backward(end_to, data_regs, 2);
1501 
1502       __ bind(L);
1503     }
1504 
1505     {
1506       Label L;
1507       __ tbz(count, exact_log2(8/bytes_per_count), L);
1508 
1509       __ ldr(R3, Address(end_from, -8, pre_indexed));
1510       __ str(R3, Address(end_to,   -8, pre_indexed));
1511 
1512       __ bind(L);
1513     }
1514 
1515     if (bytes_per_count <= 4) {
1516       Label L;
1517       __ tbz(count, exact_log2(4/bytes_per_count), L);
1518 
1519       __ ldr_w(R3, Address(end_from, -4, pre_indexed));
1520       __ str_w(R3, Address(end_to,   -4, pre_indexed));
1521 
1522       __ bind(L);
1523     }
1524 
1525     if (bytes_per_count <= 2) {
1526       Label L;
1527       __ tbz(count, exact_log2(2/bytes_per_count), L);
1528 
1529       __ ldrh(R3, Address(end_from, -2, pre_indexed));
1530       __ strh(R3, Address(end_to,   -2, pre_indexed));
1531 
1532       __ bind(L);
1533     }
1534 
1535     if (bytes_per_count <= 1) {
1536       Label L;
1537       __ tbz(count, 0, L);
1538 
1539       __ ldrb(R3, Address(end_from, -1, pre_indexed));
1540       __ strb(R3, Address(end_to,   -1, pre_indexed));
1541 
1542       __ bind(L);
1543     }
1544 #else
1545     __ tst(count, 16 / bytes_per_count);
1546     __ ldmdb(end_from, RegisterSet(R3, R6), writeback, ne); // copy 16 bytes
1547     __ stmdb(end_to, RegisterSet(R3, R6), writeback, ne);
1548 
1549     __ tst(count, 8 / bytes_per_count);
1550     __ ldmdb(end_from, RegisterSet(R3, R4), writeback, ne); // copy 8 bytes
1551     __ stmdb(end_to, RegisterSet(R3, R4), writeback, ne);
1552 
1553     if (bytes_per_count <= 4) {
1554       __ tst(count, 4 / bytes_per_count);
1555       __ ldr(R3, Address(end_from, -4, pre_indexed), ne); // copy 4 bytes
1556       __ str(R3, Address(end_to, -4, pre_indexed), ne);
1557     }
1558 
1559     if (bytes_per_count <= 2) {
1560       __ tst(count, 2 / bytes_per_count);
1561       __ ldrh(R3, Address(end_from, -2, pre_indexed), ne); // copy 2 bytes
1562       __ strh(R3, Address(end_to, -2, pre_indexed), ne);
1563     }
1564 
1565     if (bytes_per_count == 1) {
1566       __ tst(count, 1);
1567       __ ldrb(R3, Address(end_from, -1, pre_indexed), ne);
1568       __ strb(R3, Address(end_to, -1, pre_indexed), ne);
1569     }
1570 
1571     __ pop(RegisterSet(R4,R10));
1572 #endif // AARCH64
1573 
1574     return count_per_loop;
1575   }
1576 
1577 
1578   // Generate the inner loop for shifted forward array copy (unaligned copy).
1579   // It can be used when bytes_per_count < wordSize, i.e.
1580   //  byte/short copy on 32-bit ARM, byte/short/int/compressed-oop copy on AArch64.
1581   //
1582   // Arguments
1583   //      from:      start src address, 64 bits aligned
1584   //      to:        start dst address, (now) wordSize aligned
1585   //      count:     number of elements (32-bit int)
1586   //      bytes_per_count: number of bytes for each unit of 'count'
1587   //      lsr_shift: shift applied to 'old' value to skip already written bytes
1588   //      lsl_shift: shift applied to 'new' value to set the high bytes of the next write
1589   //
1590   // Return the minimum initial value for count
1591   //
1592   // Notes:
1593   // - 'from' aligned on 64-bit (recommended for 32-bit ARM in case this speeds up LDMIA, required for AArch64)
1594   // - 'to' aligned on wordSize
1595   // - 'count' must be greater than or equal to the returned value
1596   // - 'lsr_shift' + 'lsl_shift' = BitsPerWord
1597   // - 'bytes_per_count' is 1 or 2 on 32-bit ARM; 1, 2 or 4 on AArch64
1598   //
1599   // Increases 'to' by count*bytes_per_count.
1600   //
1601   // Scratches 'from' and 'count', R3-R10, R12
1602   //
1603   // On entry:
1604   // - R12 is preloaded with the first 'BitsPerWord' bits read just before 'from'
1605   // - (R12 >> lsr_shift) is the part not yet written (just before 'to')
1606   // --> (*to) = (R12 >> lsr_shift) | ((*from) << lsl_shift); ...
1607   //
1608   // This implementation may read more bytes than required.
1609   // In fact, it always reads exactly the copied region extended so that its upper bound is aligned up to wordSize,
1610   // so the excess reads never cross a word boundary and are thus harmless.
1611   //
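  // Illustration only (not generated code), assuming a 16-bit misalignment on 32-bit ARM
  // (lsr_shift == lsl_shift == 16): each word written to 'to' is assembled as
  //   *to++ = (R12 >> 16) | (next << 16);   // R12 holds the last word loaded, 'next' the following one
  // i.e. the upper halfword left over from the previous load is completed with the lower
  // halfword of the next load.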
1612   int generate_forward_shifted_copy_loop(Register from, Register to, Register count, int bytes_per_count, int lsr_shift, int lsl_shift) {
1613     assert (from == R0 && to == R1 && count == R2, "adjust the implementation below");
1614 
1615     const int bytes_per_loop = 8*wordSize; // 8 registers are read and written on every loop iter
1616     const int count_per_loop = bytes_per_loop / bytes_per_count;
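    // For example, for a halfword copy (bytes_per_count == 2): on 32-bit ARM bytes_per_loop == 32
    // and count_per_loop == 16; on AArch64 bytes_per_loop == 64 and count_per_loop == 32.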
1617 
1618     arraycopy_loop_config *config=&arraycopy_configurations[ArmCopyPlatform].forward_shifted;
1619     int pld_offset = config->pld_distance;
1620 
1621 #ifndef AARCH64
1622     bool split_read= config->split_ldm;
1623     bool split_write= config->split_stm;
1624 #endif // !AARCH64
1625 
1626     const bool prefetch_before = pld_offset < 0;
1627     const bool prefetch_after = pld_offset > 0;
1628     Label L_skip_pld, L_last_read, L_done;
1629     if (pld_offset != 0) {
1630 
1631       pld_offset = (pld_offset < 0) ? -pld_offset : pld_offset;
1632 
1633       prefetch(from, to, 0);
1634 
1635       if (prefetch_before) {
1636         __ cmp_32(count, count_per_loop);
1637         __ b(L_last_read, lt);
1638         // skip prefetch for small copies
1639         // warning: count is predecreased by the prefetch distance to optimize the inner loop
1640         __ subs_32(count, count, ((bytes_per_loop + pld_offset) / bytes_per_count) + count_per_loop);
1641         __ b(L_skip_pld, lt);
1642       }
1643 
1644       int offset = ArmCopyCacheLineSize;
1645       while (offset <= pld_offset) {
1646         prefetch(from, to, offset);
1647         offset += ArmCopyCacheLineSize;
1648       };
1649     }
1650 
1651     Label L_shifted_loop;
1652 
1653     __ align(OptoLoopAlignment);
1654     __ BIND(L_shifted_loop);
1655 
1656     if (prefetch_before) {
1657       // do it early if there might be register locking issues
1658       prefetch(from, to, bytes_per_loop + pld_offset);
1659       __ BIND(L_skip_pld);
1660     } else {
1661       __ cmp_32(count, count_per_loop);
1662       __ b(L_last_read, lt);
1663     }
1664 
1665 #ifdef AARCH64
1666     const Register data_regs[9] = {R3, R4, R5, R6, R7, R8, R9, R10, R12};
1667     __ logical_shift_right(R3, R12, lsr_shift); // part of R12 not yet written
1668     __ subs_32(count, count, count_per_loop);
1669     bulk_load_forward(from, &data_regs[1], 8);
1670 #else
1671     // read 32 bytes
1672     if (split_read) {
1673       // if write is not split, use fewer registers in the first set to reduce locking
1674       RegisterSet set1 = split_write ? RegisterSet(R4, R7) : RegisterSet(R4, R5);
1675       RegisterSet set2 = (split_write ? RegisterSet(R8, R10) : RegisterSet(R6, R10)) | R12;
1676       __ ldmia(from, set1, writeback);
1677       __ mov(R3, AsmOperand(R12, lsr, lsr_shift)); // part of R12 not yet written
1678       __ ldmia(from, set2, writeback);
1679       __ subs(count, count, count_per_loop); // XXX: should it be before the 2nd LDM ? (latency vs locking)
1680     } else {
1681       __ mov(R3, AsmOperand(R12, lsr, lsr_shift)); // part of R12 not yet written
1682       __ ldmia(from, RegisterSet(R4, R10) | R12, writeback); // Note: small latency on R4
1683       __ subs(count, count, count_per_loop);
1684     }
1685 #endif // AARCH64
1686 
1687     if (prefetch_after) {
1688       // do it after the 1st ldm/ldp anyway  (no locking issues with early STM/STP)
1689       prefetch(from, to, pld_offset, bytes_per_loop);
1690     }
1691 
1692     // prepare (shift) the values in R3..R10
1693     __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift)); // merged below low bytes of next val
1694     __ logical_shift_right(R4, R4, lsr_shift); // unused part of next val
1695     __ orr(R4, R4, AsmOperand(R5, lsl, lsl_shift)); // ...
1696     __ logical_shift_right(R5, R5, lsr_shift);
1697     __ orr(R5, R5, AsmOperand(R6, lsl, lsl_shift));
1698     __ logical_shift_right(R6, R6, lsr_shift);
1699     __ orr(R6, R6, AsmOperand(R7, lsl, lsl_shift));
1700 #ifndef AARCH64
1701     if (split_write) {
1702       // write the first half as soon as possible to reduce stm locking
1703       __ stmia(to, RegisterSet(R3, R6), writeback, prefetch_before ? gt : ge);
1704     }
1705 #endif // !AARCH64
1706     __ logical_shift_right(R7, R7, lsr_shift);
1707     __ orr(R7, R7, AsmOperand(R8, lsl, lsl_shift));
1708     __ logical_shift_right(R8, R8, lsr_shift);
1709     __ orr(R8, R8, AsmOperand(R9, lsl, lsl_shift));
1710     __ logical_shift_right(R9, R9, lsr_shift);
1711     __ orr(R9, R9, AsmOperand(R10, lsl, lsl_shift));
1712     __ logical_shift_right(R10, R10, lsr_shift);
1713     __ orr(R10, R10, AsmOperand(R12, lsl, lsl_shift));
1714 
1715 #ifdef AARCH64
1716     bulk_store_forward(to, data_regs, 8);
1717 #else
1718     if (split_write) {
1719       __ stmia(to, RegisterSet(R7, R10), writeback, prefetch_before ? gt : ge);
1720     } else {
1721       __ stmia(to, RegisterSet(R3, R10), writeback, prefetch_before ? gt : ge);
1722     }
1723 #endif // AARCH64
1724     __ b(L_shifted_loop, gt); // no need to loop if 0 (when count need not be precise modulo bytes_per_loop)
1725 
1726     if (prefetch_before) {
1727       // the first loop may end earlier, which allows the pld at the end to be skipped
1728       __ cmn_32(count, (bytes_per_loop + pld_offset)/bytes_per_count);
1729 #ifndef AARCH64
1730       __ stmia(to, RegisterSet(R3, R10), writeback); // stmia was skipped
1731 #endif // !AARCH64
1732       __ b(L_skip_pld, ge);
1733       __ adds_32(count, count, ((bytes_per_loop + pld_offset) / bytes_per_count) + count_per_loop);
1734     }
1735 
1736     __ BIND(L_last_read);
1737     __ b(L_done, eq);
1738 
1739 #ifdef AARCH64
1740     assert(bytes_per_count < 8, "adjust the code below");
1741 
1742     __ logical_shift_right(R3, R12, lsr_shift);
1743 
1744     {
1745       Label L;
1746       __ tbz(count, exact_log2(32/bytes_per_count), L);
1747       bulk_load_forward(from, &data_regs[1], 4);
1748       __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift));
1749       __ logical_shift_right(R4, R4, lsr_shift);
1750       __ orr(R4, R4, AsmOperand(R5, lsl, lsl_shift));
1751       __ logical_shift_right(R5, R5, lsr_shift);
1752       __ orr(R5, R5, AsmOperand(R6, lsl, lsl_shift));
1753       __ logical_shift_right(R6, R6, lsr_shift);
1754       __ orr(R6, R6, AsmOperand(R7, lsl, lsl_shift));
1755       bulk_store_forward(to, data_regs, 4);
1756       __ logical_shift_right(R3, R7, lsr_shift);
1757       __ bind(L);
1758     }
1759 
1760     {
1761       Label L;
1762       __ tbz(count, exact_log2(16/bytes_per_count), L);
1763       bulk_load_forward(from, &data_regs[1], 2);
1764       __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift));
1765       __ logical_shift_right(R4, R4, lsr_shift);
1766       __ orr(R4, R4, AsmOperand(R5, lsl, lsl_shift));
1767       bulk_store_forward(to, data_regs, 2);
1768       __ logical_shift_right(R3, R5, lsr_shift);
1769       __ bind(L);
1770     }
1771 
1772     {
1773       Label L;
1774       __ tbz(count, exact_log2(8/bytes_per_count), L);
1775       __ ldr(R4, Address(from, 8, post_indexed));
1776       __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift));
1777       __ str(R3, Address(to, 8, post_indexed));
1778       __ logical_shift_right(R3, R4, lsr_shift);
1779       __ bind(L);
1780     }
1781 
1782     const int have_bytes = lsl_shift/BitsPerByte; // number of already read bytes in R3
1783 
1784     // Fewer than wordSize bytes remain to be written.
1785     // Do not check count if R3 already holds the maximal possible number of loaded bytes (wordSize - bytes_per_count).
1786     if (have_bytes < wordSize - bytes_per_count) {
1787       Label L;
1788       __ andr(count, count, (uintx)(8/bytes_per_count-1)); // make count exact
1789       __ cmp_32(count, have_bytes/bytes_per_count); // do we have enough bytes to store?
1790       __ b(L, le);
1791       __ ldr(R4, Address(from, 8, post_indexed));
1792       __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift));
1793       __ bind(L);
1794     }
1795 
1796     {
1797       Label L;
1798       __ tbz(count, exact_log2(4/bytes_per_count), L);
1799       __ str_w(R3, Address(to, 4, post_indexed));
1800       if (bytes_per_count < 4) {
1801         __ logical_shift_right(R3, R3, 4*BitsPerByte);
1802       }
1803       __ bind(L);
1804     }
1805 
1806     if (bytes_per_count <= 2) {
1807       Label L;
1808       __ tbz(count, exact_log2(2/bytes_per_count), L);
1809       __ strh(R3, Address(to, 2, post_indexed));
1810       if (bytes_per_count < 2) {
1811         __ logical_shift_right(R3, R3, 2*BitsPerByte);
1812       }
1813       __ bind(L);
1814     }
1815 
1816     if (bytes_per_count <= 1) {
1817       Label L;
1818       __ tbz(count, exact_log2(1/bytes_per_count), L);
1819       __ strb(R3, Address(to, 1, post_indexed));
1820       __ bind(L);
1821     }
1822 #else
1823     switch (bytes_per_count) {
1824     case 2:
1825       __ mov(R3, AsmOperand(R12, lsr, lsr_shift));
1826       __ tst(count, 8);
1827       __ ldmia(from, RegisterSet(R4, R7), writeback, ne);
1828       __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift), ne); // merged below low bytes of next val
1829       __ mov(R4, AsmOperand(R4, lsr, lsr_shift), ne); // unused part of next val
1830       __ orr(R4, R4, AsmOperand(R5, lsl, lsl_shift), ne); // ...
1831       __ mov(R5, AsmOperand(R5, lsr, lsr_shift), ne);
1832       __ orr(R5, R5, AsmOperand(R6, lsl, lsl_shift), ne);
1833       __ mov(R6, AsmOperand(R6, lsr, lsr_shift), ne);
1834       __ orr(R6, R6, AsmOperand(R7, lsl, lsl_shift), ne);
1835       __ stmia(to, RegisterSet(R3, R6), writeback, ne);
1836       __ mov(R3, AsmOperand(R7, lsr, lsr_shift), ne);
1837 
1838       __ tst(count, 4);
1839       __ ldmia(from, RegisterSet(R4, R5), writeback, ne);
1840       __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift), ne); // merged below low bytes of next val
1841       __ mov(R4, AsmOperand(R4, lsr, lsr_shift), ne); // unused part of next val
1842       __ orr(R4, R4, AsmOperand(R5, lsl, lsl_shift), ne); // ...
1843       __ stmia(to, RegisterSet(R3, R4), writeback, ne);
1844       __ mov(R3, AsmOperand(R5, lsr, lsr_shift), ne);
1845 
1846       __ tst(count, 2);
1847       __ ldr(R4, Address(from, 4, post_indexed), ne);
1848       __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift), ne);
1849       __ str(R3, Address(to, 4, post_indexed), ne);
1850       __ mov(R3, AsmOperand(R4, lsr, lsr_shift), ne);
1851 
1852       __ tst(count, 1);
1853       __ strh(R3, Address(to, 2, post_indexed), ne); // one last short
1854       break;
1855 
1856     case 1:
1857       __ mov(R3, AsmOperand(R12, lsr, lsr_shift));
1858       __ tst(count, 16);
1859       __ ldmia(from, RegisterSet(R4, R7), writeback, ne);
1860       __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift), ne); // merged below low bytes of next val
1861       __ mov(R4, AsmOperand(R4, lsr, lsr_shift), ne); // unused part of next val
1862       __ orr(R4, R4, AsmOperand(R5, lsl, lsl_shift), ne); // ...
1863       __ mov(R5, AsmOperand(R5, lsr, lsr_shift), ne);
1864       __ orr(R5, R5, AsmOperand(R6, lsl, lsl_shift), ne);
1865       __ mov(R6, AsmOperand(R6, lsr, lsr_shift), ne);
1866       __ orr(R6, R6, AsmOperand(R7, lsl, lsl_shift), ne);
1867       __ stmia(to, RegisterSet(R3, R6), writeback, ne);
1868       __ mov(R3, AsmOperand(R7, lsr, lsr_shift), ne);
1869 
1870       __ tst(count, 8);
1871       __ ldmia(from, RegisterSet(R4, R5), writeback, ne);
1872       __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift), ne); // merged below low bytes of next val
1873       __ mov(R4, AsmOperand(R4, lsr, lsr_shift), ne); // unused part of next val
1874       __ orr(R4, R4, AsmOperand(R5, lsl, lsl_shift), ne); // ...
1875       __ stmia(to, RegisterSet(R3, R4), writeback, ne);
1876       __ mov(R3, AsmOperand(R5, lsr, lsr_shift), ne);
1877 
1878       __ tst(count, 4);
1879       __ ldr(R4, Address(from, 4, post_indexed), ne);
1880       __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift), ne);
1881       __ str(R3, Address(to, 4, post_indexed), ne);
1882       __ mov(R3, AsmOperand(R4, lsr, lsr_shift), ne);
1883 
1884       __ andr(count, count, 3);
1885       __ cmp(count, 2);
1886 
1887       // Note: R3 might already contain enough bytes ready to write (3 needed at most),
1888       // so the load is not needed when lsl_shift == 24 (it would in fact force reading
1889       // beyond the source buffer end boundary)
1890       if (lsl_shift == 8) {
1891         __ ldr(R4, Address(from, 4, post_indexed), ge);
1892         __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift), ge);
1893       } else if (lsl_shift == 16) {
1894         __ ldr(R4, Address(from, 4, post_indexed), gt);
1895         __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift), gt);
1896       }
1897 
1898       __ strh(R3, Address(to, 2, post_indexed), ge); // two last bytes
1899       __ mov(R3, AsmOperand(R3, lsr, 16), gt);
1900 
1901       __ tst(count, 1);
1902       __ strb(R3, Address(to, 1, post_indexed), ne); // one last byte
1903       break;
1904     }
1905 #endif // AARCH64
1906 
1907     __ BIND(L_done);
1908     return 0; // no minimum
1909   }
1910 
1911   // Generate the inner loop for shifted backward array copy (unaligned copy).
1912   // It can be used when bytes_per_count < wordSize, i.e.
1913   //  byte/short copy on 32-bit ARM, byte/short/int/compressed-oop copy on AArch64.
1914   //
1915   // Arguments
1916   //      end_from:  end src address, 64 bits aligned
1917   //      end_to:    end dst address, (now) wordSize aligned
1918   //      count:     number of elements (32-bit int)
1919   //      bytes_per_count: number of bytes for each unit of 'count'
1920   //      lsl_shift: shift applied to 'old' value to skip already written bytes
1921   //      lsr_shift: shift applied to 'new' value to set the low bytes of the next write
1922   //
1923   // Return the minimum initial value for count
1924   //
1925   // Notes:
1926   // - 'end_from' aligned on 64-bit (recommended for 32-bit ARM in case this speeds up LDMIA, required for AArch64)
1927   // - 'end_to' aligned on wordSize
1928   // - 'count' must be greater than or equal to the returned value
1929   // - 'lsr_shift' + 'lsl_shift' = 'BitsPerWord'
1930   // - 'bytes_per_count' is 1 or 2 on 32-bit ARM; 1, 2 or 4 on AArch64
1931   //
1932   // Decreases 'end_to' by count*bytes_per_count.
1933   //
1934   // Scratches 'end_from', 'count', R3-R10, R12
1935   //
1936   // On entry:
1937   // - R3 is preloaded with the first 'BitsPerWord' bits read just after 'from'
1938   // - (R3 << lsl_shift) is the part not yet written
1939   // --> (*--to) = (R3 << lsl_shift) | ((*--from) >> lsr_shift); ...
1940   //
1941   // This implementation may read more bytes than required.
1942   // In fact, it always reads exactly the copied region extended so that its beginning is aligned down to wordSize,
1943   // so the excess reads never cross a word boundary and are thus harmless.
1944   //
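  // Illustration only (not generated code), assuming a 16-bit misalignment on 32-bit ARM
  // (lsr_shift == lsl_shift == 16): each word written below 'end_to' is assembled as
  //   *--end_to = (R3 << 16) | (next >> 16);   // R3 holds the last word loaded, 'next' the word just below it
  // mirroring the forward case: the low halfword left over from the previous load is completed
  // with the high halfword of the word loaded just below it.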
1945   int generate_backward_shifted_copy_loop(Register end_from, Register end_to, Register count, int bytes_per_count, int lsr_shift, int lsl_shift) {
1946     assert (end_from == R0 && end_to == R1 && count == R2, "adjust the implementation below");
1947 
1948     const int bytes_per_loop = 8*wordSize; // 8 registers are read and written on every loop iter
1949     const int count_per_loop = bytes_per_loop / bytes_per_count;
1950 
1951     arraycopy_loop_config *config=&arraycopy_configurations[ArmCopyPlatform].backward_shifted;
1952     int pld_offset = config->pld_distance;
1953 
1954 #ifndef AARCH64
1955     bool split_read= config->split_ldm;
1956     bool split_write= config->split_stm;
1957 #endif // !AARCH64
1958 
1959 
1960     const bool prefetch_before = pld_offset < 0;
1961     const bool prefetch_after = pld_offset > 0;
1962 
1963     Label L_skip_pld, L_done, L_last_read;
1964     if (pld_offset != 0) {
1965 
1966       pld_offset = (pld_offset < 0) ? -pld_offset : pld_offset;
1967 
1968       prefetch(end_from, end_to, -wordSize);
1969 
1970       if (prefetch_before) {
1971         __ cmp_32(count, count_per_loop);
1972         __ b(L_last_read, lt);
1973 
1974         // skip prefetch for small copies
1975         // warning: count is predecreased by the prefetch distance to optimize the inner loop
1976         __ subs_32(count, count, ((bytes_per_loop + pld_offset)/bytes_per_count) + count_per_loop);
1977         __ b(L_skip_pld, lt);
1978       }
1979 
1980       int offset = ArmCopyCacheLineSize;
1981       while (offset <= pld_offset) {
1982         prefetch(end_from, end_to, -(wordSize + offset));
1983         offset += ArmCopyCacheLineSize;
1984       };
1985     }
1986 
1987     Label L_shifted_loop;
1988     __ align(OptoLoopAlignment);
1989     __ BIND(L_shifted_loop);
1990 
1991     if (prefetch_before) {
1992       // do the 1st ldm/ldp first anyway (no locking issues with early STM/STP)
1993       prefetch(end_from, end_to, -(wordSize + bytes_per_loop + pld_offset));
1994       __ BIND(L_skip_pld);
1995     } else {
1996       __ cmp_32(count, count_per_loop);
1997       __ b(L_last_read, lt);
1998     }
1999 
2000 #ifdef AARCH64
2001     __ logical_shift_left(R12, R3, lsl_shift);
2002     const Register data_regs[9] = {R3, R4, R5, R6, R7, R8, R9, R10, R12};
2003     bulk_load_backward(end_from, data_regs, 8);
2004 #else
2005     if (split_read) {
2006       __ ldmdb(end_from, RegisterSet(R7, R10), writeback);
2007       __ mov(R12, AsmOperand(R3, lsl, lsl_shift)); // part of R3 not yet written
2008       __ ldmdb(end_from, RegisterSet(R3, R6), writeback);
2009     } else {
2010       __ mov(R12, AsmOperand(R3, lsl, lsl_shift)); // part of R3 not yet written
2011       __ ldmdb(end_from, RegisterSet(R3, R10), writeback);
2012     }
2013 #endif // AARCH64
2014 
2015     __ subs_32(count, count, count_per_loop);
2016 
2017     if (prefetch_after) { // do prefetch during ldm/ldp latency
2018       prefetch(end_from, end_to, -(wordSize + pld_offset), -bytes_per_loop);
2019     }
2020 
2021     // prepare the values in R4..R10,R12
2022     __ orr(R12, R12, AsmOperand(R10, lsr, lsr_shift)); // merged above high  bytes of prev val
2023     __ logical_shift_left(R10, R10, lsl_shift); // unused part of prev val
2024     __ orr(R10, R10, AsmOperand(R9, lsr, lsr_shift)); // ...
2025     __ logical_shift_left(R9, R9, lsl_shift);
2026     __ orr(R9, R9, AsmOperand(R8, lsr, lsr_shift));
2027     __ logical_shift_left(R8, R8, lsl_shift);
2028     __ orr(R8, R8, AsmOperand(R7, lsr, lsr_shift));
2029     __ logical_shift_left(R7, R7, lsl_shift);
2030     __ orr(R7, R7, AsmOperand(R6, lsr, lsr_shift));
2031     __ logical_shift_left(R6, R6, lsl_shift);
2032     __ orr(R6, R6, AsmOperand(R5, lsr, lsr_shift));
2033 #ifndef AARCH64
2034     if (split_write) {
2035       // store early to reduce locking issues
2036       __ stmdb(end_to, RegisterSet(R6, R10) | R12, writeback, prefetch_before ? gt : ge);
2037     }
2038 #endif // !AARCH64
2039     __ logical_shift_left(R5, R5, lsl_shift);
2040     __ orr(R5, R5, AsmOperand(R4, lsr, lsr_shift));
2041     __ logical_shift_left(R4, R4, lsl_shift);
2042     __ orr(R4, R4, AsmOperand(R3, lsr, lsr_shift));
2043 
2044 #ifdef AARCH64
2045     bulk_store_backward(end_to, &data_regs[1], 8);
2046 #else
2047     if (split_write) {
2048       __ stmdb(end_to, RegisterSet(R4, R5), writeback, prefetch_before ? gt : ge);
2049     } else {
2050       __ stmdb(end_to, RegisterSet(R4, R10) | R12, writeback, prefetch_before ? gt : ge);
2051     }
2052 #endif // AARCH64
2053 
2054     __ b(L_shifted_loop, gt); // no need to loop if 0 (when count need not be precise modulo bytes_per_loop)
2055 
2056     if (prefetch_before) {
2057       // the first loop may end earlier, which allows the pld at the end to be skipped
2058       __ cmn_32(count, ((bytes_per_loop + pld_offset)/bytes_per_count));
2059 #ifndef AARCH64
2060       __ stmdb(end_to, RegisterSet(R4, R10) | R12, writeback); // stmdb was skipped
2061 #endif // !AARCH64
2062       __ b(L_skip_pld, ge);
2063       __ adds_32(count, count, ((bytes_per_loop + pld_offset) / bytes_per_count) + count_per_loop);
2064     }
2065 
2066     __ BIND(L_last_read);
2067     __ b(L_done, eq);
2068 
2069 #ifdef AARCH64
2070     assert(bytes_per_count < 8, "adjust the code below");
2071 
2072     __ logical_shift_left(R12, R3, lsl_shift);
2073 
2074     {
2075       Label L;
2076       __ tbz(count, exact_log2(32/bytes_per_count), L);
2077       bulk_load_backward(end_from, &data_regs[4], 4);
2078 
2079       __ orr(R12, R12, AsmOperand(R10, lsr, lsr_shift));
2080       __ logical_shift_left(R10, R10, lsl_shift);
2081       __ orr(R10, R10, AsmOperand(R9, lsr, lsr_shift));
2082       __ logical_shift_left(R9, R9, lsl_shift);
2083       __ orr(R9, R9, AsmOperand(R8, lsr, lsr_shift));
2084       __ logical_shift_left(R8, R8, lsl_shift);
2085       __ orr(R8, R8, AsmOperand(R7, lsr, lsr_shift));
2086 
2087       bulk_store_backward(end_to, &data_regs[5], 4);
2088       __ logical_shift_left(R12, R7, lsl_shift);
2089       __ bind(L);
2090     }
2091 
2092     {
2093       Label L;
2094       __ tbz(count, exact_log2(16/bytes_per_count), L);
2095       bulk_load_backward(end_from, &data_regs[6], 2);
2096 
2097       __ orr(R12, R12, AsmOperand(R10, lsr, lsr_shift));
2098       __ logical_shift_left(R10, R10, lsl_shift);
2099       __ orr(R10, R10, AsmOperand(R9, lsr, lsr_shift));
2100 
2101       bulk_store_backward(end_to, &data_regs[7], 2);
2102       __ logical_shift_left(R12, R9, lsl_shift);
2103       __ bind(L);
2104     }
2105 
2106     {
2107       Label L;
2108       __ tbz(count, exact_log2(8/bytes_per_count), L);
2109       __ ldr(R10, Address(end_from, -8, pre_indexed));
2110       __ orr(R12, R12, AsmOperand(R10, lsr, lsr_shift));
2111       __ str(R12, Address(end_to, -8, pre_indexed));
2112       __ logical_shift_left(R12, R10, lsl_shift);
2113       __ bind(L);
2114     }
2115 
2116     const int have_bytes = lsr_shift/BitsPerByte; // number of already read bytes in R12
2117 
2118     // Fewer than wordSize bytes remain to be written.
2119     // Do not check count if R12 already holds the maximal possible number of loaded bytes (wordSize - bytes_per_count).
2120     if (have_bytes < wordSize - bytes_per_count) {
2121       Label L;
2122       __ andr(count, count, (uintx)(8/bytes_per_count-1)); // make count exact
2123       __ cmp_32(count, have_bytes/bytes_per_count); // do we have enough bytes to store?
2124       __ b(L, le);
2125       __ ldr(R10, Address(end_from, -8, pre_indexed));
2126       __ orr(R12, R12, AsmOperand(R10, lsr, lsr_shift));
2127       __ bind(L);
2128     }
2129 
2130     assert (bytes_per_count <= 4, "must be");
2131 
2132     {
2133       Label L;
2134       __ tbz(count, exact_log2(4/bytes_per_count), L);
2135       __ logical_shift_right(R9, R12, (wordSize-4)*BitsPerByte);
2136       __ str_w(R9, Address(end_to, -4, pre_indexed)); // Write 4 MSB
2137       if (bytes_per_count < 4) {
2138         __ logical_shift_left(R12, R12, 4*BitsPerByte); // Promote remaining bytes to MSB
2139       }
2140       __ bind(L);
2141     }
2142 
2143     if (bytes_per_count <= 2) {
2144       Label L;
2145       __ tbz(count, exact_log2(2/bytes_per_count), L);
2146       __ logical_shift_right(R9, R12, (wordSize-2)*BitsPerByte);
2147       __ strh(R9, Address(end_to, -2, pre_indexed)); // Write 2 MSB
2148       if (bytes_per_count < 2) {
2149         __ logical_shift_left(R12, R12, 2*BitsPerByte); // Promote remaining bytes to MSB
2150       }
2151       __ bind(L);
2152     }
2153 
2154     if (bytes_per_count <= 1) {
2155       Label L;
2156       __ tbz(count, exact_log2(1/bytes_per_count), L);
2157       __ logical_shift_right(R9, R12, (wordSize-1)*BitsPerByte);
2158       __ strb(R9, Address(end_to, -1, pre_indexed)); // Write 1 MSB
2159       __ bind(L);
2160     }
2161 #else
2162       switch(bytes_per_count) {
2163       case 2:
2164       __ mov(R12, AsmOperand(R3, lsl, lsl_shift)); // part of R3 not yet written
2165       __ tst(count, 8);
2166       __ ldmdb(end_from, RegisterSet(R7,R10), writeback, ne);
2167       __ orr(R12, R12, AsmOperand(R10, lsr, lsr_shift), ne);
2168       __ mov(R10, AsmOperand(R10, lsl, lsl_shift),ne); // unused part of prev val
2169       __ orr(R10, R10, AsmOperand(R9, lsr, lsr_shift),ne); // ...
2170       __ mov(R9, AsmOperand(R9, lsl, lsl_shift),ne);
2171       __ orr(R9, R9, AsmOperand(R8, lsr, lsr_shift),ne);
2172       __ mov(R8, AsmOperand(R8, lsl, lsl_shift),ne);
2173       __ orr(R8, R8, AsmOperand(R7, lsr, lsr_shift),ne);
2174       __ stmdb(end_to, RegisterSet(R8,R10)|R12, writeback, ne);
2175       __ mov(R12, AsmOperand(R7, lsl, lsl_shift), ne);
2176 
2177       __ tst(count, 4);
2178       __ ldmdb(end_from, RegisterSet(R9, R10), writeback, ne);
2179       __ orr(R12, R12, AsmOperand(R10, lsr, lsr_shift), ne);
2180       __ mov(R10, AsmOperand(R10, lsl, lsl_shift),ne); // unused part of prev val
2181       __ orr(R10, R10, AsmOperand(R9, lsr,lsr_shift),ne); // ...
2182       __ stmdb(end_to, RegisterSet(R10)|R12, writeback, ne);
2183       __ mov(R12, AsmOperand(R9, lsl, lsl_shift), ne);
2184 
2185       __ tst(count, 2);
2186       __ ldr(R10, Address(end_from, -4, pre_indexed), ne);
2187       __ orr(R12, R12, AsmOperand(R10, lsr, lsr_shift), ne);
2188       __ str(R12, Address(end_to, -4, pre_indexed), ne);
2189       __ mov(R12, AsmOperand(R10, lsl, lsl_shift), ne);
2190 
2191       __ tst(count, 1);
2192       __ mov(R12, AsmOperand(R12, lsr, lsr_shift),ne);
2193       __ strh(R12, Address(end_to, -2, pre_indexed), ne); // one last short
2194       break;
2195 
2196       case 1:
2197       __ mov(R12, AsmOperand(R3, lsl, lsl_shift)); // part of R3 not yet written
2198       __ tst(count, 16);
2199       __ ldmdb(end_from, RegisterSet(R7,R10), writeback, ne);
2200       __ orr(R12, R12, AsmOperand(R10, lsr, lsr_shift), ne);
2201       __ mov(R10, AsmOperand(R10, lsl, lsl_shift),ne); // unused part of prev val
2202       __ orr(R10, R10, AsmOperand(R9, lsr, lsr_shift),ne); // ...
2203       __ mov(R9, AsmOperand(R9, lsl, lsl_shift),ne);
2204       __ orr(R9, R9, AsmOperand(R8, lsr, lsr_shift),ne);
2205       __ mov(R8, AsmOperand(R8, lsl, lsl_shift),ne);
2206       __ orr(R8, R8, AsmOperand(R7, lsr, lsr_shift),ne);
2207       __ stmdb(end_to, RegisterSet(R8,R10)|R12, writeback, ne);
2208       __ mov(R12, AsmOperand(R7, lsl, lsl_shift), ne);
2209 
2210       __ tst(count, 8);
2211       __ ldmdb(end_from, RegisterSet(R9,R10), writeback, ne);
2212       __ orr(R12, R12, AsmOperand(R10, lsr, lsr_shift), ne);
2213       __ mov(R10, AsmOperand(R10, lsl, lsl_shift),ne); // unused part of prev val
2214       __ orr(R10, R10, AsmOperand(R9, lsr, lsr_shift),ne); // ...
2215       __ stmdb(end_to, RegisterSet(R10)|R12, writeback, ne);
2216       __ mov(R12, AsmOperand(R9, lsl, lsl_shift), ne);
2217 
2218       __ tst(count, 4);
2219       __ ldr(R10, Address(end_from, -4, pre_indexed), ne);
2220       __ orr(R12, R12, AsmOperand(R10, lsr, lsr_shift), ne);
2221       __ str(R12, Address(end_to, -4, pre_indexed), ne);
2222       __ mov(R12, AsmOperand(R10, lsl, lsl_shift), ne);
2223 
2224       __ tst(count, 2);
2225       if (lsr_shift != 24) {
2226         // avoid useless reading R10 when we already have 3 bytes ready in R12
2227         __ ldr(R10, Address(end_from, -4, pre_indexed), ne);
2228         __ orr(R12, R12, AsmOperand(R10, lsr,lsr_shift), ne);
2229       }
2230 
2231       // Note: R12 contains enough bytes ready to write (3 needed at most)
2232       // write the 2 MSBs
2233       __ mov(R9, AsmOperand(R12, lsr, 16), ne);
2234       __ strh(R9, Address(end_to, -2, pre_indexed), ne);
2235       // promote remaining to MSB
2236       __ mov(R12, AsmOperand(R12, lsl, 16), ne);
2237 
2238       __ tst(count, 1);
2239       // write the MSB of R12
2240       __ mov(R12, AsmOperand(R12, lsr, 24), ne);
2241       __ strb(R12, Address(end_to, -1, pre_indexed), ne);
2242 
2243       break;
2244       }
2245 #endif // AARCH64
2246 
2247     __ BIND(L_done);
2248     return 0; // no minimum
2249   }
2250 
2251   // This helper makes it easy to merge the forward and backward copy implementations
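  // For example, with delta == 4 it yields [base], #4 (post-indexed) for a forward copy and
  // [base, #-4]! (pre-indexed) for a backward copy, so a single code path can emit both directions.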
2252   Address get_addr_with_indexing(Register base, int delta, bool forward) {
2253     if (forward) {
2254       return Address(base, delta, post_indexed);
2255     } else {
2256       return Address(base, -delta, pre_indexed);
2257     }
2258   }
2259 
2260 #ifdef AARCH64
2261   // Loads one 'size_in_bytes'-sized value from 'from' in given direction, i.e.
2262   //   if forward:  loads value at from and increases from by size
2263   //   if !forward: loads value at from-size_in_bytes and decreases from by size
2264   void load_one(Register rd, Register from, int size_in_bytes, bool forward) {
2265     assert_different_registers(from, rd);
2266     Address addr = get_addr_with_indexing(from, size_in_bytes, forward);
2267     __ load_sized_value(rd, addr, size_in_bytes, false);
2268   }
2269 
2270   // Stores one 'size_in_bytes'-sized value to 'to' in given direction (see load_one)
2271   void store_one(Register rd, Register to, int size_in_bytes, bool forward) {
2272     assert_different_registers(to, rd);
2273     Address addr = get_addr_with_indexing(to, size_in_bytes, forward);
2274     __ store_sized_value(rd, addr, size_in_bytes);
2275   }
2276 #else
2277   // load_one and store_one are the same as for AArch64 except for
2278   //   *) Support for condition execution
2279   //   *) Second value register argument for 8-byte values
2280 
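  // For example, a conditional 8-byte load can be requested as
  //   load_one(R3, from, 8, forward, ne, R4);
  // which emits an ldmia/ldmdb of {R3,R4} with writeback that only executes when 'ne' holds.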
2281   void load_one(Register rd, Register from, int size_in_bytes, bool forward, AsmCondition cond = al, Register rd2 = noreg) {
2282     assert_different_registers(from, rd, rd2);
2283     if (size_in_bytes < 8) {
2284       Address addr = get_addr_with_indexing(from, size_in_bytes, forward);
2285       __ load_sized_value(rd, addr, size_in_bytes, false, cond);
2286     } else {
2287       assert (rd2 != noreg, "second value register must be specified");
2288       assert (rd->encoding() < rd2->encoding(), "wrong value register set");
2289 
2290       if (forward) {
2291         __ ldmia(from, RegisterSet(rd) | rd2, writeback, cond);
2292       } else {
2293         __ ldmdb(from, RegisterSet(rd) | rd2, writeback, cond);
2294       }
2295     }
2296   }
2297 
2298   void store_one(Register rd, Register to, int size_in_bytes, bool forward, AsmCondition cond = al, Register rd2 = noreg) {
2299     assert_different_registers(to, rd, rd2);
2300     if (size_in_bytes < 8) {
2301       Address addr = get_addr_with_indexing(to, size_in_bytes, forward);
2302       __ store_sized_value(rd, addr, size_in_bytes, cond);
2303     } else {
2304       assert (rd2 != noreg, "second value register must be specified");
2305       assert (rd->encoding() < rd2->encoding(), "wrong value register set");
2306 
2307       if (forward) {
2308         __ stmia(to, RegisterSet(rd) | rd2, writeback, cond);
2309       } else {
2310         __ stmdb(to, RegisterSet(rd) | rd2, writeback, cond);
2311       }
2312     }
2313   }
2314 #endif // AARCH64
2315 
2316   // Copies data from 'from' to 'to' in specified direction to align 'from' by 64 bits.
2317   // (on 32-bit ARM 64-bit alignment is better for LDM).
2318   //
2319   // Arguments:
2320   //     from:              beginning (if forward) or upper bound (if !forward) of the region to be read
2321   //     to:                beginning (if forward) or upper bound (if !forward) of the region to be written
2322   //     count:             32-bit int, maximum number of elements which can be copied
2323   //     bytes_per_count:   size of an element
2324   //     forward:           specifies copy direction
2325   //
2326   // Notes:
2327   //   'from' and 'to' must be aligned by 'bytes_per_count'
2328   //   'count' must not be less than the returned value
2329   //   shifts 'from' and 'to' by the number of copied bytes in corresponding direction
2330   //   decreases 'count' by the number of elements copied
2331   //
2332   // Returns maximum number of bytes which may be copied.
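  // Illustration: for bytes_per_count == 2 with 'from' % 8 == 2, three halfwords (6 bytes) are
  // copied one at a time before 'from' becomes 64-bit aligned, matching the returned bound of
  // 7/bytes_per_count == 3 elements.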
2333   int align_src(Register from, Register to, Register count, Register tmp, int bytes_per_count, bool forward) {
2334     assert_different_registers(from, to, count, tmp);
2335 #ifdef AARCH64
2336     // TODO-AARCH64: replace by simple loop?
2337     Label Laligned_by_2, Laligned_by_4, Laligned_by_8;
2338 
2339     if (bytes_per_count == 1) {
2340       __ tbz(from, 0, Laligned_by_2);
2341       __ sub_32(count, count, 1);
2342       load_one(tmp, from, 1, forward);
2343       store_one(tmp, to, 1, forward);
2344     }
2345 
2346     __ BIND(Laligned_by_2);
2347 
2348     if (bytes_per_count <= 2) {
2349       __ tbz(from, 1, Laligned_by_4);
2350       __ sub_32(count, count, 2/bytes_per_count);
2351       load_one(tmp, from, 2, forward);
2352       store_one(tmp, to, 2, forward);
2353     }
2354 
2355     __ BIND(Laligned_by_4);
2356 
2357     if (bytes_per_count <= 4) {
2358       __ tbz(from, 2, Laligned_by_8);
2359       __ sub_32(count, count, 4/bytes_per_count);
2360       load_one(tmp, from, 4, forward);
2361       store_one(tmp, to, 4, forward);
2362     }
2363     __ BIND(Laligned_by_8);
2364 #else // AARCH64
2365     if (bytes_per_count < 8) {
2366       Label L_align_src;
2367       __ BIND(L_align_src);
2368       __ tst(from, 7);
2369       // ne => not aligned: copy one element and (if bytes_per_count < 4) loop
2370       __ sub(count, count, 1, ne);
2371       load_one(tmp, from, bytes_per_count, forward, ne);
2372       store_one(tmp, to, bytes_per_count, forward, ne);
2373       if (bytes_per_count < 4) {
2374         __ b(L_align_src, ne); // if bytes_per_count == 4, then 0 or 1 loop iterations are enough
2375       }
2376     }
2377 #endif // AARCH64
2378     return 7/bytes_per_count;
2379   }
2380 
2381   // Copies 'count' of 'bytes_per_count'-sized elements in the specified direction.
2382   //
2383   // Arguments:
2384   //     from:              beginning (if forward) or upper bound (if !forward) of the region to be read
2385   //     to:                beginning (if forward) or upper bound (if !forward) of the region to be written
2386   //     count:             32-bit int, number of elements to be copied
2387   //     entry:             copy loop entry point
2388   //     bytes_per_count:   size of an element
2389   //     forward:           specifies copy direction
2390   //
2391   // Notes:
2392   //     shifts 'from' and 'to'
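  // Note for the 32-bit ARM path below: the entry label is placed after the store, so each element
  // is loaded on one pass through the loop and stored at the top of the next pass; the loop exits
  // once the decremented count goes negative, right after the last element has been stored.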
2393   void copy_small_array(Register from, Register to, Register count, Register tmp, Register tmp2, int bytes_per_count, bool forward, Label & entry) {
2394     assert_different_registers(from, to, count, tmp);
2395 
2396     __ align(OptoLoopAlignment);
2397 #ifdef AARCH64
2398     Label L_small_array_done, L_small_array_loop;
2399     __ BIND(entry);
2400     __ cbz_32(count, L_small_array_done);
2401 
2402     __ BIND(L_small_array_loop);
2403     __ subs_32(count, count, 1);
2404     load_one(tmp, from, bytes_per_count, forward);
2405     store_one(tmp, to, bytes_per_count, forward);
2406     __ b(L_small_array_loop, gt);
2407 
2408     __ BIND(L_small_array_done);
2409 #else
2410     Label L_small_loop;
2411     __ BIND(L_small_loop);
2412     store_one(tmp, to, bytes_per_count, forward, al, tmp2);
2413     __ BIND(entry); // entry point
2414     __ subs(count, count, 1);
2415     load_one(tmp, from, bytes_per_count, forward, ge, tmp2);
2416     __ b(L_small_loop, ge);
2417 #endif // AARCH64
2418   }
2419 
2420   // Aligns 'to' by reading one word from 'from' and writing its part to 'to'.
2421   //
2422   // Arguments:
2423   //     to:                beginning (if forward) or upper bound (if !forward) of the region to be written
2424   //     count:             32-bit int, number of elements allowed to be copied
2425   //     to_remainder:      remainder of dividing 'to' by wordSize
2426   //     bytes_per_count:   size of an element
2427   //     forward:           specifies copy direction
2428   //     Rval:              contains an already read but not yet written word;
2429   //                        its LSBs (if forward) or MSBs (if !forward) are to be written to align 'to'.
2430   //
2431   // Notes:
2432   //     'count' must not be less than the returned value
2433   //     'to' must be aligned by bytes_per_count but must not be aligned by wordSize
2434   //     shifts 'to' by the number of written bytes (so that it becomes the bound of memory to be written)
2435   //     decreases 'count' by the number of elements written
2436   //     Rval's MSBs or LSBs remain to be written further by generate_{forward,backward}_shifted_copy_loop
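  // Illustration (forward byte copy on 32-bit ARM, to_remainder == 1): bytes_to_write == 3, so the
  // loop below first stores Rval's low byte, then stores (Rval >> 8) as a halfword; 'to' becomes
  // word aligned and Rval's top byte is left for the shifted copy loop.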
2437   int align_dst(Register to, Register count, Register Rval, Register tmp,
2438                                         int to_remainder, int bytes_per_count, bool forward) {
2439     assert_different_registers(to, count, tmp, Rval);
2440 
2441     assert (0 < to_remainder && to_remainder < wordSize, "to_remainder is not valid");
2442     assert (to_remainder % bytes_per_count == 0, "to must be aligned by bytes_per_count");
2443 
2444     int bytes_to_write = forward ? (wordSize - to_remainder) : to_remainder;
2445 
2446     int offset = 0;
2447 
2448     for (int l = 0; l < LogBytesPerWord; ++l) {
2449       int s = (1 << l);
2450       if (bytes_to_write & s) {
2451         int new_offset = offset + s*BitsPerByte;
2452         if (forward) {
2453           if (offset == 0) {
2454             store_one(Rval, to, s, forward);
2455           } else {
2456             __ logical_shift_right(tmp, Rval, offset);
2457             store_one(tmp, to, s, forward);
2458           }
2459         } else {
2460           __ logical_shift_right(tmp, Rval, BitsPerWord - new_offset);
2461           store_one(tmp, to, s, forward);
2462         }
2463 
2464         offset = new_offset;
2465       }
2466     }
2467 
2468     assert (offset == bytes_to_write * BitsPerByte, "all bytes must be copied");
2469 
2470     __ sub_32(count, count, bytes_to_write/bytes_per_count);
2471 
2472     return bytes_to_write / bytes_per_count;
2473   }
2474 
2475   // Copies 'count' elements using the shifted copy loop
2476   //
2477   // Arguments:
2478   //     from:              beginning (if forward) or upper bound (if !forward) of the region to be read
2479   //     to:                beginning (if forward) or upper bound (if !forward) of the region to be written
2480   //     count:             32-bit int, number of elements to be copied
2481   //     to_remainder:      remainder of dividing 'to' by wordSize
2482   //     bytes_per_count:   size of an element
2483   //     forward:           specifies copy direction
2484   //     Rval:              contains an already read but not yet written word
2485   //
2486   //
2487   // Notes:
2488   //     'count' must not be less than the returned value
2489   //     'from' must be aligned by wordSize
2490   //     'to' must be aligned by bytes_per_count but must not be aligned by wordSize
2491   //     shifts 'to' by the number of copied bytes
2492   //
2493   // Scratches R3-R10, R12
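  // For example, to_remainder == 2 on 32-bit ARM selects lsr_shift == lsl_shift == 16, so after the
  // destination is aligned each stored word combines the leftover halfword of the previous source
  // word with the low halfword of the next one.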
2494   int align_dst_and_generate_shifted_copy_loop(Register from, Register to, Register count, Register Rval,
2495                                                         int to_remainder, int bytes_per_count, bool forward) {
2496 
2497     assert (0 < to_remainder && to_remainder < wordSize, "to_remainder is invalid");
2498 
2499     const Register tmp  = forward ? R3 : R12; // TODO-AARCH64: on conjoint_short R4 was used for tmp
2500     assert_different_registers(from, to, count, Rval, tmp);
2501 
2502     int required_to_align = align_dst(to, count, Rval, tmp, to_remainder, bytes_per_count, forward);
2503 
2504     int lsr_shift = (wordSize - to_remainder) * BitsPerByte;
2505     int lsl_shift = to_remainder * BitsPerByte;
2506 
2507     int min_copy;
2508     if (forward) {
2509       min_copy = generate_forward_shifted_copy_loop(from, to, count, bytes_per_count, lsr_shift, lsl_shift);
2510     } else {
2511       min_copy = generate_backward_shifted_copy_loop(from, to, count, bytes_per_count, lsr_shift, lsl_shift);
2512     }
2513 
2514     return min_copy + required_to_align;
2515   }
2516 
2517   // Copies 'count' elements using the shifted copy loop
2518   //
2519   // Arguments:
2520   //     from:              beginning (if forward) or upper bound (if !forward) of the region to be read
2521   //     to:                beginning (if forward) or upper bound (if !forward) of the region to be written
2522   //     count:             32-bit int, number of elements to be copied
2523   //     bytes_per_count:   size of an element
2524   //     forward:           specifies copy direction
2525   //
2526   // Notes:
2527   //     'count' must not be less than the returned value
2528   //     'from' must be aligned by wordSize
2529   //     'to' must be aligned by bytes_per_count but must not be aligned by wordSize
2530   //     shifts 'to' by the number of copied bytes
2531   //
2532   // Scratches 'from', 'count', R3 and R12.
2533   // On AArch64 also scratches R4-R10, on 32-bit ARM saves them to use.
2534   int align_dst_and_generate_shifted_copy_loop(Register from, Register to, Register count, int bytes_per_count, bool forward) {
2535 
2536     const Register Rval = forward ? R12 : R3; // as generate_{forward,backward}_shifted_copy_loop expect
2537 
2538     int min_copy = 0;
2539 
2540     // Note: if {seq} is a sequence of numbers, L{seq} means that if execution reaches this point,
2541     // then the remainder of 'to' divided by wordSize is one of the elements of {seq}.
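    // For example, in the AArch64 byte-copy case below, L246 is reached when 'to' % wordSize is
    // 2, 4 or 6, and L15 when it is 1 or 5.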
2542 
2543 #ifdef AARCH64
2544     // TODO-AARCH64: simplify, tune
2545 
2546     load_one(Rval, from, wordSize, forward);
2547 
2548     Label L_loop_finished;
2549 
2550     switch (bytes_per_count) {
2551       case 4:
2552         min_copy = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 4, bytes_per_count, forward);
2553         break;
2554       case 2:
2555       {
2556         Label L2, L4, L6;
2557 
2558         __ tbz(to, 1, L4);
2559         __ tbz(to, 2, L2);
2560 
2561         __ BIND(L6);
2562         int min_copy6 = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 6, bytes_per_count, forward);
2563         __ b(L_loop_finished);
2564 
2565         __ BIND(L2);
2566         int min_copy2 = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 2, bytes_per_count, forward);
2567         __ b(L_loop_finished);
2568 
2569         __ BIND(L4);
2570         int min_copy4 = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 4, bytes_per_count, forward);
2571 
2572         min_copy = MAX2(MAX2(min_copy2, min_copy4), min_copy6);
2573         break;
2574       }
2575       case 1:
2576       {
2577         Label L1, L2, L3, L4, L5, L6, L7;
2578         Label L15, L26;
2579         Label L246;
2580 
2581         __ tbz(to, 0, L246);
2582         __ tbz(to, 1, L15);
2583         __ tbz(to, 2, L3);
2584 
2585         __ BIND(L7);
2586         int min_copy7 = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 7, bytes_per_count, forward);
2587         __ b(L_loop_finished);
2588 
2589         __ BIND(L246);
2590         __ tbnz(to, 1, L26);
2591 
2592         __ BIND(L4);
2593         int min_copy4 = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 4, bytes_per_count, forward);
2594         __ b(L_loop_finished);
2595 
2596         __ BIND(L15);
2597         __ tbz(to, 2, L1);
2598 
2599         __ BIND(L5);
2600         int min_copy5 = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 5, bytes_per_count, forward);
2601         __ b(L_loop_finished);
2602 
2603         __ BIND(L3);
2604         int min_copy3 = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 3, bytes_per_count, forward);
2605         __ b(L_loop_finished);
2606 
2607         __ BIND(L26);
2608         __ tbz(to, 2, L2);
2609 
2610         __ BIND(L6);
2611         int min_copy6 = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 6, bytes_per_count, forward);
2612         __ b(L_loop_finished);
2613 
2614         __ BIND(L1);
2615         int min_copy1 = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 1, bytes_per_count, forward);
2616         __ b(L_loop_finished);
2617 
2618         __ BIND(L2);
2619         int min_copy2 = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 2, bytes_per_count, forward);
2620 
2621 
2622         min_copy = MAX2(min_copy1, min_copy2);
2623         min_copy = MAX2(min_copy,  min_copy3);
2624         min_copy = MAX2(min_copy,  min_copy4);
2625         min_copy = MAX2(min_copy,  min_copy5);
2626         min_copy = MAX2(min_copy,  min_copy6);
2627         min_copy = MAX2(min_copy,  min_copy7);
2628         break;
2629       }
2630       default:
2631         ShouldNotReachHere();
2632         break;
2633     }
2634     __ BIND(L_loop_finished);
2635 
2636 #else
2637     __ push(RegisterSet(R4,R10));
2638     load_one(Rval, from, wordSize, forward);
2639 
2640     switch (bytes_per_count) {
2641       case 2:
2642         min_copy = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 2, bytes_per_count, forward);
2643         break;
2644       case 1:
2645       {
2646         Label L1, L2, L3;
2647         int min_copy1, min_copy2, min_copy3;
2648 
2649         Label L_loop_finished;
2650 
2651         if (forward) {
2652             __ tbz(to, 0, L2);
2653             __ tbz(to, 1, L1);
2654 
2655             __ BIND(L3);
2656             min_copy3 = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 3, bytes_per_count, forward);
2657             __ b(L_loop_finished);
2658 
2659             __ BIND(L1);
2660             min_copy1 = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 1, bytes_per_count, forward);
2661             __ b(L_loop_finished);
2662 
2663             __ BIND(L2);
2664             min_copy2 = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 2, bytes_per_count, forward);
2665         } else {
2666             __ tbz(to, 0, L2);
2667             __ tbnz(to, 1, L3);
2668 
2669             __ BIND(L1);
2670             min_copy1 = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 1, bytes_per_count, forward);
2671             __ b(L_loop_finished);
2672 
2673              __ BIND(L3);
2674             min_copy3 = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 3, bytes_per_count, forward);
2675             __ b(L_loop_finished);
2676 
2677            __ BIND(L2);
2678             min_copy2 = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 2, bytes_per_count, forward);
2679         }
2680 
2681         min_copy = MAX2(MAX2(min_copy1, min_copy2), min_copy3);
2682 
2683         __ BIND(L_loop_finished);
2684 
2685         break;
2686       }
2687       default:
2688         ShouldNotReachHere();
2689         break;
2690     }
2691 
2692     __ pop(RegisterSet(R4,R10));
2693 #endif // AARCH64
2694 
2695     return min_copy;
2696   }
2697 
2698 #ifndef PRODUCT
2699   int * get_arraycopy_counter(int bytes_per_count) {
2700     switch (bytes_per_count) {
2701       case 1:
2702         return &SharedRuntime::_jbyte_array_copy_ctr;
2703       case 2:
2704         return &SharedRuntime::_jshort_array_copy_ctr;
2705       case 4:
2706         return &SharedRuntime::_jint_array_copy_ctr;
2707       case 8:
2708         return &SharedRuntime::_jlong_array_copy_ctr;
2709       default:
2710         ShouldNotReachHere();
2711         return NULL;
2712     }
2713   }
2714 #endif // !PRODUCT
2715 
2716   //
2717   //  Generate stub for primitive array copy.  If "aligned" is true, the
2718   //  "from" and "to" addresses are assumed to be heapword aligned.
2719   //
2720   //  If "disjoint" is true, arrays are assumed to be disjoint, otherwise they may overlap and
2721   //  "nooverlap_target" must be specified as the address to jump if they don't.
2722   //
2723   // Arguments for generated stub:
2724   //      from:  R0
2725   //      to:    R1
2726   //      count: R2 treated as signed 32-bit int
2727   //
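  // For instance, a disjoint byte copy stub is requested with bytes_per_count == 1 and
  // disjoint == true (no nooverlap_target needed); the matching conjoint stub typically passes
  // the disjoint stub's entry point as nooverlap_target.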
2728   address generate_primitive_copy(bool aligned, const char * name, bool status, int bytes_per_count, bool disjoint, address nooverlap_target = NULL) {
2729     __ align(CodeEntryAlignment);
2730     StubCodeMark mark(this, "StubRoutines", name);
2731     address start = __ pc();
2732 
2733     const Register from  = R0;   // source array address
2734     const Register to    = R1;   // destination array address
2735     const Register count = R2;   // elements count
2736     const Register tmp1  = R3;
2737     const Register tmp2  = R12;
2738 
2739     if (!aligned)  {
2740       BLOCK_COMMENT("Entry:");
2741     }
2742 
2743     __ zap_high_non_significant_bits(R2);
2744 
2745     if (!disjoint) {
2746       assert (nooverlap_target != NULL, "must be specified for conjoint case");
2747       array_overlap_test(nooverlap_target, exact_log2(bytes_per_count), tmp1, tmp2);
2748     }
2749 
2750     inc_counter_np(*get_arraycopy_counter(bytes_per_count), tmp1, tmp2);
2751 
2752     // Conjoint case: since execution reaches this point, the arrays overlap, so perform a backward copy
2753     // Disjoint case: perform a forward copy
2754     bool forward = disjoint;
2755 
2756 
2757     if (!forward) {
2758       // Set 'from' and 'to' to upper bounds
2759       int log_bytes_per_count = exact_log2(bytes_per_count);
2760       __ add_ptr_scaled_int32(to,   to,   count, log_bytes_per_count);
2761       __ add_ptr_scaled_int32(from, from, count, log_bytes_per_count);
2762     }
2763 
2764     // There are two main copy loop implementations:
2765     //  *) The huge and complex one, applicable only to large enough arrays
2766     //  *) The small and simple one, applicable to any array (but not efficient for large arrays).
2767     // Currently the "small" implementation is used if and only if the "large" one cannot be used.
2768     // XXX optim: tune the limit higher ?
2769     // The lower applicability bound of the large implementation is determined by the source
2770     // alignment step (which may consume up to 7 bytes) plus the 8 words needed by one iteration of the aligned copy loop.
2771     const int small_copy_limit = (8*wordSize + 7) / bytes_per_count;
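    // For example, for byte copies (bytes_per_count == 1) small_copy_limit is 39 elements on
    // 32-bit ARM (wordSize == 4) and 71 on AArch64 (wordSize == 8).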
2772 
2773     Label L_small_array;
2774     __ cmp_32(count, small_copy_limit);
2775     __ b(L_small_array, le); // TODO-AARCH64: le vs lt
2776 
2777     // Otherwise proceed with large implementation.
2778 
2779     bool from_is_aligned = (bytes_per_count >= 8);
2780     if (aligned && forward && (HeapWordSize % 8 == 0)) {
2781         // if 'from' is heapword aligned and HeapWordSize is divisible by 8,
2782         //  then from is aligned by 8
2783         from_is_aligned = true;
2784     }
2785 
2786     int count_required_to_align = from_is_aligned ? 0 : align_src(from, to, count, tmp1, bytes_per_count, forward);
2787     assert (small_copy_limit >= count_required_to_align, "alignment could exhaust count");
2788 
2789     // now 'from' is aligned
2790 
2791     bool to_is_aligned = false;
2792 
2793     if (bytes_per_count >= wordSize) {
2794       // 'to' is aligned by bytes_per_count, so it is aligned by wordSize
2795       to_is_aligned = true;
2796     } else {
2797       if (aligned && (8 % HeapWordSize == 0) && (HeapWordSize % wordSize == 0)) {
2798         // Originally 'from' and 'to' were heapword aligned;
2799         // (from - to) has not been changed, so since now 'from' is 8-byte aligned, then it is also heapword aligned,
2800         //  so 'to' is also heapword aligned and thus aligned by wordSize.
2801         to_is_aligned = true;
2802       }
2803     }
2804 
2805     Label L_unaligned_dst;
2806 
2807     if (!to_is_aligned) {
2808       BLOCK_COMMENT("Check dst alignment:");
2809       __ tst(to, wordSize - 1);
2810       __ b(L_unaligned_dst, ne); // 'to' is not aligned
2811     }
2812 
2813     // 'from' and 'to' are properly aligned
2814 
2815     int min_copy;
2816     if (forward) {
2817       min_copy = generate_forward_aligned_copy_loop (from, to, count, bytes_per_count);
2818     } else {
2819       min_copy = generate_backward_aligned_copy_loop(from, to, count, bytes_per_count);
2820     }
2821     assert(small_copy_limit >= count_required_to_align + min_copy, "first loop might exhaust count");
2822 
2823     if (status) {
2824       __ mov(R0, 0); // OK
2825     }
2826 
2827     __ ret();
2828 
2829     {
2830       copy_small_array(from, to, count, tmp1, tmp2, bytes_per_count, forward, L_small_array /* entry */);
2831 
2832       if (status) {
2833         __ mov(R0, 0); // OK
2834       }
2835 
2836       __ ret();
2837     }
2838 
2839     if (!to_is_aligned) {
2840       __ BIND(L_unaligned_dst);
2841       int min_copy_shifted = align_dst_and_generate_shifted_copy_loop(from, to, count, bytes_per_count, forward);
2842       assert (small_copy_limit >= count_required_to_align + min_copy_shifted, "first loop might exhaust count");
2843 
2844       if (status) {
2845         __ mov(R0, 0); // OK
2846       }
2847 
2848       __ ret();
2849     }
2850 
2851     return start;
2852   }
2853 
2854 
2855   // Generates pattern of code to be placed after raw data copying in generate_oop_copy
2856   // Includes return from arraycopy stub.
2857   //
2858   // Arguments:
2859   //     to:       destination pointer after copying.
2860   //               if 'forward' then 'to' == upper bound, else 'to' == beginning of the modified region
2861   //     count:    total number of copied elements, 32-bit int
2862   //
2863   // Blows all volatile (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR) and 'to', 'count', 'tmp' registers.
2864   void oop_arraycopy_stub_epilogue_helper(Register to, Register count, Register tmp, bool status, bool forward, DecoratorSet decorators) {
2865     assert_different_registers(to, count, tmp);
2866 
2867     if (forward) {
2868       // 'to' is upper bound of the modified region
2869       // restore initial dst:
2870       __ sub_ptr_scaled_int32(to, to, count, LogBytesPerHeapOop);
2871     }
2872 
2873     // 'to' is the beginning of the region
2874 
2875     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
2876     bs->arraycopy_epilogue(_masm, decorators, true, to, count, tmp);
2877 
2878     if (status) {
2879       __ mov(R0, 0); // OK
2880     }
2881 
2882 #ifdef AARCH64
2883     __ raw_pop(LR, ZR);
2884     __ ret();
2885 #else
2886     __ pop(PC);
2887 #endif // AARCH64
2888   }
2889 
2890 
2891   //  Generate stub for assign-compatible oop copy.  If "aligned" is true, the
2892   //  "from" and "to" addresses are assumed to be heapword aligned.
2893   //
2894   //  If "disjoint" is true, the arrays are assumed to be disjoint; otherwise they may overlap and
2895   //  "nooverlap_target" must be specified as the address to jump to if they turn out not to overlap.
2896   //
2897   // Arguments for generated stub:
2898   //      from:  R0
2899   //      to:    R1
2900   //      count: R2 treated as signed 32-bit int
2901   //
2902   address generate_oop_copy(bool aligned, const char * name, bool status, bool disjoint, address nooverlap_target = NULL) {
2903     __ align(CodeEntryAlignment);
2904     StubCodeMark mark(this, "StubRoutines", name);
2905     address start = __ pc();
2906 
2907     Register from  = R0;
2908     Register to    = R1;
2909     Register count = R2;
2910     Register tmp1  = R3;
2911     Register tmp2  = R12;
2912 
2913 
2914     if (!aligned) {
2915       BLOCK_COMMENT("Entry:");
2916     }
2917 
2918     __ zap_high_non_significant_bits(R2);
2919 
2920     if (!disjoint) {
2921       assert (nooverlap_target != NULL, "must be specified for conjoint case");
2922       array_overlap_test(nooverlap_target, LogBytesPerHeapOop, tmp1, tmp2);
2923     }
2924 
2925     inc_counter_np(SharedRuntime::_oop_array_copy_ctr, tmp1, tmp2);
2926 
2927     // Conjoint case: since execution reaches this point, the arrays overlap, so perform a backward copy
2928     // Disjoint case: perform a forward copy
2929     bool forward = disjoint;
2930 
2931     const int bytes_per_count = BytesPerHeapOop;
2932     const int log_bytes_per_count = LogBytesPerHeapOop;
2933 
2934     const Register saved_count = LR;
2935     const int callee_saved_regs = 3; // R0-R2
2936 
2937     // LR is used later to save barrier args
2938 #ifdef AARCH64
2939     __ raw_push(LR, ZR);
2940 #else
2941     __ push(LR);
2942 #endif // AARCH64
2943 
2944     DecoratorSet decorators = IN_HEAP | IS_ARRAY;
2945     if (disjoint) {
2946       decorators |= ARRAYCOPY_DISJOINT;
2947     }
2948     if (aligned) {
2949       decorators |= ARRAYCOPY_ALIGNED;
2950     }
2951 
2952     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
2953     bs->arraycopy_prologue(_masm, decorators, true, to, count, callee_saved_regs);
2954 
2955     // save arguments for barrier generation (after the pre barrier)
2956     __ mov(saved_count, count);
2957 
2958     if (!forward) {
2959       __ add_ptr_scaled_int32(to,   to,   count, log_bytes_per_count);
2960       __ add_ptr_scaled_int32(from, from, count, log_bytes_per_count);
2961     }
2962 
2963     // for short arrays, just do single element copy
2964     Label L_small_array;
2965     const int small_copy_limit = (8*wordSize + 7)/bytes_per_count; // XXX optim: tune the limit higher ?
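    // For illustration (assuming a 32-bit VM: wordSize == 4, BytesPerHeapOop == 4), this gives
    // small_copy_limit == (8*4 + 7) / 4 == 9, so arrays longer than 9 oops take the aligned
    // bulk-copy path below.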
2966     __ cmp_32(count, small_copy_limit);
2967     __ b(L_small_array, le);
2968 
2969     bool from_is_aligned = (bytes_per_count >= 8);
2970     if (aligned && forward && (HeapWordSize % 8 == 0)) {
2971         // if 'from' is heapword aligned and HeapWordSize is divisible by 8,
2972         //  then from is aligned by 8
2973         from_is_aligned = true;
2974     }
2975 
2976     int count_required_to_align = from_is_aligned ? 0 : align_src(from, to, count, tmp1, bytes_per_count, forward);
2977     assert (small_copy_limit >= count_required_to_align, "alignment could exhaust count");
2978 
2979     // now 'from' is aligned
2980 
2981     bool to_is_aligned = false;
2982 
2983     if (bytes_per_count >= wordSize) {
2984       // 'to' is aligned by bytes_per_count, so it is aligned by wordSize
2985       to_is_aligned = true;
2986     } else {
2987       if (aligned && (8 % HeapWordSize == 0) && (HeapWordSize % wordSize == 0)) {
2988         // Originally 'from' and 'to' were heapword aligned;
2989         // (from - to) has not been changed, so since now 'from' is 8-byte aligned, then it is also heapword aligned,
2990         //  so 'to' is also heapword aligned and thus aligned by wordSize.
2991         to_is_aligned = true;
2992       }
2993     }
2994 
2995     Label L_unaligned_dst;
2996 
2997     if (!to_is_aligned) {
2998       BLOCK_COMMENT("Check dst alignment:");
2999       __ tst(to, wordSize - 1);
3000       __ b(L_unaligned_dst, ne); // 'to' is not aligned
3001     }
3002 
3003     int min_copy;
3004     if (forward) {
3005       min_copy = generate_forward_aligned_copy_loop(from, to, count, bytes_per_count);
3006     } else {
3007       min_copy = generate_backward_aligned_copy_loop(from, to, count, bytes_per_count);
3008     }
3009     assert(small_copy_limit >= count_required_to_align + min_copy, "first loop might exhaust count");
3010 
3011     oop_arraycopy_stub_epilogue_helper(to, saved_count, /* tmp */ tmp1, status, forward, decorators);
3012 
3013     {
3014       copy_small_array(from, to, count, tmp1, noreg, bytes_per_count, forward, L_small_array);
3015 
3016       oop_arraycopy_stub_epilogue_helper(to, saved_count, /* tmp */ tmp1, status, forward, decorators);
3017     }
3018 
3019     if (!to_is_aligned) {
3020       // !to_is_aligned <=> UseCompressedOops && AArch64
3021       __ BIND(L_unaligned_dst);
3022 #ifdef AARCH64
3023       assert (UseCompressedOops, "unaligned oop array copy may be requested only with UseCompressedOops");
3024 #else
3025       ShouldNotReachHere();
3026 #endif // AARCH64
3027       int min_copy_shifted = align_dst_and_generate_shifted_copy_loop(from, to, count, bytes_per_count, forward);
3028       assert (small_copy_limit >= count_required_to_align + min_copy_shifted, "first loop might exhaust count");
3029 
3030       oop_arraycopy_stub_epilogue_helper(to, saved_count, /* tmp */ tmp1, status, forward, decorators);
3031     }
3032 
3033     return start;
3034   }
3035 
3036   //  Generate 'unsafe' array copy stub
3037   //  Though just as safe as the other stubs, it takes an unscaled
3038   //  size_t argument instead of an element count.
3039   //
3040   // Arguments for generated stub:
3041   //      from:  R0
3042   //      to:    R1
3043   //      count: R2 byte count, treated as ssize_t, can be zero
3044   //
3045   // Examines the alignment of the operands and dispatches
3046   // to a long, int, short, or byte copy loop.
3047   //
3048   address generate_unsafe_copy(const char* name) {
3049 
3050     const Register R0_from   = R0;      // source array address
3051     const Register R1_to     = R1;      // destination array address
3052     const Register R2_count  = R2;      // elements count
3053 
3054     const Register R3_bits   = R3;      // test copy of low bits
3055 
3056     __ align(CodeEntryAlignment);
3057     StubCodeMark mark(this, "StubRoutines", name);
3058     address start = __ pc();
3059 #ifdef AARCH64
3060     __ NOT_IMPLEMENTED();
3061     start = NULL;
3062 #else
3063     const Register tmp = Rtemp;
3064 
3065     // bump this on entry, not on exit:
3066     inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr, R3, tmp);
3067 
3068     __ orr(R3_bits, R0_from, R1_to);
3069     __ orr(R3_bits, R2_count, R3_bits);
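    // R3_bits now holds (from | to | count): if the combined value is a multiple of 8, 4 or 2,
    // all three operands share that alignment, so dispatch to the widest matching element copy
    // and scale the byte count down to an element count on the way.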
3070 
3071     __ tst(R3_bits, BytesPerLong-1);
3072     __ mov(R2_count,AsmOperand(R2_count,asr,LogBytesPerLong), eq);
3073     __ jump(StubRoutines::_jlong_arraycopy, relocInfo::runtime_call_type, tmp, eq);
3074 
3075     __ tst(R3_bits, BytesPerInt-1);
3076     __ mov(R2_count,AsmOperand(R2_count,asr,LogBytesPerInt), eq);
3077     __ jump(StubRoutines::_jint_arraycopy, relocInfo::runtime_call_type, tmp, eq);
3078 
3079     __ tst(R3_bits, BytesPerShort-1);
3080     __ mov(R2_count,AsmOperand(R2_count,asr,LogBytesPerShort), eq);
3081     __ jump(StubRoutines::_jshort_arraycopy, relocInfo::runtime_call_type, tmp, eq);
3082 
3083     __ jump(StubRoutines::_jbyte_arraycopy, relocInfo::runtime_call_type, tmp);
3084 #endif
3085     return start;
3086   }
3087 
3088   // Helper for generating a dynamic type check.
3089   // Smashes only the given temp registers.
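  // The check first compares the word at sub_klass + super_check_offset against super_klass
  // (the fast path, covering the primary supers and the secondary super cache); if that misses
  // at the secondary-super-cache slot, it falls back to a linear scan of the secondary supers
  // array and refreshes the cache on a hit.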
3090   void generate_type_check(Register sub_klass,
3091                            Register super_check_offset,
3092                            Register super_klass,
3093                            Register tmp1,
3094                            Register tmp2,
3095                            Register tmp3,
3096                            Label& L_success) {
3097     assert_different_registers(sub_klass, super_check_offset, super_klass, tmp1, tmp2, tmp3);
3098 
3099     BLOCK_COMMENT("type_check:");
3100 
3101     // If the pointers are equal, we are done (e.g., String[] elements).
3102 
3103     __ cmp(super_klass, sub_klass);
3104     __ b(L_success, eq); // fast success
3105 
3106 
3107     Label L_loop, L_fail;
3108 
3109     int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
3110 
3111     // Check the supertype display:
3112     __ ldr(tmp1, Address(sub_klass, super_check_offset));
3113     __ cmp(tmp1, super_klass);
3114     __ b(L_success, eq);
3115 
3116     __ cmp(super_check_offset, sc_offset);
3117     __ b(L_fail, ne); // failure
3118 
3119     BLOCK_COMMENT("type_check_slow_path:");
3120 
3121     // a couple of useful fields in sub_klass:
3122     int ss_offset = in_bytes(Klass::secondary_supers_offset());
3123 
3124     // Do a linear scan of the secondary super-klass chain.
3125 
3126 #ifndef PRODUCT
3127     int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
3128     __ inc_counter((address) pst_counter, tmp1, tmp2);
3129 #endif
3130 
3131     Register scan_temp = tmp1;
3132     Register count_temp = tmp2;
3133 
3134     // We will consult the secondary-super array.
3135     __ ldr(scan_temp, Address(sub_klass, ss_offset));
3136 
3137     Register search_key = super_klass;
3138 
3139     // Load the array length.
3140     __ ldr_s32(count_temp, Address(scan_temp, Array<Klass*>::length_offset_in_bytes()));
3141     __ add(scan_temp, scan_temp, Array<Klass*>::base_offset_in_bytes());
3142 
3143     __ add(count_temp, count_temp, 1);
3144 
3145     // Top of search loop
3146     __ bind(L_loop);
3147     // Notes:
3148     //  scan_temp starts at the array elements
3149     //  count_temp is 1+size
3150 
3151     __ subs(count_temp, count_temp, 1);
3152     __ b(L_fail, eq); // not found
3153 
3154     // Load next super to check
3155     // In the array of super classes elements are pointer sized.
3156     int element_size = wordSize;
3157     __ ldr(tmp3, Address(scan_temp, element_size, post_indexed));
3158 
3159     // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list
3160     __ cmp(tmp3, search_key);
3161 
3162     // A miss means we are NOT a subtype and need to keep looping
3163     __ b(L_loop, ne);
3164 
3165     // Falling out the bottom means we found a hit; we ARE a subtype
3166 
3167     // Success.  Cache the super we found and proceed in triumph.
3168     __ str(super_klass, Address(sub_klass, sc_offset));
3169 
3170     // Jump to success
3171     __ b(L_success);
3172 
3173     // Fall through on failure!
3174     __ bind(L_fail);
3175   }
3176 
3177   //  Generate stub for checked oop copy.
3178   //
3179   // Arguments for generated stub:
3180   //      from:  R0
3181   //      to:    R1
3182   //      count: R2 treated as signed 32-bit int
3183   //      ckoff: R3 (super_check_offset)
3184   //      ckval: R4 (AArch64) / SP[0] (32-bit ARM) (super_klass)
3185   //      ret:   R0 zero for success; (-1^K) where K is partial transfer count (32-bit)
3186   //
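  //  A result of ~K (K = number of elements copied before a failing type check) tells the caller
  //  how much work remains for the slow path; 0 means the whole array was copied.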
3187   address generate_checkcast_copy(const char * name) {
3188     __ align(CodeEntryAlignment);
3189     StubCodeMark mark(this, "StubRoutines", name);
3190     address start = __ pc();
3191 
3192     const Register from  = R0;  // source array address
3193     const Register to    = R1;  // destination array address
3194     const Register count = R2;  // elements count
3195 
3196     const Register R3_ckoff  = R3;      // super_check_offset
3197     const Register R4_ckval  = R4;      // super_klass
3198 
3199     const int callee_saved_regs = AARCH64_ONLY(5) NOT_AARCH64(4); // LR saved differently
3200 
3201     Label load_element, store_element, do_epilogue, fail;
3202 
3203     BLOCK_COMMENT("Entry:");
3204 
3205     __ zap_high_non_significant_bits(R2);
3206 
3207 #ifdef AARCH64
3208     __ raw_push(LR, ZR);
3209     __ raw_push(R19, R20);
3210 #else
3211     int pushed = 0;
3212     __ push(LR);
3213     pushed+=1;
3214 #endif // AARCH64
3215 
3216     DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_CHECKCAST;
3217 
3218     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
3219     bs->arraycopy_prologue(_masm, decorators, true, to, count, callee_saved_regs);
3220 
3221 #ifndef AARCH64
3222     const RegisterSet caller_saved_regs = RegisterSet(R4,R6) | RegisterSet(R8,R9) | altFP_7_11;
3223     __ push(caller_saved_regs);
3224     assert(caller_saved_regs.size() == 6, "check the count");
3225     pushed+=6;
3226 
3227     __ ldr(R4_ckval,Address(SP, wordSize*pushed)); // read the argument that was on the stack
3228 #endif // !AARCH64
3229 
3230     // Save arguments for barrier generation (after the pre barrier):
3231     // - must be a caller saved register and not LR
3232     // - ARM32: avoid R10 in case RThread is needed
3233     const Register saved_count = AARCH64_ONLY(R19) NOT_AARCH64(altFP_7_11);
3234 #ifdef AARCH64
3235     __ mov_w(saved_count, count);
3236     __ cbnz_w(count, load_element); // and test count
3237 #else
3238     __ movs(saved_count, count); // and test count
3239     __ b(load_element,ne);
3240 #endif // AARCH64
3241 
3242     // nothing to copy
3243     __ mov(R0, 0);
3244 
3245 #ifdef AARCH64
3246     __ raw_pop(R19, R20);
3247     __ raw_pop(LR, ZR);
3248     __ ret();
3249 #else
3250     __ pop(caller_saved_regs);
3251     __ pop(PC);
3252 #endif // AARCH64
3253 
3254     // ======== begin loop ========
3255     // (Loop is rotated; its entry is load_element.)
3256     __ align(OptoLoopAlignment);
3257     __ BIND(store_element);
3258     if (UseCompressedOops) {
3259       __ store_heap_oop(Address(to, BytesPerHeapOop, post_indexed), R5);  // store the oop, changes flags
3260       __ subs_32(count,count,1);
3261     } else {
3262       __ subs_32(count,count,1);
3263       __ str(R5, Address(to, BytesPerHeapOop, post_indexed));             // store the oop
3264     }
3265     __ b(do_epilogue, eq); // count exhausted
3266 
3267     // ======== loop entry is here ========
3268     __ BIND(load_element);
3269     __ load_heap_oop(R5, Address(from, BytesPerHeapOop, post_indexed));  // load the oop
3270     __ cbz(R5, store_element); // NULL
3271 
3272     __ load_klass(R6, R5);
3273 
3274     generate_type_check(R6, R3_ckoff, R4_ckval, /*tmps*/ R12, R8, R9,
3275                         // branch to this on success:
3276                         store_element);
3277     // ======== end loop ========
3278 
3279     // It was a real error; we must depend on the caller to finish the job.
3280     // Register count has number of *remaining* oops, saved_count number of *total* oops.
3281     // Emit GC store barriers for the oops we have copied
3282     // and report their number to the caller (0 or (-1^n))
3283     __ BIND(fail);
3284 
3285     // Note: fail marked by the fact that count differs from saved_count
3286 
3287     __ BIND(do_epilogue);
3288 
3289     Register copied = AARCH64_ONLY(R20) NOT_AARCH64(R4); // saved
3290     Label L_not_copied;
3291 
3292     __ subs_32(copied, saved_count, count); // copied count (in saved reg)
3293     __ b(L_not_copied, eq); // nothing was copied, skip post barrier
3294     __ sub(to, to, AsmOperand(copied, lsl, LogBytesPerHeapOop)); // initial to value
3295     __ mov(R12, copied); // count arg scratched by post barrier
3296 
3297     bs->arraycopy_epilogue(_masm, decorators, true, to, R12, R3);
3298 
3299     assert_different_registers(R3,R12,LR,copied,saved_count);
3300     inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, R3, R12);
3301 
3302     __ BIND(L_not_copied);
3303     __ cmp_32(copied, saved_count); // values preserved in saved registers
3304 
3305 #ifdef AARCH64
3306     __ csinv(R0, ZR, copied, eq); // 0 if all copied else NOT(copied)
3307     __ raw_pop(R19, R20);
3308     __ raw_pop(LR, ZR);
3309     __ ret();
3310 #else
3311     __ mov(R0, 0, eq); // 0 if all copied
3312     __ mvn(R0, copied, ne); // else NOT(copied)
3313     __ pop(caller_saved_regs);
3314     __ pop(PC);
3315 #endif // AARCH64
3316 
3317     return start;
3318   }
3319 
3320   // Perform range checks on the proposed arraycopy.
3321   // Kills the two temps, but nothing else.
3322   void arraycopy_range_checks(Register src,     // source array oop
3323                               Register src_pos, // source position (32-bit int)
3324                               Register dst,     // destination array oop
3325                               Register dst_pos, // destination position (32-bit int)
3326                               Register length,  // length of copy (32-bit int)
3327                               Register temp1, Register temp2,
3328                               Label& L_failed) {
3329 
3330     BLOCK_COMMENT("arraycopy_range_checks:");
3331 
3332     //  if (src_pos + length > arrayOop(src)->length() ) FAIL;
3333 
3334     const Register array_length = temp1;  // scratch
3335     const Register end_pos      = temp2;  // scratch
3336 
3337     __ add_32(end_pos, length, src_pos);  // src_pos + length
3338     __ ldr_s32(array_length, Address(src, arrayOopDesc::length_offset_in_bytes()));
3339     __ cmp_32(end_pos, array_length);
3340     __ b(L_failed, hi);
3341 
3342     //  if (dst_pos + length > arrayOop(dst)->length() ) FAIL;
3343     __ add_32(end_pos, length, dst_pos); // dst_pos + length
3344     __ ldr_s32(array_length, Address(dst, arrayOopDesc::length_offset_in_bytes()));
3345     __ cmp_32(end_pos, array_length);
3346     __ b(L_failed, hi);
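    // In both checks the comparison is unsigned ('hi'), so a src_pos/dst_pos + length sum that
    // overflows a signed 32-bit int still fails: compared as unsigned it necessarily exceeds any
    // valid array length.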
3347 
3348     BLOCK_COMMENT("arraycopy_range_checks done");
3349   }
3350 
3351   //
3352   //  Generate generic array copy stubs
3353   //
3354   //  Input:
3355   //    R0    -  src oop
3356   //    R1    -  src_pos (32-bit int)
3357   //    R2    -  dst oop
3358   //    R3    -  dst_pos (32-bit int)
3359   //    R4 (AArch64) / SP[0] (32-bit ARM) -  element count (32-bit int)
3360   //
3361   //  Output: (32-bit int)
3362   //    R0 ==  0  -  success
3363   //    R0 <   0  -  need to call System.arraycopy
3364   //
3365   address generate_generic_copy(const char *name) {
3366     Label L_failed, L_objArray;
3367 
3368     // Input registers
3369     const Register src      = R0;  // source array oop
3370     const Register src_pos  = R1;  // source position
3371     const Register dst      = R2;  // destination array oop
3372     const Register dst_pos  = R3;  // destination position
3373 
3374     // registers used as temp
3375     const Register R5_src_klass = R5; // source array klass
3376     const Register R6_dst_klass = R6; // destination array klass
3377     const Register R_lh         = AARCH64_ONLY(R7) NOT_AARCH64(altFP_7_11); // layout handler
3378     const Register R8_temp      = R8;
3379 
3380     __ align(CodeEntryAlignment);
3381     StubCodeMark mark(this, "StubRoutines", name);
3382     address start = __ pc();
3383 
3384     __ zap_high_non_significant_bits(R1);
3385     __ zap_high_non_significant_bits(R3);
3386     __ zap_high_non_significant_bits(R4);
3387 
3388 #ifndef AARCH64
3389     int pushed = 0;
3390     const RegisterSet saved_regs = RegisterSet(R4,R6) | RegisterSet(R8,R9) | altFP_7_11;
3391     __ push(saved_regs);
3392     assert(saved_regs.size() == 6, "check the count");
3393     pushed+=6;
3394 #endif // !AARCH64
3395 
3396     // bump this on entry, not on exit:
3397     inc_counter_np(SharedRuntime::_generic_array_copy_ctr, R5, R12);
3398 
3399     const Register length   = R4;  // elements count
3400 #ifndef AARCH64
3401     __ ldr(length, Address(SP,4*pushed));
3402 #endif // !AARCH64
3403 
3404 
3405     //-----------------------------------------------------------------------
3406     // Assembler stubs will be used for this call to arraycopy
3407     // if the following conditions are met:
3408     //
3409     // (1) src and dst must not be null.
3410     // (2) src_pos must not be negative.
3411     // (3) dst_pos must not be negative.
3412     // (4) length  must not be negative.
3413     // (5) src klass and dst klass should be the same and not NULL.
3414     // (6) src and dst should be arrays.
3415     // (7) src_pos + length must not exceed length of src.
3416     // (8) dst_pos + length must not exceed length of dst.
3417     BLOCK_COMMENT("arraycopy initial argument checks");
3418 
3419     //  if (src == NULL) return -1;
3420     __ cbz(src, L_failed);
3421 
3422     //  if (src_pos < 0) return -1;
3423     __ cmp_32(src_pos, 0);
3424     __ b(L_failed, lt);
3425 
3426     //  if (dst == NULL) return -1;
3427     __ cbz(dst, L_failed);
3428 
3429     //  if (dst_pos < 0) return -1;
3430     __ cmp_32(dst_pos, 0);
3431     __ b(L_failed, lt);
3432 
3433     //  if (length < 0) return -1;
3434     __ cmp_32(length, 0);
3435     __ b(L_failed, lt);
3436 
3437     BLOCK_COMMENT("arraycopy argument klass checks");
3438     //  get src->klass()
3439     __ load_klass(R5_src_klass, src);
3440 
3441     // Load layout helper
3442     //
3443     //  |array_tag|     | header_size | element_type |     |log2_element_size|
3444     // 32        30    24            16              8     2                 0
3445     //
3446     //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
3447     //
3448 
3449     int lh_offset = in_bytes(Klass::layout_helper_offset());
3450     __ ldr_u32(R_lh, Address(R5_src_klass, lh_offset));
3451 
3452     __ load_klass(R6_dst_klass, dst);
3453 
3454     // Handle objArrays completely differently...
3455     juint objArray_lh = Klass::array_layout_helper(T_OBJECT);
3456     __ mov_slow(R8_temp, objArray_lh);
3457     __ cmp_32(R_lh, R8_temp);
3458     __ b(L_objArray,eq);
3459 
3460     //  if (src->klass() != dst->klass()) return -1;
3461     __ cmp(R5_src_klass, R6_dst_klass);
3462     __ b(L_failed, ne);
3463 
3464     //  if (!src->is_Array()) return -1;
3465     __ cmp_32(R_lh, Klass::_lh_neutral_value); // < 0
3466     __ b(L_failed, ge);
3467 
3468     arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
3469                            R8_temp, R6_dst_klass, L_failed);
3470 
3471     {
3472       // TypeArrayKlass
3473       //
3474       // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
3475       // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
3476       //
3477 
3478       const Register R6_offset = R6_dst_klass;    // array offset
3479       const Register R12_elsize = R12;            // log2 element size
3480 
3481       __ logical_shift_right(R6_offset, R_lh, Klass::_lh_header_size_shift);
3482       __ andr(R6_offset, R6_offset, (unsigned int)Klass::_lh_header_size_mask); // array_offset
3483       __ add(src, src, R6_offset);       // src array offset
3484       __ add(dst, dst, R6_offset);       // dst array offset
3485       __ andr(R12_elsize, R_lh, (unsigned int)Klass::_lh_log2_element_size_mask); // log2 element size
3486 
3487       // next registers should be set before the jump to corresponding stub
3488       const Register from     = R0;  // source array address
3489       const Register to       = R1;  // destination array address
3490       const Register count    = R2;  // elements count
3491 
3492       // 'from', 'to', 'count' registers should be set in this order
3493       // since they are the same as 'src', 'src_pos', 'dst'.
3494 
3495 #ifdef AARCH64
3496 
3497       BLOCK_COMMENT("choose copy loop based on element size and scale indexes");
3498       Label Lbyte, Lshort, Lint, Llong;
3499 
3500       __ cbz(R12_elsize, Lbyte);
3501 
3502       assert (LogBytesPerShort < LogBytesPerInt && LogBytesPerInt < LogBytesPerLong, "must be");
3503       __ cmp(R12_elsize, LogBytesPerInt);
3504       __ b(Lint,  eq);
3505       __ b(Llong, gt);
3506 
3507       __ BIND(Lshort);
3508       __ add_ptr_scaled_int32(from, src, src_pos, LogBytesPerShort);
3509       __ add_ptr_scaled_int32(to,   dst, dst_pos, LogBytesPerShort);
3510       __ mov(count, length);
3511       __ b(StubRoutines::_jshort_arraycopy);
3512 
3513       __ BIND(Lint);
3514       __ add_ptr_scaled_int32(from, src, src_pos, LogBytesPerInt);
3515       __ add_ptr_scaled_int32(to,   dst, dst_pos, LogBytesPerInt);
3516       __ mov(count, length);
3517       __ b(StubRoutines::_jint_arraycopy);
3518 
3519       __ BIND(Lbyte);
3520       __ add_ptr_scaled_int32(from, src, src_pos, 0);
3521       __ add_ptr_scaled_int32(to,   dst, dst_pos, 0);
3522       __ mov(count, length);
3523       __ b(StubRoutines::_jbyte_arraycopy);
3524 
3525       __ BIND(Llong);
3526       __ add_ptr_scaled_int32(from, src, src_pos, LogBytesPerLong);
3527       __ add_ptr_scaled_int32(to,   dst, dst_pos, LogBytesPerLong);
3528       __ mov(count, length);
3529       __ b(StubRoutines::_jlong_arraycopy);
3530 
3531 #else // AARCH64
3532 
3533       BLOCK_COMMENT("scale indexes to element size");
3534       __ add(from, src, AsmOperand(src_pos, lsl, R12_elsize));       // src_addr
3535       __ add(to, dst, AsmOperand(dst_pos, lsl, R12_elsize));         // dst_addr
3536 
3537       __ mov(count, length);  // length
3538 
3539       // XXX optim: avoid later push in arraycopy variants ?
3540 
3541       __ pop(saved_regs);
3542 
3543       BLOCK_COMMENT("choose copy loop based on element size");
3544       __ cmp(R12_elsize, 0);
3545       __ b(StubRoutines::_jbyte_arraycopy,eq);
3546 
3547       __ cmp(R12_elsize, LogBytesPerShort);
3548       __ b(StubRoutines::_jshort_arraycopy,eq);
3549 
3550       __ cmp(R12_elsize, LogBytesPerInt);
3551       __ b(StubRoutines::_jint_arraycopy,eq);
3552 
3553       __ b(StubRoutines::_jlong_arraycopy);
3554 
3555 #endif // AARCH64
3556     }
3557 
3558     // ObjArrayKlass
3559     __ BIND(L_objArray);
3560     // live at this point:  R5_src_klass, R6_dst_klass, src[_pos], dst[_pos], length
3561 
3562     Label L_plain_copy, L_checkcast_copy;
3563     //  test array classes for subtyping
3564     __ cmp(R5_src_klass, R6_dst_klass);         // usual case is exact equality
3565     __ b(L_checkcast_copy, ne);
3566 
3567     BLOCK_COMMENT("Identically typed arrays");
3568     {
3569       // Identically typed arrays can be copied without element-wise checks.
3570       arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
3571                              R8_temp, R_lh, L_failed);
3572 
3573       // next registers should be set before the jump to corresponding stub
3574       const Register from     = R0;  // source array address
3575       const Register to       = R1;  // destination array address
3576       const Register count    = R2;  // elements count
3577 
3578       __ add(src, src, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //src offset
3579       __ add(dst, dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //dst offset
3580       __ add_ptr_scaled_int32(from, src, src_pos, LogBytesPerHeapOop);         // src_addr
3581       __ add_ptr_scaled_int32(to, dst, dst_pos, LogBytesPerHeapOop);           // dst_addr
3582       __ BIND(L_plain_copy);
3583       __ mov(count, length);
3584 
3585 #ifndef AARCH64
3586       __ pop(saved_regs); // XXX optim: avoid later push in oop_arraycopy ?
3587 #endif // !AARCH64
3588       __ b(StubRoutines::_oop_arraycopy);
3589     }
3590 
3591     {
3592       __ BIND(L_checkcast_copy);
3593       // live at this point:  R5_src_klass, R6_dst_klass
3594 
3595       // Before looking at dst.length, make sure dst is also an objArray.
3596       __ ldr_u32(R8_temp, Address(R6_dst_klass, lh_offset));
3597       __ cmp_32(R_lh, R8_temp);
3598       __ b(L_failed, ne);
3599 
3600       // It is safe to examine both src.length and dst.length.
3601 
3602       arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
3603                              R8_temp, R_lh, L_failed);
3604 
3605       // next registers should be set before the jump to corresponding stub
3606       const Register from     = R0;  // source array address
3607       const Register to       = R1;  // destination array address
3608       const Register count    = R2;  // elements count
3609 
3610       // Marshal the base address arguments now, freeing registers.
3611       __ add(src, src, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //src offset
3612       __ add(dst, dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //dst offset
3613       __ add_ptr_scaled_int32(from, src, src_pos, LogBytesPerHeapOop);         // src_addr
3614       __ add_ptr_scaled_int32(to, dst, dst_pos, LogBytesPerHeapOop);           // dst_addr
3615 
3616       __ mov(count, length); // length (reloaded)
3617 
3618       Register sco_temp = R3;                   // this register is free now
3619       assert_different_registers(from, to, count, sco_temp,
3620                                  R6_dst_klass, R5_src_klass);
3621 
3622       // Generate the type check.
3623       int sco_offset = in_bytes(Klass::super_check_offset_offset());
3624       __ ldr_u32(sco_temp, Address(R6_dst_klass, sco_offset));
3625       generate_type_check(R5_src_klass, sco_temp, R6_dst_klass,
3626                           R8_temp, R9,
3627                           AARCH64_ONLY(R10) NOT_AARCH64(R12),
3628                           L_plain_copy);
3629 
3630       // Fetch destination element klass from the ObjArrayKlass header.
3631       int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
3632 
3633       // the checkcast_copy loop needs two extra arguments:
3634       const Register Rdst_elem_klass = AARCH64_ONLY(R4) NOT_AARCH64(R3);
3635       __ ldr(Rdst_elem_klass, Address(R6_dst_klass, ek_offset));   // dest elem klass
3636 #ifndef AARCH64
3637       __ pop(saved_regs); // XXX optim: avoid later push in oop_arraycopy ?
3638       __ str(Rdst_elem_klass, Address(SP,0));    // dest elem klass argument
3639 #endif // !AARCH64
3640       __ ldr_u32(R3, Address(Rdst_elem_klass, sco_offset));  // sco of elem klass
3641       __ b(StubRoutines::_checkcast_arraycopy);
3642     }
3643 
3644     __ BIND(L_failed);
3645 
3646 #ifndef AARCH64
3647     __ pop(saved_regs);
3648 #endif // !AARCH64
3649     __ mvn(R0, 0); // failure, with 0 copied
3650     __ ret();
3651 
3652     return start;
3653   }
3654 
3655   // Safefetch stubs.
3656   void generate_safefetch(const char* name, int size, address* entry, address* fault_pc, address* continuation_pc) {
3657     // safefetch signatures:
3658     //   int      SafeFetch32(int*      adr, int      errValue);
3659     //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
3660     //
3661     // arguments:
3662     //   R0 = adr
3663     //   R1 = errValue
3664     //
3665     // result:
3666     //   R0  = *adr or errValue
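    // If the load below faults, the platform signal handler recognizes *fault_pc and resumes
    // execution at *continuation_pc, so errValue (still in R1) is returned unchanged.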
3667 
3668     StubCodeMark mark(this, "StubRoutines", name);
3669 
3670     // Entry point, pc or function descriptor.
3671     *entry = __ pc();
3672 
3673     // Load *adr into R1; this may fault.
3674     *fault_pc = __ pc();
3675 
3676     switch (size) {
3677       case 4: // int32_t
3678         __ ldr_s32(R1, Address(R0));
3679         break;
3680 
3681       case 8: // int64_t
3682 #ifdef AARCH64
3683         __ ldr(R1, Address(R0));
3684 #else
3685         Unimplemented();
3686 #endif // AARCH64
3687         break;
3688 
3689       default:
3690         ShouldNotReachHere();
3691     }
3692 
3693     // return errValue or *adr
3694     *continuation_pc = __ pc();
3695     __ mov(R0, R1);
3696     __ ret();
3697   }
3698 
3699   void generate_arraycopy_stubs() {
3700 
3701     // Note:  the disjoint stubs must be generated first, some of
3702     //        the conjoint stubs use them.
3703 
3704     bool status = false; // non failing C2 stubs need not return a status in R0
3705 
3706 #ifdef TEST_C2_GENERIC_ARRAYCOPY /* Internal development flag */
3707     // With this flag, the C2 stubs are tested by generating calls to
3708     // generic_arraycopy instead of Runtime1::arraycopy
3709 
3710     // Runtime1::arraycopy returns a status in R0 (0 if OK, else ~copied)
3711     // and the result is tested to see whether the arraycopy stub should
3712     // be called.
3713 
3714     // When we test arraycopy this way, we must generate extra code in the
3715     // arraycopy methods callable from C2 generic_arraycopy to set the
3716     // status to 0 for those that always succeed (calling the slow path stub might
3717     // lead to errors since the copy has already been performed).
3718 
3719     status = true; // generate a status compatible with C1 calls
3720 #endif
3721 
3722     // these always need a status, in case they are called from generic_arraycopy
3723     StubRoutines::_jbyte_disjoint_arraycopy  = generate_primitive_copy(false, "jbyte_disjoint_arraycopy",  true, 1, true);
3724     StubRoutines::_jshort_disjoint_arraycopy = generate_primitive_copy(false, "jshort_disjoint_arraycopy", true, 2, true);
3725     StubRoutines::_jint_disjoint_arraycopy   = generate_primitive_copy(false, "jint_disjoint_arraycopy",   true, 4, true);
3726     StubRoutines::_jlong_disjoint_arraycopy  = generate_primitive_copy(false, "jlong_disjoint_arraycopy",  true, 8, true);
3727     StubRoutines::_oop_disjoint_arraycopy    = generate_oop_copy      (false, "oop_disjoint_arraycopy",    true,    true);
3728 
3729     StubRoutines::_arrayof_jbyte_disjoint_arraycopy  = generate_primitive_copy(true, "arrayof_jbyte_disjoint_arraycopy", status, 1, true);
3730     StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_primitive_copy(true, "arrayof_jshort_disjoint_arraycopy",status, 2, true);
3731     StubRoutines::_arrayof_jint_disjoint_arraycopy   = generate_primitive_copy(true, "arrayof_jint_disjoint_arraycopy",  status, 4, true);
3732     StubRoutines::_arrayof_jlong_disjoint_arraycopy  = generate_primitive_copy(true, "arrayof_jlong_disjoint_arraycopy", status, 8, true);
3733     StubRoutines::_arrayof_oop_disjoint_arraycopy    = generate_oop_copy      (true, "arrayof_oop_disjoint_arraycopy",   status,    true);
3734 
3735     // these always need a status, in case they are called from generic_arraycopy
3736     StubRoutines::_jbyte_arraycopy  = generate_primitive_copy(false, "jbyte_arraycopy",  true, 1, false, StubRoutines::_jbyte_disjoint_arraycopy);
3737     StubRoutines::_jshort_arraycopy = generate_primitive_copy(false, "jshort_arraycopy", true, 2, false, StubRoutines::_jshort_disjoint_arraycopy);
3738     StubRoutines::_jint_arraycopy   = generate_primitive_copy(false, "jint_arraycopy",   true, 4, false, StubRoutines::_jint_disjoint_arraycopy);
3739     StubRoutines::_jlong_arraycopy  = generate_primitive_copy(false, "jlong_arraycopy",  true, 8, false, StubRoutines::_jlong_disjoint_arraycopy);
3740     StubRoutines::_oop_arraycopy    = generate_oop_copy      (false, "oop_arraycopy",    true,    false, StubRoutines::_oop_disjoint_arraycopy);
3741 
3742     StubRoutines::_arrayof_jbyte_arraycopy    = generate_primitive_copy(true, "arrayof_jbyte_arraycopy",  status, 1, false, StubRoutines::_arrayof_jbyte_disjoint_arraycopy);
3743     StubRoutines::_arrayof_jshort_arraycopy   = generate_primitive_copy(true, "arrayof_jshort_arraycopy", status, 2, false, StubRoutines::_arrayof_jshort_disjoint_arraycopy);
3744 #ifdef _LP64
3745     // since sizeof(jint) < sizeof(HeapWord), there's a different flavor:
3746     StubRoutines::_arrayof_jint_arraycopy     = generate_primitive_copy(true, "arrayof_jint_arraycopy",   status, 4, false, StubRoutines::_arrayof_jint_disjoint_arraycopy);
3747 #else
3748     StubRoutines::_arrayof_jint_arraycopy     = StubRoutines::_jint_arraycopy;
3749 #endif
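    // BytesPerHeapOop < HeapWordSize only when compressed oops pack 4-byte oops into an
    // 8-byte-word heap on a 64-bit VM; otherwise the plain oop stub already covers the
    // 'arrayof' case.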
3750     if (BytesPerHeapOop < HeapWordSize) {
3751       StubRoutines::_arrayof_oop_arraycopy    = generate_oop_copy      (true, "arrayof_oop_arraycopy",    status,    false, StubRoutines::_arrayof_oop_disjoint_arraycopy);
3752     } else {
3753       StubRoutines::_arrayof_oop_arraycopy    = StubRoutines::_oop_arraycopy;
3754     }
3755     StubRoutines::_arrayof_jlong_arraycopy    = StubRoutines::_jlong_arraycopy;
3756 
3757     StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy");
3758     StubRoutines::_unsafe_arraycopy    = generate_unsafe_copy("unsafe_arraycopy");
3759     StubRoutines::_generic_arraycopy   = generate_generic_copy("generic_arraycopy");
3760 
3761 
3762   }
3763 
3764 #ifndef AARCH64
3765 #define COMPILE_CRYPTO
3766 #include "stubRoutinesCrypto_arm.cpp"
3767 #else
3768 
3769 #ifdef COMPILER2
3770   // Arguments:
3771   //
3772   // Inputs:
3773   //   c_rarg0   - source byte array address
3774   //   c_rarg1   - destination byte array address
3775   //   c_rarg2   - K (key) in little endian int array
3776   //
3777   address generate_aescrypt_encryptBlock() {
3778     __ align(CodeEntryAlignment);
3779     StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
3780 
3781     Label L_doLast;
3782 
3783     const Register from        = c_rarg0;  // source array address
3784     const Register to          = c_rarg1;  // destination array address
3785     const Register key         = c_rarg2;  // key array address
3786     const Register keylen      = R8;
3787 
3788     address start = __ pc();
3789     __ stp(FP, LR, Address(SP, -2 * wordSize, pre_indexed));
3790     __ mov(FP, SP);
3791 
3792     __ ldr_w(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
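    // keylen is the expanded key length in ints: 44 for AES-128, 52 for AES-192 and 60 for
    // AES-256, which is what the comparisons against 44 and 52 below distinguish.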
3793 
3794     __ vld1(V0, Address(from), MacroAssembler::VELEM_SIZE_8, 128); // get 16 bytes of input
3795 
3796     __ vld1(V1, V2, V3, V4, Address(key, 64, post_indexed), MacroAssembler::VELEM_SIZE_8, 128);
3797 
3798     int quad = 1;
3799     __ rev32(V1, V1, MacroAssembler::VELEM_SIZE_8, quad);
3800     __ rev32(V2, V2, MacroAssembler::VELEM_SIZE_8, quad);
3801     __ rev32(V3, V3, MacroAssembler::VELEM_SIZE_8, quad);
3802     __ rev32(V4, V4, MacroAssembler::VELEM_SIZE_8, quad);
3803     __ aese(V0, V1);
3804     __ aesmc(V0, V0);
3805     __ aese(V0, V2);
3806     __ aesmc(V0, V0);
3807     __ aese(V0, V3);
3808     __ aesmc(V0, V0);
3809     __ aese(V0, V4);
3810     __ aesmc(V0, V0);
3811 
3812     __ vld1(V1, V2, V3, V4, Address(key, 64, post_indexed), MacroAssembler::VELEM_SIZE_8, 128);
3813     __ rev32(V1, V1, MacroAssembler::VELEM_SIZE_8, quad);
3814     __ rev32(V2, V2, MacroAssembler::VELEM_SIZE_8, quad);
3815     __ rev32(V3, V3, MacroAssembler::VELEM_SIZE_8, quad);
3816     __ rev32(V4, V4, MacroAssembler::VELEM_SIZE_8, quad);
3817     __ aese(V0, V1);
3818     __ aesmc(V0, V0);
3819     __ aese(V0, V2);
3820     __ aesmc(V0, V0);
3821     __ aese(V0, V3);
3822     __ aesmc(V0, V0);
3823     __ aese(V0, V4);
3824     __ aesmc(V0, V0);
3825 
3826     __ vld1(V1, V2, Address(key, 32, post_indexed), MacroAssembler::VELEM_SIZE_8, 128);
3827     __ rev32(V1, V1, MacroAssembler::VELEM_SIZE_8, quad);
3828     __ rev32(V2, V2, MacroAssembler::VELEM_SIZE_8, quad);
3829 
3830     __ cmp_w(keylen, 44);
3831     __ b(L_doLast, eq);
3832 
3833     __ aese(V0, V1);
3834     __ aesmc(V0, V0);
3835     __ aese(V0, V2);
3836     __ aesmc(V0, V0);
3837 
3838     __ vld1(V1, V2, Address(key, 32, post_indexed), MacroAssembler::VELEM_SIZE_8, 128);
3839     __ rev32(V1, V1, MacroAssembler::VELEM_SIZE_8, quad);
3840     __ rev32(V2, V2, MacroAssembler::VELEM_SIZE_8, quad);
3841 
3842     __ cmp_w(keylen, 52);
3843     __ b(L_doLast, eq);
3844 
3845     __ aese(V0, V1);
3846     __ aesmc(V0, V0);
3847     __ aese(V0, V2);
3848     __ aesmc(V0, V0);
3849 
3850     __ vld1(V1, V2, Address(key, 32, post_indexed), MacroAssembler::VELEM_SIZE_8, 128);
3851     __ rev32(V1, V1, MacroAssembler::VELEM_SIZE_8, quad);
3852     __ rev32(V2, V2, MacroAssembler::VELEM_SIZE_8, quad);
3853 
3854     __ BIND(L_doLast);
3855 
3856     __ aese(V0, V1);
3857     __ aesmc(V0, V0);
3858     __ aese(V0, V2);
3859 
3860     __ vld1(V1, Address(key), MacroAssembler::VELEM_SIZE_8, 128);
3861     __ rev32(V1, V1, MacroAssembler::VELEM_SIZE_8, quad);
3862     __ eor(V0, V0, V1, MacroAssembler::VELEM_SIZE_8, quad);
3863 
3864     __ vst1(V0, Address(to), MacroAssembler::VELEM_SIZE_8, 128);
3865 
3866     __ mov(R0, 0);
3867 
3868     __ mov(SP, FP);
3869     __ ldp(FP, LR, Address(SP, 2 * wordSize, post_indexed));
3870     __ ret(LR);
3871 
3872     return start;
3873   }
3874 
3875   // Arguments:
3876   //
3877   // Inputs:
3878   //   c_rarg0   - source byte array address
3879   //   c_rarg1   - destination byte array address
3880   //   c_rarg2   - K (key) in little endian int array
3881   //
3882   address generate_aescrypt_decryptBlock() {
3883     assert(UseAES, "need AES instructions");
3884     __ align(CodeEntryAlignment);
3885     StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
3886     Label L_doLast;
3887 
3888     const Register from        = c_rarg0;  // source array address
3889     const Register to          = c_rarg1;  // destination array address
3890     const Register key         = c_rarg2;  // key array address
3891     const Register keylen      = R8;
3892 
3893     address start = __ pc();
3894     __ stp(FP, LR, Address(SP, -2 * wordSize, pre_indexed));
3895     __ mov(FP, SP);
3896 
3897     __ ldr_w(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
3898 
3899     __ vld1(V0, Address(from), MacroAssembler::VELEM_SIZE_8, 128); // get 16 bytes of input
3900 
3901     __ vld1(V5, Address(key, 16, post_indexed), MacroAssembler::VELEM_SIZE_8, 128);
3902 
3903     int quad = 1;
3904     __ rev32(V5, V5, MacroAssembler::VELEM_SIZE_8, quad);
3905 
3906     __ vld1(V1, V2, V3, V4, Address(key, 64, post_indexed), MacroAssembler::VELEM_SIZE_8, 128);
3907     __ rev32(V1, V1, MacroAssembler::VELEM_SIZE_8, quad);
3908     __ rev32(V2, V2, MacroAssembler::VELEM_SIZE_8, quad);
3909     __ rev32(V3, V3, MacroAssembler::VELEM_SIZE_8, quad);
3910     __ rev32(V4, V4, MacroAssembler::VELEM_SIZE_8, quad);
3911     __ aesd(V0, V1);
3912     __ aesimc(V0, V0);
3913     __ aesd(V0, V2);
3914     __ aesimc(V0, V0);
3915     __ aesd(V0, V3);
3916     __ aesimc(V0, V0);
3917     __ aesd(V0, V4);
3918     __ aesimc(V0, V0);
3919 
3920     __ vld1(V1, V2, V3, V4, Address(key, 64, post_indexed), MacroAssembler::VELEM_SIZE_8, 128);
3921     __ rev32(V1, V1, MacroAssembler::VELEM_SIZE_8, quad);
3922     __ rev32(V2, V2, MacroAssembler::VELEM_SIZE_8, quad);
3923     __ rev32(V3, V3, MacroAssembler::VELEM_SIZE_8, quad);
3924     __ rev32(V4, V4, MacroAssembler::VELEM_SIZE_8, quad);
3925     __ aesd(V0, V1);
3926     __ aesimc(V0, V0);
3927     __ aesd(V0, V2);
3928     __ aesimc(V0, V0);
3929     __ aesd(V0, V3);
3930     __ aesimc(V0, V0);
3931     __ aesd(V0, V4);
3932     __ aesimc(V0, V0);
3933 
3934     __ vld1(V1, V2, Address(key, 32, post_indexed), MacroAssembler::VELEM_SIZE_8, 128);
3935     __ rev32(V1, V1, MacroAssembler::VELEM_SIZE_8, quad);
3936     __ rev32(V2, V2, MacroAssembler::VELEM_SIZE_8, quad);
3937 
3938     __ cmp_w(keylen, 44);
3939     __ b(L_doLast, eq);
3940 
3941     __ aesd(V0, V1);
3942     __ aesimc(V0, V0);
3943     __ aesd(V0, V2);
3944     __ aesimc(V0, V0);
3945 
3946     __ vld1(V1, V2, Address(key, 32, post_indexed), MacroAssembler::VELEM_SIZE_8, 128);
3947     __ rev32(V1, V1, MacroAssembler::VELEM_SIZE_8, quad);
3948     __ rev32(V2, V2, MacroAssembler::VELEM_SIZE_8, quad);
3949 
3950     __ cmp_w(keylen, 52);
3951     __ b(L_doLast, eq);
3952 
3953     __ aesd(V0, V1);
3954     __ aesimc(V0, V0);
3955     __ aesd(V0, V2);
3956     __ aesimc(V0, V0);
3957 
3958     __ vld1(V1, V2, Address(key, 32, post_indexed), MacroAssembler::VELEM_SIZE_8, 128);
3959     __ rev32(V1, V1, MacroAssembler::VELEM_SIZE_8, quad);
3960     __ rev32(V2, V2, MacroAssembler::VELEM_SIZE_8, quad);
3961 
3962     __ BIND(L_doLast);
3963 
3964     __ aesd(V0, V1);
3965     __ aesimc(V0, V0);
3966     __ aesd(V0, V2);
3967 
3968     __ eor(V0, V0, V5, MacroAssembler::VELEM_SIZE_8, quad);
3969 
3970     __ vst1(V0, Address(to), MacroAssembler::VELEM_SIZE_8, 128);
3971 
3972     __ mov(R0, 0);
3973 
3974     __ mov(SP, FP);
3975     __ ldp(FP, LR, Address(SP, 2 * wordSize, post_indexed));
3976     __ ret(LR);
3977 
3978 
3979     return start;
3980   }
3981 
3982   // Arguments:
3983   //
3984   // Inputs:
3985   //   c_rarg0   - source byte array address
3986   //   c_rarg1   - destination byte array address
3987   //   c_rarg2   - K (key) in little endian int array
3988   //   c_rarg3   - r vector byte array address
3989   //   c_rarg4   - input length
3990   //
3991   // Output:
3992   //   R0        - input length
3993   //
3994   address generate_cipherBlockChaining_encryptAESCrypt() {
3995     assert(UseAES, "need AES instructions");
3996     __ align(CodeEntryAlignment);
3997     StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
3998 
3999     Label L_loadkeys_44, L_loadkeys_52, L_aes_loop, L_rounds_44, L_rounds_52;
4000 
4001     const Register from        = c_rarg0;  // source array address
4002     const Register to          = c_rarg1;  // destination array address
4003     const Register key         = c_rarg2;  // key array address
4004     const Register rvec        = c_rarg3;  // r byte array initialized from initvector array address
4005                                            // and left with the results of the last encryption block
4006     const Register len_reg     = c_rarg4;  // src len (must be multiple of blocksize 16)
4007     const Register keylen      = R8;
4008 
4009     address start = __ pc();
4010     __ stp(FP, LR, Address(SP, -2 * wordSize, pre_indexed));
4011     __ mov(FP, SP);
4012 
4013     __ mov(R9, len_reg);
4014     __ ldr_w(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
4015 
4016     __ vld1(V0, Address(rvec), MacroAssembler::VELEM_SIZE_8, 128);
4017 
4018     __ cmp_w(keylen, 52);
4019     __ b(L_loadkeys_44, cc);
4020     __ b(L_loadkeys_52, eq);
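    // keylen < 52 ('cc'): AES-128, only the 11 round keys loaded from L_loadkeys_44 are needed;
    // keylen == 52:       AES-192, additionally load V19/V20;
    // otherwise:          AES-256, fall through and load V17/V18 as well.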
4021 
4022     __ vld1(V17, V18, Address(key, 32, post_indexed), MacroAssembler::VELEM_SIZE_8, 128);
4023 
4024     int quad = 1;
4025     __ rev32(V17, V17, MacroAssembler::VELEM_SIZE_8, quad);
4026     __ rev32(V18, V18, MacroAssembler::VELEM_SIZE_8, quad);
4027     __ BIND(L_loadkeys_52);
4028     __ vld1(V19, V20, Address(key, 32, post_indexed), MacroAssembler::VELEM_SIZE_8, 128);
4029     __ rev32(V19, V19, MacroAssembler::VELEM_SIZE_8, quad);
4030     __ rev32(V20, V20, MacroAssembler::VELEM_SIZE_8, quad);
4031     __ BIND(L_loadkeys_44);
4032     __ vld1(V21, V22, V23, V24, Address(key, 64, post_indexed), MacroAssembler::VELEM_SIZE_8, 128);
4033     __ rev32(V21, V21, MacroAssembler::VELEM_SIZE_8, quad);
4034     __ rev32(V22, V22, MacroAssembler::VELEM_SIZE_8, quad);
4035     __ rev32(V23, V23, MacroAssembler::VELEM_SIZE_8, quad);
4036     __ rev32(V24, V24, MacroAssembler::VELEM_SIZE_8, quad);
4037     __ vld1(V25, V26, V27, V28, Address(key, 64, post_indexed), MacroAssembler::VELEM_SIZE_8, 128);
4038     __ rev32(V25, V25, MacroAssembler::VELEM_SIZE_8, quad);
4039     __ rev32(V26, V26, MacroAssembler::VELEM_SIZE_8, quad);
4040     __ rev32(V27, V27, MacroAssembler::VELEM_SIZE_8, quad);
4041     __ rev32(V28, V28, MacroAssembler::VELEM_SIZE_8, quad);
4042     __ vld1(V29, V30, V31, Address(key), MacroAssembler::VELEM_SIZE_8, 128);
4043     __ rev32(V29, V29, MacroAssembler::VELEM_SIZE_8, quad);
4044     __ rev32(V30, V30, MacroAssembler::VELEM_SIZE_8, quad);
4045     __ rev32(V31, V31, MacroAssembler::VELEM_SIZE_8, quad);
4046 
4047     __ BIND(L_aes_loop);
4048     __ vld1(V1, Address(from, 16, post_indexed), MacroAssembler::VELEM_SIZE_8, 128);
4049     __ eor(V0, V0, V1, MacroAssembler::VELEM_SIZE_8, quad);
4050 
4051     __ b(L_rounds_44, cc);
4052     __ b(L_rounds_52, eq);
4053 
4054     __ aese(V0, V17);
4055     __ aesmc(V0, V0);
4056     __ aese(V0, V18);
4057     __ aesmc(V0, V0);
4058     __ BIND(L_rounds_52);
4059     __ aese(V0, V19);
4060     __ aesmc(V0, V0);
4061     __ aese(V0, V20);
4062     __ aesmc(V0, V0);
4063     __ BIND(L_rounds_44);
4064     __ aese(V0, V21);
4065     __ aesmc(V0, V0);
4066     __ aese(V0, V22);
4067     __ aesmc(V0, V0);
4068     __ aese(V0, V23);
4069     __ aesmc(V0, V0);
4070     __ aese(V0, V24);
4071     __ aesmc(V0, V0);
4072     __ aese(V0, V25);
4073     __ aesmc(V0, V0);
4074     __ aese(V0, V26);
4075     __ aesmc(V0, V0);
4076     __ aese(V0, V27);
4077     __ aesmc(V0, V0);
4078     __ aese(V0, V28);
4079     __ aesmc(V0, V0);
4080     __ aese(V0, V29);
4081     __ aesmc(V0, V0);
4082     __ aese(V0, V30);
4083     __ eor(V0, V0, V31, MacroAssembler::VELEM_SIZE_8, quad);
4084 
4085     __ vst1(V0, Address(to, 16, post_indexed), MacroAssembler::VELEM_SIZE_8, 128);
4086     __ sub(len_reg, len_reg, 16);
4087     __ cbnz(len_reg, L_aes_loop);
4088 
4089     __ vst1(V0, Address(rvec), MacroAssembler::VELEM_SIZE_8, 128);
4090 
4091     __ mov(R0, R9);
4092 
4093     __ mov(SP, FP);
4094     __ ldp(FP, LR, Address(SP, 2 * wordSize, post_indexed));
4095     __ ret(LR);
4096 
4097     return start;
4098   }
4099 
4100   // Arguments:
4101   //
4102   // Inputs:
4103   //   c_rarg0   - source byte array address
4104   //   c_rarg1   - destination byte array address
4105   //   c_rarg2   - K (key) in little endian int array
4106   //   c_rarg3   - r vector byte array address
4107   //   c_rarg4   - input length
4108   //
4109   // Output:
4110   //   R0        - input length
4111   //
4112   address generate_cipherBlockChaining_decryptAESCrypt() {
4113     assert(UseAES, "need AES instructions");
4114     __ align(CodeEntryAlignment);
4115     StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
4116 
4117     Label L_loadkeys_44, L_loadkeys_52, L_aes_loop, L_rounds_44, L_rounds_52;
4118 
4119     const Register from        = c_rarg0;  // source array address
4120     const Register to          = c_rarg1;  // destination array address
4121     const Register key         = c_rarg2;  // key array address
4122     const Register rvec        = c_rarg3;  // r byte array initialized from initvector array address
4123                                            // and left with the last input (cipher text) block
4124     const Register len_reg     = c_rarg4;  // src len (must be multiple of blocksize 16)
4125     const Register keylen      = R8;
4126 
4127     address start = __ pc();
4128     __ stp(FP, LR, Address(SP, -2 * wordSize, pre_indexed));
4129     __ mov(FP, SP);
4130 
4131     __ mov(R9, len_reg);
4132     __ ldr_w(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
4133 
4134     __ vld1(V2, Address(rvec), MacroAssembler::VELEM_SIZE_8, 128);
4135 
4136     __ vld1(V31, Address(key, 16, post_indexed), MacroAssembler::VELEM_SIZE_8, 128);
4137 
4138     int quad = 1;
4139     __ rev32(V31, V31, MacroAssembler::VELEM_SIZE_8, quad);
4140 
4141     __ cmp_w(keylen, 52);
4142     __ b(L_loadkeys_44, cc);
4143     __ b(L_loadkeys_52, eq);
4144 
4145     __ vld1(V17, V18, Address(key, 32, post_indexed), MacroAssembler::VELEM_SIZE_8, 128);
4146     __ rev32(V17, V17, MacroAssembler::VELEM_SIZE_8, quad);
4147     __ rev32(V18, V18, MacroAssembler::VELEM_SIZE_8, quad);
4148     __ BIND(L_loadkeys_52);
4149     __ vld1(V19, V20, Address(key, 32, post_indexed), MacroAssembler::VELEM_SIZE_8, 128);
4150     __ rev32(V19, V19, MacroAssembler::VELEM_SIZE_8, quad);
4151     __ rev32(V20, V20, MacroAssembler::VELEM_SIZE_8, quad);
4152     __ BIND(L_loadkeys_44);
4153     __ vld1(V21, V22, V23, V24, Address(key, 64, post_indexed), MacroAssembler::VELEM_SIZE_8, 128);
4154     __ rev32(V21, V21, MacroAssembler::VELEM_SIZE_8, quad);
4155     __ rev32(V22, V22, MacroAssembler::VELEM_SIZE_8, quad);
4156     __ rev32(V23, V23, MacroAssembler::VELEM_SIZE_8, quad);
4157     __ rev32(V24, V24, MacroAssembler::VELEM_SIZE_8, quad);
4158     __ vld1(V25, V26, V27, V28, Address(key, 64, post_indexed), MacroAssembler::VELEM_SIZE_8, 128);
4159     __ rev32(V25, V25, MacroAssembler::VELEM_SIZE_8, quad);
4160     __ rev32(V26, V26, MacroAssembler::VELEM_SIZE_8, quad);
4161     __ rev32(V27, V27, MacroAssembler::VELEM_SIZE_8, quad);
4162     __ rev32(V28, V28, MacroAssembler::VELEM_SIZE_8, quad);
4163     __ vld1(V29, V30, Address(key), MacroAssembler::VELEM_SIZE_8, 128);
4164     __ rev32(V29, V29, MacroAssembler::VELEM_SIZE_8, quad);
4165     __ rev32(V30, V30, MacroAssembler::VELEM_SIZE_8, quad);
4166 
4167     __ BIND(L_aes_loop);
4168     __ vld1(V0, Address(from, 16, post_indexed), MacroAssembler::VELEM_SIZE_8, 128);
4169     __ orr(V1, V0, V0, MacroAssembler::VELEM_SIZE_8, quad);
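    // Keep an untouched copy of the ciphertext block in V1; after the block is decrypted and
    // stored, it becomes the chaining value (V2) xor'ed into the next block.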
4170 
4171     __ b(L_rounds_44, cc);
4172     __ b(L_rounds_52, eq);
4173 
4174     __ aesd(V0, V17);
4175     __ aesimc(V0, V0);
4176     __ aesd(V0, V18);   // second extra AES-256 round key (V17 was applied above), matching the encrypt path
4177     __ aesimc(V0, V0);
4178     __ BIND(L_rounds_52);
4179     __ aesd(V0, V19);
4180     __ aesimc(V0, V0);
4181     __ aesd(V0, V20);
4182     __ aesimc(V0, V0);
4183     __ BIND(L_rounds_44);
4184     __ aesd(V0, V21);
4185     __ aesimc(V0, V0);
4186     __ aesd(V0, V22);
4187     __ aesimc(V0, V0);
4188     __ aesd(V0, V23);
4189     __ aesimc(V0, V0);
4190     __ aesd(V0, V24);
4191     __ aesimc(V0, V0);
4192     __ aesd(V0, V25);
4193     __ aesimc(V0, V0);
4194     __ aesd(V0, V26);
4195     __ aesimc(V0, V0);
4196     __ aesd(V0, V27);
4197     __ aesimc(V0, V0);
4198     __ aesd(V0, V28);
4199     __ aesimc(V0, V0);
4200     __ aesd(V0, V29);
4201     __ aesimc(V0, V0);
4202     __ aesd(V0, V30);
4203     __ eor(V0, V0, V31, MacroAssembler::VELEM_SIZE_8, quad);
4204     __ eor(V0, V0, V2, MacroAssembler::VELEM_SIZE_8, quad);
4205 
4206     __ vst1(V0, Address(to, 16, post_indexed), MacroAssembler::VELEM_SIZE_8, 128);
4207     __ orr(V2, V1, V1, MacroAssembler::VELEM_SIZE_8, quad);
4208 
4209     __ sub(len_reg, len_reg, 16);
4210     __ cbnz(len_reg, L_aes_loop);
4211 
4212     __ vst1(V2, Address(rvec), MacroAssembler::VELEM_SIZE_8, 128);
4213 
4214     __ mov(R0, R9);
4215 
4216     __ mov(SP, FP);
4217     __ ldp(FP, LR, Address(SP, 2 * wordSize, post_indexed));
4218     __ ret(LR);
4219 
4220     return start;
4221   }

#endif // COMPILER2
#endif // AARCH64

 private:

#undef  __
#define __ masm->

  //------------------------------------------------------------------------------------------------------------------------
  // Continuation point for throwing of implicit exceptions that are not handled in
  // the current activation. Fabricates an exception oop and initiates normal
  // exception dispatching in this frame.
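  // Typical use (see generate_initial()/generate_all() below), e.g.:
  //   StubRoutines::_throw_StackOverflowError_entry =
  //       generate_throw_exception("StackOverflowError throw_exception",
  //                                CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));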
  address generate_throw_exception(const char* name, address runtime_entry) {
    int insts_size = 128;
    int locs_size  = 32;
    CodeBuffer code(name, insts_size, locs_size);
    OopMapSet* oop_maps;
    int frame_size;
    int frame_complete;

    oop_maps = new OopMapSet();
    MacroAssembler* masm = new MacroAssembler(&code);

    address start = __ pc();

    frame_size = 2;
    __ mov(Rexception_pc, LR);
    __ raw_push(FP, LR);

    frame_complete = __ pc() - start;

    // Any extra arguments are already supposed to be in R1 and R2
    __ mov(R0, Rthread);

    int pc_offset = __ set_last_Java_frame(SP, FP, false, Rtemp);
    assert(((__ pc()) - start) == __ offset(), "warning: start differs from code_begin");
    __ call(runtime_entry);
    if (pc_offset == -1) {
      pc_offset = __ offset();
    }

    // Generate oop map
    OopMap* map =  new OopMap(frame_size*VMRegImpl::slots_per_word, 0);
    oop_maps->add_gc_map(pc_offset, map);
    __ reset_last_Java_frame(Rtemp); // Rtemp free since scratched by far call

    __ raw_pop(FP, LR);
    __ jump(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type, Rtemp);

    RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete,
                                                      frame_size, oop_maps, false);
    return stub->entry_point();
  }

  //---------------------------------------------------------------------------
  // Initialization

  void generate_initial() {
    // Generates the initial stubs and initializes the entry points

    //------------------------------------------------------------------------------------------------------------------------
    // entry points that exist in all platforms
    // Note: This is code that could be shared among different platforms - however the benefit seems to be smaller than
    //       the disadvantage of having a much more complicated generator structure. See also comment in stubRoutines.hpp.
    StubRoutines::_forward_exception_entry      = generate_forward_exception();

    StubRoutines::_call_stub_entry              =
      generate_call_stub(StubRoutines::_call_stub_return_address);
    // is referenced by megamorphic call
    StubRoutines::_catch_exception_entry        = generate_catch_exception();

    // stub for throwing stack overflow error used both by interpreter and compiler
    StubRoutines::_throw_StackOverflowError_entry  = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));

#ifndef AARCH64
    // integer division used both by interpreter and compiler
    StubRoutines::Arm::_idiv_irem_entry = generate_idiv_irem();

    StubRoutines::_atomic_add_entry = generate_atomic_add();
    StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
    StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg();
    StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
    StubRoutines::_atomic_load_long_entry = generate_atomic_load_long();
    StubRoutines::_atomic_store_long_entry = generate_atomic_store_long();
#endif // !AARCH64
  }

  void generate_all() {
    // Generates all stubs and initializes the entry points

#ifdef COMPILER2
    // Generate partial_subtype_check first here since its code depends on
    // UseZeroBaseCompressedOops which is defined after heap initialization.
    StubRoutines::Arm::_partial_subtype_check                = generate_partial_subtype_check();
#endif
    // These entry points require SharedInfo::stack0 to be set up in non-core builds
    // and need to be relocatable, so they each fabricate a RuntimeStub internally.
    StubRoutines::_throw_AbstractMethodError_entry         = generate_throw_exception("AbstractMethodError throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError));
    StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError));
    StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));

    //------------------------------------------------------------------------------------------------------------------------
    // entry points that are platform specific

    // support for verify_oop (must happen after universe_init)
    StubRoutines::_verify_oop_subroutine_entry     = generate_verify_oop();

    // arraycopy stubs used by compilers
    generate_arraycopy_stubs();

    // Safefetch stubs.
    generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
                                                   &StubRoutines::_safefetch32_fault_pc,
                                                   &StubRoutines::_safefetch32_continuation_pc);
#ifdef AARCH64
    generate_safefetch("SafeFetchN", wordSize, &StubRoutines::_safefetchN_entry,
                                               &StubRoutines::_safefetchN_fault_pc,
                                               &StubRoutines::_safefetchN_continuation_pc);
#ifdef COMPILER2
    if (UseAESIntrinsics) {
      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
      StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
      StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt();
    }
#endif
#else
    assert (sizeof(int) == wordSize, "32-bit architecture");
    StubRoutines::_safefetchN_entry           = StubRoutines::_safefetch32_entry;
    StubRoutines::_safefetchN_fault_pc        = StubRoutines::_safefetch32_fault_pc;
    StubRoutines::_safefetchN_continuation_pc = StubRoutines::_safefetch32_continuation_pc;
#endif // AARCH64

#ifdef COMPILE_CRYPTO
    // generate AES intrinsics code
    if (UseAESIntrinsics) {
      aes_init();
      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
      StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
      StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt();
    }
#endif // COMPILE_CRYPTO
  }


 public:
  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
    if (all) {
      generate_all();
    } else {
      generate_initial();
    }
  }
}; // end class declaration

void StubGenerator_generate(CodeBuffer* code, bool all) {
  StubGenerator g(code, all);
}