/*
 * Copyright (c) 2008, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_MACROASSEMBLER_ARM_HPP
#define CPU_ARM_MACROASSEMBLER_ARM_HPP

#include "code/relocInfo.hpp"
#include "utilities/powerOfTwo.hpp"

class BiasedLockingCounters;

// Introduced AddressLiteral and its subclasses to ease portability from
// x86 and avoid relocation issues
class AddressLiteral {
  RelocationHolder _rspec;
  // Typically, when we use an AddressLiteral we want its rval.
  // However, in some situations we want the lval (effective address) of the item.
  // We provide a special factory for making those lvals.
  bool _is_lval;

  address          _target;

 private:
  static relocInfo::relocType reloc_for_target(address target) {
    // Used for ExternalAddress or when the type is not specified.
    // Sometimes ExternalAddress is used for values which aren't
    // exactly addresses, like the card table base.
    // external_word_type can't be used for values in the first page,
    // so just skip the reloc in that case.
    return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none;
  }

  void set_rspec(relocInfo::relocType rtype);

 protected:
  // creation
  AddressLiteral()
    : _is_lval(false),
      _target(NULL)
  {}

  public:

  AddressLiteral(address target, relocInfo::relocType rtype) {
    _is_lval = false;
    _target = target;
    set_rspec(rtype);
  }

  AddressLiteral(address target, RelocationHolder const& rspec)
    : _rspec(rspec),
      _is_lval(false),
      _target(target)
  {}

  AddressLiteral(address target) {
    _is_lval = false;
    _target = target;
    set_rspec(reloc_for_target(target));
  }

  AddressLiteral addr() {
    AddressLiteral ret = *this;
    ret._is_lval = true;
    return ret;
  }

 private:

  address target() { return _target; }
  bool is_lval() { return _is_lval; }

  relocInfo::relocType reloc() const { return _rspec.type(); }
  const RelocationHolder& rspec() const { return _rspec; }

  friend class Assembler;
  friend class MacroAssembler;
  friend class Address;
  friend class LIR_Assembler;
  friend class InlinedAddress;
};

class ExternalAddress: public AddressLiteral {

  public:

  ExternalAddress(address target) : AddressLiteral(target) {}

};

class InternalAddress: public AddressLiteral {

  public:

  InternalAddress(address target) : AddressLiteral(target, relocInfo::internal_word_type) {}

};
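
// Usage sketch (illustrative only, not part of the API; '__' stands for a
// MacroAssembler pointer and 'entry' is a hypothetical external address):
//   __ lea(Rtemp, ExternalAddress(entry));   // materialize an external address
//   __ jump(ExternalAddress(entry));         // or jump to it directly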

// Inlined constants, for use with ldr_literal / bind_literal
// Note: InlinedInteger not supported (use mov_slow(Register, int[, cond]))
class InlinedLiteral: StackObj {
 public:
  Label label; // needs to be public for direct access with &
  InlinedLiteral() {
  }
};

class InlinedMetadata: public InlinedLiteral {
 private:
  Metadata *_data;

 public:
  InlinedMetadata(Metadata *data): InlinedLiteral() {
    _data = data;
  }
  Metadata *data() { return _data; }
};

// Currently unused
// class InlinedOop: public InlinedLiteral {
//  private:
//   jobject _jobject;
//
//  public:
//   InlinedOop(jobject target): InlinedLiteral() {
//     _jobject = target;
//   }
//   jobject jobject() { return _jobject; }
// };

class InlinedAddress: public InlinedLiteral {
 private:
  AddressLiteral _literal;

 public:

  InlinedAddress(jobject object): InlinedLiteral(), _literal((address)object, relocInfo::oop_type) {
    ShouldNotReachHere(); // use mov_oop (or implement InlinedOop)
  }

  InlinedAddress(Metadata *data): InlinedLiteral(), _literal((address)data, relocInfo::metadata_type) {
    ShouldNotReachHere(); // use InlinedMetadata or mov_metadata
  }

  InlinedAddress(address target, const RelocationHolder &rspec): InlinedLiteral(), _literal(target, rspec) {
    assert(rspec.type() != relocInfo::oop_type, "Do not use InlinedAddress for oops");
    assert(rspec.type() != relocInfo::metadata_type, "Do not use InlinedAddress for metadata");
  }

  InlinedAddress(address target, relocInfo::relocType rtype): InlinedLiteral(), _literal(target, rtype) {
    assert(rtype != relocInfo::oop_type, "Do not use InlinedAddress for oops");
    assert(rtype != relocInfo::metadata_type, "Do not use InlinedAddress for metadata");
  }

  // Note: default is relocInfo::none for InlinedAddress
  InlinedAddress(address target): InlinedLiteral(), _literal(target, relocInfo::none) {
  }

  address target() { return _literal.target(); }

  const RelocationHolder& rspec() const { return _literal.rspec(); }
};

class InlinedString: public InlinedLiteral {
 private:
  const char* _msg;

 public:
  InlinedString(const char* msg): InlinedLiteral() {
    _msg = msg;
  }
  const char* msg() { return _msg; }
};
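
// Typical inlined-literal pattern (a minimal sketch mirroring the body of
// mov_address below; '__' stands for a MacroAssembler pointer and 'skip'
// is a local Label):
//   InlinedAddress lit(addr, rspec);
//   __ ldr_literal(Rtemp, lit); // PC-relative load of the literal
//   __ b(skip);                 // branch over the embedded data word
//   __ bind_literal(lit);       // emit the data word itself
//   __ bind(skip);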

class MacroAssembler: public Assembler {
protected:

  // Support for VM calls
  //

  // This is the base routine called by the different versions of call_VM_leaf.
  void call_VM_leaf_helper(address entry_point, int number_of_arguments);

  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  virtual void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions);
public:

  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe() {}
  virtual void check_and_handle_earlyret() {}

  // By default, we do not need relocation information for non-patchable
  // absolute addresses. However, when needed by some extensions,
  // ignore_non_patchable_relocations can be modified to return false and
  // thereby preserve all relocation information.
  inline bool ignore_non_patchable_relocations() { return true; }

  // Initially added to the Assembler interface as a pure virtual:
  //   RegisterConstant delayed_value(..)
  // for:
  //   6812678 macro assembler needs delayed binding of a few constants (for 6655638)
  // this was subsequently modified to its present name and return type
  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, Register tmp, int offset);


  void align(int modulus);

  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM methods.
  // They make sure that the stack linkage is set up correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.

  void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);

  // The following methods are required by templateTable.cpp,
  // but not used on ARM.
  void call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);

  // Note: The super_call_VM calls are not used on ARM

  // Raw call, without saving/restoring registers, exception handling, etc.
  // Mainly used from various stubs.
  // Note: if 'save_R9_if_scratched' is true, call_VM may on some
  // platforms save values on the stack. Set it to false (and handle
  // R9 in the callers) if the top of the stack must not be modified
  // by call_VM.
  void call_VM(address entry_point, bool save_R9_if_scratched);

  void call_VM_leaf(address entry_point);
  void call_VM_leaf(address entry_point, Register arg_1);
  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  void get_vm_result(Register oop_result, Register tmp);
  void get_vm_result_2(Register metadata_result, Register tmp);

  // Always sets/resets sp, which defaults to SP if (last_sp == noreg)
  // Optionally sets/resets fp (use noreg to avoid setting it)
  // Optionally sets/resets pc depending on save_last_java_pc flag
  // Note: when saving PC, set_last_Java_frame returns PC's offset in the code section
  //       (for oop_maps offset computation)
  int set_last_Java_frame(Register last_sp, Register last_fp, bool save_last_java_pc, Register tmp);
  void reset_last_Java_frame(Register tmp);
  // status set in set_last_Java_frame for reset_last_Java_frame
  bool _fp_saved;
  bool _pc_saved;

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) __ stop(error)
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#define STOP(error) __ block_comment(error); __ stop(error)
#endif

  void lookup_virtual_method(Register recv_klass,
                             Register vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // No registers are killed, except temp_regs.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp_reg2,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path);

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // temp_reg3 can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes:
  // - condition codes will be Z on success, NZ on failure.
  // - temp_reg will be 0 on success, non-0 on failure
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp_reg2,
                                     Register temp_reg3, // auto assigned if noreg
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);

  // Simplified, combined version, good for typical uses.
  // temp_reg3 can be noreg, if no temps are available. It is used only on the slow path.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Register temp_reg2,
                           Register temp_reg3, // auto assigned on slow path if noreg
                           Label& L_success);

  // Returns the address of the receiver parameter, using tmp as the base register. tmp and params_count can be the same.
  Address receiver_argument_address(Register params_base, Register params_count, Register tmp);

  void _verify_oop(Register reg, const char* s, const char* file, int line);
  void _verify_oop_addr(Address addr, const char* s, const char* file, int line);

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_oop(reg) _verify_oop(reg, "broken oop " #reg, __FILE__, __LINE__)
#define verify_oop_addr(addr) _verify_oop_addr(addr, "broken oop ", __FILE__, __LINE__)
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  void null_check(Register reg, Register tmp, int offset = -1);
  inline void null_check(Register reg) { null_check(reg, noreg, -1); } // for C1 lir_null_check

  // Puts the address of the allocated object into register `obj` and the end of the allocated object into register `obj_end`.
  void eden_allocate(Register obj, Register obj_end, Register tmp1, Register tmp2,
                     RegisterOrConstant size_expression, Label& slow_case);
  void tlab_allocate(Register obj, Register obj_end, Register tmp1,
                     RegisterOrConstant size_expression, Label& slow_case);

  void zero_memory(Register start, Register end, Register tmp);

  static bool needs_explicit_null_check(intptr_t offset);
  static bool uses_implicit_null_check(void* address);

  void arm_stack_overflow_check(int frame_size_in_bytes, Register tmp);
  void arm_stack_overflow_check(Register Rsize, Register tmp);

  void bang_stack_with_offset(int offset) {
    ShouldNotReachHere();
  }

  // Biased locking support
  // lock_reg and obj_reg must be loaded up with the appropriate values.
  // swap_reg must be supplied.
  // tmp_reg must be supplied.
  // The done label is branched to with condition code EQ set if the lock is
  // biased and we acquired it. The slow case label is branched to with
  // condition code NE set if the lock is biased but we failed to acquire
  // it. Otherwise fall through.
  // Returns the offset of the first potentially-faulting instruction for null
  // check info (currently consumed only by C1). If
  // swap_reg_contains_mark is true then returns -1 as it is assumed
  // the calling code has already passed any potential faults.
  // Notes:
  // - swap_reg and tmp_reg are scratched
  // - Rtemp was (implicitly) scratched and can now be specified as tmp2
  int biased_locking_enter(Register obj_reg, Register swap_reg, Register tmp_reg,
                           bool swap_reg_contains_mark,
                           Register tmp2,
                           Label& done, Label& slow_case,
                           BiasedLockingCounters* counters = NULL);
  void biased_locking_exit(Register obj_reg, Register temp_reg, Label& done);

  // Building block for the CAS cases of biased locking: performs the CAS and records statistics.
  // The optional slow_case label is used to transfer control if the CAS fails. Otherwise leaves condition codes set.
  void biased_locking_enter_with_cas(Register obj_reg, Register old_mark_reg, Register new_mark_reg,
                                     Register tmp, Label& slow_case, int* counter_addr);

  void resolve_jobject(Register value, Register tmp1, Register tmp2);

  void nop() {
    mov(R0, R0);
  }

  void push(Register rd, AsmCondition cond = al) {
    assert(rd != SP, "unpredictable instruction");
    str(rd, Address(SP, -wordSize, pre_indexed), cond);
  }

  void push(RegisterSet reg_set, AsmCondition cond = al) {
    assert(!reg_set.contains(SP), "unpredictable instruction");
    stmdb(SP, reg_set, writeback, cond);
  }

  void pop(Register rd, AsmCondition cond = al) {
    assert(rd != SP, "unpredictable instruction");
    ldr(rd, Address(SP, wordSize, post_indexed), cond);
  }

  void pop(RegisterSet reg_set, AsmCondition cond = al) {
    assert(!reg_set.contains(SP), "unpredictable instruction");
    ldmia(SP, reg_set, writeback, cond);
  }

  void fpushd(FloatRegister fd, AsmCondition cond = al) {
    fstmdbd(SP, FloatRegisterSet(fd), writeback, cond);
  }

  void fpushs(FloatRegister fd, AsmCondition cond = al) {
    fstmdbs(SP, FloatRegisterSet(fd), writeback, cond);
  }

  void fpopd(FloatRegister fd, AsmCondition cond = al) {
    fldmiad(SP, FloatRegisterSet(fd), writeback, cond);
  }

  void fpops(FloatRegister fd, AsmCondition cond = al) {
    fldmias(SP, FloatRegisterSet(fd), writeback, cond);
  }

  void fpush(FloatRegisterSet reg_set) {
    fstmdbd(SP, reg_set, writeback);
  }

  void fpop(FloatRegisterSet reg_set) {
    fldmiad(SP, reg_set, writeback);
  }

  void fpush_hardfp(FloatRegisterSet reg_set) {
#ifndef __SOFTFP__
    fpush(reg_set);
#endif
  }

  void fpop_hardfp(FloatRegisterSet reg_set) {
#ifndef __SOFTFP__
    fpop(reg_set);
#endif
  }

  // Memory ordering primitives
  enum Membar_mask_bits {
    StoreStore = 1 << 3,
    LoadStore  = 1 << 2,
    StoreLoad  = 1 << 1,
    LoadLoad   = 1 << 0
  };
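
  // The mask bits may be combined to request stronger barriers, e.g. a full
  // fence (a sketch; tmp is a scratch register the implementation may use):
  //   __ membar(MacroAssembler::Membar_mask_bits(StoreStore | LoadStore | StoreLoad | LoadLoad), Rtemp);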

  void membar(Membar_mask_bits mask,
              Register tmp,
              bool preserve_flags = true,
              Register load_tgt = noreg);

  void breakpoint(AsmCondition cond = al);
  void stop(const char* msg);
  // prints msg and continues
  void warn(const char* msg);
  void unimplemented(const char* what = "");
  void should_not_reach_here()                   { stop("should not reach here"); }
  static void debug(const char* msg, const intx* registers);

  // Create a walkable frame to help track down who called this code.
  // Returns the frame size in words.
  int should_not_call_this() {
    raw_push(FP, LR);
    should_not_reach_here();
    flush();
    return 2; // frame_size_in_words (FP+LR)
  }

  int save_all_registers();
  void restore_all_registers();
  int save_caller_save_registers();
  void restore_caller_save_registers();

  void add_rc(Register dst, Register arg1, RegisterOrConstant arg2);

  // add_slow and mov_slow are used to manipulate offsets larger than 1024;
  // these functions are not expected to handle all possible constants,
  // only those that can really occur during compilation.
  void add_slow(Register rd, Register rn, int c);
  void sub_slow(Register rd, Register rn, int c);


  void mov_slow(Register rd, intptr_t c, AsmCondition cond = al);
  void mov_slow(Register rd, const char *string);
  void mov_slow(Register rd, address addr);

  void patchable_mov_oop(Register rd, jobject o, int oop_index) {
    mov_oop(rd, o, oop_index);
  }
  void mov_oop(Register rd, jobject o, int index = 0, AsmCondition cond = al);

  void patchable_mov_metadata(Register rd, Metadata* o, int index) {
    mov_metadata(rd, o, index);
  }
  void mov_metadata(Register rd, Metadata* o, int index = 0);

  void mov_float(FloatRegister fd, jfloat c, AsmCondition cond = al);
  void mov_double(FloatRegister fd, jdouble c, AsmCondition cond = al);


  // Note: this variant of mov_address assumes the address moves with
  // the code. Do *not* implement it with non-relocated instructions,
  // unless PC-relative.
  void mov_relative_address(Register rd, address addr, AsmCondition cond = al) {
    int offset = addr - pc() - 8;
    assert((offset & 3) == 0, "bad alignment");
    if (offset >= 0) {
      assert(AsmOperand::is_rotated_imm(offset), "addr too far");
      add(rd, PC, offset, cond);
    } else {
      assert(AsmOperand::is_rotated_imm(-offset), "addr too far");
      sub(rd, PC, -offset, cond);
    }
  }

  // Runtime address that may vary from one execution to another.
  // Warning: do not implement as a PC-relative address.
  void mov_address(Register rd, address addr) {
    mov_address(rd, addr, RelocationHolder::none);
  }

  // rspec can be RelocationHolder::none (for an ignored symbolic Relocation).
  // In that case, the address is absolute and the generated code need
  // not be relocatable.
  void mov_address(Register rd, address addr, RelocationHolder const& rspec) {
    assert(rspec.type() != relocInfo::runtime_call_type, "do not use mov_address for runtime calls");
    assert(rspec.type() != relocInfo::static_call_type, "do not use mov_address for relocatable calls");
    if (rspec.type() == relocInfo::none) {
      // absolute address, relocation not needed
      mov_slow(rd, (intptr_t)addr);
      return;
    }
    if (VM_Version::supports_movw()) {
      relocate(rspec);
      int c = (int)addr;
      movw(rd, c & 0xffff);
      if ((unsigned int)c >> 16) {
        movt(rd, (unsigned int)c >> 16);
      }
      return;
    }
    Label skip_literal;
    InlinedAddress addr_literal(addr, rspec);
    ldr_literal(rd, addr_literal);
    b(skip_literal);
    bind_literal(addr_literal);
    bind(skip_literal);
  }
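
  // Usage sketch (illustrative only; 'global_addr' is a hypothetical
  // runtime address):
  //   __ mov_address(Rtemp, global_addr);   // absolute, code need not be relocatable
  //   __ ldr(Rval, Address(Rtemp));         // then dereference it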

  // Note: Do not define mov_address for a Label
  //
  // Loads from addresses potentially within the code are now handled
  // by InlinedLiteral subclasses (to allow more flexibility on how the
  // ldr_literal is performed).

  void ldr_literal(Register rd, InlinedAddress& L) {
    assert(L.rspec().type() != relocInfo::runtime_call_type, "avoid ldr_literal for calls");
    assert(L.rspec().type() != relocInfo::static_call_type, "avoid ldr_literal for calls");
    relocate(L.rspec());
    ldr(rd, Address(PC, target(L.label) - pc() - 8));
  }

  void ldr_literal(Register rd, InlinedString& L) {
    const char* msg = L.msg();
    if (code()->consts()->contains((address)msg)) {
      // string address moves with the code
      ldr(rd, Address(PC, ((address)msg) - pc() - 8));
      return;
    }
    // Warning: use external strings with care. They are not relocated
    // if the code moves. If needed, use code_string to move them
    // to the consts section.
    ldr(rd, Address(PC, target(L.label) - pc() - 8));
  }

  void ldr_literal(Register rd, InlinedMetadata& L) {
    // relocation done in bind_literal for metadata
    ldr(rd, Address(PC, target(L.label) - pc() - 8));
  }

  void bind_literal(InlinedAddress& L) {
    bind(L.label);
    assert(L.rspec().type() != relocInfo::metadata_type, "Must use InlinedMetadata");
    // We currently do not use oop 'bound' literals.
    // If the code evolves and the following assert is triggered,
    // we need to implement InlinedOop (see InlinedMetadata).
    assert(L.rspec().type() != relocInfo::oop_type, "Inlined oops not supported");
    // Note: relocation is handled by the relocate calls in ldr_literal
    AbstractAssembler::emit_address((address)L.target());
  }

  void bind_literal(InlinedString& L) {
    const char* msg = L.msg();
    if (code()->consts()->contains((address)msg)) {
      // The Label should not be used; avoid binding it,
      // to detect errors.
      return;
    }
    bind(L.label);
    AbstractAssembler::emit_address((address)L.msg());
  }

  void bind_literal(InlinedMetadata& L) {
    bind(L.label);
    relocate(metadata_Relocation::spec_for_immediate());
    AbstractAssembler::emit_address((address)L.data());
  }

  void resolve_oop_handle(Register result);
  void load_mirror(Register mirror, Register method, Register tmp);

#define ARM_INSTR_1(common_mnemonic, arm32_mnemonic, arg_type) \
  void common_mnemonic(arg_type arg) { \
      arm32_mnemonic(arg); \
  }

#define ARM_INSTR_2(common_mnemonic, arm32_mnemonic, arg1_type, arg2_type) \
  void common_mnemonic(arg1_type arg1, arg2_type arg2) { \
      arm32_mnemonic(arg1, arg2); \
  }

#define ARM_INSTR_3(common_mnemonic, arm32_mnemonic, arg1_type, arg2_type, arg3_type) \
  void common_mnemonic(arg1_type arg1, arg2_type arg2, arg3_type arg3) { \
      arm32_mnemonic(arg1, arg2, arg3); \
  }

  ARM_INSTR_1(jump, bx,  Register)
  ARM_INSTR_1(call, blx, Register)

  ARM_INSTR_2(cbz_32,  cbz,  Register, Label&)
  ARM_INSTR_2(cbnz_32, cbnz, Register, Label&)

  ARM_INSTR_2(ldr_u32, ldr,  Register, Address)
  ARM_INSTR_2(ldr_s32, ldr,  Register, Address)
  ARM_INSTR_2(str_32,  str,  Register, Address)

  ARM_INSTR_2(mvn_32,  mvn,  Register, Register)
  ARM_INSTR_2(cmp_32,  cmp,  Register, Register)
  ARM_INSTR_2(neg_32,  neg,  Register, Register)
  ARM_INSTR_2(clz_32,  clz,  Register, Register)
  ARM_INSTR_2(rbit_32, rbit, Register, Register)

  ARM_INSTR_2(cmp_32,  cmp,  Register, int)
  ARM_INSTR_2(cmn_32,  cmn,  Register, int)

  ARM_INSTR_3(add_32,  add,  Register, Register, Register)
  ARM_INSTR_3(sub_32,  sub,  Register, Register, Register)
  ARM_INSTR_3(subs_32, subs, Register, Register, Register)
  ARM_INSTR_3(mul_32,  mul,  Register, Register, Register)
  ARM_INSTR_3(and_32,  andr, Register, Register, Register)
  ARM_INSTR_3(orr_32,  orr,  Register, Register, Register)
  ARM_INSTR_3(eor_32,  eor,  Register, Register, Register)

  ARM_INSTR_3(add_32,  add,  Register, Register, AsmOperand)
  ARM_INSTR_3(sub_32,  sub,  Register, Register, AsmOperand)
  ARM_INSTR_3(orr_32,  orr,  Register, Register, AsmOperand)
  ARM_INSTR_3(eor_32,  eor,  Register, Register, AsmOperand)
  ARM_INSTR_3(and_32,  andr, Register, Register, AsmOperand)


  ARM_INSTR_3(add_32,  add,  Register, Register, int)
  ARM_INSTR_3(adds_32, adds, Register, Register, int)
  ARM_INSTR_3(sub_32,  sub,  Register, Register, int)
  ARM_INSTR_3(subs_32, subs, Register, Register, int)

  ARM_INSTR_2(tst_32,  tst,  Register, unsigned int)
  ARM_INSTR_2(tst_32,  tst,  Register, AsmOperand)

  ARM_INSTR_3(and_32,  andr, Register, Register, uint)
  ARM_INSTR_3(orr_32,  orr,  Register, Register, uint)
  ARM_INSTR_3(eor_32,  eor,  Register, Register, uint)

  ARM_INSTR_1(cmp_zero_float,  fcmpzs, FloatRegister)
  ARM_INSTR_1(cmp_zero_double, fcmpzd, FloatRegister)

  ARM_INSTR_2(ldr_float,   flds,   FloatRegister, Address)
  ARM_INSTR_2(str_float,   fsts,   FloatRegister, Address)
  ARM_INSTR_2(mov_float,   fcpys,  FloatRegister, FloatRegister)
  ARM_INSTR_2(neg_float,   fnegs,  FloatRegister, FloatRegister)
  ARM_INSTR_2(abs_float,   fabss,  FloatRegister, FloatRegister)
  ARM_INSTR_2(sqrt_float,  fsqrts, FloatRegister, FloatRegister)
  ARM_INSTR_2(cmp_float,   fcmps,  FloatRegister, FloatRegister)

  ARM_INSTR_3(add_float,   fadds,  FloatRegister, FloatRegister, FloatRegister)
  ARM_INSTR_3(sub_float,   fsubs,  FloatRegister, FloatRegister, FloatRegister)
  ARM_INSTR_3(mul_float,   fmuls,  FloatRegister, FloatRegister, FloatRegister)
  ARM_INSTR_3(div_float,   fdivs,  FloatRegister, FloatRegister, FloatRegister)

  ARM_INSTR_2(ldr_double,  fldd,   FloatRegister, Address)
  ARM_INSTR_2(str_double,  fstd,   FloatRegister, Address)
  ARM_INSTR_2(mov_double,  fcpyd,  FloatRegister, FloatRegister)
  ARM_INSTR_2(neg_double,  fnegd,  FloatRegister, FloatRegister)
  ARM_INSTR_2(cmp_double,  fcmpd,  FloatRegister, FloatRegister)
  ARM_INSTR_2(abs_double,  fabsd,  FloatRegister, FloatRegister)
  ARM_INSTR_2(sqrt_double, fsqrtd, FloatRegister, FloatRegister)

  ARM_INSTR_3(add_double,  faddd,  FloatRegister, FloatRegister, FloatRegister)
  ARM_INSTR_3(sub_double,  fsubd,  FloatRegister, FloatRegister, FloatRegister)
  ARM_INSTR_3(mul_double,  fmuld,  FloatRegister, FloatRegister, FloatRegister)
  ARM_INSTR_3(div_double,  fdivd,  FloatRegister, FloatRegister, FloatRegister)

  ARM_INSTR_2(convert_f2d, fcvtds, FloatRegister, FloatRegister)
  ARM_INSTR_2(convert_d2f, fcvtsd, FloatRegister, FloatRegister)

  ARM_INSTR_2(mov_fpr2gpr_float, fmrs, Register, FloatRegister)

#undef ARM_INSTR_1
#undef ARM_INSTR_2
#undef ARM_INSTR_3


  void tbz(Register rt, int bit, Label& L) {
    assert(0 <= bit && bit < BitsPerWord, "bit number is out of range");
    tst(rt, 1 << bit);
    b(L, eq);
  }

  void tbnz(Register rt, int bit, Label& L) {
    assert(0 <= bit && bit < BitsPerWord, "bit number is out of range");
    tst(rt, 1 << bit);
    b(L, ne);
  }
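
  // Usage sketch (illustrative only; Rval and L_negative are hypothetical):
  // branch when bit 31 (the sign bit) of a 32-bit value is set:
  //   __ tbnz(Rval, 31, L_negative);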

  void cbz(Register rt, Label& L) {
    cmp(rt, 0);
    b(L, eq);
  }

  void cbz(Register rt, address target) {
    cmp(rt, 0);
    b(target, eq);
  }

  void cbnz(Register rt, Label& L) {
    cmp(rt, 0);
    b(L, ne);
  }

  void ret(Register dst = LR) {
    bx(dst);
  }


  Register zero_register(Register tmp) {
    mov(tmp, 0);
    return tmp;
  }

  void logical_shift_left(Register dst, Register src, int shift) {
    mov(dst, AsmOperand(src, lsl, shift));
  }

  void logical_shift_left_32(Register dst, Register src, int shift) {
    mov(dst, AsmOperand(src, lsl, shift));
  }

  void logical_shift_right(Register dst, Register src, int shift) {
    mov(dst, AsmOperand(src, lsr, shift));
  }

  void arith_shift_right(Register dst, Register src, int shift) {
    mov(dst, AsmOperand(src, asr, shift));
  }

  void asr_32(Register dst, Register src, int shift) {
    mov(dst, AsmOperand(src, asr, shift));
  }

  // If <cond> holds, compares r1 and r2. Otherwise, flags are set so that <cond> does not hold.
  void cond_cmp(Register r1, Register r2, AsmCondition cond) {
    cmp(r1, r2, cond);
  }

  // If <cond> holds, compares r and imm. Otherwise, flags are set so that <cond> does not hold.
  void cond_cmp(Register r, int imm, AsmCondition cond) {
    cmp(r, imm, cond);
  }

  void align_reg(Register dst, Register src, int align) {
    assert(is_power_of_2(align), "should be");
    bic(dst, src, align - 1);
  }
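
  // Usage sketch (illustrative only; Rdst/Rsrc are hypothetical): round an
  // address down to an 8-byte boundary by clearing the low 3 bits via bic:
  //   __ align_reg(Rdst, Rsrc, 8);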

  void prefetch_read(Address addr) {
    pld(addr);
  }

  void raw_push(Register r1, Register r2) {
    assert(r1->encoding() < r2->encoding(), "should be ordered");
    push(RegisterSet(r1) | RegisterSet(r2));
  }

  void raw_pop(Register r1, Register r2) {
    assert(r1->encoding() < r2->encoding(), "should be ordered");
    pop(RegisterSet(r1) | RegisterSet(r2));
  }

  void raw_push(Register r1, Register r2, Register r3) {
    assert(r1->encoding() < r2->encoding() && r2->encoding() < r3->encoding(), "should be ordered");
    push(RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3));
  }

  void raw_pop(Register r1, Register r2, Register r3) {
    assert(r1->encoding() < r2->encoding() && r2->encoding() < r3->encoding(), "should be ordered");
    pop(RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3));
  }

  // Restores registers r1 and r2 previously saved by raw_push(r1, r2, ret_addr) and returns via ret_addr. Clobbers LR.
  void raw_pop_and_ret(Register r1, Register r2) {
    raw_pop(r1, r2, PC);
  }
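
  // Usage sketch (illustrative only): a leaf stub that preserves R0/R1:
  //   __ raw_push(R0, R1, LR);
  //   ... stub body ...
  //   __ raw_pop_and_ret(R0, R1);  // pops R0, R1 and loads the saved LR into PC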

  void indirect_jump(Address addr, Register scratch) {
    ldr(PC, addr);
  }

  void indirect_jump(InlinedAddress& literal, Register scratch) {
    ldr_literal(PC, literal);
  }

  void neg(Register dst, Register src) {
    rsb(dst, src, 0);
  }

  void branch_if_negative_32(Register r, Label& L) {
    // TODO: This function and branch_if_any_negative_32 could possibly
    // be revised after the aarch64 removal.
    // tbnz is not used instead of tst & b.mi because the destination may be out of tbnz range (+-32KB),
    // since these methods are used in LIR_Assembler::emit_arraycopy() to jump to stub entries.
    tst_32(r, r);
    b(L, mi);
  }

  void branch_if_any_negative_32(Register r1, Register r2, Register tmp, Label& L) {
    orrs(tmp, r1, r2);
    b(L, mi);
  }

  void branch_if_any_negative_32(Register r1, Register r2, Register r3, Register tmp, Label& L) {
    orr_32(tmp, r1, r2);
    orrs(tmp, tmp, r3);
    b(L, mi);
  }

  void add_ptr_scaled_int32(Register dst, Register r1, Register r2, int shift) {
    add(dst, r1, AsmOperand(r2, lsl, shift));
  }

  void sub_ptr_scaled_int32(Register dst, Register r1, Register r2, int shift) {
    sub(dst, r1, AsmOperand(r2, lsl, shift));
  }

  // C 'boolean' to Java boolean: x == 0 ? 0 : 1
  void c2bool(Register x);

  // klass oop manipulations if compressed

  void load_klass(Register dst_klass, Register src_oop, AsmCondition cond = al);

  void store_klass(Register src_klass, Register dst_oop);


  // oop manipulations

  void load_heap_oop(Register dst, Address src, Register tmp1 = noreg, Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
  void store_heap_oop(Address obj, Register new_val, Register tmp1 = noreg, Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
  void store_heap_oop_null(Address obj, Register new_val, Register tmp1 = noreg, Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);

  void access_load_at(BasicType type, DecoratorSet decorators, Address src, Register dst, Register tmp1, Register tmp2, Register tmp3);
  void access_store_at(BasicType type, DecoratorSet decorators, Address obj, Register new_val, Register tmp1, Register tmp2, Register tmp3, bool is_null);

  // Resolves obj for access. The result is placed in the same register.
  // All other registers are preserved.
  void resolve(DecoratorSet decorators, Register obj);


  void ldr_global_ptr(Register reg, address address_of_global);
  void ldr_global_s32(Register reg, address address_of_global);
  void ldrb_global(Register reg, address address_of_global);

  // address_placeholder_instruction is an invalid instruction used
  // as a placeholder in the code for the address of a label
  enum { address_placeholder_instruction = 0xFFFFFFFF };

  void emit_address(Label& L) {
    assert(!L.is_bound(), "otherwise the address will not be patched");
    target(L);       // creates a relocation which will be patched later

    assert((offset() & (wordSize - 1)) == 0, "should be aligned by word size");

    AbstractAssembler::emit_address((address)address_placeholder_instruction);
  }

  void b(address target, AsmCondition cond = al) {
    Assembler::b(target, cond);
  }
  void b(Label& L, AsmCondition cond = al) {
    // internal jumps
    Assembler::b(target(L), cond);
  }

  void bl(address target, AsmCondition cond = al) {
    Assembler::bl(target, cond);
  }
  void bl(Label& L, AsmCondition cond = al) {
    // internal calls
    Assembler::bl(target(L), cond);
  }

  void adr(Register dest, Label& L, AsmCondition cond = al) {
    int delta = target(L) - pc() - 8;
    if (delta >= 0) {
      add(dest, PC, delta, cond);
    } else {
      sub(dest, PC, -delta, cond);
    }
  }

  // Variable-length jumps and calls. We now distinguish only the
  // patchable case from the other cases. Patchable must be
  // distinguished from relocatable. Relocatable means the generated code
  // containing the jump/call may move. Patchable means that the
  // targeted address may be changed later.

  // Non-patchable versions.
  // - used only for relocInfo::runtime_call_type and relocInfo::none
  // - may use relative or absolute format (do not use relocInfo::none
  //   if the generated code may move)
  // - the implementation takes into account the switch to THUMB mode if the
  //   destination is a THUMB address
  // - the implementation supports far targets
  //
  // To reduce regression risk, scratch still defaults to noreg on
  // arm32. This results in patchable instructions. However, if
  // patching really matters, the call sites should be modified to
  // use patchable_call or patchable_jump. If patching is not required
  // and if a register can be clobbered, it should be explicitly
  // specified to allow future optimizations.
  void jump(address target,
            relocInfo::relocType rtype = relocInfo::runtime_call_type,
            Register scratch = noreg, AsmCondition cond = al);

  void call(address target,
            RelocationHolder rspec, AsmCondition cond = al);

  void call(address target,
            relocInfo::relocType rtype = relocInfo::runtime_call_type,
            AsmCondition cond = al) {
    call(target, Relocation::spec_simple(rtype), cond);
  }

  void jump(AddressLiteral dest) {
    jump(dest.target(), dest.reloc());
  }
  void jump(address dest, relocInfo::relocType rtype, AsmCondition cond) {
    jump(dest, rtype, Rtemp, cond);
  }

  void call(AddressLiteral dest) {
    call(dest.target(), dest.reloc());
  }

  // Patchable version:
  // - set_destination can be used to atomically change the target
  //
  // The targets for patchable_jump and patchable_call must be in the
  // code cache.
  // [ including possible extensions of the code cache, like AOT code ]
  //
  // To reduce regression risk, scratch still defaults to noreg on
  // arm32. If a register can be clobbered, it should be explicitly
  // specified to allow future optimizations.
  void patchable_jump(address target,
                      relocInfo::relocType rtype = relocInfo::runtime_call_type,
                      Register scratch = noreg, AsmCondition cond = al
                      );

  // patchable_call may scratch Rtemp
  int patchable_call(address target,
                     RelocationHolder const& rspec,
                     bool c2 = false);

  int patchable_call(address target,
                     relocInfo::relocType rtype,
                     bool c2 = false) {
    return patchable_call(target, Relocation::spec_simple(rtype), c2);
  }
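
  // Usage sketch (illustrative only; 'stub_entry' is a hypothetical
  // code-cache address): a runtime call whose destination may later be
  // changed via set_destination:
  //   __ patchable_call(stub_entry, relocInfo::runtime_call_type);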


  static bool _reachable_from_cache(address target);
  static bool _cache_fully_reachable();
  bool cache_fully_reachable();
  bool reachable_from_cache(address target);

  void zero_extend(Register rd, Register rn, int bits);
  void sign_extend(Register rd, Register rn, int bits);

  inline void zap_high_non_significant_bits(Register r) {
  }

  void cmpoop(Register obj1, Register obj2);

  void long_move(Register rd_lo, Register rd_hi,
                 Register rn_lo, Register rn_hi,
                 AsmCondition cond = al);
  void long_shift(Register rd_lo, Register rd_hi,
                  Register rn_lo, Register rn_hi,
                  AsmShift shift, Register count);
  void long_shift(Register rd_lo, Register rd_hi,
                  Register rn_lo, Register rn_hi,
                  AsmShift shift, int count);

  void atomic_cas(Register tmpreg1, Register tmpreg2, Register oldval, Register newval, Register base, int offset);
  void atomic_cas_bool(Register oldval, Register newval, Register base, int offset, Register tmpreg);
  void atomic_cas64(Register temp_lo, Register temp_hi, Register temp_result, Register oldval_lo, Register oldval_hi, Register newval_lo, Register newval_hi, Register base, int offset);

  void cas_for_lock_acquire(Register oldval, Register newval, Register base, Register tmp, Label &slow_case, bool allow_fallthrough_on_failure = false, bool one_shot = false);
  void cas_for_lock_release(Register oldval, Register newval, Register base, Register tmp, Label &slow_case, bool allow_fallthrough_on_failure = false, bool one_shot = false);

#ifndef PRODUCT
  // Preserves flags and all registers.
  // On SMP the updated value might not be visible to external observers without a synchronization barrier.
  void cond_atomic_inc32(AsmCondition cond, int* counter_addr);
#endif // !PRODUCT

  // unconditional non-atomic increment
  void inc_counter(address counter_addr, Register tmpreg1, Register tmpreg2);
  void inc_counter(int* counter_addr, Register tmpreg1, Register tmpreg2) {
    inc_counter((address) counter_addr, tmpreg1, tmpreg2);
  }

  void pd_patch_instruction(address branch, address target, const char* file, int line);

  // Loading and storing values by size and signed-ness;
  // size must not exceed wordSize (i.e. 8-byte values are not supported on 32-bit ARM);
  // each of these calls generates exactly one load or store instruction,
  // so src can be a pre- or post-indexed address.
  // 32-bit ARM variants also support conditional execution.
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, AsmCondition cond = al);
  void store_sized_value(Register src, Address dst, size_t size_in_bytes, AsmCondition cond = al);

  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register temp_reg1,
                               Register temp_reg2,
                               Label& L_no_such_interface);


  void floating_cmp(Register dst);

  // improved x86 portability (minimizing source code changes)

  void ldr_literal(Register rd, AddressLiteral addr) {
    relocate(addr.rspec());
    ldr(rd, Address(PC, addr.target() - pc() - 8));
  }

  void lea(Register Rd, AddressLiteral addr) {
    // Never dereferenced, as on x86 (lval status ignored)
    mov_address(Rd, addr.target(), addr.rspec());
  }

  void restore_default_fp_mode();

  void safepoint_poll(Register tmp1, Label& slow_path);
  void get_polling_page(Register dest);
  void read_polling_page(Register dest, relocInfo::relocType rtype);
};


// The purpose of this class is to build several code fragments of the same size,
// in order to allow fast table branches.

class FixedSizeCodeBlock {
public:
  FixedSizeCodeBlock(MacroAssembler* masm, int size_in_instrs, bool enabled);
  ~FixedSizeCodeBlock();

private:
  MacroAssembler* _masm;
  address _start;
  int _size_in_instrs;
  bool _enabled;
};
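
// Usage sketch (illustrative only; exact padding behavior is defined in the
// implementation file — presumably each fragment is padded up to
// size_in_instrs instructions when 'enabled' is true):
//   for (int i = 0; i < n; i++) {
//     FixedSizeCodeBlock block(_masm, max_instrs_per_case, true);
//     ... emit case i ...
//   } // destructor closes (and pads) the fragment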


#endif // CPU_ARM_MACROASSEMBLER_ARM_HPP