/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
24 
#ifndef SHARE_VM_ASM_ASSEMBLER_HPP
#define SHARE_VM_ASM_ASSEMBLER_HPP

#include "asm/codeBuffer.hpp"
#include "code/oopRecorder.hpp"
#include "code/relocInfo.hpp"
#include "memory/allocation.hpp"
#include "utilities/debug.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/top.hpp"

#ifdef TARGET_ARCH_x86
# include "register_x86.hpp"
# include "vm_version_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "register_sparc.hpp"
# include "vm_version_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "register_zero.hpp"
# include "vm_version_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "register_arm.hpp"
# include "vm_version_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "register_ppc.hpp"
# include "vm_version_ppc.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "register_aarch64.hpp"
# include "vm_version_aarch64.hpp"
#endif

// This file contains platform-independent assembler declarations.

class MacroAssembler;
class AbstractAssembler;
class Label;
66 
/**
 * Labels represent destinations for control transfer instructions.  Such
 * instructions can accept a Label as their target argument.  A Label is
 * bound to the current location in the code stream by calling the
 * MacroAssembler's 'bind' method, which in turn calls the Label's 'bind'
 * method.  A Label may be referenced by an instruction before it's bound
 * (i.e., 'forward referenced').  'bind' stores the current code offset
 * in the Label object.
 *
 * If an instruction references a bound Label, the offset field(s) within
 * the instruction are immediately filled in based on the Label's code
 * offset.  If an instruction references an unbound label, that
 * instruction is put on a list of instructions that must be patched
 * (i.e., 'resolved') when the Label is bound.
 *
 * 'bind' will call the platform-specific 'patch_instruction' method to
 * fill in the offset field(s) for each unresolved instruction (if there
 * are any).  'patch_instruction' lives in one of the
 * cpu/<arch>/vm/assembler_<arch>* files.
 *
 * Instead of using a linked list of unresolved instructions, a Label has
 * an array of unresolved instruction code offsets.  _patch_index
 * contains the total number of forward references.  If the Label's array
 * overflows (i.e., _patch_index grows larger than the array size), a
 * GrowableArray is allocated to hold the remaining offsets.  (The cache
 * size is 4 for now, which handles over 99.5% of the cases)
 *
 * Labels may only be used within a single CodeSection.  If you need
 * to create references between code sections, use explicit relocations.
 */
97 class Label VALUE_OBJ_CLASS_SPEC {
98  private:
99   enum { PatchCacheSize = 4 };
100 
101   // _loc encodes both the binding state (via its sign)
102   // and the binding locator (via its value) of a label.
103   //
104   // _loc >= 0   bound label, loc() encodes the target (jump) position
105   // _loc == -1  unbound label
106   int _loc;
107 
108   // References to instructions that jump to this unresolved label.
109   // These instructions need to be patched when the label is bound
110   // using the platform-specific patchInstruction() method.
111   //
112   // To avoid having to allocate from the C-heap each time, we provide
113   // a local cache and use the overflow only if we exceed the local cache
114   int _patches[PatchCacheSize];
115   int _patch_index;
116   GrowableArray<int>* _patch_overflow;
117 
Label(const Label &)118   Label(const Label&) { ShouldNotReachHere(); }
119 
120  public:
121 
122   /**
123    * After binding, be sure 'patch_instructions' is called later to link
124    */
bind_loc(int loc)125   void bind_loc(int loc) {
126     assert(loc >= 0, "illegal locator");
127     assert(_loc == -1, "already bound");
128     _loc = loc;
129   }
bind_loc(int pos,int sect)130   void bind_loc(int pos, int sect) { bind_loc(CodeBuffer::locator(pos, sect)); }
131 
132 #ifndef PRODUCT
133   // Iterates over all unresolved instructions for printing
134   void print_instructions(MacroAssembler* masm) const;
135 #endif // PRODUCT
136 
137   /**
138    * Returns the position of the the Label in the code buffer
139    * The position is a 'locator', which encodes both offset and section.
140    */
loc() const141   int loc() const {
142     assert(_loc >= 0, "unbound label");
143     return _loc;
144   }
loc_pos() const145   int loc_pos()  const { return CodeBuffer::locator_pos(loc()); }
loc_sect() const146   int loc_sect() const { return CodeBuffer::locator_sect(loc()); }
147 
is_bound() const148   bool is_bound() const    { return _loc >=  0; }
is_unbound() const149   bool is_unbound() const  { return _loc == -1 && _patch_index > 0; }
is_unused() const150   bool is_unused() const   { return _loc == -1 && _patch_index == 0; }
151 
152   /**
153    * Adds a reference to an unresolved displacement instruction to
154    * this unbound label
155    *
156    * @param cb         the code buffer being patched
157    * @param branch_loc the locator of the branch instruction in the code buffer
158    */
159   void add_patch_at(CodeBuffer* cb, int branch_loc);
160 
161   /**
162    * Iterate over the list of patches, resolving the instructions
163    * Call patch_instruction on each 'branch_loc' value
164    */
165   void patch_instructions(MacroAssembler* masm);
166 
init()167   void init() {
168     _loc = -1;
169     _patch_index = 0;
170     _patch_overflow = NULL;
171   }
172 
Label()173   Label() {
174     init();
175   }
176 
~Label()177   ~Label() {
178     assert(is_bound() || is_unused(), "Label was never bound to a location, but it was used as a jmp target");
179   }
180 
reset()181   void reset() {
182     init(); //leave _patch_overflow because it points to CodeBuffer.
183   }
184 };
185 
// A union type for code which has to assemble both constant and
// non-constant operands, when the distinction cannot be made
// statically.
189 class RegisterOrConstant VALUE_OBJ_CLASS_SPEC {
190  private:
191   Register _r;
192   intptr_t _c;
193 
194  public:
RegisterOrConstant()195   RegisterOrConstant(): _r(noreg), _c(0) {}
RegisterOrConstant(Register r)196   RegisterOrConstant(Register r): _r(r), _c(0) {}
RegisterOrConstant(intptr_t c)197   RegisterOrConstant(intptr_t c): _r(noreg), _c(c) {}
198 
as_register() const199   Register as_register() const { assert(is_register(),""); return _r; }
as_constant() const200   intptr_t as_constant() const { assert(is_constant(),""); return _c; }
201 
register_or_noreg() const202   Register register_or_noreg() const { return _r; }
constant_or_zero() const203   intptr_t constant_or_zero() const  { return _c; }
204 
is_register() const205   bool is_register() const { return _r != noreg; }
is_constant() const206   bool is_constant() const { return _r == noreg; }
207 };
208 
// The Abstract Assembler: Pure assembler doing NO optimizations on the
// instruction level; i.e., what you write is what you get.
// The Assembler is generating code into a CodeBuffer.
212 class AbstractAssembler : public ResourceObj  {
213   friend class Label;
214 
215  protected:
216   CodeSection* _code_section;          // section within the code buffer
217   OopRecorder* _oop_recorder;          // support for relocInfo::oop_type
218 
219  public:
220   // Code emission & accessing
addr_at(int pos) const221   address addr_at(int pos) const { return code_section()->start() + pos; }
222 
223  protected:
224   // This routine is called with a label is used for an address.
225   // Labels and displacements truck in offsets, but target must return a PC.
target(Label & L)226   address target(Label& L)             { return code_section()->target(L, pc()); }
227 
is8bit(int x) const228   bool is8bit(int x) const             { return -0x80 <= x && x < 0x80; }
isByte(int x) const229   bool isByte(int x) const             { return 0 <= x && x < 0x100; }
isShiftCount(int x) const230   bool isShiftCount(int x) const       { return 0 <= x && x < 32; }
231 
232   // Instruction boundaries (required when emitting relocatable values).
233   class InstructionMark: public StackObj {
234    private:
235     AbstractAssembler* _assm;
236 
237    public:
InstructionMark(AbstractAssembler * assm)238     InstructionMark(AbstractAssembler* assm) : _assm(assm) {
239       assert(assm->inst_mark() == NULL, "overlapping instructions");
240       _assm->set_inst_mark();
241     }
~InstructionMark()242     ~InstructionMark() {
243       _assm->clear_inst_mark();
244     }
245   };
246   friend class InstructionMark;
247 #ifdef ASSERT
248   // Make it return true on platforms which need to verify
249   // instruction boundaries for some operations.
250   static bool pd_check_instruction_mark();
251 
252   // Add delta to short branch distance to verify that it still fit into imm8.
253   int _short_branch_delta;
254 
short_branch_delta() const255   int  short_branch_delta() const { return _short_branch_delta; }
set_short_branch_delta()256   void set_short_branch_delta()   { _short_branch_delta = 32; }
clear_short_branch_delta()257   void clear_short_branch_delta() { _short_branch_delta = 0; }
258 
259   class ShortBranchVerifier: public StackObj {
260    private:
261     AbstractAssembler* _assm;
262 
263    public:
ShortBranchVerifier(AbstractAssembler * assm)264     ShortBranchVerifier(AbstractAssembler* assm) : _assm(assm) {
265       assert(assm->short_branch_delta() == 0, "overlapping instructions");
266       _assm->set_short_branch_delta();
267     }
~ShortBranchVerifier()268     ~ShortBranchVerifier() {
269       _assm->clear_short_branch_delta();
270     }
271   };
272 #else
273   // Dummy in product.
274   class ShortBranchVerifier: public StackObj {
275    public:
ShortBranchVerifier(AbstractAssembler * assm)276     ShortBranchVerifier(AbstractAssembler* assm) {}
277   };
278 #endif
279 
280  public:
281 
282   // Creation
283   AbstractAssembler(CodeBuffer* code);
284 
285   // ensure buf contains all code (call this before using/copying the code)
286   void flush();
287 
emit_int8(int8_t x)288   void emit_int8(   int8_t  x) { code_section()->emit_int8(   x); }
emit_int16(int16_t x)289   void emit_int16(  int16_t x) { code_section()->emit_int16(  x); }
emit_int32(int32_t x)290   void emit_int32(  int32_t x) { code_section()->emit_int32(  x); }
emit_int64(int64_t x)291   void emit_int64(  int64_t x) { code_section()->emit_int64(  x); }
292 
emit_float(jfloat x)293   void emit_float(  jfloat  x) { code_section()->emit_float(  x); }
emit_double(jdouble x)294   void emit_double( jdouble x) { code_section()->emit_double( x); }
emit_address(address x)295   void emit_address(address x) { code_section()->emit_address(x); }
296 
297   enum { min_simm10 = -512 };
298 
299   // Test if x is within signed immediate range for width.
is_simm(int64_t x,uint w)300   static bool is_simm(int64_t x, uint w) {
301     precond(1 < w && w < 64);
302     int64_t limes = INT64_C(1) << (w - 1);
303     return -limes <= x && x < limes;
304   }
305 
is_simm8(int64_t x)306   static bool is_simm8(int64_t x) { return is_simm(x, 8); }
is_simm9(int64_t x)307   static bool is_simm9(int64_t x) { return is_simm(x, 9); }
is_simm10(int64_t x)308   static bool is_simm10(int64_t x) { return is_simm(x, 10); }
is_simm16(int64_t x)309   static bool is_simm16(int64_t x) { return is_simm(x, 16); }
is_simm32(int64_t x)310   static bool is_simm32(int64_t x) { return is_simm(x, 32); }
311 
312   // Test if x is within unsigned immediate range for width.
is_uimm(uint64_t x,uint w)313   static bool is_uimm(uint64_t x, uint w) {
314     precond(0 < w && w < 64);
315     uint64_t limes = UINT64_C(1) << w;
316     return x < limes;
317   }
318 
is_uimm12(uint64_t x)319   static bool is_uimm12(uint64_t x) { return is_uimm(x, 12); }
320 
321   // Accessors
code_section() const322   CodeSection*  code_section() const   { return _code_section; }
code() const323   CodeBuffer*   code()         const   { return code_section()->outer(); }
sect() const324   int           sect()         const   { return code_section()->index(); }
pc() const325   address       pc()           const   { return code_section()->end();   }
offset() const326   int           offset()       const   { return code_section()->size();  }
locator() const327   int           locator()      const   { return CodeBuffer::locator(offset(), sect()); }
328 
oop_recorder() const329   OopRecorder*  oop_recorder() const   { return _oop_recorder; }
set_oop_recorder(OopRecorder * r)330   void      set_oop_recorder(OopRecorder* r) { _oop_recorder = r; }
331 
inst_mark() const332   address       inst_mark() const { return code_section()->mark();       }
set_inst_mark()333   void      set_inst_mark()       {        code_section()->set_mark();   }
clear_inst_mark()334   void    clear_inst_mark()       {        code_section()->clear_mark(); }
335 
336   // Constants in code
relocate(RelocationHolder const & rspec,int format=0)337   void relocate(RelocationHolder const& rspec, int format = 0) {
338     assert(!pd_check_instruction_mark()
339         || inst_mark() == NULL || inst_mark() == code_section()->end(),
340         "call relocate() between instructions");
341     code_section()->relocate(code_section()->end(), rspec, format);
342   }
relocate(relocInfo::relocType rtype,int format=0)343   void relocate(   relocInfo::relocType rtype, int format = 0) {
344     code_section()->relocate(code_section()->end(), rtype, format);
345   }
346 
347   static int code_fill_byte();         // used to pad out odd-sized code buffers
348 
349   // Associate a comment with the current offset.  It will be printed
350   // along with the disassembly when printing nmethods.  Currently
351   // only supported in the instruction section of the code buffer.
352   void block_comment(const char* comment);
353   // Copy str to a buffer that has the same lifetime as the CodeBuffer
354   const char* code_string(const char* str);
355 
356   // Label functions
357   void bind(Label& L); // binds an unbound label L to the current code position
358 
359   // Move to a different section in the same code buffer.
360   void set_code_section(CodeSection* cs);
361 
362   // Inform assembler when generating stub code and relocation info
363   address    start_a_stub(int required_space);
364   void       end_a_stub();
365   // Ditto for constants.
366   address    start_a_const(int required_space, int required_align = sizeof(double));
367   void       end_a_const(CodeSection* cs);  // Pass the codesection to continue in (insts or stubs?).
368 
369   // constants support
370   //
371   // We must remember the code section (insts or stubs) in c1
372   // so we can reset to the proper section in end_a_const().
long_constant(jlong c)373   address long_constant(jlong c) {
374     CodeSection* c1 = _code_section;
375     address ptr = start_a_const(sizeof(c), sizeof(c));
376     if (ptr != NULL) {
377       emit_int64(c);
378       end_a_const(c1);
379     }
380     return ptr;
381   }
double_constant(jdouble c)382   address double_constant(jdouble c) {
383     CodeSection* c1 = _code_section;
384     address ptr = start_a_const(sizeof(c), sizeof(c));
385     if (ptr != NULL) {
386       emit_double(c);
387       end_a_const(c1);
388     }
389     return ptr;
390   }
float_constant(jfloat c)391   address float_constant(jfloat c) {
392     CodeSection* c1 = _code_section;
393     address ptr = start_a_const(sizeof(c), sizeof(c));
394     if (ptr != NULL) {
395       emit_float(c);
396       end_a_const(c1);
397     }
398     return ptr;
399   }
address_constant(address c)400   address address_constant(address c) {
401     CodeSection* c1 = _code_section;
402     address ptr = start_a_const(sizeof(c), sizeof(c));
403     if (ptr != NULL) {
404       emit_address(c);
405       end_a_const(c1);
406     }
407     return ptr;
408   }
address_constant(address c,RelocationHolder const & rspec)409   address address_constant(address c, RelocationHolder const& rspec) {
410     CodeSection* c1 = _code_section;
411     address ptr = start_a_const(sizeof(c), sizeof(c));
412     if (ptr != NULL) {
413       relocate(rspec);
414       emit_address(c);
415       end_a_const(c1);
416     }
417     return ptr;
418   }
419 
420   // Bootstrapping aid to cope with delayed determination of constants.
421   // Returns a static address which will eventually contain the constant.
422   // The value zero (NULL) stands instead of a constant which is still uncomputed.
423   // Thus, the eventual value of the constant must not be zero.
424   // This is fine, since this is designed for embedding object field
425   // offsets in code which must be generated before the object class is loaded.
426   // Field offsets are never zero, since an object's header (mark word)
427   // is located at offset zero.
428   RegisterOrConstant delayed_value(int(*value_fn)(), Register tmp, int offset = 0);
429   RegisterOrConstant delayed_value(address(*value_fn)(), Register tmp, int offset = 0);
430   virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, Register tmp, int offset) = 0;
431   // Last overloading is platform-dependent; look in assembler_<arch>.cpp.
432   static intptr_t* delayed_value_addr(int(*constant_fn)());
433   static intptr_t* delayed_value_addr(address(*constant_fn)());
434   static void update_delayed_values();
435 
436   // Bang stack to trigger StackOverflowError at a safe location
437   // implementation delegates to machine-specific bang_stack_with_offset
438   void generate_stack_overflow_check( int frame_size_in_bytes );
439   virtual void bang_stack_with_offset(int offset) = 0;
440 
441 
442   /**
443    * A platform-dependent method to patch a jump instruction that refers
444    * to this label.
445    *
446    * @param branch the location of the instruction to patch
447    * @param masm the assembler which generated the branch
448    */
449   void pd_patch_instruction(address branch, address target);
450 
451 };
452 
#ifdef TARGET_ARCH_x86
# include "assembler_x86.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "assembler_aarch64.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "assembler_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "assembler_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "assembler_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "assembler_ppc.hpp"
#endif


#endif // SHARE_VM_ASM_ASSEMBLER_HPP