/*
 * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "assembler_arm.inline.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_arm.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_arm.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// machine-dependent part of VtableStubs: create VtableStub of correct size and
// initialize its code
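//
// Both stub kinds emitted below follow the same three steps: load the
// receiver's klass, fetch the target Method* from the selected vtable or
// itable slot, and branch to that method's from_compiled entry point.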

#define __ masm->

#ifndef PRODUCT
extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
#endif

VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = code_size_limit(true);
  VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address start_pc;
  int     slop_bytes = 0;
  int     slop_delta = 0;

  ResourceMark    rm;
  CodeBuffer      cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    // Implementation required?
  }
#endif

  assert(VtableStub::receiver_location() == R0->as_VMReg(), "receiver expected in R0");

  const Register tmp = Rtemp; // Rtemp OK, should be free at call sites

  address npe_addr = __ pc();
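  // The load_klass below is the first access through the receiver, so it also
  // serves as the implicit null check; npe_addr lets the runtime map a fault
  // here back to a NullPointerException at the call site.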
  __ load_klass(tmp, R0);

#ifndef PRODUCT
  if (DebugVtables) {
    // Implementation required?
  }
#endif

  start_pc = __ pc();
  { // lookup virtual method
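    // The vtable is embedded in the Klass object: it starts at
    // Klass::vtable_start_offset() and each vtableEntry is a single word
    // holding a Method*, so the slot for vtable_index lies at a compile-time
    // constant offset from the klass pointer in tmp.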
    int entry_offset = in_bytes(Klass::vtable_start_offset()) + vtable_index * vtableEntry::size_in_bytes();
    int method_offset = vtableEntry::method_offset_in_bytes() + entry_offset;

    assert((method_offset & (wordSize - 1)) == 0, "offset should be aligned");
    int offset_mask = 0xfff;
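    // An ARM ldr with an immediate offset can only encode 12 bits (0..0xfff),
    // so for large vtable indices the high part of the offset is added to the
    // klass pointer first and only the low 12 bits go into the load itself.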
    if (method_offset & ~offset_mask) {
      __ add(tmp, tmp, method_offset & ~offset_mask);
    }
    __ ldr(Rmethod, Address(tmp, method_offset & offset_mask));
  }
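  // The sequence above is at most one add plus one ldr (8 bytes); any bytes
  // not used are counted as slop so the shared sizing code stays conservative.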
  slop_delta  = 8 - (int)(__ pc() - start_pc);
  slop_bytes += slop_delta;
  assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);

#ifndef PRODUCT
  if (DebugVtables) {
    // Implementation required?
  }
#endif

  address ame_addr = __ pc();
  __ ldr(PC, Address(Rmethod, Method::from_compiled_offset()));
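  // The load above moves Method::from_compiled_offset() straight into PC,
  // transferring control to the target's compiled entry point. ame_addr marks
  // this instruction so that a fault here (e.g. for an abstract target) can be
  // reported as an AbstractMethodError by the implicit-exception machinery.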

  masm->flush();
  bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, 0);

  return s;
}

VtableStub* VtableStubs::create_itable_stub(int itable_index) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = code_size_limit(false);
  VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }
  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address start_pc;
  int     slop_bytes = 0;
  int     slop_delta = 0;

  ResourceMark    rm;
  CodeBuffer      cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    // Implementation required?
  }
#endif

  assert(VtableStub::receiver_location() == R0->as_VMReg(), "receiver expected in R0");

  // R0-R3 / R0-R7 registers hold the arguments and cannot be spoiled
  const Register Rclass = R4;
  const Register Rintf  = R5;
  const Register Rscan  = R6;

  Label L_no_such_interface;

  assert_different_registers(Ricklass, Rclass, Rintf, Rscan, Rtemp);

  start_pc = __ pc();

  // get receiver klass (also an implicit null-check)
  address npe_addr = __ pc();
  __ load_klass(Rclass, R0);

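  // Ricklass holds the CompiledICHolder* installed by the caller's inline
  // cache. Its holder_klass is the resolved reference class (REFC) of the
  // invokeinterface site, and its holder_metadata is the declaring interface
  // used for the itable lookup below.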
  // Receiver subtype check against REFC.
  __ ldr(Rintf, Address(Ricklass, CompiledICHolder::holder_klass_offset()));
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             Rclass, Rintf, noreg,
                             // outputs: temp reg1, temp reg2
                             noreg, Rscan, Rtemp,
                             L_no_such_interface);

  const ptrdiff_t typecheckSize = __ pc() - start_pc;
  start_pc = __ pc();

  // Get Method* and entry point for compiler
  __ ldr(Rintf, Address(Ricklass, CompiledICHolder::holder_metadata_offset()));
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             Rclass, Rintf, itable_index,
                             // outputs: temp reg1, temp reg2, temp reg3
                             Rmethod, Rscan, Rtemp,
                             L_no_such_interface);

  const ptrdiff_t lookupSize = __ pc() - start_pc;
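  // Both lookup_interface_method sequences scan the receiver's itable and are
  // therefore of variable length; measure what was actually emitted so the
  // fixed estimate below can be checked and the leftover counted as slop.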

  // Reduce "estimate" such that "padding" does not drop below 8.
  const ptrdiff_t estimate = 140;
  const ptrdiff_t codesize = typecheckSize + lookupSize;
  slop_delta  = (int)(estimate - codesize);
  slop_bytes += slop_delta;
  assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize);

#ifndef PRODUCT
  if (DebugVtables) {
    // Implementation required?
  }
#endif

  address ame_addr = __ pc();

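  // As in the vtable stub, branch to the target's compiled entry point by
  // loading it directly into PC.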
  __ ldr(PC, Address(Rmethod, Method::from_compiled_offset()));

  __ bind(L_no_such_interface);
  // Handle IncompatibleClassChangeError in itable stubs.
  // More detailed error message.
  // We force resolving of the call site by jumping to the "handle
  // wrong method" stub, and so let the interpreter runtime do all the
  // dirty work.
  assert(SharedRuntime::get_handle_wrong_method_stub() != NULL, "check initialization order");
  __ jump(SharedRuntime::get_handle_wrong_method_stub(), relocInfo::runtime_call_type, Rtemp);

  masm->flush();
  bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, 0);

  return s;
}

int VtableStub::pd_code_alignment() {
  // ARM32 cache line size is not an architected constant. We just align on word size.
  const unsigned int icache_line_size = wordSize;
  return icache_line_size;
}