/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_ppc.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_ppc.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

#define __ masm->

#ifndef PRODUCT
extern "C" void bad_compiled_vtable_index(JavaThread* thread, oopDesc* receiver, int index);
#endif

// Used by compiler only; may use only caller saved, non-argument registers.
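// The generated stub performs the virtual dispatch: it loads the receiver's
// klass, reads the Method* from the vtable slot selected by the compile-time
// constant vtable_index, and tail-jumps to that method's from_compiled entry.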
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = code_size_limit(true);
  VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address start_pc;
  int     slop_bytes = 8; // just a two-instruction safety net
  int     slop_delta = 0;
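  // Whenever an emitted sequence below turns out shorter than its worst-case
  // estimate (e.g. load_const_optimized vs. the 5-instruction load_const),
  // the unused bytes are recorded in slop_delta and accumulated into
  // slop_bytes, which is reported to bookkeeping() for stub size statistics.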

  ResourceMark    rm;
  CodeBuffer      cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
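    // Increment the shared megamorphic call counter. load_const_optimized
    // materializes the counter address; with the last argument true it keeps
    // the low 16 bits out of the register and returns them as the
    // displacement used by the following lwz/stw.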
    start_pc = __ pc();
    int load_const_maxLen = 5*BytesPerInstWord;  // load_const generates 5 instructions. Assume that as max size for load_const_optimized.
    int offs = __ load_const_optimized(R11_scratch1, SharedRuntime::nof_megamorphic_calls_addr(), R12_scratch2, true);
    slop_delta  = load_const_maxLen - (__ pc() - start_pc);
    slop_bytes += slop_delta;
    assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
    __ lwz(R12_scratch2, offs, R11_scratch1);
    __ addi(R12_scratch2, R12_scratch2, 1);
    __ stw(R12_scratch2, offs, R11_scratch1);
  }
#endif

  assert(VtableStub::receiver_location() == R3_ARG1->as_VMReg(), "receiver expected in R3_ARG1");

  const Register rcvr_klass = R11_scratch1;
  address npe_addr = __ pc(); // npe = null pointer exception
  // Check if we must do an explicit check (implicit checks disabled, offset too large).
  __ null_check(R3, oopDesc::klass_offset_in_bytes(), /*implicit only*/NULL);
  // Get receiver klass.
  __ load_klass(rcvr_klass, R3);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    // Check offset vs vtable length.
    const Register vtable_len = R12_scratch2;
    __ lwz(vtable_len, in_bytes(Klass::vtable_length_offset()), rcvr_klass);
    __ cmpwi(CCR0, vtable_len, vtable_index*vtableEntry::size());
    __ bge(CCR0, L);
    __ li(R12_scratch2, vtable_index);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), R3_ARG1, R12_scratch2, false);
    __ bind(L);
  }
#endif

  int entry_offset = in_bytes(Klass::vtable_start_offset()) +
                     vtable_index*vtableEntry::size_in_bytes();
  int v_off        = entry_offset + vtableEntry::method_offset_in_bytes();
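  // The vtable is embedded in the Klass: entries start at vtable_start_offset
  // and each entry holds a single Method* slot, so the slot for this call sits
  // at the compile-time constant offset v_off from the receiver's klass.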

  __ ld(R19_method, (RegisterOrConstant)v_off, rcvr_klass);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    __ cmpdi(CCR0, R19_method, 0);
    __ bne(CCR0, L);
    __ stop("Vtable entry is ZERO");
    __ bind(L);
  }
#endif

  address ame_addr = __ pc(); // ame = abstract method error
  // If the vtable entry is null, the method is abstract.
  // NOTE: for vtable dispatches, the vtable entry will never be null.

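  // Load the from_compiled entry point of the target method and dispatch
  // through the count register. An explicit null check is only emitted when
  // an implicit check cannot cover the access (see comment above npe_addr).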
  __ null_check(R19_method, in_bytes(Method::from_compiled_offset()), /*implicit only*/NULL);
  __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
  __ mtctr(R12_scratch2);
  __ bctr();

  masm->flush();
  bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, 0);

  return s;
}

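// Itable stubs dispatch interface calls: the receiver's klass is first checked
// to implement the reference class, then its itable is searched for the given
// interface to locate the Method* at itable_index.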
VtableStub* VtableStubs::create_itable_stub(int itable_index) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = code_size_limit(false);
  VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }
  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address start_pc;
  int     slop_bytes = 8; // just a two-instruction safety net
  int     slop_delta = 0;

  ResourceMark    rm;
  CodeBuffer      cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);
  int load_const_maxLen = 5*BytesPerInstWord;  // load_const generates 5 instructions. Assume that as max size for load_const_optimized.

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    start_pc = __ pc();
    int offs = __ load_const_optimized(R11_scratch1, SharedRuntime::nof_megamorphic_calls_addr(), R12_scratch2, true);
    slop_delta  = load_const_maxLen - (__ pc() - start_pc);
    slop_bytes += slop_delta;
    assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
    __ lwz(R12_scratch2, offs, R11_scratch1);
    __ addi(R12_scratch2, R12_scratch2, 1);
    __ stw(R12_scratch2, offs, R11_scratch1);
  }
#endif

  assert(VtableStub::receiver_location() == R3_ARG1->as_VMReg(), "receiver expected in R3_ARG1");

  // Entry arguments:
  //  R19_method: CompiledICHolder
  //  R3_ARG1:    Receiver

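  // The CompiledICHolder supplies two klasses: holder_klass is the reference
  // class (REFC) used for the receiver subtype check below, and
  // holder_metadata is the interface whose itable slot holds the Method*.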
  Label L_no_such_interface;
  const Register rcvr_klass = R11_scratch1,
                 interface  = R12_scratch2,
                 tmp1       = R21_tmp1,
                 tmp2       = R22_tmp2;

  address npe_addr = __ pc(); // npe = null pointer exception
  __ null_check(R3_ARG1, oopDesc::klass_offset_in_bytes(), /*implicit only*/NULL);
  __ load_klass(rcvr_klass, R3_ARG1);

  // Receiver subtype check against REFC.
  __ ld(interface, CompiledICHolder::holder_klass_offset(), R19_method);
  __ lookup_interface_method(rcvr_klass, interface, noreg,
                             R0, tmp1, tmp2,
                             L_no_such_interface, /*return_method=*/ false);

  // Get Method* and entrypoint for compiler.
  __ ld(interface, CompiledICHolder::holder_metadata_offset(), R19_method);
  __ lookup_interface_method(rcvr_klass, interface, itable_index,
                             R19_method, tmp1, tmp2,
                             L_no_such_interface, /*return_method=*/ true);

#ifndef PRODUCT
  if (DebugVtables) {
    Label ok;
    __ cmpdi(CCR0, R19_method, 0);
    __ bne(CCR0, ok);
    __ stop("method is null");
    __ bind(ok);
  }
#endif

  // If the vtable entry is null, the method is abstract.
  address ame_addr = __ pc(); // ame = abstract method error

  // Must do an explicit check if implicit checks are disabled.
  __ null_check(R19_method, in_bytes(Method::from_compiled_offset()), &L_no_such_interface);
  __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
  __ mtctr(R12_scratch2);
  __ bctr();

  // Handle IncompatibleClassChangeError in itable stubs.
  // More detailed error message.
  // We force resolving of the call site by jumping to the "handle
  // wrong method" stub, and so let the interpreter runtime do all the
  // dirty work.
  __ bind(L_no_such_interface);
  start_pc = __ pc();
  __ load_const_optimized(R11_scratch1, SharedRuntime::get_handle_wrong_method_stub(), R12_scratch2);
  slop_delta  = load_const_maxLen - (__ pc() - start_pc);
  slop_bytes += slop_delta;
  assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
  __ mtctr(R11_scratch1);
  __ bctr();

  masm->flush();
  bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, 0);

  return s;
}

int VtableStub::pd_code_alignment() {
  // Power cache line size is 128 bytes, but we want to limit alignment loss.
  const unsigned int icache_line_size = 32;
  return icache_line_size;
}