1 /*
2 * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/javaClasses.hpp"
27 #include "classfile/symbolTable.hpp"
28 #include "classfile/systemDictionary.hpp"
29 #include "code/nmethod.hpp"
30 #include "interpreter/interpreter.hpp"
31 #include "interpreter/oopMapCache.hpp"
32 #include "jvmtifiles/jvmtiEnv.hpp"
33 #include "logging/log.hpp"
34 #include "logging/logStream.hpp"
35 #include "memory/allocation.inline.hpp"
36 #include "memory/resourceArea.hpp"
37 #include "oops/instanceKlass.hpp"
38 #include "oops/oop.inline.hpp"
39 #include "oops/oopHandle.inline.hpp"
40 #include "prims/jvmtiAgentThread.hpp"
41 #include "prims/jvmtiEventController.inline.hpp"
42 #include "prims/jvmtiImpl.hpp"
43 #include "prims/jvmtiRedefineClasses.hpp"
44 #include "runtime/deoptimization.hpp"
45 #include "runtime/frame.inline.hpp"
46 #include "runtime/handles.inline.hpp"
47 #include "runtime/interfaceSupport.inline.hpp"
48 #include "runtime/javaCalls.hpp"
49 #include "runtime/os.hpp"
50 #include "runtime/serviceThread.hpp"
51 #include "runtime/signature.hpp"
52 #include "runtime/thread.inline.hpp"
53 #include "runtime/threadSMR.hpp"
54 #include "runtime/vframe.hpp"
55 #include "runtime/vframe_hp.hpp"
56 #include "runtime/vmOperations.hpp"
57 #include "utilities/exceptions.hpp"
58
59 //
60 // class JvmtiAgentThread
61 //
62 // JavaThread used to wrap a thread started by an agent
63 // using the JVMTI method RunAgentThread.
64 //
65
JvmtiAgentThread(JvmtiEnv * env,jvmtiStartFunction start_fn,const void * start_arg)66 JvmtiAgentThread::JvmtiAgentThread(JvmtiEnv* env, jvmtiStartFunction start_fn, const void *start_arg)
67 : JavaThread(start_function_wrapper) {
68 _env = env;
69 _start_fn = start_fn;
70 _start_arg = start_arg;
71 }
72
73 void
start_function_wrapper(JavaThread * thread,TRAPS)74 JvmtiAgentThread::start_function_wrapper(JavaThread *thread, TRAPS) {
75 // It is expected that any Agent threads will be created as
76 // Java Threads. If this is the case, notification of the creation
77 // of the thread is given in JavaThread::thread_main().
78 assert(thread == JavaThread::current(), "sanity check");
79
80 JvmtiAgentThread *dthread = (JvmtiAgentThread *)thread;
81 dthread->call_start_function();
82 }
83
84 void
call_start_function()85 JvmtiAgentThread::call_start_function() {
86 ThreadToNativeFromVM transition(this);
87 _start_fn(_env->jvmti_external(), jni_environment(), (void*)_start_arg);
88 }
89
90
91 //
92 // class GrowableCache - private methods
93 //
94
// Rebuild the flat _cache array from the element list and publish it to
// the listener.  The array gets one extra slot for the NULL terminator
// that consumers of the cache rely on.
void GrowableCache::recache() {
  int len = _elements->length();

  FREE_C_HEAP_ARRAY(address, _cache);
  _cache = NEW_C_HEAP_ARRAY(address,len+1, mtInternal);

  for (int i=0; i<len; i++) {
    _cache[i] = _elements->at(i)->getCacheValue();
    //
    // The cache entry has gone bad. Without a valid frame pointer
    // value, the entry is useless so we simply delete it in product
    // mode. The call to remove() will rebuild the cache again
    // without the bad entry.
    //
    if (_cache[i] == NULL) {
      assert(false, "cannot recache NULL elements");
      remove(i);  // recurses back into recache() without the bad element
      return;
    }
  }
  _cache[len] = NULL;

  // Notify the owner that the cache contents changed.
  _listener_fun(_this_obj,_cache);
}
119
equals(void * v,GrowableElement * e2)120 bool GrowableCache::equals(void* v, GrowableElement *e2) {
121 GrowableElement *e1 = (GrowableElement *) v;
122 assert(e1 != NULL, "e1 != NULL");
123 assert(e2 != NULL, "e2 != NULL");
124
125 return e1->equals(e2);
126 }
127
128 //
129 // class GrowableCache - public methods
130 //
131
GrowableCache()132 GrowableCache::GrowableCache() {
133 _this_obj = NULL;
134 _listener_fun = NULL;
135 _elements = NULL;
136 _cache = NULL;
137 }
138
GrowableCache::~GrowableCache() {
  // clear() deletes the elements and (via recache()) tells the listener
  // the cache is empty before the backing storage is released below.
  clear();
  delete _elements;
  FREE_C_HEAP_ARRAY(address, _cache);
}
144
// Bind the cache to its owner object and change-listener, allocate the
// element array on the C heap, and publish an initial (empty) cache.
void GrowableCache::initialize(void *this_obj, void listener_fun(void *, address*) ) {
  _this_obj = this_obj;
  _listener_fun = listener_fun;
  _elements = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<GrowableElement*>(5, mtServiceability);
  recache();
}
151
// number of elements currently in the collection
int GrowableCache::length() {
  return _elements->length();
}
156
157 // get the value of the index element in the collection
at(int index)158 GrowableElement* GrowableCache::at(int index) {
159 GrowableElement *e = (GrowableElement *) _elements->at(index);
160 assert(e != NULL, "e != NULL");
161 return e;
162 }
163
// index of the first element equal to 'e', or -1 if not present
int GrowableCache::find(GrowableElement* e) {
  return _elements->find(e, GrowableCache::equals);
}
167
168 // append a copy of the element to the end of the collection
append(GrowableElement * e)169 void GrowableCache::append(GrowableElement* e) {
170 GrowableElement *new_e = e->clone();
171 _elements->append(new_e);
172 recache();
173 }
174
// remove the element at index, delete the (cloned) element itself,
// and republish the cache
void GrowableCache::remove (int index) {
  GrowableElement *e = _elements->at(index);
  assert(e != NULL, "e != NULL");
  _elements->remove(e);  // removes by pointer identity
  delete e;
  recache();
}
183
184 // clear out all elements, release all heap space and
185 // let our listener know that things have changed.
clear()186 void GrowableCache::clear() {
187 int len = _elements->length();
188 for (int i=0; i<len; i++) {
189 delete _elements->at(i);
190 }
191 _elements->clear();
192 recache();
193 }
194
195 //
196 // class JvmtiBreakpoint
197 //
198
JvmtiBreakpoint::JvmtiBreakpoint(Method* m_method, jlocation location)
    : _method(m_method), _bci((int)location) {
  assert(_method != NULL, "No method for breakpoint.");
  assert(_bci >= 0, "Negative bci for breakpoint.");
  // Keep the method's class holder alive via JVMTI OopStorage so the
  // Method* cannot be unloaded while this breakpoint exists.
  oop class_holder_oop  = _method->method_holder()->klass_holder();
  _class_holder = OopHandle(JvmtiExport::jvmti_oop_storage(), class_holder_oop);
}
206
JvmtiBreakpoint::~JvmtiBreakpoint() {
  // Default-constructed breakpoints carry an empty handle; only release
  // handles that actually hold OopStorage.
  if (_class_holder.peek() != NULL) {
    _class_holder.release(JvmtiExport::jvmti_oop_storage());
  }
}
212
// Copy from 'bp', allocating a fresh OopHandle so each breakpoint owns
// its own class-holder handle.
// NOTE(review): assumes this breakpoint's _class_holder is empty (as from
// the default constructor) -- any previous handle is not released here.
void JvmtiBreakpoint::copy(JvmtiBreakpoint& bp) {
  _method = bp._method;
  _bci = bp._bci;
  _class_holder = OopHandle(JvmtiExport::jvmti_oop_storage(), bp._class_holder.resolve());
}
218
equals(JvmtiBreakpoint & bp)219 bool JvmtiBreakpoint::equals(JvmtiBreakpoint& bp) {
220 return _method == bp._method
221 && _bci == bp._bci;
222 }
223
// Bytecode pointer for this breakpoint's bci within its method.
address JvmtiBreakpoint::getBcp() const {
  return _method->bcp_from(_bci);
}
227
each_method_version_do(method_action meth_act)228 void JvmtiBreakpoint::each_method_version_do(method_action meth_act) {
229 ((Method*)_method->*meth_act)(_bci);
230
231 // add/remove breakpoint to/from versions of the method that are EMCP.
232 Thread *thread = Thread::current();
233 InstanceKlass* ik = _method->method_holder();
234 Symbol* m_name = _method->name();
235 Symbol* m_signature = _method->signature();
236
237 // search previous versions if they exist
238 for (InstanceKlass* pv_node = ik->previous_versions();
239 pv_node != NULL;
240 pv_node = pv_node->previous_versions()) {
241 Array<Method*>* methods = pv_node->methods();
242
243 for (int i = methods->length() - 1; i >= 0; i--) {
244 Method* method = methods->at(i);
245 // Only set breakpoints in running EMCP methods.
246 if (method->is_running_emcp() &&
247 method->name() == m_name &&
248 method->signature() == m_signature) {
249 ResourceMark rm;
250 log_debug(redefine, class, breakpoint)
251 ("%sing breakpoint in %s(%s)", meth_act == &Method::set_breakpoint ? "sett" : "clear",
252 method->name()->as_C_string(), method->signature()->as_C_string());
253 (method->*meth_act)(_bci);
254 break;
255 }
256 }
257 }
258 }
259
// Install this breakpoint in all live versions of its method.
void JvmtiBreakpoint::set() {
  each_method_version_do(&Method::set_breakpoint);
}
263
// Remove this breakpoint from all live versions of its method.
void JvmtiBreakpoint::clear() {
  each_method_version_do(&Method::clear_breakpoint);
}
267
print_on(outputStream * out) const268 void JvmtiBreakpoint::print_on(outputStream* out) const {
269 #ifndef PRODUCT
270 ResourceMark rm;
271 const char *class_name = (_method == NULL) ? "NULL" : _method->klass_name()->as_C_string();
272 const char *method_name = (_method == NULL) ? "NULL" : _method->name()->as_C_string();
273 out->print("Breakpoint(%s,%s,%d,%p)", class_name, method_name, _bci, getBcp());
274 #endif
275 }
276
277
278 //
279 // class VM_ChangeBreakpoints
280 //
281 // Modify the Breakpoints data structure at a safepoint
282 //
283
doit()284 void VM_ChangeBreakpoints::doit() {
285 switch (_operation) {
286 case SET_BREAKPOINT:
287 _breakpoints->set_at_safepoint(*_bp);
288 break;
289 case CLEAR_BREAKPOINT:
290 _breakpoints->clear_at_safepoint(*_bp);
291 break;
292 default:
293 assert(false, "Unknown operation");
294 }
295 }
296
297 //
298 // class JvmtiBreakpoints
299 //
300 // a JVMTI internal collection of JvmtiBreakpoint
301 //
302
// The listener is invoked whenever the set of breakpoints changes; see
// JvmtiCurrentBreakpoints::listener_fun.
JvmtiBreakpoints::JvmtiBreakpoints(void listener_fun(void *,address *)) {
  _bps.initialize(this,listener_fun);
}
306
~JvmtiBreakpoints()307 JvmtiBreakpoints:: ~JvmtiBreakpoints() {}
308
print()309 void JvmtiBreakpoints::print() {
310 #ifndef PRODUCT
311 LogTarget(Trace, jvmti) log;
312 LogStream log_stream(log);
313
314 int n = _bps.length();
315 for (int i=0; i<n; i++) {
316 JvmtiBreakpoint& bp = _bps.at(i);
317 log_stream.print("%d: ", i);
318 bp.print_on(&log_stream);
319 log_stream.cr();
320 }
321 #endif
322 }
323
324
set_at_safepoint(JvmtiBreakpoint & bp)325 void JvmtiBreakpoints::set_at_safepoint(JvmtiBreakpoint& bp) {
326 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
327
328 int i = _bps.find(bp);
329 if (i == -1) {
330 _bps.append(bp);
331 bp.set();
332 }
333 }
334
clear_at_safepoint(JvmtiBreakpoint & bp)335 void JvmtiBreakpoints::clear_at_safepoint(JvmtiBreakpoint& bp) {
336 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
337
338 int i = _bps.find(bp);
339 if (i != -1) {
340 _bps.remove(i);
341 bp.clear();
342 }
343 }
344
length()345 int JvmtiBreakpoints::length() { return _bps.length(); }
346
set(JvmtiBreakpoint & bp)347 int JvmtiBreakpoints::set(JvmtiBreakpoint& bp) {
348 if ( _bps.find(bp) != -1) {
349 return JVMTI_ERROR_DUPLICATE;
350 }
351 VM_ChangeBreakpoints set_breakpoint(VM_ChangeBreakpoints::SET_BREAKPOINT, &bp);
352 VMThread::execute(&set_breakpoint);
353 return JVMTI_ERROR_NONE;
354 }
355
clear(JvmtiBreakpoint & bp)356 int JvmtiBreakpoints::clear(JvmtiBreakpoint& bp) {
357 if ( _bps.find(bp) == -1) {
358 return JVMTI_ERROR_NOT_FOUND;
359 }
360
361 VM_ChangeBreakpoints clear_breakpoint(VM_ChangeBreakpoints::CLEAR_BREAKPOINT, &bp);
362 VMThread::execute(&clear_breakpoint);
363 return JVMTI_ERROR_NONE;
364 }
365
clearall_in_class_at_safepoint(Klass * klass)366 void JvmtiBreakpoints::clearall_in_class_at_safepoint(Klass* klass) {
367 bool changed = true;
368 // We are going to run thru the list of bkpts
369 // and delete some. This deletion probably alters
370 // the list in some implementation defined way such
371 // that when we delete entry i, the next entry might
372 // no longer be at i+1. To be safe, each time we delete
373 // an entry, we'll just start again from the beginning.
374 // We'll stop when we make a pass thru the whole list without
375 // deleting anything.
376 while (changed) {
377 int len = _bps.length();
378 changed = false;
379 for (int i = 0; i < len; i++) {
380 JvmtiBreakpoint& bp = _bps.at(i);
381 if (bp.method()->method_holder() == klass) {
382 bp.clear();
383 _bps.remove(i);
384 // This changed 'i' so we have to start over.
385 changed = true;
386 break;
387 }
388 }
389 }
390 }
391
//
// class JvmtiCurrentBreakpoints
//

// Lazily-created singleton collection of all JVMTI breakpoints (see
// get_jvmti_breakpoints()) and the flat NULL-terminated cache of
// breakpoint bcps published through listener_fun().
JvmtiBreakpoints *JvmtiCurrentBreakpoints::_jvmti_breakpoints = NULL;
address * JvmtiCurrentBreakpoints::_breakpoint_list = NULL;
398
399
get_jvmti_breakpoints()400 JvmtiBreakpoints& JvmtiCurrentBreakpoints::get_jvmti_breakpoints() {
401 if (_jvmti_breakpoints != NULL) return (*_jvmti_breakpoints);
402 _jvmti_breakpoints = new JvmtiBreakpoints(listener_fun);
403 assert(_jvmti_breakpoints != NULL, "_jvmti_breakpoints != NULL");
404 return (*_jvmti_breakpoints);
405 }
406
// GrowableCache change listener: records the freshly rebuilt,
// NULL-terminated array of breakpoint bcps.
void JvmtiCurrentBreakpoints::listener_fun(void *this_obj, address *cache) {
  JvmtiBreakpoints *this_jvmti = (JvmtiBreakpoints *) this_obj;
  assert(this_jvmti != NULL, "this_jvmti != NULL");

  // 'n' only exists in debug builds, where the assert that uses it compiles.
  debug_only(int n = this_jvmti->length(););
  assert(cache[n] == NULL, "cache must be NULL terminated");

  set_breakpoint_list(cache);
}
416
417 ///////////////////////////////////////////////////////////////
418 //
419 // class VM_GetOrSetLocal
420 //
421
// Constructor for non-object getter
VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, jint depth, jint index, BasicType type)
  : _thread(thread)
  , _calling_thread(NULL)
  , _depth(depth)
  , _index(index)
  , _type(type)
  , _jvf(NULL)
  , _set(false)
  , _eb(false, NULL, NULL)   // escape barrier not armed for a non-object get
  , _result(JVMTI_ERROR_NONE)
{
}
435
// Constructor for object or non-object setter
VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, jint depth, jint index, BasicType type, jvalue value)
  : _thread(thread)
  , _calling_thread(NULL)
  , _depth(depth)
  , _index(index)
  , _type(type)
  , _value(value)
  , _jvf(NULL)
  , _set(true)
  , _eb(type == T_OBJECT, JavaThread::current(), thread)  // arm barrier only for object sets
  , _result(JVMTI_ERROR_NONE)
{
}
450
// Constructor for object getter
VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, JavaThread* calling_thread, jint depth, int index)
  : _thread(thread)
  , _calling_thread(calling_thread)  // JNI local for the result is created in this thread
  , _depth(depth)
  , _index(index)
  , _type(T_OBJECT)
  , _jvf(NULL)
  , _set(false)
  , _eb(true, calling_thread, thread)
  , _result(JVMTI_ERROR_NONE)
{
}
464
get_vframe()465 vframe *VM_GetOrSetLocal::get_vframe() {
466 if (!_thread->has_last_Java_frame()) {
467 return NULL;
468 }
469 RegisterMap reg_map(_thread);
470 vframe *vf = _thread->last_java_vframe(®_map);
471 int d = 0;
472 while ((vf != NULL) && (d < _depth)) {
473 vf = vf->java_sender();
474 d++;
475 }
476 return vf;
477 }
478
get_java_vframe()479 javaVFrame *VM_GetOrSetLocal::get_java_vframe() {
480 vframe* vf = get_vframe();
481 if (vf == NULL) {
482 _result = JVMTI_ERROR_NO_MORE_FRAMES;
483 return NULL;
484 }
485 javaVFrame *jvf = (javaVFrame*)vf;
486
487 if (!vf->is_java_frame()) {
488 _result = JVMTI_ERROR_OPAQUE_FRAME;
489 return NULL;
490 }
491 return jvf;
492 }
493
494 // Check that the klass is assignable to a type with the given signature.
495 // Another solution could be to use the function Klass::is_subtype_of(type).
496 // But the type class can be forced to load/initialize eagerly in such a case.
497 // This may cause unexpected consequences like CFLH or class-init JVMTI events.
498 // It is better to avoid such a behavior.
is_assignable(const char * ty_sign,Klass * klass,Thread * thread)499 bool VM_GetOrSetLocal::is_assignable(const char* ty_sign, Klass* klass, Thread* thread) {
500 assert(ty_sign != NULL, "type signature must not be NULL");
501 assert(thread != NULL, "thread must not be NULL");
502 assert(klass != NULL, "klass must not be NULL");
503
504 int len = (int) strlen(ty_sign);
505 if (ty_sign[0] == JVM_SIGNATURE_CLASS &&
506 ty_sign[len-1] == JVM_SIGNATURE_ENDCLASS) { // Need pure class/interface name
507 ty_sign++;
508 len -= 2;
509 }
510 TempNewSymbol ty_sym = SymbolTable::new_symbol(ty_sign, len);
511 if (klass->name() == ty_sym) {
512 return true;
513 }
514 // Compare primary supers
515 int super_depth = klass->super_depth();
516 int idx;
517 for (idx = 0; idx < super_depth; idx++) {
518 if (klass->primary_super_of_depth(idx)->name() == ty_sym) {
519 return true;
520 }
521 }
522 // Compare secondary supers
523 const Array<Klass*>* sec_supers = klass->secondary_supers();
524 for (idx = 0; idx < sec_supers->length(); idx++) {
525 if (((Klass*) sec_supers->at(idx))->name() == ty_sym) {
526 return true;
527 }
528 }
529 return false;
530 }
531
// Validate slot index and type against the method's LocalVariableTable.
// Checks error conditions:
//   JVMTI_ERROR_INVALID_SLOT
//   JVMTI_ERROR_TYPE_MISMATCH
// Returns: 'true' - everything is Ok, 'false' - error code

bool VM_GetOrSetLocal::check_slot_type_lvt(javaVFrame* jvf) {
  Method* method = jvf->method();
  jint num_entries = method->localvariable_table_length();
  if (num_entries == 0) {
    _result = JVMTI_ERROR_INVALID_SLOT;
    return false; // There are no slots
  }
  int signature_idx = -1;
  int vf_bci = jvf->bci();
  LocalVariableTableElement* table = method->localvariable_table_start();
  // Find the LVT entry covering this slot at the current bci.
  for (int i = 0; i < num_entries; i++) {
    int start_bci = table[i].start_bci;
    int end_bci = start_bci + table[i].length;

    // Here we assume that locations of LVT entries
    // with the same slot number cannot be overlapped
    if (_index == (jint) table[i].slot && start_bci <= vf_bci && vf_bci <= end_bci) {
      signature_idx = (int) table[i].descriptor_cp_index;
      break;
    }
  }
  if (signature_idx == -1) {
    _result = JVMTI_ERROR_INVALID_SLOT;
    return false; // Incorrect slot index
  }
  Symbol* sign_sym = method->constants()->symbol_at(signature_idx);
  BasicType slot_type = Signature::basic_type(sign_sym);

  // Normalize the declared type: sub-word integers are held as ints and
  // arrays are references.
  switch (slot_type) {
  case T_BYTE:
  case T_SHORT:
  case T_CHAR:
  case T_BOOLEAN:
    slot_type = T_INT;
    break;
  case T_ARRAY:
    slot_type = T_OBJECT;
    break;
  default:
    break;
  };
  if (_type != slot_type) {
    _result = JVMTI_ERROR_TYPE_MISMATCH;
    return false;
  }

  jobject jobj = _value.l;
  if (_set && slot_type == T_OBJECT && jobj != NULL) { // NULL reference is allowed
    // Check that the jobject class matches the return type signature.
    oop obj = JNIHandles::resolve_external_guard(jobj);
    NULL_CHECK(obj, (_result = JVMTI_ERROR_INVALID_OBJECT, false));
    Klass* ob_k = obj->klass();
    NULL_CHECK(ob_k, (_result = JVMTI_ERROR_INVALID_OBJECT, false));

    const char* signature = (const char *) sign_sym->as_utf8();
    if (!is_assignable(signature, ob_k, VMThread::vm_thread())) {
      _result = JVMTI_ERROR_TYPE_MISMATCH;
      return false;
    }
  }
  return true;
}
599
// Slot/type check when no LocalVariableTable is consulted: validate the
// index against max_locals and the type against what the stack walker
// reports for the slot.
bool VM_GetOrSetLocal::check_slot_type_no_lvt(javaVFrame* jvf) {
  Method* method = jvf->method();
  // Longs and doubles occupy two consecutive slots.
  jint extra_slot = (_type == T_LONG || _type == T_DOUBLE) ? 1 : 0;

  if (_index < 0 || _index + extra_slot >= method->max_locals()) {
    _result = JVMTI_ERROR_INVALID_SLOT;
    return false;
  }
  // NOTE(review): reads the member _jvf rather than the 'jvf' parameter;
  // the caller passes _jvf so they coincide -- verify before relying on it.
  StackValueCollection *locals = _jvf->locals();
  BasicType slot_type = locals->at(_index)->type();

  if (slot_type == T_CONFLICT) {
    _result = JVMTI_ERROR_INVALID_SLOT;
    return false;
  }
  if (extra_slot) {
    // The second half of a two-slot value must show up as T_INT.
    BasicType extra_slot_type = locals->at(_index + 1)->type();
    if (extra_slot_type != T_INT) {
      _result = JVMTI_ERROR_INVALID_SLOT;
      return false;
    }
  }
  // Mismatch is tolerated only for a non-object request against a T_INT
  // slot (sub-word values are represented as ints).
  if (_type != slot_type && (_type == T_OBJECT || slot_type != T_INT)) {
    _result = JVMTI_ERROR_TYPE_MISMATCH;
    return false;
  }
  return true;
}
628
// True if 'vf' is a compiled frame whose physical frame can be deoptimized.
static bool can_be_deoptimized(vframe* vf) {
  return (vf->is_compiled_frame() && vf->fr().can_be_deoptimized());
}
632
doit_prologue()633 bool VM_GetOrSetLocal::doit_prologue() {
634 if (!_eb.deoptimize_objects(_depth, _depth)) {
635 // The target frame is affected by a reallocation failure.
636 _result = JVMTI_ERROR_OUT_OF_MEMORY;
637 return false;
638 }
639
640 return true;
641 }
642
// Safepoint operation: read or write the local variable selected by
// _depth/_index/_type in the target thread's frame.  Errors are reported
// through _result rather than a return value.
void VM_GetOrSetLocal::doit() {
  _jvf = _jvf == NULL ? get_java_vframe() : _jvf;
  if (_jvf == NULL) {
    return;
  };

  Method* method = _jvf->method();
  if (getting_receiver()) {
    // Slot 0 ("this") only exists for instance methods.
    if (method->is_static()) {
      _result = JVMTI_ERROR_INVALID_SLOT;
      return;
    }
  } else {
    if (method->is_native()) {
      _result = JVMTI_ERROR_OPAQUE_FRAME;
      return;
    }

    if (!check_slot_type_no_lvt(_jvf)) {
      return;
    }
    if (method->has_localvariable_table() &&
        !check_slot_type_lvt(_jvf)) {
      return;
    }
  }

  InterpreterOopMap oop_mask;
  _jvf->method()->mask_for(_jvf->bci(), &oop_mask);
  if (oop_mask.is_dead(_index)) {
    // The local can be invalid and uninitialized in the scope of current bci
    _result = JVMTI_ERROR_INVALID_SLOT;
    return;
  }
  if (_set) {
    // Force deoptimization of frame if compiled because it's
    // possible the compiler emitted some locals as constant values,
    // meaning they are not mutable.
    if (can_be_deoptimized(_jvf)) {

      // Schedule deoptimization so that eventually the local
      // update will be written to an interpreter frame.
      Deoptimization::deoptimize_frame(_jvf->thread(), _jvf->fr().id());

      // Now store a new value for the local which will be applied
      // once deoptimization occurs. Note however that while this
      // write is deferred until deoptimization actually happens
      // can vframe created after this point will have its locals
      // reflecting this update so as far as anyone can see the
      // write has already taken place.

      // If we are updating an oop then get the oop from the handle
      // since the handle will be long gone by the time the deopt
      // happens. The oop stored in the deferred local will be
      // gc'd on its own.
      if (_type == T_OBJECT) {
        // NOTE(review): cast_from_oop<jobject> stores the raw oop in the
        // jvalue; the deferred-locals machinery presumably treats it as an
        // oop, not a JNI handle -- confirm against update_local().
        _value.l = cast_from_oop<jobject>(JNIHandles::resolve_external_guard(_value.l));
      }
      // Re-read the vframe so we can see that it is deoptimized
      // [ Only need because of assert in update_local() ]
      _jvf = get_java_vframe();
      ((compiledVFrame*)_jvf)->update_local(_type, _index, _value);
      return;
    }
    // Interpreted (or otherwise non-deoptimizable) frame: write the slot
    // directly through the StackValueCollection.
    StackValueCollection *locals = _jvf->locals();
    Thread* current_thread = VMThread::vm_thread();
    HandleMark hm(current_thread);

    switch (_type) {
    case T_INT: locals->set_int_at (_index, _value.i); break;
    case T_LONG: locals->set_long_at (_index, _value.j); break;
    case T_FLOAT: locals->set_float_at (_index, _value.f); break;
    case T_DOUBLE: locals->set_double_at(_index, _value.d); break;
    case T_OBJECT: {
      Handle ob_h(current_thread, JNIHandles::resolve_external_guard(_value.l));
      locals->set_obj_at (_index, ob_h);
      break;
    }
    default: ShouldNotReachHere();
    }
    _jvf->set_locals(locals);
  } else {
    if (_jvf->method()->is_native() && _jvf->is_compiled_frame()) {
      assert(getting_receiver(), "Can only get here when getting receiver");
      oop receiver = _jvf->fr().get_native_receiver();
      _value.l = JNIHandles::make_local(_calling_thread, receiver);
    } else {
      StackValueCollection *locals = _jvf->locals();

      switch (_type) {
      case T_INT: _value.i = locals->int_at (_index); break;
      case T_LONG: _value.j = locals->long_at (_index); break;
      case T_FLOAT: _value.f = locals->float_at (_index); break;
      case T_DOUBLE: _value.d = locals->double_at(_index); break;
      case T_OBJECT: {
        // Wrap the oop to be returned in a local JNI handle since
        // oops_do() no longer applies after doit() is finished.
        oop obj = locals->obj_at(_index)();
        _value.l = JNIHandles::make_local(_calling_thread, obj);
        break;
      }
      default: ShouldNotReachHere();
      }
    }
  }
}
749
750
bool VM_GetOrSetLocal::allow_nested_vm_operations() const {
  return true; // May need to deoptimize
}
754
755
// Getter for the receiver ("this"): an object get of slot 0.
VM_GetReceiver::VM_GetReceiver(
    JavaThread* thread, JavaThread* caller_thread, jint depth)
    : VM_GetOrSetLocal(thread, caller_thread, depth, 0) {}
759
760 /////////////////////////////////////////////////////////////////////////////////////////
761
762 //
763 // class JvmtiSuspendControl - see comments in jvmtiImpl.hpp
764 //
765
// Suspend 'java_thread'; returns false if the thread is exiting and the
// suspension therefore did not take effect.
bool JvmtiSuspendControl::suspend(JavaThread *java_thread) {
  // external suspend should have caught suspending a thread twice

  // Immediate suspension required for JPDA back-end so JVMTI agent threads do
  // not deadlock due to later suspension on transitions while holding
  // raw monitors.  Passing true causes the immediate suspension.
  // java_suspend() will catch threads in the process of exiting
  // and will ignore them.
  java_thread->java_suspend();

  // It would be nice to have the following assertion in all the time,
  // but it is possible for a racing resume request to have resumed
  // this thread right after we suspended it. Temporarily enable this
  // assertion if you are chasing a different kind of bug.
  //
  // assert(java_lang_Thread::thread(java_thread->threadObj()) == NULL ||
  //   java_thread->is_being_ext_suspended(), "thread is not suspended");

  if (java_lang_Thread::thread(java_thread->threadObj()) == NULL) {
    // check again because we can get delayed in java_suspend():
    // the thread is in process of exiting.
    return false;
  }

  return true;
}
792
// Resume a previously suspended thread; always reports success.
bool JvmtiSuspendControl::resume(JavaThread *java_thread) {
  // external suspend should have caught resuming a thread twice
  assert(java_thread->is_being_ext_suspended(), "thread should be suspended");

  // resume thread
  {
    // must always grab Threads_lock, see JVM_SuspendThread
    MutexLocker ml(Threads_lock);
    java_thread->java_resume();
  }

  return true;
}
806
807
// Trace-level dump of all Java threads and their suspend state
// (debug builds only).
void JvmtiSuspendControl::print() {
#ifndef PRODUCT
  ResourceMark rm;
  LogStreamHandle(Trace, jvmti) log_stream;
  log_stream.print("Suspended Threads: [");
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
#ifdef JVMTI_TRACE
    const char *name = JvmtiTrace::safe_get_thread_name(thread);
#else
    const char *name = "";
#endif /*JVMTI_TRACE */
    // 'S' marks an externally suspended thread, '_' a running one.
    log_stream.print("%s(%c ", name, thread->is_being_ext_suspended() ? 'S' : '_');
    if (!thread->has_last_Java_frame()) {
      log_stream.print("no stack");
    }
    log_stream.print(") ");
  }
  log_stream.print_cr("]");
#endif
}
828
compiled_method_load_event(nmethod * nm)829 JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_load_event(
830 nmethod* nm) {
831 JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_LOAD);
832 event._event_data.compiled_method_load = nm;
833 return event;
834 }
835
compiled_method_unload_event(jmethodID id,const void * code)836 JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_unload_event(
837 jmethodID id, const void* code) {
838 JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_UNLOAD);
839 event._event_data.compiled_method_unload.method_id = id;
840 event._event_data.compiled_method_unload.code_begin = code;
841 return event;
842 }
843
dynamic_code_generated_event(const char * name,const void * code_begin,const void * code_end)844 JvmtiDeferredEvent JvmtiDeferredEvent::dynamic_code_generated_event(
845 const char* name, const void* code_begin, const void* code_end) {
846 JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_DYNAMIC_CODE_GENERATED);
847 // Need to make a copy of the name since we don't know how long
848 // the event poster will keep it around after we enqueue the
849 // deferred event and return. strdup() failure is handled in
850 // the post() routine below.
851 event._event_data.dynamic_code_generated.name = os::strdup(name);
852 event._event_data.dynamic_code_generated.code_begin = code_begin;
853 event._event_data.dynamic_code_generated.code_end = code_end;
854 return event;
855 }
856
class_unload_event(const char * name)857 JvmtiDeferredEvent JvmtiDeferredEvent::class_unload_event(const char* name) {
858 JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_CLASS_UNLOAD);
859 // Need to make a copy of the name since we don't know how long
860 // the event poster will keep it around after we enqueue the
861 // deferred event and return. strdup() failure is handled in
862 // the post() routine below.
863 event._event_data.class_unload.name = os::strdup(name);
864 return event;
865 }
866
// Post this deferred event from the service thread, releasing any
// strdup'ed name owned by the event afterwards.
void JvmtiDeferredEvent::post() {
  assert(Thread::current()->is_service_thread(),
         "Service thread must post enqueued events");
  switch(_type) {
    case TYPE_COMPILED_METHOD_LOAD: {
      nmethod* nm = _event_data.compiled_method_load;
      JvmtiExport::post_compiled_method_load(nm);
      break;
    }
    case TYPE_COMPILED_METHOD_UNLOAD: {
      JvmtiExport::post_compiled_method_unload(
        _event_data.compiled_method_unload.method_id,
        _event_data.compiled_method_unload.code_begin);
      break;
    }
    case TYPE_DYNAMIC_CODE_GENERATED: {
      JvmtiExport::post_dynamic_code_generated_internal(
        // if strdup failed give the event a default name
        (_event_data.dynamic_code_generated.name == NULL)
          ? "unknown_code" : _event_data.dynamic_code_generated.name,
        _event_data.dynamic_code_generated.code_begin,
        _event_data.dynamic_code_generated.code_end);
      if (_event_data.dynamic_code_generated.name != NULL) {
        // release our copy
        os::free((void *)_event_data.dynamic_code_generated.name);
      }
      break;
    }
    case TYPE_CLASS_UNLOAD: {
      JvmtiExport::post_class_unload_internal(
        // if strdup failed give the event a default name
        (_event_data.class_unload.name == NULL)
          ? "unknown_class" : _event_data.class_unload.name);
      if (_event_data.class_unload.name != NULL) {
        // release our copy
        os::free((void *)_event_data.class_unload.name);
      }
      break;
    }
    default:
      ShouldNotReachHere();
  }
}
910
post_compiled_method_load_event(JvmtiEnv * env)911 void JvmtiDeferredEvent::post_compiled_method_load_event(JvmtiEnv* env) {
912 assert(_type == TYPE_COMPILED_METHOD_LOAD, "only user of this method");
913 nmethod* nm = _event_data.compiled_method_load;
914 JvmtiExport::post_compiled_method_load(env, nm);
915 }
916
// Run the entry barrier for the nmethod referenced by a deferred
// COMPILED_METHOD_LOAD event; a no-op for other event types.
void JvmtiDeferredEvent::run_nmethod_entry_barriers() {
  if (_type == TYPE_COMPILED_METHOD_LOAD) {
    _event_data.compiled_method_load->run_nmethod_entry_barrier();
  }
}
922
923
// Keep the nmethod for compiled_method_load from being unloaded.
void JvmtiDeferredEvent::oops_do(OopClosure* f, CodeBlobClosure* cf) {
  if (cf != NULL && _type == TYPE_COMPILED_METHOD_LOAD) {
    cf->do_code_blob(_event_data.compiled_method_load);
  }
}
930
// The sweeper calls this and marks the nmethods here on the stack so that
// they cannot be turned into zombies while in the queue.
void JvmtiDeferredEvent::nmethods_do(CodeBlobClosure* cf) {
  if (cf != NULL && _type == TYPE_COMPILED_METHOD_LOAD) {
    cf->do_code_blob(_event_data.compiled_method_load);
  }
}
938
939
// True when there are queued events that are ready to be posted.
bool JvmtiDeferredEventQueue::has_events() {
  // We save the queued events before the live phase and post them when it starts.
  // This code could skip saving the events on the queue before the live
  // phase and ignore them, but this would change how we do things now.
  // Starting the service thread earlier causes this to be called before the live phase begins.
  // The events on the queue should all be posted after the live phase so this is an
  // ok check. Before the live phase, DynamicCodeGenerated events are posted directly.
  // If we add other types of events to the deferred queue, this could get ugly.
  return JvmtiEnvBase::get_phase() == JVMTI_PHASE_LIVE && _queue_head != NULL;
}
950
enqueue(JvmtiDeferredEvent event)951 void JvmtiDeferredEventQueue::enqueue(JvmtiDeferredEvent event) {
952 // Events get added to the end of the queue (and are pulled off the front).
953 QueueNode* node = new QueueNode(event);
954 if (_queue_tail == NULL) {
955 _queue_tail = _queue_head = node;
956 } else {
957 assert(_queue_tail->next() == NULL, "Must be the last element in the list");
958 _queue_tail->set_next(node);
959 _queue_tail = node;
960 }
961
962 assert((_queue_head == NULL) == (_queue_tail == NULL),
963 "Inconsistent queue markers");
964 }
965
dequeue()966 JvmtiDeferredEvent JvmtiDeferredEventQueue::dequeue() {
967 assert(_queue_head != NULL, "Nothing to dequeue");
968
969 if (_queue_head == NULL) {
970 // Just in case this happens in product; it shouldn't but let's not crash
971 return JvmtiDeferredEvent();
972 }
973
974 QueueNode* node = _queue_head;
975 _queue_head = _queue_head->next();
976 if (_queue_head == NULL) {
977 _queue_tail = NULL;
978 }
979
980 assert((_queue_head == NULL) == (_queue_tail == NULL),
981 "Inconsistent queue markers");
982
983 JvmtiDeferredEvent event = node->event();
984 delete node;
985 return event;
986 }
987
post(JvmtiEnv * env)988 void JvmtiDeferredEventQueue::post(JvmtiEnv* env) {
989 // Post and destroy queue nodes
990 while (_queue_head != NULL) {
991 JvmtiDeferredEvent event = dequeue();
992 event.post_compiled_method_load_event(env);
993 }
994 }
995
run_nmethod_entry_barriers()996 void JvmtiDeferredEventQueue::run_nmethod_entry_barriers() {
997 for(QueueNode* node = _queue_head; node != NULL; node = node->next()) {
998 node->event().run_nmethod_entry_barriers();
999 }
1000 }
1001
1002
oops_do(OopClosure * f,CodeBlobClosure * cf)1003 void JvmtiDeferredEventQueue::oops_do(OopClosure* f, CodeBlobClosure* cf) {
1004 for(QueueNode* node = _queue_head; node != NULL; node = node->next()) {
1005 node->event().oops_do(f, cf);
1006 }
1007 }
1008
nmethods_do(CodeBlobClosure * cf)1009 void JvmtiDeferredEventQueue::nmethods_do(CodeBlobClosure* cf) {
1010 for(QueueNode* node = _queue_head; node != NULL; node = node->next()) {
1011 node->event().nmethods_do(cf);
1012 }
1013 }
1014