/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/preserveException.hpp"

// Implementation of InterfaceSupport

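// The section below, up to the matching '#endif // ASSERT', exists only in
// debug builds: flag-driven stress and verification hooks (VerifyLastFrame,
// WalkStackALot, DeoptimizeALot, ZombieALot, ScavengeALot, FullGCALot, ...)
// that are meant to be exercised as threads cross VM entry points.
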
#ifdef ASSERT
VMEntryWrapper::VMEntryWrapper() {
  if (VerifyLastFrame) {
    InterfaceSupport::verify_last_frame();
  }
}

VMEntryWrapper::~VMEntryWrapper() {
  InterfaceSupport::check_gc_alot();
  if (WalkStackALot) {
    InterfaceSupport::walk_stack();
  }
  if (DeoptimizeALot || DeoptimizeRandom) {
    InterfaceSupport::deoptimizeAll();
  }
  if (ZombieALot) {
    InterfaceSupport::zombieAll();
  }
  // do verification AFTER potential deoptimization
  if (VerifyStack) {
    InterfaceSupport::verify_stack();
  }
}
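
// A minimal usage sketch (hypothetical, simplified): the wrapper is intended
// to be stack-allocated inside a runtime entry so that the checks run on
// entry (constructor) and again on return (destructor), e.g.:
//
//   void example_runtime_entry(JavaThread* current) {
//     debug_only(VMEntryWrapper __vew;)   // stress checks bracket the body
//     /* ... entry body ... */
//   }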

VMNativeEntryWrapper::VMNativeEntryWrapper() {
  if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
}

VMNativeEntryWrapper::~VMNativeEntryWrapper() {
  if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
}

unsigned int InterfaceSupport::_scavenge_alot_counter = 1;
unsigned int InterfaceSupport::_fullgc_alot_counter   = 1;
int InterfaceSupport::_fullgc_alot_invocation = 0;

Histogram* RuntimeHistogram;

RuntimeHistogramElement::RuntimeHistogramElement(const char* elementName) {
  static volatile int RuntimeHistogram_lock = 0;
  _name = elementName;
  uintx count = 0;

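  // Classic test-and-test-and-set spin lock: the cmpxchg tries to flip the
  // lock word from 0 to 1 (it returns the value it observed, so non-zero
  // means another thread holds the lock); the inner loop then spins with
  // read-only acquiring loads until the word drops back to 0, which keeps
  // the contended cache line read-shared instead of being written on every
  // iteration. Atomic::dec at the end releases the lock (1 -> 0).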
  while (Atomic::cmpxchg(&RuntimeHistogram_lock, 0, 1) != 0) {
    while (Atomic::load_acquire(&RuntimeHistogram_lock) != 0) {
      count += 1;
      if ((WarnOnStalledSpinLock > 0) &&
          (count % WarnOnStalledSpinLock == 0)) {
        warning("RuntimeHistogram_lock seems to be stalled");
      }
    }
  }

  if (RuntimeHistogram == NULL) {
    RuntimeHistogram = new Histogram("VM Runtime Call Counts", 200);
  }

  RuntimeHistogram->add_element(this);
  Atomic::dec(&RuntimeHistogram_lock);
}
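
// Usage sketch (hypothetical): an instrumented runtime entry would typically
// create one function-local static element per call site, e.g.
//
//   static RuntimeHistogramElement* e = new RuntimeHistogramElement("name");
//
// so the spin lock above only serializes the one-time registration of each
// element into the global RuntimeHistogram.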

void InterfaceSupport::gc_alot() {
  Thread* thread = Thread::current();
  if (!thread->is_Java_thread()) return; // Avoid concurrent calls
  // Check for a new, not yet fully initialized thread; a thread in "new" mode cannot initiate a GC.
  JavaThread* current_thread = thread->as_Java_thread();
  if (current_thread->active_handles() == NULL) return;

  // Short-circuit any possible re-entrant gc-a-lot attempt
  if (thread->skip_gcalot()) return;

  if (Threads::is_vm_complete()) {

    if (++_fullgc_alot_invocation < FullGCALotStart) {
      return;
    }

    // Use this line if you want to stop at a specific point,
    // e.g. one number_of_calls/scavenge/gc before the run got into problems
    if (FullGCALot) _fullgc_alot_counter--;

    // Check if we should force a full gc
    if (_fullgc_alot_counter == 0) {
      // Release the dummy so objects are forced to move
      if (!Universe::release_fullgc_alot_dummy()) {
        warning("FullGCALot: Unable to release more dummies at bottom of heap");
      }
      HandleMark hm(thread);
      Universe::heap()->collect(GCCause::_full_gc_alot);
      unsigned int invocations = Universe::heap()->total_full_collections();
      // Compute new interval
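      // os::random() returns a non-negative jint, so random/(max_jint+1.0)
      // lies in [0, 1) and the interval below is roughly uniform in
      // [1, FullGCALotInterval]. The same scheme is used for
      // ScavengeALotInterval further down.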
      if (FullGCALotInterval > 1) {
        _fullgc_alot_counter = 1 + (unsigned int)((double)FullGCALotInterval * os::random() / (max_jint + 1.0));
        log_trace(gc)("Full gc no: %u\tInterval: %u", invocations, _fullgc_alot_counter);
      } else {
        _fullgc_alot_counter = 1;
      }
      // Print progress message
      if (invocations % 100 == 0) {
        log_trace(gc)("Full gc no: %u", invocations);
      }
    } else {
      if (ScavengeALot) _scavenge_alot_counter--;
      // Check if we should force a scavenge
      if (_scavenge_alot_counter == 0) {
        HandleMark hm(thread);
        Universe::heap()->collect(GCCause::_scavenge_alot);
        unsigned int invocations = Universe::heap()->total_collections() - Universe::heap()->total_full_collections();
        // Compute new interval
        if (ScavengeALotInterval > 1) {
          _scavenge_alot_counter = 1 + (unsigned int)((double)ScavengeALotInterval * os::random() / (max_jint + 1.0));
          log_trace(gc)("Scavenge no: %u\tInterval: %u", invocations, _scavenge_alot_counter);
        } else {
          _scavenge_alot_counter = 1;
        }
        // Print progress message
        if (invocations % 1000 == 0) {
          log_trace(gc)("Scavenge no: %u", invocations);
        }
      }
    }
  }
}


vframe* vframe_array[50];
int walk_stack_counter = 0;
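
// WalkStackALot scratch state: the walk exists purely to exercise vframe
// iteration in debug builds; at most the first 50 frames are recorded into
// vframe_array and the results are not consumed further here.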

void InterfaceSupport::walk_stack_from(vframe* start_vf) {
  // walk
  int i = 0;
  for (vframe* f = start_vf; f != NULL; f = f->sender()) {
    if (i < 50) vframe_array[i++] = f;
  }
}


void InterfaceSupport::walk_stack() {
  JavaThread* thread = JavaThread::current();
  walk_stack_counter++;
  if (!thread->has_last_Java_frame()) return;
  ResourceMark rm(thread);
  RegisterMap reg_map(thread);
  walk_stack_from(thread->last_java_vframe(&reg_map));
}

// Invocation counters for the InterfaceSupport::deoptimizeAll/zombieAll functions
int deoptimizeAllCounter = 0;
int zombieAllCounter = 0;

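// With N running threads, zombieAllCounter / N reaches ZombieALotInterval
// only after roughly N * ZombieALotInterval transitions in total, i.e. about
// ZombieALotInterval transitions per thread, independent of N.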
void InterfaceSupport::zombieAll() {
  // This method is called by all threads when a thread makes a
  // transition to VM state (for example, runtime calls).
  // Divide the number of calls by the number of threads to avoid
  // dependence of ZombieAll event frequency on the number of threads.
  int value = zombieAllCounter / Threads::number_of_threads();
  if (is_init_completed() && value > ZombieALotInterval) {
    zombieAllCounter = 0;
    VM_ZombieAll op;
    VMThread::execute(&op);
  }
  zombieAllCounter++;
}
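// DeoptimizeALot triggers on the same per-thread averaging as zombieAll
// above; DeoptimizeRandom instead matches the low 5 bits of value against a
// fresh os::random(), i.e. roughly a 1-in-32 chance on any given call once
// initialization has completed.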
void InterfaceSupport::deoptimizeAll() {
  // This method is called by all threads when a thread makes a
  // transition to VM state (for example, runtime calls).
  // Divide the number of calls by the number of threads to avoid
  // dependence of DeoptimizeAll event frequency on the number of threads.
  int value = deoptimizeAllCounter / Threads::number_of_threads();
  if (is_init_completed()) {
    if (DeoptimizeALot && value > DeoptimizeALotInterval) {
      deoptimizeAllCounter = 0;
      VM_DeoptimizeAll op;
      VMThread::execute(&op);
    } else if (DeoptimizeRandom && (value & 0x1F) == (os::random() & 0x1F)) {
      VM_DeoptimizeAll op;
      VMThread::execute(&op);
    }
  }
  deoptimizeAllCounter++;
}


void InterfaceSupport::verify_stack() {
  JavaThread* thread = JavaThread::current();
  ResourceMark rm(thread);
  // disabled because it throws warnings that oop maps should only be accessed
  // in VM thread or during debugging

  if (!thread->has_pending_exception()) {
    // verification does not work if there are pending exceptions
    StackFrameStream sfs(thread, true /* update */, true /* process_frames */);
    CodeBlob* cb = sfs.current()->cb();
    // In case of exceptions we might not have a runtime stub on top of the
    // stack; the callee-saved registers will then not be set up correctly,
    // so we cannot verify the stack.
    if (cb != NULL && !(cb->is_runtime_stub() || cb->is_uncommon_trap_stub())) return;

    for (; !sfs.is_done(); sfs.next()) {
      sfs.current()->verify(sfs.register_map());
    }
  }
}


void InterfaceSupport::verify_last_frame() {
  JavaThread* thread = JavaThread::current();
  ResourceMark rm(thread);
  RegisterMap reg_map(thread);
  frame fr = thread->last_frame();
  fr.verify(&reg_map);
}


#endif // ASSERT


void InterfaceSupport_init() {
#ifdef ASSERT
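  // A hedged note (assumption): srand seeds the C library rand(), so seeding
  // it from the product of the ALot intervals presumably makes any
  // rand()-based stress behavior repeatable for a given flag configuration;
  // the os::random() used above maintains its own separate seed.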
  if (ScavengeALot || FullGCALot) {
    srand(ScavengeALotInterval * FullGCALotInterval);
  }
#endif
}
270