/*
 * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/pcTasks.hpp"
#include "gc/parallel/psCompactionManager.inline.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "utilities/stack.inline.hpp"

//
// ThreadRootsMarkingTask
//

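// Marks the roots reachable from a single thread: the oops in the thread's
// stack frames and handle areas, and the code blobs the thread is currently
// executing, pushing everything found onto this worker's marking stack.
//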
void ThreadRootsMarkingTask::do_it(GCTaskManager* manager, uint which) {
  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

  ResourceMark rm;

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);

  ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
  MarkingCodeBlobClosure mark_and_push_in_blobs(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations);

  _thread->oops_do(&mark_and_push_closure, &mark_and_push_in_blobs);

  // Do the real work
  cm->follow_marking_stacks();
}

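//
// MarkFromRootsTask
//
// Marks the objects reachable from one kind of strong root, selected by
// _root_type, and then drains this worker's marking stack.
//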
void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);
  ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);

  switch (_root_type) {
    case universe:
      Universe::oops_do(&mark_and_push_closure);
      break;

    case jni_handles:
      JNIHandles::oops_do(&mark_and_push_closure);
      break;

    case threads:
    {
      ResourceMark rm;
      MarkingCodeBlobClosure each_active_code_blob(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations);
      Threads::oops_do(&mark_and_push_closure, &each_active_code_blob);
    }
    break;

    case object_synchronizer:
      ObjectSynchronizer::oops_do(&mark_and_push_closure);
      break;

    case management:
      Management::oops_do(&mark_and_push_closure);
      break;

    case jvmti:
      JvmtiExport::oops_do(&mark_and_push_closure);
      break;

    case system_dictionary:
      SystemDictionary::oops_do(&mark_and_push_closure);
      break;

    case class_loader_data:
      ClassLoaderDataGraph::always_strong_oops_do(&mark_and_push_closure, true);
      break;

    case code_cache:
      // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
      //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(&mark_and_push_closure));
      AOTLoader::oops_do(&mark_and_push_closure);
      break;

    default:
      fatal("Unknown root type");
  }

  // Do the real work
  cm->follow_marking_stacks();
}


//
// RefProcTaskProxy
//

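// Runs one worker's share of reference processing: executes the proxied
// ProcessTask for work unit _work_id, using this worker's mark-and-push and
// follow-stack closures to keep referents alive and drain the marking stack.
//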
void RefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);
  ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
  ParCompactionManager::FollowStackClosure follow_stack_closure(cm);
  _rp_task.work(_work_id, *PSParallelCompact::is_alive_closure(),
                mark_and_push_closure, follow_stack_closure);
}

//
// RefProcTaskExecutor
//

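// Distributes a reference-processing task across the active GC worker
// threads: one RefProcTaskProxy per worker, plus StealMarkingTasks for
// load balancing when the task marks objects alive.
//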
void RefProcTaskExecutor::execute(ProcessTask& task, uint ergo_workers)
{
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  uint active_gc_threads = heap->gc_task_manager()->active_workers();
  assert(active_gc_threads == ergo_workers,
         "Ergonomically chosen workers (%u) must be equal to active workers (%u)",
         ergo_workers, active_gc_threads);
  OopTaskQueueSet* qset = ParCompactionManager::stack_array();
  ParallelTaskTerminator terminator(active_gc_threads, qset);
  GCTaskQueue* q = GCTaskQueue::create();
  for(uint i=0; i<active_gc_threads; i++) {
    q->enqueue(new RefProcTaskProxy(task, i));
  }
  if (task.marks_oops_alive() && (active_gc_threads>1)) {
    for (uint j=0; j<active_gc_threads; j++) {
      q->enqueue(new StealMarkingTask(&terminator));
    }
  }
  PSParallelCompact::gc_task_manager()->execute_and_wait(q);
}

//
// StealMarkingTask
//

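// Work-stealing tail of the marking phase: each worker repeatedly steals
// object-array chunks and individual objects from other workers' queues and
// follows their contents until every worker has offered termination.
//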
StealMarkingTask::StealMarkingTask(ParallelTaskTerminator* t) :
  _terminator(t) {}

void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);
  ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);

  oop obj = NULL;
  ObjArrayTask task;
  int random_seed = 17;
  do {
    while (ParCompactionManager::steal_objarray(which, &random_seed, task)) {
      cm->follow_contents((objArrayOop)task.obj(), task.index());
      cm->follow_marking_stacks();
    }
    while (ParCompactionManager::steal(which, &random_seed, obj)) {
      cm->follow_contents(obj);
      cm->follow_marking_stacks();
    }
  } while (!terminator()->offer_termination());
}

//
// CompactionWithStealingTask
//

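// The parallel region-filling (compaction) phase: each worker drains its own
// preloaded region stack, then steals region indices from other workers and
// fills and updates those regions until all workers agree to terminate.
//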
CompactionWithStealingTask::CompactionWithStealingTask(ParallelTaskTerminator* t):
  _terminator(t) {}

void CompactionWithStealingTask::do_it(GCTaskManager* manager, uint which) {
  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);

  // Drain the stacks that have been preloaded with regions
  // that are ready to fill.

  cm->drain_region_stacks();

  guarantee(cm->region_stack()->is_empty(), "Not empty");

  size_t region_index = 0;
  int random_seed = 17;

  while(true) {
    if (ParCompactionManager::steal(which, &random_seed, region_index)) {
      PSParallelCompact::fill_and_update_region(cm, region_index);
      cm->drain_region_stacks();
    } else {
      if (terminator()->offer_termination()) {
        break;
      }
      // Go around again.
    }
  }
  return;
}

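//
// UpdateDensePrefixTask
//
// Updates the interior oops of objects in the dense prefix of one space,
// over the region range [_region_index_start, _region_index_end).
//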
UpdateDensePrefixTask::UpdateDensePrefixTask(
                                   PSParallelCompact::SpaceId space_id,
                                   size_t region_index_start,
                                   size_t region_index_end) :
  _space_id(space_id), _region_index_start(region_index_start),
  _region_index_end(region_index_end) {}

void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);

  PSParallelCompact::update_and_deadwood_in_dense_prefix(cm,
                                                         _space_id,
                                                         _region_index_start,
                                                         _region_index_end);
}