/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessorStats.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zReferenceProcessor.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "gc/z/zValue.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"

static const ZStatSubPhase ZSubPhaseConcurrentReferencesProcess("Concurrent References Process");
static const ZStatSubPhase ZSubPhaseConcurrentReferencesEnqueue("Concurrent References Enqueue");

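// Returns the ReferenceType (Soft, Weak, Final or Phantom) of the
// given java.lang.ref.Reference instance.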
static ReferenceType reference_type(oop reference) {
  return InstanceKlass::cast(reference->klass())->reference_type();
}

static const char* reference_type_name(ReferenceType type) {
  switch (type) {
  case REF_SOFT:
    return "Soft";

  case REF_WEAK:
    return "Weak";

  case REF_FINAL:
    return "Final";

  case REF_PHANTOM:
    return "Phantom";

  default:
    ShouldNotReachHere();
    return NULL;
  }
}

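// Raw accessors for the referent, discovered and next fields of
// java.lang.ref.Reference. These read and write the fields directly,
// without applying any load barrier.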
static volatile oop* reference_referent_addr(oop reference) {
  return (volatile oop*)java_lang_ref_Reference::referent_addr_raw(reference);
}

static oop reference_referent(oop reference) {
  return *reference_referent_addr(reference);
}

static void reference_set_referent(oop reference, oop referent) {
  java_lang_ref_Reference::set_referent_raw(reference, referent);
}

static oop* reference_discovered_addr(oop reference) {
  return (oop*)java_lang_ref_Reference::discovered_addr_raw(reference);
}

static oop reference_discovered(oop reference) {
  return *reference_discovered_addr(reference);
}

static void reference_set_discovered(oop reference, oop discovered) {
  java_lang_ref_Reference::set_discovered_raw(reference, discovered);
}

static oop* reference_next_addr(oop reference) {
  return (oop*)java_lang_ref_Reference::next_addr_raw(reference);
}

static oop reference_next(oop reference) {
  return *reference_next_addr(reference);
}

static void reference_set_next(oop reference, oop next) {
  java_lang_ref_Reference::set_next_raw(reference, next);
}

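// Advance the SoftReference clock to the current time in milliseconds.
// The clock is consulted by the SoftReference policy when deciding how
// long a softly reachable referent has gone unused.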
static void soft_reference_update_clock() {
  const jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  java_lang_ref_SoftReference::set_clock(now);
}

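// The discovered lists and the encountered/discovered/enqueued counters
// are per-worker values. The internal pending list starts out empty, so
// its tail initially points at the list head.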
ZReferenceProcessor::ZReferenceProcessor(ZWorkers* workers) :
    _workers(workers),
    _soft_reference_policy(NULL),
    _encountered_count(),
    _discovered_count(),
    _enqueued_count(),
    _discovered_list(NULL),
    _pending_list(NULL),
    _pending_list_tail(_pending_list.addr()) {}

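// Install the SoftReference clearing policy for this cycle: clear all
// SoftReferences when requested, otherwise fall back to the default
// LRU policy based on max heap size.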
void ZReferenceProcessor::set_soft_reference_policy(bool clear) {
  static AlwaysClearPolicy always_clear_policy;
  static LRUMaxHeapPolicy lru_max_heap_policy;

  if (clear) {
    log_info(gc, ref)("Clearing All SoftReferences");
    _soft_reference_policy = &always_clear_policy;
  } else {
    _soft_reference_policy = &lru_max_heap_policy;
  }

  _soft_reference_policy->setup();
}

bool ZReferenceProcessor::is_inactive(oop reference, oop referent, ReferenceType type) const {
  if (type == REF_FINAL) {
    // A FinalReference is inactive if its next field is non-null. An application can't
    // call enqueue() or clear() on a FinalReference.
    return reference_next(reference) != NULL;
  } else {
    // A non-FinalReference is inactive if the referent is null. The referent can only
    // be null if the application called Reference.enqueue() or Reference.clear().
    return referent == NULL;
  }
}

bool ZReferenceProcessor::is_strongly_live(oop referent) const {
  return ZHeap::heap()->is_object_strongly_live(ZOop::to_address(referent));
}

bool ZReferenceProcessor::is_softly_live(oop reference, ReferenceType type) const {
  if (type != REF_SOFT) {
    // Not a SoftReference
    return false;
  }

  // Ask SoftReference policy
  const jlong clock = java_lang_ref_SoftReference::clock();
  assert(clock != 0, "Clock not initialized");
  assert(_soft_reference_policy != NULL, "Policy not initialized");
  return !_soft_reference_policy->should_clear_reference(reference, clock);
}

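// A reference should be discovered if it is still active, its referent is
// not strongly reachable, and, for SoftReferences, the policy does not
// want the referent kept alive.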
bool ZReferenceProcessor::should_discover(oop reference, ReferenceType type) const {
  volatile oop* const referent_addr = reference_referent_addr(reference);
  const oop referent = ZBarrier::weak_load_barrier_on_oop_field(referent_addr);

  if (is_inactive(reference, referent, type)) {
    return false;
  }

  if (is_strongly_live(referent)) {
    return false;
  }

  if (is_softly_live(reference, type)) {
    return false;
  }

  // PhantomReferences with finalizable marked referents should technically not have
  // to be discovered. However, InstanceRefKlass::oop_oop_iterate_ref_processing()
  // does not know about the finalizable mark concept, and will therefore mark
  // referents in non-discovered PhantomReferences as strongly live. To prevent
  // this, we always discover PhantomReferences with finalizable marked referents.
  // They will automatically be dropped during the reference processing phase.
  return true;
}

bool ZReferenceProcessor::should_drop(oop reference, ReferenceType type) const {
  // This check is racing with a call to Reference.clear() from the application.
  // If the application clears the reference after this check it will still end
  // up on the pending list, and there's nothing we can do about that without
  // changing the Reference.clear() API. This check is also racing with a call
  // to Reference.enqueue() from the application, which is unproblematic, since
  // the application wants the reference to be enqueued anyway.
  const oop referent = reference_referent(reference);
  if (referent == NULL) {
    // Reference has been cleared by a call to Reference.enqueue()
    // or Reference.clear() from the application, which means we
    // should drop the reference.
    return true;
  }

  // Check if the referent is still alive, in which case we should
  // drop the reference.
  if (type == REF_PHANTOM) {
    return ZBarrier::is_alive_barrier_on_phantom_oop(referent);
  } else {
    return ZBarrier::is_alive_barrier_on_weak_oop(referent);
  }
}

void ZReferenceProcessor::keep_alive(oop reference, ReferenceType type) const {
  volatile oop* const p = reference_referent_addr(reference);
  if (type == REF_PHANTOM) {
    ZBarrier::keep_alive_barrier_on_phantom_oop_field(p);
  } else {
    ZBarrier::keep_alive_barrier_on_weak_oop_field(p);
  }
}

void ZReferenceProcessor::make_inactive(oop reference, ReferenceType type) const {
  if (type == REF_FINAL) {
    // Don't clear referent. It is needed by the Finalizer thread to make the call
    // to finalize(). A FinalReference is instead made inactive by self-looping the
    // next field. An application can't call FinalReference.enqueue(), so there is
    // no race to worry about when setting the next field.
    assert(reference_next(reference) == NULL, "Already inactive");
    reference_set_next(reference, reference);
  } else {
    // Clear referent
    reference_set_referent(reference, NULL);
  }
}

void ZReferenceProcessor::discover(oop reference, ReferenceType type) {
  log_trace(gc, ref)("Discovered Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));

  // Update statistics
  _discovered_count.get()[type]++;

  if (type == REF_FINAL) {
    // Mark referent (and its reachable subgraph) finalizable. This avoids
    // the problem of later having to mark those objects if the referent is
    // still final reachable during processing.
    volatile oop* const referent_addr = reference_referent_addr(reference);
    ZBarrier::mark_barrier_on_oop_field(referent_addr, true /* finalizable */);
  }

  // Add reference to discovered list
  assert(reference_discovered(reference) == NULL, "Already discovered");
  oop* const list = _discovered_list.addr();
  reference_set_discovered(reference, *list);
  *list = reference;
}

bool ZReferenceProcessor::discover_reference(oop reference, ReferenceType type) {
  if (!RegisterReferences) {
    // Reference processing disabled
    return false;
  }

  log_trace(gc, ref)("Encountered Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));

  // Update statistics
  _encountered_count.get()[type]++;

  if (!should_discover(reference, type)) {
    // Not discovered
    return false;
  }

  discover(reference, type);

  // Discovered
  return true;
}

oop ZReferenceProcessor::drop(oop reference, ReferenceType type) {
  log_trace(gc, ref)("Dropped Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));

  // Keep referent alive
  keep_alive(reference, type);

  // Unlink and return next in list
  const oop next = reference_discovered(reference);
  reference_set_discovered(reference, NULL);
  return next;
}

oop* ZReferenceProcessor::keep(oop reference, ReferenceType type) {
  log_trace(gc, ref)("Enqueued Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));

  // Update statistics
  _enqueued_count.get()[type]++;

  // Make reference inactive
  make_inactive(reference, type);

  // Return next in list
  return reference_discovered_addr(reference);
}

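// Process this worker's discovered list. References whose referents are
// still alive, or that have already been cleared or enqueued by the
// application, are dropped. The rest are made inactive, and the whole
// list is then prepended, in one atomic exchange, to the shared internal
// pending list.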
void ZReferenceProcessor::work() {
  // Process discovered references
  oop* const list = _discovered_list.addr();
  oop* p = list;

  while (*p != NULL) {
    const oop reference = *p;
    const ReferenceType type = reference_type(reference);

    if (should_drop(reference, type)) {
      *p = drop(reference, type);
    } else {
      p = keep(reference, type);
    }
  }

  // Prepend discovered references to internal pending list
  if (*list != NULL) {
    *p = Atomic::xchg(_pending_list.addr(), *list);
    if (*p == NULL) {
      // First to prepend to list, record tail
      _pending_list_tail = p;
    }

    // Clear discovered list
    *list = NULL;
  }
}

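// Returns true if all per-worker discovered lists and the internal
// pending list are empty.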
bool ZReferenceProcessor::is_empty() const {
  ZPerWorkerConstIterator<oop> iter(&_discovered_list);
  for (const oop* list; iter.next(&list);) {
    if (*list != NULL) {
      return false;
    }
  }

  if (_pending_list.get() != NULL) {
    return false;
  }

  return true;
}

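// Reset the per-worker encountered/discovered/enqueued counters for all
// reference types. Only valid when all lists are empty.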
void ZReferenceProcessor::reset_statistics() {
  assert(is_empty(), "Should be empty");

  // Reset encountered
  ZPerWorkerIterator<Counters> iter_encountered(&_encountered_count);
  for (Counters* counters; iter_encountered.next(&counters);) {
    for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
      (*counters)[i] = 0;
    }
  }

  // Reset discovered
  ZPerWorkerIterator<Counters> iter_discovered(&_discovered_count);
  for (Counters* counters; iter_discovered.next(&counters);) {
    for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
      (*counters)[i] = 0;
    }
  }

  // Reset enqueued
  ZPerWorkerIterator<Counters> iter_enqueued(&_enqueued_count);
  for (Counters* counters; iter_enqueued.next(&counters);) {
    for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
      (*counters)[i] = 0;
    }
  }
}

void ZReferenceProcessor::collect_statistics() {
  Counters encountered = {};
  Counters discovered = {};
  Counters enqueued = {};

  // Sum encountered
  ZPerWorkerConstIterator<Counters> iter_encountered(&_encountered_count);
  for (const Counters* counters; iter_encountered.next(&counters);) {
    for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
      encountered[i] += (*counters)[i];
    }
  }

  // Sum discovered
  ZPerWorkerConstIterator<Counters> iter_discovered(&_discovered_count);
  for (const Counters* counters; iter_discovered.next(&counters);) {
    for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
      discovered[i] += (*counters)[i];
    }
  }

  // Sum enqueued
  ZPerWorkerConstIterator<Counters> iter_enqueued(&_enqueued_count);
  for (const Counters* counters; iter_enqueued.next(&counters);) {
    for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
      enqueued[i] += (*counters)[i];
    }
  }

  // Update statistics
  ZStatReferences::set_soft(encountered[REF_SOFT], discovered[REF_SOFT], enqueued[REF_SOFT]);
  ZStatReferences::set_weak(encountered[REF_WEAK], discovered[REF_WEAK], enqueued[REF_WEAK]);
  ZStatReferences::set_final(encountered[REF_FINAL], discovered[REF_FINAL], enqueued[REF_FINAL]);
  ZStatReferences::set_phantom(encountered[REF_PHANTOM], discovered[REF_PHANTOM], enqueued[REF_PHANTOM]);

  // Trace statistics
  const ReferenceProcessorStats stats(discovered[REF_SOFT],
                                      discovered[REF_WEAK],
                                      discovered[REF_FINAL],
                                      discovered[REF_PHANTOM]);
  ZTracer::tracer()->report_gc_reference_stats(stats);
}

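// ZTask that runs ZReferenceProcessor::work() in the GC worker threads,
// so the per-worker discovered lists are processed in parallel.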
class ZReferenceProcessorTask : public ZTask {
private:
  ZReferenceProcessor* const _reference_processor;

public:
  ZReferenceProcessorTask(ZReferenceProcessor* reference_processor) :
      ZTask("ZReferenceProcessorTask"),
      _reference_processor(reference_processor) {}

  virtual void work() {
    _reference_processor->work();
  }
};

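// Concurrently process all discovered references, then update the
// SoftReference clock and collect statistics for this cycle.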
void ZReferenceProcessor::process_references() {
  ZStatTimer timer(ZSubPhaseConcurrentReferencesProcess);

  // Process discovered lists
  ZReferenceProcessorTask task(this);
  _workers->run_concurrent(&task);

  // Update SoftReference clock
  soft_reference_update_clock();

  // Collect, log and trace statistics
  collect_statistics();
}

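// Hand the internal pending list over to the external pending list in
// java.lang.ref.Reference and notify the ReferenceHandler thread, which
// enqueues the references on their respective ReferenceQueues.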
void ZReferenceProcessor::enqueue_references() {
  ZStatTimer timer(ZSubPhaseConcurrentReferencesEnqueue);

  if (_pending_list.get() == NULL) {
    // Nothing to enqueue
    return;
  }

  {
    // Heap_lock protects external pending list
    MonitorLocker ml(Heap_lock);

    // Prepend internal pending list to external pending list
    *_pending_list_tail = Universe::swap_reference_pending_list(_pending_list.get());

    // Notify ReferenceHandler thread
    ml.notify_all();
  }

  // Reset internal pending list
  _pending_list.set(NULL);
  _pending_list_tail = _pending_list.addr();
}