/*
 * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/dirtyCardQueue.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1HotCardCache.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "runtime/atomic.hpp"

G1HotCardCache::G1HotCardCache(G1CollectedHeap *g1h):
  _g1h(g1h), _hot_cache(NULL), _use_cache(false), _card_counts(g1h) {}

void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
  if (default_use_cache()) {
    _use_cache = true;

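    // The cache size is a power of two so that insert() can map its
    // monotonically increasing claim index onto a slot with a simple mask.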
    _hot_cache_size = (size_t)1 << G1ConcRSLogCacheSize;
    _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size, mtGC);

    reset_hot_cache_internal();

    // For refining the cards in the hot cache in parallel
    _hot_cache_par_chunk_size = (int)(ParallelGCThreads > 0 ? ClaimChunkSize : _hot_cache_size);
    _hot_cache_par_claimed_idx = 0;

    _card_counts.initialize(card_counts_storage);
  }
}

G1HotCardCache::~G1HotCardCache() {
  if (default_use_cache()) {
    assert(_hot_cache != NULL, "Logic");
    FREE_C_HEAP_ARRAY(jbyte*, _hot_cache, mtGC);
  }
}

jbyte* G1HotCardCache::insert(jbyte* card_ptr) {
  uint count = _card_counts.add_card_count(card_ptr);
  if (!_card_counts.is_hot(count)) {
    // The card is not hot, so do not store it in the cache;
    // return it for immediate refining.
    return card_ptr;
  }
  // Otherwise, the card is hot.
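  // Claim a slot by atomically bumping the global index; masking with
  // (_hot_cache_size - 1) maps it into the circular buffer, so the oldest
  // entries are overwritten once the cache wraps around.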
  size_t index = Atomic::add_ptr((intptr_t)1, (volatile intptr_t*)&_hot_cache_idx) - 1;
  size_t masked_index = index & (_hot_cache_size - 1);
  jbyte* current_ptr = _hot_cache[masked_index];

  // Try to store the new card pointer into the cache. Compare-and-swap to guard
  // against the unlikely event of a race in which another card pointer has
  // already been written to this slot. In that case we return card_ptr rather
  // than starting over, which should be OK since card_ptr will likely be the
  // older card anyway when/if this ever happens. On success, the evicted entry
  // (which may be NULL) is returned so that the caller can refine it.
  jbyte* previous_ptr = (jbyte*)Atomic::cmpxchg_ptr(card_ptr,
                                                    &_hot_cache[masked_index],
                                                    current_ptr);
  return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
}

void G1HotCardCache::drain(uint worker_i,
                           G1RemSet* g1rs,
                           DirtyCardQueue* into_cset_dcq) {
  if (!default_use_cache()) {
    assert(_hot_cache == NULL, "Logic");
    return;
  }

  assert(_hot_cache != NULL, "Logic");
  assert(!use_cache(), "cache should be disabled");
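  // Drain may be executed by several GC worker threads at once: each claims a
  // chunk of _hot_cache_par_chunk_size entries by atomically advancing the
  // shared claim index, then refines the cards in its chunk.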
  while (_hot_cache_par_claimed_idx < _hot_cache_size) {
    size_t end_idx = Atomic::add_ptr((intptr_t)_hot_cache_par_chunk_size,
                                     (volatile intptr_t*)&_hot_cache_par_claimed_idx);
    size_t start_idx = end_idx - _hot_cache_par_chunk_size;
    // The current worker has successfully claimed the chunk [start_idx..end_idx)
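    // The last claimed chunk may extend past the end of the cache, so clamp it.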
    end_idx = MIN2(end_idx, _hot_cache_size);
    for (size_t i = start_idx; i < end_idx; i++) {
      jbyte* card_ptr = _hot_cache[i];
      if (card_ptr != NULL) {
        if (g1rs->refine_card(card_ptr, worker_i, true)) {
          // The part of the heap spanned by the card contains references
          // that point into the current collection set.
          // We need to record the card pointer in the DirtyCardQueueSet
          // that we use for such cards.
          //
          // The only time we care about recording cards that contain
          // references that point into the collection set is during
          // RSet updating while within an evacuation pause.
          // In this case worker_i should be the id of a GC worker thread.
          assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
          assert(worker_i < ParallelGCThreads,
                 err_msg("incorrect worker id: %u", worker_i));

          into_cset_dcq->enqueue(card_ptr);
        }
      } else {
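        // Entries are stored contiguously from index 0 after each reset, so a
        // NULL entry means the remainder of the cache was never populated and
        // there is nothing further to refine.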
        break;
      }
    }
  }

  // The existing entries in the hot card cache, which were just refined
  // above, are discarded prior to re-enabling the cache near the end of the GC.
}

void G1HotCardCache::reset_card_counts(HeapRegion* hr) {
  _card_counts.clear_region(hr);
}

void G1HotCardCache::reset_card_counts() {
  _card_counts.clear_all();
}