/*
 * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/mutableSpace.hpp"
#include "gc/shared/pretouchTask.hpp"
#include "gc/shared/spaceDecorator.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

MutableSpace::MutableSpace(size_t alignment) :
  _mangler(NULL),
  _last_setup_region(),
  _alignment(alignment),
  _bottom(NULL),
  _top(NULL),
  _end(NULL)
{
  assert(MutableSpace::alignment() % os::vm_page_size() == 0,
         "Space should be aligned");
  _mangler = new MutableSpaceMangler(this);
}

MutableSpace::~MutableSpace() {
  delete _mangler;
}

void MutableSpace::numa_setup_pages(MemRegion mr, bool clear_space) {
  if (!mr.is_empty()) {
    size_t page_size = UseLargePages ? alignment() : os::vm_page_size();
    HeapWord *start = align_up(mr.start(), page_size);
    HeapWord *end = align_down(mr.end(), page_size);
    if (end > start) {
      size_t size = pointer_delta(end, start, sizeof(char));
      if (clear_space) {
        // Prefer page reallocation to migration.
        os::free_memory((char*)start, size, page_size);
      }
      os::numa_make_global((char*)start, size);
    }
  }
}
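
// For illustration: numa_setup_pages() only touches pages that lie entirely
// inside the given region. Assuming a 4K page size, a region spanning
// [0x70000800, 0x70005800) is trimmed by align_up()/align_down() to
// [0x70001000, 0x70005000); the partial pages at either end are left alone
// so that neighboring spaces are not affected.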

void MutableSpace::initialize(MemRegion mr,
                              bool clear_space,
                              bool mangle_space,
                              bool setup_pages,
                              WorkGang* pretouch_gang) {

  assert(Universe::on_page_boundary(mr.start()) && Universe::on_page_boundary(mr.end()),
         "invalid space boundaries");

  if (setup_pages && (UseNUMA || AlwaysPreTouch)) {
    // The space may move left and right or expand/shrink.
    // We'd like to enforce the desired page placement.
    MemRegion head, tail;
    if (last_setup_region().is_empty()) {
      // If it's the first initialization don't limit the amount of work.
      head = mr;
      tail = MemRegion(mr.end(), mr.end());
    } else {
      // Is there an intersection with the address space?
      MemRegion intersection = last_setup_region().intersection(mr);
      if (intersection.is_empty()) {
        intersection = MemRegion(mr.end(), mr.end());
      }
      // All the sizes below are in words.
      size_t head_size = 0, tail_size = 0;
      if (mr.start() <= intersection.start()) {
        head_size = pointer_delta(intersection.start(), mr.start());
      }
      if (intersection.end() <= mr.end()) {
        tail_size = pointer_delta(mr.end(), intersection.end());
      }
      // Limit the amount of page manipulation if necessary.
      if (NUMASpaceResizeRate > 0 && !AlwaysPreTouch) {
        const size_t change_size = head_size + tail_size;
        const float setup_rate_words = NUMASpaceResizeRate >> LogBytesPerWord;
        head_size = MIN2((size_t)(setup_rate_words * head_size / change_size),
                         head_size);
        tail_size = MIN2((size_t)(setup_rate_words * tail_size / change_size),
                         tail_size);
      }
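      // Worked example of the limiting above, assuming NUMASpaceResizeRate is
      // 1M bytes and 8-byte words: setup_rate_words is 128K words. With
      // head_size = 300K words and tail_size = 100K words (change_size = 400K),
      // head_size is scaled down to 96K and tail_size to 32K words, i.e. about
      // 1M bytes of pages per resize; the remainder falls outside
      // last_setup_region() and is picked up by later calls.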
      head = MemRegion(intersection.start() - head_size, intersection.start());
      tail = MemRegion(intersection.end(), intersection.end() + tail_size);
    }
    assert(mr.contains(head) && mr.contains(tail), "Sanity");

    if (UseNUMA) {
      numa_setup_pages(head, clear_space);
      numa_setup_pages(tail, clear_space);
    }

    if (AlwaysPreTouch) {
      size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();

      PretouchTask::pretouch("ParallelGC PreTouch head", (char*)head.start(), (char*)head.end(),
                             page_size, pretouch_gang);

      PretouchTask::pretouch("ParallelGC PreTouch tail", (char*)tail.start(), (char*)tail.end(),
                             page_size, pretouch_gang);
    }

    // Remember where we stopped so that we can continue later.
    set_last_setup_region(MemRegion(head.start(), tail.end()));
  }

  set_bottom(mr.start());
  // When expanding concurrently with callers of cas_allocate, setting end
  // makes the new space available for allocation by other threads. So this
  // assignment must follow all other configuration and initialization that
  // might be done for expansion.
  Atomic::release_store(end_addr(), mr.end());

  if (clear_space) {
    clear(mangle_space);
  }
}

void MutableSpace::clear(bool mangle_space) {
  set_top(bottom());
  if (ZapUnusedHeapArea && mangle_space) {
    mangle_unused_area();
  }
}

#ifndef PRODUCT
void MutableSpace::check_mangled_unused_area(HeapWord* limit) {
  mangler()->check_mangled_unused_area(limit);
}

void MutableSpace::check_mangled_unused_area_complete() {
  mangler()->check_mangled_unused_area_complete();
}

// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
void MutableSpace::mangle_unused_area() {
  mangler()->mangle_unused_area();
}

void MutableSpace::mangle_unused_area_complete() {
  mangler()->mangle_unused_area_complete();
}

void MutableSpace::mangle_region(MemRegion mr) {
  SpaceMangler::mangle_region(mr);
}

void MutableSpace::set_top_for_allocations(HeapWord* v) {
  mangler()->set_top_for_allocations(v);
}

void MutableSpace::set_top_for_allocations() {
  mangler()->set_top_for_allocations(top());
}
#endif

HeapWord* MutableSpace::cas_allocate(size_t size) {
  do {
    // Read top before end, else the range check may pass when it shouldn't.
    // If end is read first, other threads may advance end and top such that
    // current top > old end and current top + size > current end. Then
    // pointer_delta underflows, allowing installation of top > current end.
    HeapWord* obj = Atomic::load_acquire(top_addr());
    if (pointer_delta(end(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = Atomic::cmpxchg(top_addr(), obj, new_top);
      // result can be one of two:
      //   the old top value: the exchange succeeded and the block is ours
      //   otherwise: another thread changed top first, so retry
      if (result != obj) {
        continue; // another thread beat us to the allocation, try again
      }
      assert(is_object_aligned(obj) && is_object_aligned(new_top),
             "checking alignment");
      return obj;
    } else {
      return NULL;
    }
  } while (true);
}
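
// A minimal caller sketch for cas_allocate() (hypothetical; the real callers
// are the parallel GC allocation paths):
//
//   HeapWord* p = space->cas_allocate(word_size);
//   if (p == NULL) {
//     // The space is full (or lost the race for the remaining words);
//     // fall back to a slower path, e.g. expansion or another space.
//   }
//
// On success the returned block starts object-aligned (see the assert above)
// and contains word_size uninitialized words.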

// Try to deallocate previous allocation. Returns true upon success.
bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
  HeapWord* expected_top = obj + size;
  return Atomic::cmpxchg(top_addr(), expected_top, obj) == expected_top;
}
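
// A minimal sketch of the intended undo pattern, assuming obj was just
// returned by cas_allocate(obj_size) on this same space:
//
//   if (!space->cas_deallocate(obj, obj_size)) {
//     // Another thread has already advanced top past obj + obj_size, so the
//     // words cannot be handed back; the caller must make them parseable in
//     // some other way (e.g. by filling them with a dummy object).
//   }
//
// The CAS succeeds only while obj is still the most recent allocation.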

// Only used by oldgen allocation.
bool MutableSpace::needs_expand(size_t word_size) const {
  assert_lock_strong(ExpandHeap_lock);
  // Holding the lock means end is stable. So while top may be advancing
  // via concurrent allocations, there is no need to order the reads of top
  // and end here, unlike in cas_allocate.
  return pointer_delta(end(), top()) < word_size;
}

void MutableSpace::oop_iterate(OopIterateClosure* cl) {
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate(), but this is easier.
  while (obj_addr < t) {
    obj_addr += cast_to_oop(obj_addr)->oop_iterate_size(cl);
  }
}

void MutableSpace::object_iterate(ObjectClosure* cl) {
  HeapWord* p = bottom();
  while (p < top()) {
    cl->do_object(cast_to_oop(p));
    p += cast_to_oop(p)->size();
  }
}
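
// A minimal sketch of a closure that could be handed to object_iterate(),
// assuming a hypothetical caller that just counts the objects between
// bottom() and top():
//
//   class CountObjectsClosure : public ObjectClosure {
//    public:
//     size_t _count = 0;
//     void do_object(oop obj) { _count++; }
//   };
//
//   CountObjectsClosure cl;
//   space->object_iterate(&cl);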

void MutableSpace::print_short() const { print_short_on(tty); }
void MutableSpace::print_short_on(outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %d%% used", capacity_in_bytes() / K,
            (int) ((double) used_in_bytes() * 100 / capacity_in_bytes()));
}

void MutableSpace::print() const { print_on(tty); }
void MutableSpace::print_on(outputStream* st) const {
  MutableSpace::print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT "," INTPTR_FORMAT "," INTPTR_FORMAT ")",
               p2i(bottom()), p2i(top()), p2i(end()));
}

void MutableSpace::verify() {
  HeapWord* p = bottom();
  HeapWord* t = top();
  HeapWord* prev_p = NULL;
  while (p < t) {
    oopDesc::verify(cast_to_oop(p));
    prev_p = p;
    p += cast_to_oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
}