/*
 * Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "runtime/os.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memTracker.hpp"
#include "services/threadStackTracker.hpp"
#include "services/virtualMemoryTracker.hpp"
#include "utilities/ostream.hpp"

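// Static storage for the virtual memory summary snapshot. It is declared as a
// raw size_t array so that it needs no static constructor; initialize() builds
// the VirtualMemorySnapshot in place via placement new.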
size_t VirtualMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];

void VirtualMemorySummary::initialize() {
  assert(sizeof(_snapshot) >= sizeof(VirtualMemorySnapshot), "Sanity Check");
  // Use placement operator new to initialize static data area.
  ::new ((void*)_snapshot) VirtualMemorySnapshot();
}

void VirtualMemorySummary::snapshot(VirtualMemorySnapshot* s) {
  // Only if thread stack is backed by virtual memory
  if (ThreadStackTracker::track_as_vm()) {
    // Snapshot current thread stacks
    VirtualMemoryTracker::snapshot_thread_stacks();
  }
  as_snapshot()->copy_to(s);
}

SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;

int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
  return r1.compare(r2);
}

int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
  return r1.compare(r2);
}

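// Helper predicates for committed-region bookkeeping: a committed region can
// absorb [addr, addr + size) only if the two ranges are adjacent and were
// committed from the same call stack; it is a duplicate of the range only if
// the ranges are identical and the call stacks match.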
static bool is_mergeable_with(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
  return rgn->adjacent_to(addr, size) && rgn->call_stack()->equals(stack);
}

static bool is_same_as(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
  // It would have made sense to use rgn->equals(...), but equals returns true for overlapping regions.
  return rgn->same_region(addr, size) && rgn->call_stack()->equals(stack);
}

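// Walk the sorted committed-region list starting at 'from' and return the last
// node whose region ends at or before 'addr', i.e. the node after which a
// region starting at 'addr' would be linked. Returns NULL if no such node exists.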
static LinkedListNode<CommittedMemoryRegion>* find_preceding_node_from(LinkedListNode<CommittedMemoryRegion>* from, address addr) {
  LinkedListNode<CommittedMemoryRegion>* preceding = NULL;

  for (LinkedListNode<CommittedMemoryRegion>* node = from; node != NULL; node = node->next()) {
    CommittedMemoryRegion* rgn = node->data();

    // We searched past the region start.
    if (rgn->end() > addr) {
      break;
    }

    preceding = node;
  }

  return preceding;
}

static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, address addr, size_t size, const NativeCallStack& stack) {
  if (node != NULL) {
    CommittedMemoryRegion* rgn = node->data();

    if (is_mergeable_with(rgn, addr, size, stack)) {
      rgn->expand_region(addr, size);
      return true;
    }
  }

  return false;
}

static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, LinkedListNode<CommittedMemoryRegion>* other) {
  if (other == NULL) {
    return false;
  }

  CommittedMemoryRegion* rgn = other->data();
  return try_merge_with(node, rgn->base(), rgn->size(), *rgn->call_stack());
}

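// Record that [addr, addr + size) within this reserved region has been
// committed. Previously recorded sub-ranges that overlap the new range are
// removed first, then the range is merged with its neighbors where possible,
// so the committed list stays sorted, non-overlapping and coalesced.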
bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(contain_region(addr, size), "Not contain this region");

  // Find the region that fully precedes the [addr, addr + size) region.
  LinkedListNode<CommittedMemoryRegion>* prev = find_preceding_node_from(_committed_regions.head(), addr);
  LinkedListNode<CommittedMemoryRegion>* next = (prev != NULL ? prev->next() : _committed_regions.head());

  if (next != NULL) {
    // Ignore request if region already exists.
    if (is_same_as(next->data(), addr, size, stack)) {
      return true;
    }

    // The new region is after prev, and either overlaps with the
    // next region (and maybe more regions), or overlaps with no region.
    if (next->data()->overlap_region(addr, size)) {
      // Remove _all_ overlapping regions, and parts of regions,
      // in preparation for the addition of this new region.
      remove_uncommitted_region(addr, size);

      // The remove could have split a region into two and created a
      // new prev region. Need to reset the prev and next pointers.
      prev = find_preceding_node_from((prev != NULL ? prev : _committed_regions.head()), addr);
      next = (prev != NULL ? prev->next() : _committed_regions.head());
    }
  }

  // At this point the previous overlapping regions have been
  // cleared, and the full region is guaranteed to be inserted.
  VirtualMemorySummary::record_committed_memory(size, flag());

  // Try to merge with prev and possibly next.
  if (try_merge_with(prev, addr, size, stack)) {
    if (try_merge_with(prev, next)) {
      // prev was expanded to contain the new region
      // and next, need to remove next from the list
      _committed_regions.remove_after(prev);
    }

    return true;
  }

  // Didn't merge with prev, try with next.
  if (try_merge_with(next, addr, size, stack)) {
    return true;
  }

  // Couldn't merge with any regions - create a new region.
  return add_committed_region(CommittedMemoryRegion(addr, size, stack));
}

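// Remove [addr, addr + size) from the committed region held by 'node'. The
// range is a strict sub-range of the region (asserted below), so we either
// trim one end of the region, or split it in two:
//
//   before:  [base ........................... end)
//   remove:          [addr .. addr + size)
//   after:   [base .. addr)                  [addr + size .. end)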
bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
  address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");

  CommittedMemoryRegion* rgn = node->data();
  assert(rgn->contain_region(addr, size), "Has to be contained");
  assert(!rgn->same_region(addr, size), "Can not be the same region");

  if (rgn->base() == addr ||
      rgn->end() == addr + size) {
    rgn->exclude_region(addr, size);
    return true;
  } else {
    // split this region
    address top = rgn->end();
    // use this region for lower part
    size_t exclude_size = rgn->end() - addr;
    rgn->exclude_region(addr, exclude_size);

    // higher part
    address high_base = addr + size;
    size_t  high_size = top - high_base;

    CommittedMemoryRegion high_rgn(high_base, high_size, *rgn->call_stack());
    LinkedListNode<CommittedMemoryRegion>* high_node = _committed_regions.add(high_rgn);
    assert(high_node == NULL || node->next() == high_node, "Should be right after");
    return (high_node != NULL);
  }

  return false;
}

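// Remove [addr, addr + sz) from this reserved region's list of committed
// regions. The range may match a recorded region exactly, swallow one or more
// regions whole, clip the head or tail of a region, or fall in the middle of
// a single region; each case is handled in the scan below.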
bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
  assert(addr != NULL, "Invalid address");
  assert(sz > 0, "Invalid size");

  CommittedMemoryRegion del_rgn(addr, sz, *call_stack());
  address end = addr + sz;

  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  LinkedListNode<CommittedMemoryRegion>* prev = NULL;
  CommittedMemoryRegion* crgn;

  while (head != NULL) {
    crgn = head->data();

    if (crgn->same_region(addr, sz)) {
      VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
      _committed_regions.remove_after(prev);
      return true;
    }

    // del_rgn contains crgn
    if (del_rgn.contain_region(crgn->base(), crgn->size())) {
      VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
      head = head->next();
      _committed_regions.remove_after(prev);
      continue;  // head already advanced; prev stays, since its successor was removed
    }

    // Found addr in the current crgn. There are 2 subcases:
    if (crgn->contain_address(addr)) {

      // (1) Found addr+size in current crgn as well. (del_rgn is contained in crgn)
      if (crgn->contain_address(end - 1)) {
        VirtualMemorySummary::record_uncommitted_memory(sz, flag());
        return remove_uncommitted_region(head, addr, sz); // done!
      } else {
        // (2) Did not find del_rgn's end in crgn.
        size_t size = crgn->end() - del_rgn.base();
        crgn->exclude_region(addr, size);
        VirtualMemorySummary::record_uncommitted_memory(size, flag());
      }

    } else if (crgn->contain_address(end - 1)) {
      // Found del_rgn's end, but not its base addr.
      size_t size = del_rgn.end() - crgn->base();
      crgn->exclude_region(crgn->base(), size);
      VirtualMemorySummary::record_uncommitted_memory(size, flag());
      return true;  // should be done if the list is sorted properly!
    }

    prev = head;
    head = head->next();
  }

  return true;
}

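// Hand all committed regions at or above 'addr' over to 'rgn'. Used when a
// partial release splits a reservation in two: the committed regions above
// the split point belong to the new, higher reservation.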
void ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRegion& rgn) {
  assert(addr != NULL, "Invalid address");

  // split committed regions
  LinkedListNode<CommittedMemoryRegion>* head =
    _committed_regions.head();
  LinkedListNode<CommittedMemoryRegion>* prev = NULL;

  while (head != NULL) {
    if (head->data()->base() >= addr) {
      break;
    }
    prev = head;
    head = head->next();
  }

  if (head != NULL) {
    if (prev != NULL) {
      // Detach the moved tail from this list; 'rgn' takes it over below.
      prev->set_next(NULL);
    } else {
      _committed_regions.set_head(NULL);
    }
  }

  rgn._committed_regions.set_head(head);
}

size_t ReservedMemoryRegion::committed_size() const {
  size_t committed = 0;
  LinkedListNode<CommittedMemoryRegion>* head =
    _committed_regions.head();
  while (head != NULL) {
    committed += head->data()->size();
    head = head->next();
  }
  return committed;
}

void ReservedMemoryRegion::set_flag(MEMFLAGS f) {
  assert((flag() == mtNone || flag() == f),
         "Overwrite memory type for region [" INTPTR_FORMAT "-" INTPTR_FORMAT "), %u->%u.",
         p2i(base()), p2i(end()), (unsigned)flag(), (unsigned)f);
  if (flag() != f) {
    VirtualMemorySummary::move_reserved_memory(flag(), f, size());
    VirtualMemorySummary::move_committed_memory(flag(), f, committed_size());
    _flag = f;
  }
}

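// Return the address where the uncommitted part of a thread stack begins.
// Committed runs at the low end of the region (stack guard pages) are skipped;
// the stack snapshot probes for committed stack pages from here upward.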
address ReservedMemoryRegion::thread_stack_uncommitted_bottom() const {
  assert(flag() == mtThreadStack, "Only for thread stack");
  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  address bottom = base();
  address top = base() + size();
  while (head != NULL) {
    address committed_top = head->data()->base() + head->data()->size();
    if (committed_top < top) {
      // committed stack guard pages, skip them
      bottom = head->data()->base() + head->data()->size();
      head = head->next();
    } else {
      assert(top == committed_top, "Sanity");
      break;
    }
  }

  return bottom;
}

bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
  if (level >= NMT_summary) {
    VirtualMemorySummary::initialize();
  }
  return true;
}

bool VirtualMemoryTracker::late_initialize(NMT_TrackingLevel level) {
  if (level >= NMT_summary) {
    _reserved_regions = new (std::nothrow, ResourceObj::C_HEAP, mtNMT)
      SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>();
    return (_reserved_regions != NULL);
  }
  return true;
}

bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
    const NativeCallStack& stack, MEMFLAGS flag) {
  assert(base_addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");
  ReservedMemoryRegion  rgn(base_addr, size, stack, flag);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  log_debug(nmt)("Add reserved region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ")",
                rgn.flag_name(), p2i(rgn.base()), rgn.size());
  if (reserved_rgn == NULL) {
    VirtualMemorySummary::record_reserved_memory(size, flag);
    return _reserved_regions->add(rgn) != NULL;
  } else {
    // Deal with recursive reservation:
    // os::reserve_memory() -> pd_reserve_memory() -> os::reserve_memory()
    // See JDK-8198226.
    if (reserved_rgn->same_region(base_addr, size) &&
        (reserved_rgn->flag() == flag || reserved_rgn->flag() == mtNone)) {
      reserved_rgn->set_call_stack(stack);
      reserved_rgn->set_flag(flag);
      return true;
    } else {
      assert(reserved_rgn->overlap_region(base_addr, size), "Must be");

      // Overlapped reservation.
      // This can happen for thread stacks: a JNI-attached thread that exits
      // without detaching from the VM leaks its JavaThread object.
      if (reserved_rgn->flag() == mtThreadStack) {
        guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached");
        // Overwrite with new region

        // Release old region
        VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->flag());
        VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->flag());

        // Add new region
        VirtualMemorySummary::record_reserved_memory(rgn.size(), flag);

        *reserved_rgn = rgn;
        return true;
      }

      // CDS mapping region.
      // CDS reserves the whole region for mapping the CDS archive, then maps each section into the region.
      // NMT reports CDS as a whole.
      if (reserved_rgn->flag() == mtClassShared) {
        log_debug(nmt)("CDS reserved region \'%s\' as a whole (" INTPTR_FORMAT ", " SIZE_FORMAT ")",
                      reserved_rgn->flag_name(), p2i(reserved_rgn->base()), reserved_rgn->size());
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved CDS region should contain this mapping region");
        return true;
      }

      // Mapped CDS string region.
      // The string region(s) is part of the java heap.
      if (reserved_rgn->flag() == mtJavaHeap) {
        log_debug(nmt)("CDS reserved region \'%s\' as a whole (" INTPTR_FORMAT ", " SIZE_FORMAT ")",
                      reserved_rgn->flag_name(), p2i(reserved_rgn->base()), reserved_rgn->size());
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved heap region should contain this mapping region");
        return true;
      }

      // Print some more details. Don't use UL here to avoid circularities.
#ifdef ASSERT
      tty->print_cr("Error: existing region: [" INTPTR_FORMAT "-" INTPTR_FORMAT "), flag %u.\n"
                    "       new region: [" INTPTR_FORMAT "-" INTPTR_FORMAT "), flag %u.",
                    p2i(reserved_rgn->base()), p2i(reserved_rgn->end()), (unsigned)reserved_rgn->flag(),
                    p2i(base_addr), p2i(base_addr + size), (unsigned)flag);
#endif
      ShouldNotReachHere();
      return false;
    }
  }
}

void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag) {
  assert(addr != NULL, "Invalid address");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion   rgn(addr, 1);
  ReservedMemoryRegion*  reserved_rgn = _reserved_regions->find(rgn);
  if (reserved_rgn != NULL) {
    assert(reserved_rgn->contain_address(addr), "Containment");
    if (reserved_rgn->flag() != flag) {
      assert(reserved_rgn->flag() == mtNone, "Overwrite memory type (should be mtNone, is: \"%s\")",
             NMTUtil::flag_to_name(reserved_rgn->flag()));
      reserved_rgn->set_flag(flag);
    }
  }
}

bool VirtualMemoryTracker::add_committed_region(address addr, size_t size,
  const NativeCallStack& stack) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  if (reserved_rgn == NULL) {
    log_debug(nmt)("Add committed region \'%s\', no reserved region found for (" INTPTR_FORMAT ", " SIZE_FORMAT ")",
                  rgn.flag_name(), p2i(rgn.base()), rgn.size());
  }
  assert(reserved_rgn != NULL, "Add committed region, No reserved region found");
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  bool result = reserved_rgn->add_committed_region(addr, size, stack);
  log_debug(nmt)("Add committed region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") %s",
                rgn.flag_name(), p2i(rgn.base()), rgn.size(), (result ? "Succeeded" : "Failed"));
  return result;
}

bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  assert(reserved_rgn != NULL, "No reserved region (" INTPTR_FORMAT ", " SIZE_FORMAT ")", p2i(addr), size);
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  const char* flag_name = reserved_rgn->flag_name();  // after remove, info is not complete
  bool result = reserved_rgn->remove_uncommitted_region(addr, size);
  log_debug(nmt)("Removed uncommitted region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") %s",
                flag_name, p2i(addr), size, (result ? "Succeeded" : "Failed"));
  return result;
}

bool VirtualMemoryTracker::remove_released_region(ReservedMemoryRegion* rgn) {
  assert(rgn != NULL, "Sanity check");
  assert(_reserved_regions != NULL, "Sanity check");

  // uncommit regions within the released region
  ReservedMemoryRegion backup(*rgn);
  bool result = rgn->remove_uncommitted_region(rgn->base(), rgn->size());
  log_debug(nmt)("Remove uncommitted region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") %s",
                backup.flag_name(), p2i(backup.base()), backup.size(), (result ? "Succeeded" : "Failed"));
  if (!result) {
    return false;
  }

  VirtualMemorySummary::record_released_memory(rgn->size(), rgn->flag());
  result = _reserved_regions->remove(*rgn);
  log_debug(nmt)("Removed region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") from _reserved_regions %s",
                backup.flag_name(), p2i(backup.base()), backup.size(), (result ? "Succeeded" : "Failed"));
  return result;
}

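// Remove a released range from the tracker. If the range matches a whole
// reservation, the reservation is dropped entirely; if it covers one end,
// the reservation is shrunk; if it falls in the middle, the reservation is
// split in two and the committed regions above the split point move to the
// new upper reservation.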
bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  if (reserved_rgn == NULL) {
    log_debug(nmt)("No reserved region found for (" INTPTR_FORMAT ", " SIZE_FORMAT ")!",
                  p2i(rgn.base()), rgn.size());
  }
  assert(reserved_rgn != NULL, "No reserved region");
  if (reserved_rgn->same_region(addr, size)) {
    return remove_released_region(reserved_rgn);
  }

  // uncommit regions within the released region
  if (!reserved_rgn->remove_uncommitted_region(addr, size)) {
    return false;
  }

  if (reserved_rgn->flag() == mtClassShared &&
      reserved_rgn->contain_region(addr, size)) {
    // This is an unmapped CDS region, which is part of the reserved shared
    // memory region.
    // See special handling in VirtualMemoryTracker::add_reserved_region also.
    return true;
  }

  VirtualMemorySummary::record_released_memory(size, reserved_rgn->flag());

  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  if (reserved_rgn->base() == addr ||
      reserved_rgn->end() == addr + size) {
    reserved_rgn->exclude_region(addr, size);
    return true;
  } else {
    address top = reserved_rgn->end();
    address high_base = addr + size;
    ReservedMemoryRegion high_rgn(high_base, top - high_base,
      *reserved_rgn->call_stack(), reserved_rgn->flag());

    // use original region for lower region
    reserved_rgn->exclude_region(addr, top - addr);
    LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions->add(high_rgn);
    if (new_rgn == NULL) {
      return false;
    } else {
      reserved_rgn->move_committed_regions(addr, *new_rgn->data());
      return true;
    }
  }
}

// Given an existing memory mapping registered with NMT, split the mapping in
//  two. The two newly created mappings will be registered under the call
//  stack and the memory flags of the original section.
bool VirtualMemoryTracker::split_reserved_region(address addr, size_t size, size_t split) {

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  assert(reserved_rgn != NULL, "No reserved region");
  assert(reserved_rgn->same_region(addr, size), "Must be identical region");
  assert(reserved_rgn->committed_size() == 0, "Splitting committed region?");

  NativeCallStack original_stack = *reserved_rgn->call_stack();
  MEMFLAGS original_flags = reserved_rgn->flag();

  const char* name = reserved_rgn->flag_name();
  remove_released_region(reserved_rgn);
  log_debug(nmt)("Split region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") with size " SIZE_FORMAT,
                name, p2i(rgn.base()), rgn.size(), split);
  // Now, create two new regions.
  add_reserved_region(addr, split, original_stack, original_flags);
  add_reserved_region(addr + split, size - split, original_stack, original_flags);

  return true;
}

// Iterate over the range and find committed regions within its bounds.
class RegionIterator : public StackObj {
private:
  const address _start;
  const size_t  _size;

  address _current_start;
  size_t  _current_size;
public:
  RegionIterator(address start, size_t size) :
    _start(start), _size(size), _current_start(start), _current_size(size) {
  }

  // return true if a committed region is found
  bool next_committed(address& start, size_t& size);
private:
  address end() const { return _start + _size; }
};

bool RegionIterator::next_committed(address& committed_start, size_t& committed_size) {
  if (end() <= _current_start) return false;

  const size_t page_sz = os::vm_page_size();
  assert(_current_start + _current_size == end(), "Must be");
  if (os::committed_in_range(_current_start, _current_size, committed_start, committed_size)) {
    assert(committed_start != NULL, "Must be");
    assert(committed_size > 0 && is_aligned(committed_size, page_sz), "Must be");

    size_t remaining_size = (_current_start + _current_size) - (committed_start + committed_size);
    _current_start = committed_start + committed_size;
    _current_size = remaining_size;
    return true;
  } else {
    return false;
  }
}

// Walk all known thread stacks, snapshot their committed ranges.
class SnapshotThreadStackWalker : public VirtualMemoryWalker {
public:
  SnapshotThreadStackWalker() {}

  bool do_allocation_site(const ReservedMemoryRegion* rgn) {
    if (rgn->flag() == mtThreadStack) {
      address stack_bottom = rgn->thread_stack_uncommitted_bottom();
      address committed_start;
      size_t  committed_size;
      size_t  stack_size = rgn->base() + rgn->size() - stack_bottom;
      // Align the size to work with full pages (Alpine and AIX stack top is not page aligned)
      size_t aligned_stack_size = align_up(stack_size, os::vm_page_size());

      ReservedMemoryRegion* region = const_cast<ReservedMemoryRegion*>(rgn);
      NativeCallStack ncs; // empty stack

      RegionIterator itr(stack_bottom, aligned_stack_size);
      DEBUG_ONLY(bool found_stack = false;)
      while (itr.next_committed(committed_start, committed_size)) {
        assert(committed_start != NULL, "Should not be null");
        assert(committed_size > 0, "Should not be 0");
        // unaligned stack_size case: correct the region to fit the actual stack_size
        if (stack_bottom + stack_size < committed_start + committed_size) {
          committed_size = stack_bottom + stack_size - committed_start;
        }
        region->add_committed_region(committed_start, committed_size, ncs);
        DEBUG_ONLY(found_stack = true;)
      }
#ifdef ASSERT
      if (!found_stack) {
        log_debug(thread)("Thread exited without proper cleanup, may leak thread object");
      }
#endif
    }
    return true;
  }
};

void VirtualMemoryTracker::snapshot_thread_stacks() {
  SnapshotThreadStackWalker walker;
  walk_virtual_memory(&walker);
}

bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
  assert(_reserved_regions != NULL, "Sanity check");
  ThreadCritical tc;
  // Check that the _reserved_regions haven't been deleted.
  if (_reserved_regions != NULL) {
    LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head();
    while (head != NULL) {
      const ReservedMemoryRegion* rgn = head->peek();
      if (!walker->do_allocation_site(rgn)) {
        return false;
      }
      head = head->next();
    }
  }
  return true;
}

// Transition virtual memory tracking level.
bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
  assert(from != NMT_minimal, "cannot convert from the lowest tracking level to anything");
  if (to == NMT_minimal) {
    assert(from == NMT_summary || from == NMT_detail, "Just check");
    // Clean up virtual memory tracking data structures.
    ThreadCritical tc;
    // Check for a potential race with another thread calling transition.
    if (_reserved_regions != NULL) {
      delete _reserved_regions;
      _reserved_regions = NULL;
    }
  }

  return true;
}

// Metaspace Support
MetaspaceSnapshot::MetaspaceSnapshot() {
  for (int index = (int)Metaspace::ClassType; index < (int)Metaspace::MetadataTypeCount; index++) {
    Metaspace::MetadataType type = (Metaspace::MetadataType)index;
    assert_valid_metadata_type(type);
    _reserved_in_bytes[type]  = 0;
    _committed_in_bytes[type] = 0;
    _used_in_bytes[type]      = 0;
    _free_in_bytes[type]      = 0;
  }
}

void MetaspaceSnapshot::snapshot(Metaspace::MetadataType type, MetaspaceSnapshot& mss) {
  assert_valid_metadata_type(type);

  mss._reserved_in_bytes[type]   = MetaspaceUtils::reserved_bytes(type);
  mss._committed_in_bytes[type]  = MetaspaceUtils::committed_bytes(type);
  mss._used_in_bytes[type]       = MetaspaceUtils::used_bytes(type);

  // The answer to "what is free" in metaspace is complex and cannot be answered with a single number.
  // Free as in available to all loaders? Free, pinned to one loader? For now, keep it simple.
  mss._free_in_bytes[type] = mss._committed_in_bytes[type] - mss._used_in_bytes[type];
}

void MetaspaceSnapshot::snapshot(MetaspaceSnapshot& mss) {
  snapshot(Metaspace::NonClassType, mss);
  if (Metaspace::using_class_space()) {
    snapshot(Metaspace::ClassType, mss);
  }
}