/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"

// Included early because the NMT flags don't include it.
#include "utilities/macros.hpp"

#if INCLUDE_NMT

#include "runtime/thread.hpp"
#include "services/memTracker.hpp"
#include "services/virtualMemoryTracker.hpp"
#include "utilities/globalDefinitions.hpp"
#include "unittest.hpp"

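// Exercises NMT's tracking of committed virtual memory: thread-stack
// snapshots, committed-region enumeration for touched pages, and
// os::committed_in_range() queries over sub-ranges of a committed mapping.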
class CommittedVirtualMemoryTest {
public:
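  // Record the current thread's stack with NMT, snapshot thread stacks, and
  // verify that the reported committed regions reach the stack top and cover
  // the page holding a local variable.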
  static void test() {
#ifndef _AIX
    // Temporarily disabled on AIX; see JDK-8202772.
    Thread* thr = Thread::current();
    address stack_end = thr->stack_end();
    size_t  stack_size = thr->stack_size();

    MemTracker::record_thread_stack(stack_end, stack_size);

    VirtualMemoryTracker::add_reserved_region(stack_end, stack_size, CALLER_PC, mtThreadStack);

    // snapshot current stack usage
    VirtualMemoryTracker::snapshot_thread_stacks();

    ReservedMemoryRegion* rmr = VirtualMemoryTracker::_reserved_regions->find(ReservedMemoryRegion(stack_end, stack_size));
    ASSERT_TRUE(rmr != NULL);

    ASSERT_EQ(rmr->base(), stack_end);
    ASSERT_EQ(rmr->size(), stack_size);

    CommittedRegionIterator iter = rmr->iterate_committed_regions();
    int i = 0;
    address i_addr = (address)&i;
    bool found_i_addr = false;

    // stack grows downward
    address stack_top = stack_end + stack_size;
    bool found_stack_top = false;

    for (const CommittedMemoryRegion* region = iter.next(); region != NULL; region = iter.next()) {
      if (region->base() + region->size() == stack_top) {
        ASSERT_TRUE(region->size() <= stack_size);
        found_stack_top = true;
      }

      if (i_addr < stack_top && i_addr >= region->base()) {
        found_i_addr = true;
      }

      i++;
    }

    // The stack and guard pages may be merged into a single contiguous region,
    // so at least one committed region must have been found.
    ASSERT_TRUE(i >= 1);
    ASSERT_TRUE(found_stack_top);
    ASSERT_TRUE(found_i_addr);
#endif // !_AIX
  }

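  // Mark (set to -1) every entry of page_num whose page lies within the
  // committed range [addr, addr + size).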
  static void check_covered_pages(address addr, size_t size, address base, size_t touch_pages, int* page_num) {
    const size_t page_sz = os::vm_page_size();
    size_t index;
    for (index = 0; index < touch_pages; index++) {
      address page_addr = base + page_num[index] * page_sz;
      // If the range covers this page, mark the page as seen
      if (page_addr >= addr && page_addr < addr + size) {
        page_num[index] = -1;
      }
    }
  }

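  // Reserve and commit num_pages pages, touch the pages listed in page_num,
  // then verify that NMT's committed regions cover every touched page on
  // platforms that support precise committed-region tracking.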
  static void test_committed_region_impl(size_t num_pages, size_t touch_pages, int* page_num) {
    const size_t page_sz = os::vm_page_size();
    const size_t size = num_pages * page_sz;
    char* base = os::reserve_memory(size, NULL, page_sz, mtThreadStack);
    ASSERT_NE(base, (char*)NULL);
    bool result = os::commit_memory(base, size, false);
    ASSERT_TRUE(result);
    size_t index;
    for (index = 0; index < touch_pages; index++) {
      char* touch_addr = base + page_sz * page_num[index];
      *touch_addr = 'a';
    }

    address frame = (address)0x1235;
    NativeCallStack stack(&frame, 1);
    VirtualMemoryTracker::add_reserved_region((address)base, size, stack, mtThreadStack);

    // Trigger the committed-region snapshot that this test verifies
    VirtualMemoryTracker::snapshot_thread_stacks();

    ReservedMemoryRegion* rmr = VirtualMemoryTracker::_reserved_regions->find(ReservedMemoryRegion((address)base, size));
    ASSERT_TRUE(rmr != NULL);

    bool precise_tracking_supported = false;
    CommittedRegionIterator iter = rmr->iterate_committed_regions();
    for (const CommittedMemoryRegion* region = iter.next(); region != NULL; region = iter.next()) {
      if (region->size() == size) {
        // Platforms that do not support precise tracking report the whole
        // reservation as a single committed region.
        ASSERT_TRUE(iter.next() == NULL);
        break;
      } else {
        precise_tracking_supported = true;
        check_covered_pages(region->base(), region->size(), (address)base, touch_pages, page_num);
      }
    }

    if (precise_tracking_supported) {
      // All touched pages should be committed
      for (size_t index = 0; index < touch_pages; index++) {
        ASSERT_EQ(page_num[index], -1);
      }
    }

    // Cleanup
    os::free_memory(base, size, page_sz);
    VirtualMemoryTracker::remove_released_region((address)base, size);

    rmr = VirtualMemoryTracker::_reserved_regions->find(ReservedMemoryRegion((address)base, size));
    ASSERT_TRUE(rmr == NULL);
  }

  static void test_committed_region() {
    // On Linux, we scan 1024 pages at a time. Cover ranges that span fewer
    // pages than, slightly more pages than, and several times as many pages
    // as that scan window.
    int small_range[] = {3, 9, 46};
    int mid_range[] = {0, 45, 100, 399, 400, 1000, 1031};
    int large_range[] = {100, 301, 1024, 2047, 2048, 2049, 2050, 3000};

    test_committed_region_impl(47, 3, small_range);
    test_committed_region_impl(1088, 5, mid_range);
    test_committed_region_impl(3074, 8, large_range);
  }

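  // Commit and touch a four-page range, then verify that os::committed_in_range()
  // reports the correct committed start and size for the whole range and for
  // sub-ranges at its beginning, end, and middle.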
  static void test_partial_region() {
    bool   result;
    size_t committed_size;
    address committed_start;
    size_t index;

    const size_t page_sz = os::vm_page_size();
    const size_t num_pages = 4;
    const size_t size = num_pages * page_sz;
    char* base = os::reserve_memory(size, NULL, page_sz, mtTest);
    ASSERT_NE(base, (char*)NULL);
    result = os::commit_memory(base, size, false);

    ASSERT_TRUE(result);
    // Touch all pages
    for (index = 0; index < num_pages; index++) {
      *(base + index * page_sz) = 'a';
    }

    // Test whole range
    result = os::committed_in_range((address)base, size, committed_start, committed_size);
    ASSERT_TRUE(result);
    ASSERT_EQ(num_pages * page_sz, committed_size);
    ASSERT_EQ(committed_start, (address)base);

    // Test beginning of the range
    result = os::committed_in_range((address)base, 2 * page_sz, committed_start, committed_size);
    ASSERT_TRUE(result);
    ASSERT_EQ(2 * page_sz, committed_size);
    ASSERT_EQ(committed_start, (address)base);

    // Test end of the range
    result = os::committed_in_range((address)(base + page_sz), 3 * page_sz, committed_start, committed_size);
    ASSERT_TRUE(result);
    ASSERT_EQ(3 * page_sz, committed_size);
    ASSERT_EQ(committed_start, (address)(base + page_sz));

    // Test middle of the range
    result = os::committed_in_range((address)(base + page_sz), 2 * page_sz, committed_start, committed_size);
    ASSERT_TRUE(result);
    ASSERT_EQ(2 * page_sz, committed_size);
    ASSERT_EQ(committed_start, (address)(base + page_sz));
  }
};

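// The virtual memory tracker must be initialized at detail level before the
// tests can record reserved regions and snapshot thread stacks.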
TEST_VM(CommittedVirtualMemoryTracker, test_committed_virtualmemory_region) {
  VirtualMemoryTracker::initialize(NMT_detail);
  VirtualMemoryTracker::late_initialize(NMT_detail);

  CommittedVirtualMemoryTest::test();
  CommittedVirtualMemoryTest::test_committed_region();
  CommittedVirtualMemoryTest::test_partial_region();
}

#endif // INCLUDE_NMT