/*
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/freeChunkList.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaspaceSettings.hpp"
#include "memory/metaspace/virtualSpaceNode.hpp"
#include "metaspaceGtestCommon.hpp"
#include "metaspaceGtestContexts.hpp"
#include "runtime/mutexLocker.hpp"

using metaspace::ChunkManager;
using metaspace::FreeChunkListVector;
using metaspace::Metachunk;
using metaspace::Settings;
using metaspace::VirtualSpaceNode;
using namespace metaspace::chunklevel;

// Test ChunkManager::get_chunk
TEST_VM(metaspace, get_chunk) {

  ChunkGtestContext context(8 * M);
  Metachunk* c = NULL;

  for (chunklevel_t pref_lvl = LOWEST_CHUNK_LEVEL; pref_lvl <= HIGHEST_CHUNK_LEVEL; pref_lvl++) {

    for (chunklevel_t max_lvl = pref_lvl; max_lvl <= HIGHEST_CHUNK_LEVEL; max_lvl++) {

      for (size_t min_committed_words = Settings::commit_granule_words();
           min_committed_words <= word_size_for_level(max_lvl); min_committed_words *= 2) {
        context.alloc_chunk_expect_success(&c, pref_lvl, max_lvl, min_committed_words);
        context.return_chunk(c);
      }
    }
  }
}

// Test ChunkManager::get_chunk, but with a commit limit.
TEST_VM(metaspace, get_chunk_with_commit_limit) {

  const size_t commit_limit_words = 1 * M;
  ChunkGtestContext context(commit_limit_words);
  Metachunk* c = NULL;

  for (chunklevel_t pref_lvl = LOWEST_CHUNK_LEVEL; pref_lvl <= HIGHEST_CHUNK_LEVEL; pref_lvl++) {

    for (chunklevel_t max_lvl = pref_lvl; max_lvl <= HIGHEST_CHUNK_LEVEL; max_lvl++) {

      for (size_t min_committed_words = Settings::commit_granule_words();
           min_committed_words <= word_size_for_level(max_lvl); min_committed_words *= 2) {

        if (min_committed_words <= commit_limit_words) {
          context.alloc_chunk_expect_success(&c, pref_lvl, max_lvl, min_committed_words);
          context.return_chunk(c);
        } else {
          context.alloc_chunk_expect_failure(pref_lvl, max_lvl, min_committed_words);
        }

      }
    }
  }
}

// Test that recommitting the used portion of a chunk will preserve the original content.
TEST_VM(metaspace, get_chunk_recommit) {

  ChunkGtestContext context;
  Metachunk* c = NULL;
  context.alloc_chunk_expect_success(&c, ROOT_CHUNK_LEVEL, ROOT_CHUNK_LEVEL, 0);
  context.uncommit_chunk_with_test(c);

  context.commit_chunk_with_test(c, Settings::commit_granule_words());
  context.allocate_from_chunk(c, Settings::commit_granule_words());

  c->ensure_committed(Settings::commit_granule_words());
  check_range_for_pattern(c->base(), c->used_words(), (uintx)c);

  c->ensure_committed(Settings::commit_granule_words() * 2);
  check_range_for_pattern(c->base(), c->used_words(), (uintx)c);

  context.return_chunk(c);

}

// Test ChunkManager::get_chunk, but with a reserve limit.
// (meaning, the underlying VirtualSpaceList cannot expand, like compressed class space).
TEST_VM(metaspace, get_chunk_with_reserve_limit) {

  const size_t reserve_limit_words = word_size_for_level(ROOT_CHUNK_LEVEL);
  const size_t commit_limit_words = 1024 * M; // just very high
  ChunkGtestContext context(commit_limit_words, reserve_limit_words);

  // Reserve limit works at root chunk size granularity: if the chunk manager cannot satisfy
  //  a request for a chunk from its freelists, it will acquire a new root chunk from the
  //  underlying virtual space list. If that list is full and cannot be expanded (think ccs)
  //  we should get an error.
  // Testing this is simply testing a chunk allocation which should cause allocation of a new
  //  root chunk.
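  //
  // (Illustration, derived from the setup above: the reserve limit here equals exactly one
  //  root chunk, so the node can carve any number of chunks out of that single root chunk,
  //  but any request that forces it to reserve a second root chunk has to fail.)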

  // Cause allocation of the first root chunk; this should still work:
  Metachunk* c = NULL;
  context.alloc_chunk_expect_success(&c, HIGHEST_CHUNK_LEVEL);

  // ... but this one needs a new root chunk and hence must fail:
  context.alloc_chunk_expect_failure(ROOT_CHUNK_LEVEL);

  context.return_chunk(c);

}

// Test Metachunk::allocate by filling a chunk of each level in one allocation.
TEST_VM(metaspace, chunk_allocate_full) {

  ChunkGtestContext context;

  for (chunklevel_t lvl = LOWEST_CHUNK_LEVEL; lvl <= HIGHEST_CHUNK_LEVEL; lvl++) {
    Metachunk* c = NULL;
    context.alloc_chunk_expect_success(&c, lvl);
    context.allocate_from_chunk(c, c->word_size());
    context.return_chunk(c);
  }

}

// Test Metachunk::allocate by allocating random-sized blocks until the chunk is exhausted.
TEST_VM(metaspace, chunk_allocate_random) {

  ChunkGtestContext context;

  for (chunklevel_t lvl = LOWEST_CHUNK_LEVEL; lvl <= HIGHEST_CHUNK_LEVEL; lvl++) {

    Metachunk* c = NULL;
    context.alloc_chunk_expect_success(&c, lvl);
    context.uncommit_chunk_with_test(c); // start out fully uncommitted

    RandSizeGenerator rgen(1, c->word_size() / 30);
    bool stop = false;

    while (!stop) {
      const size_t s = rgen.get();
      if (s <= c->free_words()) {
        context.commit_chunk_with_test(c, s);
        context.allocate_from_chunk(c, s);
      } else {
        stop = true;
      }

    }
    context.return_chunk(c);

  }

}

TEST_VM(metaspace, chunk_buddy_stuff) {

  for (chunklevel_t l = ROOT_CHUNK_LEVEL + 1; l <= HIGHEST_CHUNK_LEVEL; l++) {

    ChunkGtestContext context;

    // Allocate two chunks; since we know the first chunk is the first in its area,
    // it has to be a leader, and the next one of the same size its buddy.

    // (Note: strictly speaking the ChunkManager does not promise any placement but
    //  we know how the placement works so these tests make sense).
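    //
    // (In other words: with a fresh context per iteration, the first chunk carved from the
    //  node sits at the very start of its root chunk area and must be a leader; the second
    //  chunk of the same size is carved directly behind it and is therefore its buddy.)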

    Metachunk* c1 = NULL;
    context.alloc_chunk(&c1, CHUNK_LEVEL_1K);
    EXPECT_TRUE(c1->is_leader());

    Metachunk* c2 = NULL;
    context.alloc_chunk(&c2, CHUNK_LEVEL_1K);
    EXPECT_FALSE(c2->is_leader());

    // buddies are adjacent in memory
    // (next/prev_in_vs needs lock)
    {
      MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
      EXPECT_EQ(c1->next_in_vs(), c2);
      EXPECT_EQ(c1->end(), c2->base());
      EXPECT_NULL(c1->prev_in_vs()); // since we know this is the first in the area
      EXPECT_EQ(c2->prev_in_vs(), c1);
    }

    context.return_chunk(c1);
    context.return_chunk(c2);

  }

}

TEST_VM(metaspace, chunk_allocate_with_commit_limit) {

  // This test does not make sense if commit-on-demand is off
  if (Settings::new_chunks_are_fully_committed()) {
    return;
  }

  const size_t granule_sz = Settings::commit_granule_words();
  const size_t commit_limit = granule_sz * 3;
  ChunkGtestContext context(commit_limit);

  // A big chunk, but uncommitted.
  Metachunk* c = NULL;
  context.alloc_chunk_expect_success(&c, ROOT_CHUNK_LEVEL, ROOT_CHUNK_LEVEL, 0);
  context.uncommit_chunk_with_test(c); // ... just to make sure.

  // first granule...
  context.commit_chunk_with_test(c, granule_sz);
  context.allocate_from_chunk(c, granule_sz);

  // second granule...
  context.commit_chunk_with_test(c, granule_sz);
  context.allocate_from_chunk(c, granule_sz);

  // third granule...
  context.commit_chunk_with_test(c, granule_sz);
  context.allocate_from_chunk(c, granule_sz);

  // This should fail now.
  context.commit_chunk_expect_failure(c, granule_sz);

  context.return_chunk(c);

}

// Test splitting and merging a chunk.
TEST_VM(metaspace, chunk_split_and_merge) {

  // Split works like this:
  //
  //  ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
  // |                                  A                                            |
  //  ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
  //
  //  ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
  // | A' | b  |    c    |         d         |                   e                   |
  //  ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
  //
  // An original chunk (A) is split to form a target chunk (A'), and as a result splinter
  // chunks (b..e) form. A' is the leader of the (A',b) pair, which is the leader of the
  // ((A',b), c) pair and so on. In other words, A' will be a leader chunk, all splinter
  // chunks are follower chunks.
  //
  // Merging reverses this operation:
  //
  //  ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
  // | A  | b  |    c    |         d         |                   e                   |
  //  ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
  //
  //  ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
  // |                                  A'                                           |
  //  ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
  //
  // (A) will be merged with its buddy b, (A+b) with its buddy c and so on. The resulting
  // chunk is A'.
  // Note that merging also works, of course, if we were to start the merge at (b) (so,
  // with a follower chunk, not a leader). Also, at any point in the merge
  // process we may arrive at a follower chunk. So, the fact that in this test
  // we only expect a leader merge is a feature of the test, and of the fact that we
  // start each split test with a fresh ChunkGtestContext.

  // Note: Splitting and merging chunks is usually done from within the ChunkManager and
  //  subject to a lot of assumptions and hence asserts. Here, we have to explicitly use
  //  VirtualSpaceNode::split/::merge and therefore have to observe these rules:
  // - both split and merge expect free chunks, so the chunk state has to be "free";
  // - but that would trigger the "ideally merged" assertion in the RootChunkArea, so the
  //   original chunk has to be a root chunk; we cannot just split any chunk manually;
  // - also, after the split we have to completely re-merge to avoid triggering asserts
  //   in ~RootChunkArea();
  // - finally, we have to lock manually.
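  //
  // (Level arithmetic checked below: splitting the original chunk at orig_lvl down to
  //  target_lvl leaves the shrunk leader chunk at target_lvl, plus one free splinter
  //  chunk at each level from orig_lvl + 1 up to and including target_lvl.)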

  ChunkGtestContext context;

  const chunklevel_t orig_lvl = ROOT_CHUNK_LEVEL;
  for (chunklevel_t target_lvl = orig_lvl + 1; target_lvl <= HIGHEST_CHUNK_LEVEL; target_lvl++) {

    // Split a fully committed chunk. The resulting chunk should be fully
    //  committed as well, and have its content preserved.
    Metachunk* c = NULL;
    context.alloc_chunk_expect_success(&c, orig_lvl);

    // We allocate from this chunk to be able to completely paint the payload.
    context.allocate_from_chunk(c, c->word_size());

    const uintx canary = os::random();
    fill_range_with_pattern(c->base(), c->word_size(), canary);

    FreeChunkListVector splinters;

    {
      // Splitting/Merging chunks is usually done by the chunkmanager, and no explicit
      // outside API exists. So we split/merge chunks via the underlying vs node, directly.
      // This means that we have to go through some extra hoops to not trigger any asserts.
      MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
      c->reset_used_words();
      c->set_free();
      c->vsnode()->split(target_lvl, c, &splinters);
    }

    DEBUG_ONLY(context.verify();)

    EXPECT_EQ(c->level(), target_lvl);
    EXPECT_TRUE(c->is_fully_committed());
    EXPECT_FALSE(c->is_root_chunk());
    EXPECT_TRUE(c->is_leader());

    check_range_for_pattern(c->base(), c->word_size(), canary);

    // We expect one splinter chunk for each splinter level:
    //  e.g. splitting a 1M chunk to get a 64K chunk should yield splinters of [512K, 256K, 128K, 64K].
    for (chunklevel_t l = LOWEST_CHUNK_LEVEL; l < HIGHEST_CHUNK_LEVEL; l++) {
      const Metachunk* c2 = splinters.first_at_level(l);
      if (l > orig_lvl && l <= target_lvl) {
        EXPECT_NOT_NULL(c2);
        EXPECT_EQ(c2->level(), l);
        EXPECT_TRUE(c2->is_free());
        EXPECT_TRUE(!c2->is_leader());
        DEBUG_ONLY(c2->verify());
        check_range_for_pattern(c2->base(), c2->word_size(), canary);
      } else {
        EXPECT_NULL(c2);
      }
    }

    // Revert the split by using merge. This should result in all splinters coalescing
    // into one chunk.
    {
      MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
      Metachunk* merged = c->vsnode()->merge(c, &splinters);

      // The merged chunk should occupy the same address as the original chunk,
      // since that chunk was the leader in the split.
      EXPECT_EQ(merged, c);
      EXPECT_TRUE(merged->is_root_chunk() || merged->is_leader());

      // Merging should have arrived back at the original chunk level since none of the splinters are in use.
      EXPECT_EQ(c->level(), orig_lvl);

      // All splinters should have been removed from the list.
      EXPECT_EQ(splinters.num_chunks(), 0);
    }

    context.return_chunk(c);

  }

}

TEST_VM(metaspace, chunk_enlarge_in_place) {

  ChunkGtestContext context;

  // Starting with the smallest chunk size, attempt to enlarge the chunk in place until we arrive
  // at root chunk size. Since the state is clean, this should work.
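  //
  // (Each successful enlargement doubles the chunk in place: the level decreases by one,
  //  the word size doubles, used words and top stay unchanged, and the newly gained second
  //  half shows up as additional free space; the checks in the loop below verify this.)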

  Metachunk* c = NULL;
  context.alloc_chunk_expect_success(&c, HIGHEST_CHUNK_LEVEL);

  chunklevel_t l = c->level();

  while (l != ROOT_CHUNK_LEVEL) {

    // commit and allocate from chunk to pattern it...
    const size_t original_chunk_size = c->word_size();
    context.commit_chunk_with_test(c, c->free_words());
    context.allocate_from_chunk(c, c->free_words());

    size_t used_before = c->used_words();
    size_t free_before = c->free_words();
    size_t free_below_committed_before = c->free_below_committed_words();
    const MetaWord* top_before = c->top();

    EXPECT_TRUE(context.cm().attempt_enlarge_chunk(c));
    EXPECT_EQ(l - 1, c->level());
    EXPECT_EQ(c->word_size(), original_chunk_size * 2);

    // Used words should not have changed
    EXPECT_EQ(c->used_words(), used_before);
    EXPECT_EQ(c->top(), top_before);

    // free words should be expanded by the old size (since old chunk is doubled in size)
    EXPECT_EQ(c->free_words(), free_before + original_chunk_size);

    // free below committed can be larger but never smaller
    EXPECT_GE(c->free_below_committed_words(), free_below_committed_before);

    // Old content should be preserved
    check_range_for_pattern(c->base(), original_chunk_size, (uintx)c);

    l = c->level();
  }

  context.return_chunk(c);

}