/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include <cstdlib>

#include "gc/Memory.h"
#include "jsapi-tests/tests.h"

#if defined(XP_WIN)
#  include "util/WindowsWrapper.h"
#  include <psapi.h>
#elif defined(__wasi__)
// Nothing.
#else
#  include <algorithm>
#  include <errno.h>
#  include <sys/mman.h>
#  include <sys/resource.h>
#  include <sys/stat.h>
#  include <sys/types.h>
#  include <unistd.h>
#endif
BEGIN_TEST(testGCAllocator) {
#ifdef JS_64BIT
  // If we're using the scattershot allocator, this test does not apply.
  if (js::gc::UsingScattershotAllocator()) {
    return true;
  }
#endif

  size_t PageSize = js::gc::SystemPageSize();

  /* Finish any ongoing background free activity. */
  js::gc::FinishGC(cx);

  bool growUp = false;
  CHECK(addressesGrowUp(&growUp));

  if (growUp) {
    return testGCAllocatorUp(PageSize);
  } else {
    return testGCAllocatorDown(PageSize);
  }
}

static const size_t Chunk = 512 * 1024;
static const size_t Alignment = 2 * Chunk;
static const int MaxTempChunks = 4096;
static const size_t StagingSize = 16 * Chunk;

bool addressesGrowUp(bool* resultOut) {
  /*
   * Try to detect whether the OS allocates memory in increasing or decreasing
   * address order by making several allocations and comparing the addresses.
   */

  static const unsigned ChunksToTest = 20;
  static const int ThresholdCount = 15;
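  // With ChunksToTest - 1 == 19 pairwise comparisons, upCount + downCount is
  // always 19, so requiring |upCount - downCount| >= 15 means at least 17 of
  // the 19 comparisons (~90%) agreed on a growth direction.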

  void* chunks[ChunksToTest];
  for (unsigned i = 0; i < ChunksToTest; i++) {
    chunks[i] = mapMemory(2 * Chunk);
    CHECK(chunks[i]);
  }

  int upCount = 0;
  int downCount = 0;

  for (unsigned i = 0; i < ChunksToTest - 1; i++) {
    if (chunks[i] < chunks[i + 1]) {
      upCount++;
    } else {
      downCount++;
    }
  }

  for (unsigned i = 0; i < ChunksToTest; i++) {
    unmapPages(chunks[i], 2 * Chunk);
  }

  /* Check results were mostly consistent. */
  CHECK(abs(upCount - downCount) >= ThresholdCount);

  *resultOut = upCount > downCount;

  return true;
}

size_t offsetFromAligned(void* p) { return uintptr_t(p) % Alignment; }

enum AllocType { UseNormalAllocator, UseLastDitchAllocator };

bool testGCAllocatorUp(const size_t PageSize) {
  const size_t UnalignedSize = StagingSize + Alignment - PageSize;
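  // Over-allocating by (Alignment - PageSize) guarantees that the mapping
  // contains an Alignment-aligned span of StagingSize bytes: mapped addresses
  // are at least page-aligned, so the worst-case offset from an Alignment
  // boundary is Alignment - PageSize.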
  void* chunkPool[MaxTempChunks];
  // Allocate a contiguous chunk that we can partition for testing.
  void* stagingArea = mapMemory(UnalignedSize);
  if (!stagingArea) {
    return false;
  }
  // Ensure that the staging area is aligned.
  unmapPages(stagingArea, UnalignedSize);
  if (offsetFromAligned(stagingArea)) {
    const size_t Offset = offsetFromAligned(stagingArea);
    // Place the area at the lowest aligned address.
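    // For example, with Alignment == 0x100000 (1 MiB), a mapping at the
    // hypothetical address 0x7f0000012000 has Offset == 0x12000 and is moved
    // up to the next boundary at 0x7f0000100000.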
    stagingArea = (void*)(uintptr_t(stagingArea) + (Alignment - Offset));
  }
  mapMemoryAt(stagingArea, StagingSize);
  // Make sure there are no available chunks below the staging area.
  int tempChunks;
  if (!fillSpaceBeforeStagingArea(tempChunks, stagingArea, chunkPool, false)) {
    return false;
  }
  // Unmap the staging area so we can set it up for testing.
  unmapPages(stagingArea, StagingSize);
  // Check that the first chunk is used if it is aligned.
  CHECK(positionIsCorrect("xxooxxx---------", stagingArea, chunkPool,
                          tempChunks));
  // Check that the first chunk is used if it can be aligned.
  CHECK(positionIsCorrect("x-ooxxx---------", stagingArea, chunkPool,
                          tempChunks));
  // Check that an aligned chunk after a single unalignable chunk is used.
  CHECK(positionIsCorrect("x--xooxxx-------", stagingArea, chunkPool,
                          tempChunks));
  // Check that we fall back to the slow path after two unalignable chunks.
  CHECK(positionIsCorrect("x--xx--xoo--xxx-", stagingArea, chunkPool,
                          tempChunks));
  // Check that we also fall back after an unalignable and an alignable chunk.
  CHECK(positionIsCorrect("x--xx---x-oo--x-", stagingArea, chunkPool,
                          tempChunks));
  // Check that the last ditch allocator works as expected.
  CHECK(positionIsCorrect("x--xx--xx-oox---", stagingArea, chunkPool,
                          tempChunks, UseLastDitchAllocator));
  // Check that the last ditch allocator can deal with naturally aligned chunks.
  CHECK(positionIsCorrect("x--xx--xoo------", stagingArea, chunkPool,
                          tempChunks, UseLastDitchAllocator));

  // Clean up.
  while (--tempChunks >= 0) {
    unmapPages(chunkPool[tempChunks], 2 * Chunk);
  }
  return true;
}

bool testGCAllocatorDown(const size_t PageSize) {
  const size_t UnalignedSize = StagingSize + Alignment - PageSize;
  void* chunkPool[MaxTempChunks];
  // Allocate a contiguous chunk that we can partition for testing.
  void* stagingArea = mapMemory(UnalignedSize);
  if (!stagingArea) {
    return false;
  }
  // Ensure that the staging area is aligned.
  unmapPages(stagingArea, UnalignedSize);
  if (offsetFromAligned(stagingArea)) {
    void* stagingEnd = (void*)(uintptr_t(stagingArea) + UnalignedSize);
    const size_t Offset = offsetFromAligned(stagingEnd);
    // Place the area at the highest aligned address.
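    // Since StagingSize (16 * Chunk) is a multiple of Alignment (2 * Chunk),
    // anchoring the end of the staging area at the highest aligned address
    // below stagingEnd leaves its start aligned as well.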
    stagingArea = (void*)(uintptr_t(stagingEnd) - Offset - StagingSize);
  }
  mapMemoryAt(stagingArea, StagingSize);
  // Make sure there are no available chunks above the staging area.
  int tempChunks;
  if (!fillSpaceBeforeStagingArea(tempChunks, stagingArea, chunkPool, true)) {
    return false;
  }
  // Unmap the staging area so we can set it up for testing.
  unmapPages(stagingArea, StagingSize);
  // Check that the first chunk is used if it is aligned.
  CHECK(positionIsCorrect("---------xxxooxx", stagingArea, chunkPool,
                          tempChunks));
  // Check that the first chunk is used if it can be aligned.
  CHECK(positionIsCorrect("---------xxxoo-x", stagingArea, chunkPool,
                          tempChunks));
  // Check that an aligned chunk after a single unalignable chunk is used.
  CHECK(positionIsCorrect("-------xxxoox--x", stagingArea, chunkPool,
                          tempChunks));
  // Check that we fall back to the slow path after two unalignable chunks.
  CHECK(positionIsCorrect("-xxx--oox--xx--x", stagingArea, chunkPool,
                          tempChunks));
  // Check that we also fall back after an unalignable and an alignable chunk.
  CHECK(positionIsCorrect("-x--oo-x---xx--x", stagingArea, chunkPool,
                          tempChunks));
  // Check that the last ditch allocator works as expected.
  CHECK(positionIsCorrect("---xoo-xx--xx--x", stagingArea, chunkPool,
                          tempChunks, UseLastDitchAllocator));
  // Check that the last ditch allocator can deal with naturally aligned chunks.
  CHECK(positionIsCorrect("------oox--xx--x", stagingArea, chunkPool,
                          tempChunks, UseLastDitchAllocator));

  // Clean up.
  while (--tempChunks >= 0) {
    unmapPages(chunkPool[tempChunks], 2 * Chunk);
  }
  return true;
}

bool fillSpaceBeforeStagingArea(int& tempChunks, void* stagingArea,
                                void** chunkPool, bool addressesGrowDown) {
  // Make sure there are no available chunks before the staging area.
  tempChunks = 0;
  chunkPool[tempChunks++] = mapMemory(2 * Chunk);
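  // `(ptr < stagingArea) ^ addressesGrowDown` is true while the most recent
  // chunk still lies on the side of the staging area that the OS hands out
  // first: below it when addresses grow up, above it when they grow down.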
  while (tempChunks < MaxTempChunks && chunkPool[tempChunks - 1] &&
         (chunkPool[tempChunks - 1] < stagingArea) ^ addressesGrowDown) {
    chunkPool[tempChunks++] = mapMemory(2 * Chunk);
    if (!chunkPool[tempChunks - 1]) {
      break;  // We already have our staging area, so OOM here is okay.
    }
    if ((chunkPool[tempChunks - 1] < chunkPool[tempChunks - 2]) ^
        addressesGrowDown) {
      break;  // The address growth direction is inconsistent!
    }
  }
  // OOM also means success in this case.
  if (!chunkPool[tempChunks - 1]) {
    --tempChunks;
    return true;
  }
  // Bail if we can't guarantee the right address space layout.
  if ((chunkPool[tempChunks - 1] < stagingArea) ^ addressesGrowDown ||
      (tempChunks > 1 &&
       (chunkPool[tempChunks - 1] < chunkPool[tempChunks - 2]) ^
           addressesGrowDown)) {
    while (--tempChunks >= 0) {
      unmapPages(chunkPool[tempChunks], 2 * Chunk);
    }
    unmapPages(stagingArea, StagingSize);
    return false;
  }
  return true;
}

bool positionIsCorrect(const char* str, void* base, void** chunkPool,
                       int tempChunks,
                       AllocType allocator = UseNormalAllocator) {
  // str represents a region of memory, with each character representing a
  // region of Chunk bytes. str should contain only x, o and -, where
  // x = mapped by the test to set up the initial conditions,
  // o = mapped by the GC allocator, and
  // - = unmapped.
  // base should point to a region of contiguous free memory
  // large enough to hold strlen(str) chunks of Chunk bytes.
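  // For example, "x-ooxxx---------" maps chunks 0 and 4-6 up front, leaves
  // chunk 1 unmapped, and expects the allocator to place its 2 * Chunk
  // allocation at base + 2 * Chunk.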
  int len = strlen(str);
  int i;
  // Find the index of the desired address.
  for (i = 0; i < len && str[i] != 'o'; ++i)
    ;
  void* desired = (void*)(uintptr_t(base) + i * Chunk);
  // Map the regions indicated by str.
  for (i = 0; i < len; ++i) {
    if (str[i] == 'x') {
      mapMemoryAt((void*)(uintptr_t(base) + i * Chunk), Chunk);
    }
  }
  // Allocate using the GC's allocator.
  void* result;
  if (allocator == UseNormalAllocator) {
    result = js::gc::MapAlignedPages(2 * Chunk, Alignment);
  } else {
    result = js::gc::TestMapAlignedPagesLastDitch(2 * Chunk, Alignment);
  }
  // Clean up the mapped regions.
  if (result) {
    js::gc::UnmapPages(result, 2 * Chunk);
  }
  for (--i; i >= 0; --i) {
    if (str[i] == 'x') {
      js::gc::UnmapPages((void*)(uintptr_t(base) + i * Chunk), Chunk);
    }
  }
  // CHECK returns from the calling test on failure, so release the chunk pool
  // here before reporting a mismatch.
  if (result != desired) {
    while (--tempChunks >= 0) {
      js::gc::UnmapPages(chunkPool[tempChunks], 2 * Chunk);
    }
  }
  return result == desired;
}

#if defined(XP_WIN)

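// Note: when passed a non-null address, VirtualAlloc either reserves the
// region at exactly that address or fails and returns nullptr; it never
// succeeds at a different address, so no equality check is needed here.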
void* mapMemoryAt(void* desired, size_t length) {
  return VirtualAlloc(desired, length, MEM_COMMIT | MEM_RESERVE,
                      PAGE_READWRITE);
}

void* mapMemory(size_t length) {
  return VirtualAlloc(nullptr, length, MEM_COMMIT | MEM_RESERVE,
                      PAGE_READWRITE);
}

void unmapPages(void* p, size_t size) {
  MOZ_ALWAYS_TRUE(VirtualFree(p, 0, MEM_RELEASE));
}

#elif defined(__wasi__)

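// WASI provides no way to map memory at a caller-chosen address, so a
// fixed-placement request always fails.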
void* mapMemoryAt(void* desired, size_t length) { return nullptr; }

void* mapMemory(size_t length) {
  void* addr = nullptr;
  if (int err = posix_memalign(&addr, js::gc::SystemPageSize(), length)) {
    MOZ_ASSERT(err == ENOMEM);
  }
  MOZ_ASSERT(addr);
  memset(addr, 0, length);
  return addr;
}

void unmapPages(void* p, size_t size) { free(p); }

#else

void* mapMemoryAt(void* desired, size_t length) {
  void* region = mmap(desired, length, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANON, -1, 0);
  if (region == MAP_FAILED) {
    return nullptr;
  }
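  // Without MAP_FIXED, mmap treats `desired` as a hint only and may place the
  // mapping at a different address, so detect that case and back out.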
  if (region != desired) {
    if (munmap(region, length)) {
      MOZ_RELEASE_ASSERT(errno == ENOMEM);
    }
    return nullptr;
  }
  return region;
}

void* mapMemory(size_t length) {
  int prot = PROT_READ | PROT_WRITE;
  int flags = MAP_PRIVATE | MAP_ANON;
  int fd = -1;
  off_t offset = 0;
  void* region = mmap(nullptr, length, prot, flags, fd, offset);
  if (region == MAP_FAILED) {
    return nullptr;
  }
  return region;
}

void unmapPages(void* p, size_t size) {
  if (munmap(p, size)) {
    MOZ_RELEASE_ASSERT(errno == ENOMEM);
  }
}

#endif

END_TEST(testGCAllocator)