1 /*
2  *  vm_alloc.cpp - Wrapper to various virtual memory allocation schemes
3  *                 (supports mmap, vm_allocate or fallbacks to malloc)
4  *
5  * Copyright (c) 2000-2005 ARAnyM developer team (see AUTHORS)
6  *
7  * Originally derived from Basilisk II (C) 1997-2000 Christian Bauer
8  *
9  * This file is part of the ARAnyM project which builds a new and powerful
10  * TOS/FreeMiNT compatible virtual machine running on almost any hardware.
11  *
12  * ARAnyM is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2 of the License, or
15  * (at your option) any later version.
16  *
17  * ARAnyM is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  * GNU General Public License for more details.
21  *
22  * You should have received a copy of the GNU General Public License
23  * along with ARAnyM; if not, write to the Free Software
24  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
25  */
26 
27 #include "sysdeps.h"
28 #include "vm_alloc.h"
29 
30 #if defined(OS_freebsd) && defined(CPU_x86_64)
31 #	include <sys/resource.h>
32 #endif
33 
34 # include <cstdlib>
35 # include <cstring>
36 #ifdef HAVE_WIN32_VM
37     #undef WIN32_LEAN_AND_MEAN
38 	#define WIN32_LEAN_AND_MEAN 1 /* avoid including junk */
39 	#include <windows.h>
40     #undef WIN32_LEAN_AND_MEAN /* to avoid redefinition in SDL headers */
41 #endif
42 
43 #ifdef HAVE_FCNTL_H
	/* for O_RDWR - RedHat FC2 needs this */
45 	#include <fcntl.h>
46 #endif
47 
48 #ifdef HAVE_MACH_VM
49 	#ifndef HAVE_MACH_TASK_SELF
50 		#ifdef HAVE_TASK_SELF
51 			#define mach_task_self task_self
52 		#else
53 			#error "No task_self(), you lose."
54 		#endif
55 	#endif
56 #endif
57 
58 /* We want MAP_32BIT, if available, for SheepShaver and BasiliskII
59    because the emulated target is 32-bit and this helps to allocate
60    memory so that branches could be resolved more easily (32-bit
61    displacement to code in .text), on AMD64 for example.  */
62 #if !defined(MAP_32BIT) && defined(MAP_LOW32)
63 	#define MAP_32BIT MAP_LOW32
64 #endif
65 #ifndef MAP_32BIT
66 	#define MAP_32BIT 0
67 #endif
68 #ifndef MAP_ANON
69 	#define MAP_ANON 0
70 #endif
71 #ifndef MAP_ANONYMOUS
72 	#define MAP_ANONYMOUS 0
73 #endif
74 
75 #define MAP_EXTRA_FLAGS 0
76 
#ifdef HAVE_MACH_VM
#elif defined(HAVE_MMAP_VM)
	#if defined(__linux__) && defined(CPU_i386)
		/*  Force a reasonable address below 0x80000000 on x86 so that we
			don't get addresses above when the program is run on AMD64.
			NOTE: this is empirically determined on Linux/x86.  */
		#define MAP_BASE	0x10000000
	#else
		#define MAP_BASE	0x00000000
	#endif
	/* Rolling placement hints for mmap(): vm_acquire() advances these past
	   each successful allocation, one hint for generic requests and one
	   for VM_MAP_32BIT requests.  */
	static char * next_address = (char *)MAP_BASE;
	static char * next_address_32bit = (char *)MAP_BASE;
#endif

#ifdef HAVE_MMAP_VM
	#ifdef HAVE_MMAP_ANON
	/* Anonymous mapping supported via MAP_ANON: no backing fd needed.  */
	#define map_flags	(MAP_ANON | MAP_EXTRA_FLAGS)
	#define zero_fd		-1
	#elif defined(HAVE_MMAP_ANONYMOUS)
	/* Anonymous mapping supported via MAP_ANONYMOUS: no backing fd needed.  */
	#define map_flags	(MAP_ANONYMOUS | MAP_EXTRA_FLAGS)
	#define zero_fd		-1
	#else
	/* No anonymous mapping: fall back to mapping /dev/zero; the fd is a
	   real variable here (opened in vm_init(), closed in vm_exit()),
	   whereas above it is a macro — vm_init()/vm_exit() test this with
	   #ifndef zero_fd.  */
	#define map_flags	(MAP_EXTRA_FLAGS)
	static int zero_fd	= -1;
	#endif
#endif
103 
104 /* Translate generic VM map flags to host values.  */
105 
106 #ifdef HAVE_MACH_VM
107 #elif defined(HAVE_MMAP_VM)
translate_map_flags(int vm_flags)108 static int translate_map_flags(int vm_flags)
109 {
110 	int flags = 0;
111 	if (vm_flags & VM_MAP_SHARED)
112 		flags |= MAP_SHARED;
113 	if (vm_flags & VM_MAP_PRIVATE)
114 		flags |= MAP_PRIVATE;
115 	if (vm_flags & VM_MAP_FIXED)
116 		flags |= MAP_FIXED;
117 	if (vm_flags & VM_MAP_32BIT)
118 		flags |= MAP_32BIT;
119 	return flags;
120 }
121 #endif
122 
123 /* Align ADDR and SIZE to 64K boundaries.  */
124 
125 #ifdef HAVE_WIN32_VM
align_addr_segment(LPVOID addr)126 static inline LPVOID align_addr_segment(LPVOID addr)
127 {
128 	return (LPVOID)(((DWORD_PTR)addr) & -65536);
129 }
130 
align_size_segment(LPVOID addr,DWORD size)131 static inline DWORD align_size_segment(LPVOID addr, DWORD size)
132 {
133 	return size + ((DWORD_PTR)addr - (DWORD_PTR)align_addr_segment(addr));
134 }
135 #endif
136 
137 /* Translate generic VM prot flags to host values.  */
138 
139 #ifdef HAVE_WIN32_VM
translate_prot_flags(int prot_flags)140 static int translate_prot_flags(int prot_flags)
141 {
142 	int prot = PAGE_READWRITE;
143 	if (prot_flags == (VM_PAGE_EXECUTE | VM_PAGE_READ | VM_PAGE_WRITE))
144 		prot = PAGE_EXECUTE_READWRITE;
145 	else if (prot_flags == (VM_PAGE_EXECUTE | VM_PAGE_READ))
146 		prot = PAGE_EXECUTE_READ;
147 	else if (prot_flags == (VM_PAGE_READ | VM_PAGE_WRITE))
148 		prot = PAGE_READWRITE;
149 	else if (prot_flags == VM_PAGE_READ)
150 		prot = PAGE_READONLY;
151 	else if (prot_flags == VM_PAGE_NOACCESS)
152 		prot = PAGE_NOACCESS;
153 	return prot;
154 }
155 #endif
156 
157 /* Initialize the VM system. Returns 0 if successful, -1 for errors.  */
158 
int vm_init(void)
{
#ifdef HAVE_MMAP_VM
#ifndef zero_fd
	// zero_fd is a real variable (not a macro) only when anonymous mmap
	// is unavailable; open /dev/zero as the backing file for mappings
	zero_fd = open("/dev/zero", O_RDWR);
	if (zero_fd < 0)
		return -1;
#endif
#endif
	return 0;
}
170 
171 /* Deallocate all internal data used to wrap virtual memory allocators.  */
172 
void vm_exit(void)
{
#ifdef HAVE_MMAP_VM
#ifndef zero_fd
	// Close the /dev/zero fd opened by vm_init(); -1 means it was never
	// opened (or was already closed), so the call is idempotent
	if (zero_fd != -1) {
		close(zero_fd);
		zero_fd = -1;
	}
#endif
#endif
}
184 
185 /* Allocate zero-filled memory of SIZE bytes. The mapping is private
186    and default protection bits are read / write. The return value
187    is the actual mapping address chosen or VM_MAP_FAILED for errors.  */
188 
void * vm_acquire(size_t size, int options)
{
	void * addr;

	// VM_MAP_FIXED are to be used with vm_acquire_fixed() only
	if (options & VM_MAP_FIXED)
		return VM_MAP_FAILED;

#ifdef HAVE_MACH_VM
	// vm_allocate() returns a zero-filled memory region
	// (last argument TRUE = let the kernel pick the address)
	if (vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, TRUE) != KERN_SUCCESS)
		return VM_MAP_FAILED;

	// Sanity checks for 64-bit platforms: a VM_MAP_32BIT request must not
	// end above the 4GB boundary
	if (sizeof(void *) > 4 && (options & VM_MAP_32BIT) && !(((char *)addr + size) <= (char *)0xffffffff))
	{
		vm_release(addr, size);
		return VM_MAP_FAILED;
	}
#elif defined(HAVE_MMAP_VM)
	int fd = zero_fd;
	int the_map_flags = translate_map_flags(options) | map_flags;
	// Pick the rolling placement hint matching the kind of request
	char **base = (options & VM_MAP_32BIT) ? &next_address_32bit : &next_address;

//
// FREEBSD has no MAP_32BIT on x64
// Hack to limit allocation to lower 32 Bit:
// temporarily shrink RLIMIT_DATA so mmap() places the region low,
// then restore the old limit right after the call
#if defined(OS_freebsd) && defined(CPU_x86_64)
	static int mode32 = 0;
	static rlimit oldlim;
        if (!mode32 && (options & VM_MAP_32BIT)) {
	  getrlimit(RLIMIT_DATA, &oldlim);
          struct rlimit rlim;
          rlim.rlim_cur = rlim.rlim_max = 0x10000000;
          setrlimit(RLIMIT_DATA, &rlim);
          mode32 = 1;
        }
#	define RESTORE_MODE	if (mode32) { setrlimit(RLIMIT_DATA, &oldlim); mode32 = 0;}
#else
#	define RESTORE_MODE
#endif

	// *base is only a hint (no MAP_FIXED): the kernel may place the
	// mapping elsewhere
	addr = mmap((void *)(*base), size, VM_PAGE_DEFAULT, the_map_flags, fd, 0);
	RESTORE_MODE;
	if (addr == (void *)MAP_FAILED)
		return VM_MAP_FAILED;

	// Sanity checks for 64-bit platforms
	if (sizeof(void *) > 4 && (options & VM_MAP_32BIT) && !(((char *)addr + size) <= (char *)0xffffffff))
	{
		vm_release(addr, size);
		return VM_MAP_FAILED;
	}

	// Advance the placement hint past the newly mapped region
	*base = (char *)addr + size;

#else
#ifdef HAVE_WIN32_VM
	if ((addr = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT, PAGE_EXECUTE_READWRITE)) == NULL)
		return VM_MAP_FAILED;

	// Sanity checks for 64-bit platforms
	if (sizeof(void *) > 4 && (options & VM_MAP_32BIT) && !(((char *)addr + size) <= (char *)0xffffffff))
	{
		vm_release(addr, size);
		return VM_MAP_FAILED;
	}
#else
	// Last-resort fallback: plain calloc() — zero-filled like the other
	// backends, but page protections are not available
	if ((addr = calloc(size, 1)) == 0)
		return VM_MAP_FAILED;

	// Sanity checks for 64-bit platforms
	if (sizeof(void *) > 4 && (options & VM_MAP_32BIT) && !(((char *)addr + size) <= (char *)0xffffffff))
	{
		free(addr);
		return VM_MAP_FAILED;
	}

	// Omit changes for protections because they are not supported in this mode
	return addr;
#endif
#endif

	// Explicitly protect the newly mapped region here because on some systems,
	// say MacOS X, mmap() doesn't honour the requested protection flags.
	if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
	{
		vm_release(addr, size);
		return VM_MAP_FAILED;
	}

	return addr;
}
282 
/* Allocate zero-filled memory at exactly ADDR (which must be page-aligned).
   Returns true if successful, false on errors.  */
285 
bool vm_acquire_fixed(void * addr, size_t size, int options)
{
	// Fixed mappings are required to be private
	if (options & VM_MAP_SHARED)
		return false;

#ifdef HAVE_MACH_VM
	// vm_allocate() returns a zero-filled memory region
	// (last argument 0 = do not relocate: allocate exactly at ADDR)
	if (vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, 0) != KERN_SUCCESS)
		return false;
#elif defined(HAVE_MMAP_VM)
	const int extra_map_flags = translate_map_flags(options);

	// MAP_FIXED forces placement at ADDR (replacing any existing mapping
	// in that range)
	if (mmap((void *)addr, size, VM_PAGE_DEFAULT, extra_map_flags | map_flags | MAP_FIXED, zero_fd, 0) == MAP_FAILED)
		return false;
#else
#ifdef HAVE_WIN32_VM
	// Windows cannot allocate Low Memory
	if (addr == NULL)
		return false;

	// Allocate a possibly offset region to align on 64K boundaries
	// (VirtualAlloc works at the 64K allocation granularity)
	LPVOID req_addr = align_addr_segment(addr);
	DWORD  req_size = align_size_segment(addr, size);
	LPVOID ret_addr = VirtualAlloc(req_addr, req_size, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE);
	if (ret_addr != req_addr)
		return false;
#else
	// Unsupported
	return false;
#endif
#endif

	// Explicitly protect the newly mapped region here because on some systems,
	// say MacOS X, mmap() doesn't honour the requested protection flags.
	if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
		return false;

	return true;
}
326 
327 /* Deallocate any mapping for the region starting at ADDR and extending
328    LEN bytes. Returns 0 if successful, -1 on errors.  */
329 
int vm_release(void * addr, size_t size)
{
	// Safety check: don't try to release memory that was not allocated
	if (addr == VM_MAP_FAILED)
		return 0;

#ifdef HAVE_MACH_VM
	if (vm_deallocate(mach_task_self(), (vm_address_t)addr, size) != KERN_SUCCESS)
		return -1;
#elif defined(HAVE_MMAP_VM)
	if (munmap((void *)addr, size) != 0)
		return -1;

#else
#ifdef HAVE_WIN32_VM
	// MEM_RELEASE with size 0 frees the whole reservation; re-derive the
	// 64K-aligned base that vm_acquire_fixed() actually reserved
	if (VirtualFree(align_addr_segment(addr), 0, MEM_RELEASE) == 0)
		return -1;
	(void) size;
#else
	free(addr);
#endif
#endif

	return 0;
}
355 
356 /* Change the memory protection of the region starting at ADDR and
357    extending LEN bytes to PROT. Returns 0 if successful, -1 for errors.  */
358 
int vm_protect(void * addr, size_t size, int prot)
{
#ifdef HAVE_MACH_VM
	// This resolves (by C++ overloading on arity) to Mach's 5-argument
	// vm_protect(), not a recursive call to this wrapper
	int ret_code = vm_protect(mach_task_self(), (vm_address_t)addr, size, 0, prot);
	return ret_code == KERN_SUCCESS ? 0 : -1;
#elif defined(HAVE_MMAP_VM)
	int ret_code = mprotect((void *)addr, size, prot);
	return ret_code == 0 ? 0 : -1;
#elif defined(HAVE_WIN32_VM)
	DWORD old_prot;
	int ret_code = VirtualProtect(addr, size, translate_prot_flags(prot), &old_prot);
	return ret_code != 0 ? 0 : -1;
#else
	// Unsupported
	return -1;
#endif
}
376 
377 /* Returns the size of a page.  */
378 
int vm_get_page_size(void)
{
#ifdef _WIN32
	/* Win32 VirtualAlloc() places regions at the allocation granularity
	   (typically 64K), which can exceed the CPU page size; report the
	   larger of the two so callers align to a usable boundary.  */
	SYSTEM_INFO info;
	GetSystemInfo(&info);
	return (info.dwAllocationGranularity > info.dwPageSize)
		? (int)info.dwAllocationGranularity
		: (int)info.dwPageSize;
#else
	return getpagesize();
#endif
}
391 
392 #ifdef CONFIGURE_TEST_VM_MAP
393 /* Tests covered here:
394    - TEST_VM_PROT_* program slices actually succeeds when a crash occurs
395    - TEST_VM_MAP_ANON* program slices succeeds when it could be compiled
396 */
397 #include <signal.h>
/* SIGSEGV handler for the probe slices below: a fault on the protected
   page is the expected outcome, reported via exit status 2.  */
void handler(int sig)
{
	(void)sig;
	exit(2);
}
int main(void)
{
	vm_init();
	// Install the SIGSEGV handler: TEST_VM_PROT_* slices *expect* a fault,
	// which the handler converts into exit status 2
	signal(SIGSEGV, handler);

#define page_align(address) ((char *)((uintptr)(address) & -page_size))
	const unsigned long page_size = vm_get_page_size();

	const int area_size = 6 * page_size;
	// NOTE(review): single-argument call presumably relies on a default
	// for vm_acquire()'s options parameter in vm_alloc.h — verify
	volatile char * area = (volatile char *) vm_acquire(area_size);
	// An address in the middle of the area's 4th page
	volatile char * fault_address = area + (page_size * 7) / 2;

#if defined(TEST_VM_MMAP_ANON) || defined(TEST_VM_MMAP_ANONYMOUS)
	// Anonymous-mmap probe: success = acquire and release both work
	if (area == VM_MAP_FAILED)
		return 1;

	if (vm_release((char *)area, area_size) < 0)
		return 1;

	return 0;
#endif

#if defined(TEST_VM_PROT_NONE_READ) || defined(TEST_VM_PROT_NONE_WRITE)
	// Exit 0 early here means "couldn't even set up the probe"; the real
	// verdict is the exit-2 path through the SIGSEGV handler below
	if (area == VM_MAP_FAILED)
		return 0;

	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_NOACCESS) < 0)
		return 0;
#endif

#if defined(TEST_VM_PROT_RDWR_WRITE)
	if (area == VM_MAP_FAILED)
		return 1;

	// Drop to read-only, then restore read-write; both must succeed
	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
		return 1;

	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ | VM_PAGE_WRITE) < 0)
		return 1;
#endif

#if defined(TEST_VM_PROT_READ_WRITE)
	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
		return 0;
#endif

#if defined(TEST_VM_PROT_NONE_READ)
	// this should cause a core dump
	char foo = *fault_address;
	// if we get here vm_protect(VM_PAGE_NOACCESS) did not work
	return 0;
#endif

#if defined(TEST_VM_PROT_NONE_WRITE) || defined(TEST_VM_PROT_READ_WRITE)
	// this should cause a core dump
	*fault_address = 'z';
	// if we get here vm_protect(VM_PAGE_READ) did not work
	return 0;
#endif

#if defined(TEST_VM_PROT_RDWR_WRITE)
	// this should not cause a core dump
	*fault_address = 'z';
	return 0;
#endif
}
468 #endif
469