// Implement mprotect() for Win32
// Copyright (C) 2000, Brian King
// GNU General Public License
4
5 #include <float.h>
6
7 #include "sysconfig.h"
8 #include "sysdeps.h"
9 #include "uae/memory.h"
10 #include "uae/mman.h"
11 #include "uae/vm.h"
12 #include "options.h"
13 #include "autoconf.h"
14 #include "gfxboard.h"
15 #include "cpuboard.h"
16 #include "rommgr.h"
17 #include "newcpu.h"
18 #ifdef WINUAE
19 #include "win32.h"
20 #endif
21
22 #ifdef FSUAE // NL
23 #include "uae/fs.h"
24 #endif
25
26 #if defined(NATMEM_OFFSET)
27
/* Largest Z3 fast RAM size allowed by the reserved area; set by preinit_shm(). */
uae_u32 max_z3fastmem;

/* BARRIER is used in case Amiga memory is accessed across memory banks,
 * for example move.l $1fffffff,d0 when $10000000-$1fffffff is mapped and
 * $20000000+ is not mapped.
 * Note: BARRIER will probably effectively be rounded up to the host memory
 * page size.
 */
#define BARRIER 32

/* Upper clamps on usable memory for 32-bit and 64-bit hosts. */
#define MAXZ3MEM32 0x7F000000
#define MAXZ3MEM64 0xF0000000

/* Shared-memory segment descriptor table; key == -1 marks a free slot. */
static struct uae_shmid_ds shmids[MAX_SHMID];
/* Reserved host address range backing the emulated Amiga address space.
 * natmem_offset is the Amiga-address-0 base (may be biased below
 * natmem_reserved by doinit_shm() for high RTG placements). */
uae_u8 *natmem_reserved, *natmem_offset, *natmem_offset_end;
uae_u32 natmem_reserved_size;
/* Picasso96 (RTG) VRAM placement inside the natmem area. */
static uae_u8 *p96mem_offset;
static int p96mem_size;
static uae_u32 p96base_offset;
static SYSTEM_INFO si;
#ifdef FSUAE
static uint32_t maxmem;
/* FIXME: check if signed int is a bit small */
/* FIXME: check where maxmem is set */
#else
/* Memory-size override in megabytes (<0 = unlimited, 0 = default). */
int maxmem;
#endif
/* True when the chosen RAM layout permits JIT direct memory access. */
bool jit_direct_compatible_memory;
56
virtualallocwithlock(LPVOID addr,SIZE_T size,DWORD allocationtype,DWORD protect)57 static uae_u8 *virtualallocwithlock (LPVOID addr, SIZE_T size, DWORD allocationtype, DWORD protect)
58 {
59 uae_u8 *p = (uae_u8*)VirtualAlloc (addr, size, allocationtype, protect);
60 return p;
61 }
/* Counterpart of virtualallocwithlock(); forwards to VirtualFree() and
 * deliberately discards its result. */
static void virtualfreewithlock (LPVOID addr, SIZE_T size, DWORD freetype)
{
	(void) VirtualFree (addr, size, freetype);
}
66
/* Memory-pressure fallback: shrink the configured RAM expansions one step.
 * Returns the number of bytes freed by this step (0 = nothing left to
 * shrink). Called repeatedly by doinit_shm() until the layout fits.
 * Shrink order: drop Z3 fast #2 first, then drop/halve Z3 chip, then halve
 * Z3 fast (moving a quarter into Z3 fast #2), finally halve UAE-Z3 RTG VRAM. */
static uae_u32 lowmem (void)
{
	uae_u32 change = 0;
	if (currprefs.z3fastmem_size + currprefs.z3fastmem2_size + currprefs.z3chipmem_size >= 8 * 1024 * 1024) {
		if (currprefs.z3fastmem2_size) {
			change = currprefs.z3fastmem2_size;
			currprefs.z3fastmem2_size = 0;
		} else if (currprefs.z3chipmem_size) {
			/* NOTE(review): unlike the branches below, changed_prefs.z3chipmem_size
			 * is not updated here — confirm this asymmetry is intended. */
			if (currprefs.z3chipmem_size <= 16 * 1024 * 1024) {
				change = currprefs.z3chipmem_size;
				currprefs.z3chipmem_size = 0;
			} else {
				change = currprefs.z3chipmem_size / 2;
				currprefs.z3chipmem_size /= 2;
			}
		} else {
			change = currprefs.z3fastmem_size - currprefs.z3fastmem_size / 4;
			currprefs.z3fastmem2_size = changed_prefs.z3fastmem2_size = currprefs.z3fastmem_size / 4;
			currprefs.z3fastmem_size /= 2;
			changed_prefs.z3fastmem_size = currprefs.z3fastmem_size;
		}
	} else if (currprefs.rtgmem_type == GFXBOARD_UAE_Z3 && currprefs.rtgmem_size >= 1 * 1024 * 1024) {
		change = currprefs.rtgmem_size - currprefs.rtgmem_size / 2;
		currprefs.rtgmem_size /= 2;
		changed_prefs.rtgmem_size = currprefs.rtgmem_size;
	}
	/* Z3 fast #2 below 128M is not worth keeping; drop it entirely. */
	if (currprefs.z3fastmem2_size < 128 * 1024 * 1024)
		currprefs.z3fastmem2_size = changed_prefs.z3fastmem2_size = 0;
	return change;
}
97
98 #ifdef FSUAE
99 #else
/* Query which pages inside a write-watched region were modified since the
 * last reset. First GetWriteWatch() argument 0 = do not reset watch state. */
int mman_GetWriteWatch (PVOID lpBaseAddress, SIZE_T dwRegionSize, PVOID *lpAddresses, PULONG_PTR lpdwCount, PULONG lpdwGranularity)
{
	int err = GetWriteWatch (0, lpBaseAddress, dwRegionSize, lpAddresses, lpdwCount, lpdwGranularity);
	return err;
}
/* Clear write-watch state for a region; failure is logged but otherwise ignored. */
void mman_ResetWatch (PVOID lpBaseAddress, SIZE_T dwRegionSize)
{
	UINT err = ResetWriteWatch (lpBaseAddress, dwRegionSize);
	if (err != 0)
		write_log (_T("ResetWriteWatch() failed, %d\n"), GetLastError ());
}
109 #endif
110
/* Usable memory budget in bytes (clamped host RAM / maxmem override);
 * set by preinit_shm(), consumed by doinit_shm(). */
static uae_u64 size64;
#ifdef _WIN32
/* Signature of kernel32's GlobalMemoryStatusEx, resolved at runtime in
 * preinit_shm() because it is not available on very old Windows versions. */
typedef BOOL (CALLBACK* GLOBALMEMORYSTATUSEX)(LPMEMORYSTATUSEX);
#endif
115
clear_shm(void)116 static void clear_shm (void)
117 {
118 shm_start = NULL;
119 for (int i = 0; i < MAX_SHMID; i++) {
120 memset (&shmids[i], 0, sizeof(struct uae_shmid_ds));
121 shmids[i].key = -1;
122 }
123 }
124
preinit_shm(void)125 bool preinit_shm (void)
126 {
127 #ifdef FSUAE
128 write_log("preinit_shm\n");
129 #endif
130 uae_u64 total64;
131 uae_u64 totalphys64;
132 #ifdef _WIN32
133 MEMORYSTATUS memstats;
134 GLOBALMEMORYSTATUSEX pGlobalMemoryStatusEx;
135 MEMORYSTATUSEX memstatsex;
136 #endif
137 uae_u32 max_allowed_mman;
138
139 if (natmem_reserved)
140 #ifdef _WIN32
141 VirtualFree (natmem_reserved, 0, MEM_RELEASE);
142 #else
143 #ifdef FSUAE
144 free (natmem_reserved);
145 #endif
146 #endif
147 natmem_reserved = NULL;
148 natmem_offset = NULL;
149 if (p96mem_offset) {
150 #ifdef _WIN32
151 VirtualFree (p96mem_offset, 0, MEM_RELEASE);
152 #else
153 #ifdef FSUAE
154 /* Don't free p96mem_offset - it is freed as part of natmem_offset */
155 // free (p96mem_offset);
156 #endif
157 #endif
158 }
159 p96mem_offset = NULL;
160
161 GetSystemInfo (&si);
162 #ifdef FSUAE
163 max_allowed_mman = 2048;
164 #else
165 max_allowed_mman = 512 + 256;
166 #endif
167 #if 1
168 if (os_64bit) {
169 //#ifdef WIN64
170 // max_allowed_mman = 3072;
171 //#else
172 max_allowed_mman = 2048;
173 //#endif
174 }
175 #endif
176 if (maxmem > max_allowed_mman)
177 max_allowed_mman = maxmem;
178
179 #ifdef _WIN32
180 memstats.dwLength = sizeof(memstats);
181 GlobalMemoryStatus(&memstats);
182 totalphys64 = memstats.dwTotalPhys;
183 total64 = (uae_u64)memstats.dwAvailPageFile + (uae_u64)memstats.dwTotalPhys;
184 #ifdef FSUAE
185 pGlobalMemoryStatusEx = GlobalMemoryStatusEx;
186 #else
187 pGlobalMemoryStatusEx = (GLOBALMEMORYSTATUSEX)GetProcAddress (GetModuleHandle (_T("kernel32.dll")), "GlobalMemoryStatusEx");
188 #endif
189 if (pGlobalMemoryStatusEx) {
190 memstatsex.dwLength = sizeof (MEMORYSTATUSEX);
191 if (pGlobalMemoryStatusEx(&memstatsex)) {
192 totalphys64 = memstatsex.ullTotalPhys;
193 total64 = memstatsex.ullAvailPageFile + memstatsex.ullTotalPhys;
194 }
195 }
196 #else
197 #ifdef FSUAE
198 #ifdef __APPLE__
199 int mib[2];
200 size_t len;
201
202 mib[0] = CTL_HW;
203 // FIXME: check 64-bit compat
204 mib[1] = HW_MEMSIZE; /* gives a 64 bit int */
205 len = sizeof(totalphys64);
206 sysctl(mib, 2, &totalphys64, &len, NULL, 0);
207 total64 = (uae_u64) totalphys64;
208 #else
209 totalphys64 = sysconf (_SC_PHYS_PAGES) * (uae_u64)getpagesize();
210 total64 = (uae_u64)sysconf (_SC_PHYS_PAGES) * (uae_u64)getpagesize();
211 #endif
212 #endif
213 #endif
214 size64 = total64;
215 if (os_64bit) {
216 if (size64 > MAXZ3MEM64)
217 size64 = MAXZ3MEM64;
218 } else {
219 if (size64 > MAXZ3MEM32)
220 size64 = MAXZ3MEM32;
221 }
222 #ifdef FSUAE
223 /* FIXME: check */
224 if (maxmem == 0) {
225 #else
226 if (maxmem < 0) {
227 #endif
228 size64 = MAXZ3MEM64;
229 if (!os_64bit) {
230 if (totalphys64 < 1536 * 1024 * 1024)
231 max_allowed_mman = 256;
232 if (max_allowed_mman < 256)
233 max_allowed_mman = 256;
234 }
235 } else if (maxmem > 0) {
236 size64 = maxmem * 1024 * 1024;
237 }
238 if (size64 < 8 * 1024 * 1024)
239 size64 = 8 * 1024 * 1024;
240 if (max_allowed_mman * 1024 * 1024 > size64)
241 max_allowed_mman = size64 / (1024 * 1024);
242
243 uae_u32 natmem_size = (max_allowed_mman + 1) * 1024 * 1024;
244 if (natmem_size < 17 * 1024 * 1024)
245 natmem_size = 17 * 1024 * 1024;
246
247 //natmem_size = 257 * 1024 * 1024;
248
249 if (natmem_size > 0x80000000) {
250 natmem_size = 0x80000000;
251 }
252
253 write_log (_T("NATMEM: Total physical RAM %llu MB, all RAM %llu MB\n"),
254 totalphys64 >> 20, total64 >> 20);
255 write_log(_T("NATMEM: Attempting to reserve: %u MB\n"), natmem_size >> 20);
256
257 int vm_flags = UAE_VM_32BIT | UAE_VM_WRITE_WATCH;
258 #ifdef FSUAE
259 write_log("NATMEM: jit compiler %d\n", g_fs_uae_jit_compiler);
260 if (!g_fs_uae_jit_compiler) {
261 /* Not using the JIT compiler, so we do not need "32-bit memory". */
262 vm_flags &= ~UAE_VM_32BIT;
263 }
264 #endif
265 natmem_reserved = (uae_u8 *) uae_vm_reserve(natmem_size, vm_flags);
266
267 if (!natmem_reserved) {
268 if (natmem_size <= 768 * 1024 * 1024) {
269 uae_u32 p = 0x78000000 - natmem_size;
270 for (;;) {
271 #ifdef FSUAE
272 natmem_reserved = (uae_u8 *) uae_vm_reserve(natmem_size, vm_flags);
273 #else
274 natmem_reserved = (uae_u8*) VirtualAlloc((void*)(intptr_t)p, natmem_size, MEM_RESERVE | MEM_WRITE_WATCH, PAGE_READWRITE);
275 #endif
276 if (natmem_reserved)
277 break;
278 p -= 128 * 1024 * 1024;
279 if (p <= 128 * 1024 * 1024)
280 break;
281 }
282 }
283 }
284 if (!natmem_reserved) {
285 DWORD vaflags = MEM_RESERVE | MEM_WRITE_WATCH;
286 #ifdef _WIN32
287 #ifdef FSUAE
288 OSVERSIONINFO osVersion;
289 osVersion.dwOSVersionInfoSize = sizeof (OSVERSIONINFO);
290 bool os_vista = (osVersion.dwMajorVersion == 6 &&
291 osVersion.dwMinorVersion == 0);
292 #endif
293 #ifndef _WIN64
294 if (!os_vista)
295 vaflags |= MEM_TOP_DOWN;
296 #endif
297 #endif
298 for (;;) {
299 #ifdef FSUAE
300 natmem_reserved = (uae_u8 *) uae_vm_reserve(natmem_size, vm_flags);
301 #else
302 natmem_reserved = (uae_u8*)VirtualAlloc (NULL, natmem_size, vaflags, PAGE_READWRITE);
303 #endif
304 if (natmem_reserved)
305 break;
306 natmem_size -= 128 * 1024 * 1024;
307 if (!natmem_size) {
308 write_log (_T("Can't allocate 257M of virtual address space!?\n"));
309 natmem_size = 17 * 1024 * 1024;
310 #ifdef FSUAE
311 natmem_reserved = (uae_u8 *) uae_vm_reserve(natmem_size, vm_flags);
312 #else
313 natmem_reserved = (uae_u8*)VirtualAlloc (NULL, natmem_size, vaflags, PAGE_READWRITE);
314 #endif
315 if (!natmem_size) {
316 write_log (_T("Can't allocate 17M of virtual address space!? Something is seriously wrong\n"));
317 return false;
318 }
319 break;
320 }
321 }
322 }
323 natmem_reserved_size = natmem_size;
324 natmem_offset = natmem_reserved;
325 if (natmem_size <= 257 * 1024 * 1024) {
326 max_z3fastmem = 0;
327 } else {
328 max_z3fastmem = natmem_size;
329 }
330 write_log (_T("NATMEM: Reserved %p-%p (0x%08x %dM)\n"),
331 natmem_reserved, (uae_u8 *) natmem_reserved + natmem_reserved_size,
332 natmem_reserved_size, natmem_reserved_size / (1024 * 1024));
333
334 clear_shm ();
335
336 // write_log (_T("Max Z3FastRAM %dM. Total physical RAM %uM\n"), max_z3fastmem >> 20, totalphys64 >> 20);
337
338 canbang = 1;
339 return true;
340 }
341
342 static void resetmem (bool decommit)
343 {
344 int i;
345
346 if (!shm_start)
347 return;
348 for (i = 0; i < MAX_SHMID; i++) {
349 struct uae_shmid_ds *s = &shmids[i];
350 int size = s->size;
351 uae_u8 *shmaddr;
352 uae_u8 *result;
353
354 if (!s->attached)
355 continue;
356 if (!s->natmembase)
357 continue;
358 if (s->fake)
359 continue;
360 if (!decommit && ((uae_u8*)s->attached - (uae_u8*)s->natmembase) >= 0x10000000)
361 continue;
362 shmaddr = natmem_offset + ((uae_u8*)s->attached - (uae_u8*)s->natmembase);
363 if (decommit) {
364 VirtualFree (shmaddr, size, MEM_DECOMMIT);
365 } else {
366 result = virtualallocwithlock (shmaddr, size, decommit ? MEM_DECOMMIT : MEM_COMMIT, PAGE_READWRITE);
367 if (result != shmaddr)
368 write_log (_T("NATMEM: realloc(%p-%p,%d,%d,%s) failed, err=%d\n"), shmaddr, shmaddr + size, size, s->mode, s->name, GetLastError ());
369 else
370 write_log (_T("NATMEM: rellocated(%p-%p,%d,%s)\n"), shmaddr, shmaddr + size, size, s->name);
371 }
372 }
373 }
374
375 static ULONG getz2rtgaddr (int rtgsize)
376 {
377 ULONG start;
378 start = changed_prefs.fastmem_size;
379 if (changed_prefs.fastmem2_size >= 524288)
380 start += changed_prefs.fastmem2_size;
381 start += rtgsize - 1;
382 start &= ~(rtgsize - 1);
383 while (start & (changed_prefs.rtgmem_size - 1) && start < 4 * 1024 * 1024)
384 start += 1024 * 1024;
385 return start + 2 * 1024 * 1024;
386 }
387
388 static uae_u8 *va (uae_u32 offset, uae_u32 len, DWORD alloc, DWORD protect)
389 {
390 uae_u8 *addr;
391
392 addr = (uae_u8*)VirtualAlloc (natmem_offset + offset, len, alloc, protect);
393 if (addr) {
394 write_log (_T("VA(%p - %p, %4uM, %s)\n"),
395 natmem_offset + offset, natmem_offset + offset + len, len >> 20, (alloc & MEM_WRITE_WATCH) ? _T("WATCH") : _T("RESERVED"));
396 return addr;
397 }
398 write_log (_T("VA(%p - %p, %4uM, %s) failed %d\n"),
399 natmem_offset + offset, natmem_offset + offset + len, len >> 20, (alloc & MEM_WRITE_WATCH) ? _T("WATCH") : _T("RESERVED"), GetLastError ());
400 return NULL;
401 }
402
/* Lay out all configured RAM expansions inside the reserved natmem area.
 * Iteratively shrinks the configuration via lowmem() until it fits the
 * reservation and the size64 budget, decides between "real" Z3 mapping
 * (JIT-direct capable when it fits) and traditional UAE Z3 mapping, and
 * computes the RTG (Picasso96) VRAM placement. Returns canbang (non-zero
 * when JIT direct memory access is possible), 0 on failure.
 * Mutates: natmem_offset, p96mem_offset/size, p96base_offset,
 * currprefs/changed_prefs (z3autoconfig_start, possibly rtgmem_*),
 * jit_direct_compatible_memory, canbang. */
static int doinit_shm (void)
{
	uae_u32 size, totalsize, z3size, natmemsize, othersize;
	uae_u32 startbarrier, z3offset, align;
	int rounds = 0;
	uae_u32 z3rtgmem_size;

	canbang = 1;
	natmem_offset = natmem_reserved;
	/* Sizing loop: compute the required total; shrink the config one step
	 * at a time until everything (plus guard pages) fits. */
	for (;;) {
		int lowround = 0;
		if (rounds > 0)
			write_log (_T("NATMEM: retrying %d..\n"), rounds);
		rounds++;

		align = 16 * 1024 * 1024 - 1;	/* Z3 banks are 16M-aligned */
		z3size = 0;
		othersize = 0;
		size = 0x1000000;	/* 24-bit address space */
		/* Extra gap above 128M of motherboard fast RAM. */
		startbarrier = changed_prefs.mbresmem_high_size >= 128 * 1024 * 1024 ? (changed_prefs.mbresmem_high_size - 128 * 1024 * 1024) + 16 * 1024 * 1024 : 0;
		z3rtgmem_size = gfxboard_get_configtype(changed_prefs.rtgmem_type) == 3 ? changed_prefs.rtgmem_size : 0;
		if (changed_prefs.cpu_model >= 68020)
			size = 0x10000000;	/* 32-bit capable CPU: 256M base area */
		z3size = ((changed_prefs.z3fastmem_size + align) & ~align) + ((changed_prefs.z3fastmem2_size + align) & ~align) + ((changed_prefs.z3chipmem_size + align) & ~align);
		/* Expansion boards with their own address-space needs. */
		if (cfgfile_board_enabled(&currprefs, ROMTYPE_A4091, 0))
			othersize += 2 * 16 * 1024 * 1024;
		if (cfgfile_board_enabled(&currprefs, ROMTYPE_FASTLANE, 0))
			othersize += 2 * 32 * 1024 * 1024;
		totalsize = size + z3size + z3rtgmem_size + othersize;
		/* Over the memory budget: keep shrinking until it fits or nothing
		 * more can be shrunk. */
		while (totalsize > size64) {
			int change = lowmem ();
			if (!change)
				return 0;
			write_log (_T("NATMEM: %d, %dM > %lldM = %dM\n"), ++lowround, totalsize >> 20, size64 >> 20, (totalsize - change) >> 20);
			totalsize -= change;
		}
		if ((rounds > 1 && totalsize < 0x10000000) || rounds > 20) {
			write_log (_T("NATMEM: No special area could be allocated (3)!\n"));
			return 0;
		}
		natmemsize = size + z3size;

		/* Done when everything plus 16 guard pages fits the reservation. */
		if (startbarrier + natmemsize + z3rtgmem_size + 16 * si.dwPageSize <= natmem_reserved_size)
			break;
		write_log (_T("NATMEM: %dM area failed to allocate, err=%d (Z3=%dM,RTG=%dM)\n"),
			natmemsize >> 20, GetLastError (), (changed_prefs.z3fastmem_size + changed_prefs.z3fastmem2_size + changed_prefs.z3chipmem_size) >> 20, z3rtgmem_size >> 20);
		if (!lowmem ()) {
			write_log (_T("NATMEM: No special area could be allocated (2)!\n"));
			return 0;
		}
	}
#ifdef FSUAE
	write_log("NATMEM: size 0x%08x\n", size);
	write_log("NATMEM: z3size + 0x%08x\n", z3size);
	write_log("NATMEM: z3rtgmem_size + 0x%08x\n", z3rtgmem_size);
	write_log("NATMEM: othersize + 0x%08x\n", othersize);
	write_log("NATMEM: totalsize = 0x%08x\n", totalsize);
#endif

	/* Choose the Z3 autoconfig base: "real" Z3BASE_REAL mapping when the
	 * reservation is large enough (JIT direct capable), else UAE mapping. */
	set_expamem_z3_hack_override(false);
	z3offset = 0;
	if (changed_prefs.z3_mapping_mode != Z3MAPPING_UAE && cpuboard_memorytype(&changed_prefs) != BOARD_MEMORY_BLIZZARD_12xx) {
		if (1 && natmem_reserved_size > 0x40000000 && natmem_reserved_size - 0x40000000 >= (totalsize - 0x10000000 - ((changed_prefs.z3chipmem_size + align) & ~align)) && changed_prefs.z3chipmem_size <= 512 * 1024 * 1024) {
			changed_prefs.z3autoconfig_start = currprefs.z3autoconfig_start = Z3BASE_REAL;
			z3offset += Z3BASE_REAL - Z3BASE_UAE - ((changed_prefs.z3chipmem_size + align) & ~align);
			z3offset += cpuboards[currprefs.cpuboard_type].subtypes[currprefs.cpuboard_subtype].z3extra;
			set_expamem_z3_hack_override(true);
			startbarrier = 0;
			write_log(_T("Z3 REAL mapping. JIT direct compatible.\n"));
			jit_direct_compatible_memory = true;
		} else if (changed_prefs.z3_mapping_mode == Z3MAPPING_AUTO && currprefs.cachesize) {
			changed_prefs.z3autoconfig_start = currprefs.z3autoconfig_start = Z3BASE_UAE;
			jit_direct_compatible_memory = true;
			write_log(_T("Z3 UAE mapping (auto).\n"));
		} else {
			changed_prefs.z3autoconfig_start = currprefs.z3autoconfig_start = Z3BASE_REAL;
			write_log(_T("Z3 REAL mapping. Not JIT direct compatible.\n"));
			jit_direct_compatible_memory = false;
		}
	} else {
		currprefs.z3autoconfig_start = changed_prefs.z3autoconfig_start = Z3BASE_UAE;
		jit_direct_compatible_memory = true;
		write_log(_T("Z3 UAE mapping.\n"));
	}
#ifdef FSUAE
	write_log("NATMEM: JIT direct compatible: %d\n", jit_direct_compatible_memory);
#endif

	/* RTG (Picasso96) VRAM placement: Z3 boards (type 3) go after the Z3
	 * banks, Z2 boards (type 2) into Z2 expansion space, type-1 boards at
	 * 0xa80000. */
	p96mem_offset = NULL;
	p96mem_size = z3rtgmem_size;
	p96base_offset = 0;
	uae_u32 z3rtgallocsize = 0;
	if (changed_prefs.rtgmem_size && gfxboard_get_configtype(changed_prefs.rtgmem_type) == 3) {
		z3rtgallocsize = gfxboard_get_autoconfig_size(changed_prefs.rtgmem_type) < 0 ? changed_prefs.rtgmem_size : gfxboard_get_autoconfig_size(changed_prefs.rtgmem_type);
		if (changed_prefs.z3autoconfig_start == Z3BASE_UAE)
			p96base_offset = natmemsize + startbarrier + z3offset;
		else
			p96base_offset = expansion_startaddress(natmemsize + startbarrier + z3offset, z3rtgallocsize);
	} else if (changed_prefs.rtgmem_size && gfxboard_get_configtype(changed_prefs.rtgmem_type) == 2) {
		p96base_offset = getz2rtgaddr (changed_prefs.rtgmem_size);
	} else if (changed_prefs.rtgmem_size && gfxboard_get_configtype(changed_prefs.rtgmem_type) == 1) {
		p96base_offset = 0xa80000;
	}
	if (p96base_offset) {
		if (jit_direct_compatible_memory) {
			p96mem_offset = natmem_offset + p96base_offset;
		} else {
			if (changed_prefs.cachesize) {
				/* JIT on but VRAM would fall outside natmem: RTG cannot
				 * work in this combination, so drop the RTG board. */
				currprefs.rtgmem_size = changed_prefs.rtgmem_size = 0;
				currprefs.rtgmem_type = changed_prefs.rtgmem_type = 0;
				error_log(_T("RTG board is not anymore supported when JIT is enabled and RTG VRAM is located outside of NATMEM (Real Z3 mode under 32-bit Windows)."));
			} else {
				// calculate Z3 alignment (argh, I thought only Z2 needed this..)
				uae_u32 addr = Z3BASE_REAL;
				int z3off = cpuboards[currprefs.cpuboard_type].subtypes[currprefs.cpuboard_subtype].z3extra;
				if (z3off) {
					addr = expansion_startaddress(addr, z3off);
					addr += z3off;
				}
				addr = expansion_startaddress(addr, changed_prefs.z3fastmem_size);
				addr += changed_prefs.z3fastmem_size;
				addr = expansion_startaddress(addr, changed_prefs.z3fastmem2_size);
				addr += changed_prefs.z3fastmem2_size;
				addr = expansion_startaddress(addr, z3rtgallocsize);
				if (gfxboard_get_configtype(changed_prefs.rtgmem_type) == 3) {
					p96base_offset = addr;
					write_log("NATMEM: p96base_offset = 0x%x\n", p96base_offset);
					// adjust p96mem_offset to beginning of natmem
					// by subtracting start of original p96mem_offset from natmem_offset
					if (p96base_offset >= 0x10000000) {
#ifdef FSUAE
						write_log("NATMEM: natmem_offset = %p - 0x%x\n", natmem_reserved, p96base_offset);
#endif
						natmem_offset = natmem_reserved - p96base_offset;
						p96mem_offset = natmem_offset + p96base_offset;
					}
				}
			}
		}
	}

	if (!natmem_offset) {
		write_log (_T("NATMEM: No special area could be allocated! err=%d\n"), GetLastError ());
	} else {
		write_log(_T("NATMEM: Our special area: %p-%p (0x%08x %dM)\n"),
			natmem_offset, (uae_u8*)natmem_offset + natmemsize,
			natmemsize, natmemsize / (1024 * 1024));
		if (changed_prefs.rtgmem_size)
			write_log (_T("NATMEM: P96 special area: %p-%p (0x%08x %dM)\n"),
				p96mem_offset, (uae_u8*)p96mem_offset + changed_prefs.rtgmem_size,
				changed_prefs.rtgmem_size, changed_prefs.rtgmem_size >> 20);
		/* JIT may only access memory directly when the layout allows it. */
		canbang = jit_direct_compatible_memory ? 1 : 0;
		if (p96mem_size)
			natmem_offset_end = p96mem_offset + p96mem_size;
		else
			natmem_offset_end = natmem_offset + natmemsize;
	}

	return canbang;
}
563
/* Previously applied memory configuration; init_shm() compares against these
 * to detect whether the natmem layout must be rebuilt. ortgmem_type starts
 * at -1 (and is reset by free_shm()) to force the first rebuild. */
static uae_u32 oz3fastmem_size, oz3fastmem2_size;
static uae_u32 oz3chipmem_size;
static uae_u32 ortgmem_size;
static int ortgmem_type = -1;
568
569 bool init_shm (void)
570 {
571 #ifdef FSUAE
572 write_log("init_shm\n");
573 #endif
574 if (
575 oz3fastmem_size == changed_prefs.z3fastmem_size &&
576 oz3fastmem2_size == changed_prefs.z3fastmem2_size &&
577 oz3chipmem_size == changed_prefs.z3chipmem_size &&
578 ortgmem_size == changed_prefs.rtgmem_size &&
579 ortgmem_type == changed_prefs.rtgmem_type)
580 return false;
581
582 oz3fastmem_size = changed_prefs.z3fastmem_size;
583 oz3fastmem2_size = changed_prefs.z3fastmem2_size;
584 oz3chipmem_size = changed_prefs.z3chipmem_size;;
585 ortgmem_size = changed_prefs.rtgmem_size;
586 ortgmem_type = changed_prefs.rtgmem_type;
587
588 doinit_shm ();
589
590 resetmem (false);
591 clear_shm ();
592
593 memory_hardreset (2);
594 return true;
595 }
596
597 void free_shm (void)
598 {
599 resetmem (true);
600 clear_shm ();
601 ortgmem_type = -1;
602 }
603
/* Release the host memory behind an address bank. Three cases by flags:
 *  - ABFLAG_INDIRECT: "fake" heap-backed segments — free the buffer (unless
 *    ABFLAG_NOALLOC) and clear every descriptor slot sharing the base;
 *  - no ABFLAG_DIRECTMAP: a plain heap allocation — just free it;
 *  - direct-mapped: detach and then remove every shm segment that shares
 *    the bank's base address. Always NULLs ab->baseaddr on return. */
void mapped_free (addrbank *ab)
{
	shmpiece *x = shm_start;
	bool rtgmem = (ab->flags & ABFLAG_RTG) != 0;	/* NOTE(review): computed but never used below */

	if (ab->baseaddr == NULL)
		return;

	if (ab->flags & ABFLAG_INDIRECT) {
		/* Clear every descriptor slot whose piece shares this base. */
		while(x) {
			if (ab->baseaddr == x->native_address) {
				int shmid = x->id;
				shmids[shmid].key = -1;
				shmids[shmid].name[0] = '\0';
				shmids[shmid].size = 0;
				shmids[shmid].attached = 0;
				shmids[shmid].mode = 0;
				shmids[shmid].natmembase = 0;
				if (!(ab->flags & ABFLAG_NOALLOC)) {
					xfree(ab->baseaddr);
					ab->baseaddr = NULL;
				}
			}
			x = x->next;
		}
		ab->baseaddr = NULL;
		ab->flags &= ~ABFLAG_DIRECTMAP;
		write_log(_T("mapped_free indirect %s\n"), ab->name);
		return;
	}

	if (!(ab->flags & ABFLAG_DIRECTMAP)) {
		if (!(ab->flags & ABFLAG_NOALLOC)) {
			xfree(ab->baseaddr);
		}
		ab->baseaddr = NULL;
		write_log(_T("mapped_free nondirect %s\n"), ab->name);
		return;
	}

	/* Direct-mapped: first detach every matching piece... */
	while(x) {
		if(ab->baseaddr == x->native_address)
			uae_shmdt (x->native_address);
		x = x->next;
	}
	/* ...then remove the segments themselves (STAT first to confirm the
	 * id is still live). */
	x = shm_start;
	while(x) {
		struct uae_shmid_ds blah;
		if (ab->baseaddr == x->native_address) {
			if (uae_shmctl (x->id, UAE_IPC_STAT, &blah) == 0)
				uae_shmctl (x->id, UAE_IPC_RMID, &blah);
		}
		x = x->next;
	}
	ab->baseaddr = NULL;
	write_log(_T("mapped_free direct %s\n"), ab->name);
}
661
662 static uae_key_t get_next_shmkey (void)
663 {
664 uae_key_t result = -1;
665 int i;
666 for (i = 0; i < MAX_SHMID; i++) {
667 if (shmids[i].key == -1) {
668 shmids[i].key = i;
669 result = i;
670 break;
671 }
672 }
673 return result;
674 }
675
676 STATIC_INLINE uae_key_t find_shmkey (uae_key_t key)
677 {
678 int result = -1;
679 if(shmids[key].key == key) {
680 result = key;
681 }
682 return result;
683 }
684
/* shmat() emulation: commit host pages for segment shmid inside the natmem
 * area and return the host address, or (void *)-1 on failure.
 * When shmaddr lies below natmem_offset, the address is derived from the
 * segment's well-known bank name instead. A BARRIER is appended after banks
 * whose following Amiga address space is unmapped, so stray cross-bank
 * accesses land on committed guard bytes. ABFLAG_INDIRECT banks become
 * plain heap allocations ("fake" segments). Pages are committed read/write
 * here; read-only protection for ROM banks is applied later via
 * protect_roms() using the mode/rosize fields recorded below. */
void *uae_shmat (addrbank *ab, int shmid, void *shmaddr, int shmflg)
{
#ifdef FSUAE
	write_log("uae_shmat shmid %d shmaddr %p, shmflg %d natmem_offset = %p\n",
		shmid, shmaddr, shmflg, natmem_offset);
#endif
	void *result = (void *)-1;
	bool got = false, readonly = false, maprom = false;
	int p96special = FALSE;

#ifdef NATMEM_OFFSET
	unsigned int size = shmids[shmid].size;
	unsigned int readonlysize = size;	/* ROM banks may protect only a prefix */

	/* Already attached: return the existing mapping. */
	if (shmids[shmid].attached)
		return shmids[shmid].attached;

	if (ab->flags & ABFLAG_INDIRECT) {
		result = xcalloc (uae_u8, size);
		shmids[shmid].attached = result;
		shmids[shmid].fake = true;
		return result;
	}

	/* Translate well-known bank names to fixed offsets from natmem_offset;
	 * 'got' records that the name was recognized. */
	if ((uae_u8*)shmaddr < natmem_offset) {
		if(!_tcscmp (shmids[shmid].name, _T("chip"))) {
			shmaddr=natmem_offset;
			got = true;
			/* Barrier only when nothing is mapped directly above chip RAM. */
			if (getz2endaddr () <= 2 * 1024 * 1024 || currprefs.chipmem_size < 2 * 1024 * 1024)
				size += BARRIER;
		} else if(!_tcscmp (shmids[shmid].name, _T("kick"))) {
			shmaddr=natmem_offset + 0xf80000;
			got = true;
			size += BARRIER;
			readonly = true;
			maprom = true;
		} else if(!_tcscmp (shmids[shmid].name, _T("rom_a8"))) {
			shmaddr=natmem_offset + 0xa80000;
			got = true;
			readonly = true;
			maprom = true;
		} else if(!_tcscmp (shmids[shmid].name, _T("rom_e0"))) {
			shmaddr=natmem_offset + 0xe00000;
			got = true;
			readonly = true;
			maprom = true;
		} else if(!_tcscmp (shmids[shmid].name, _T("rom_f0"))) {
			shmaddr=natmem_offset + 0xf00000;
			got = true;
			readonly = true;
		} else if(!_tcscmp (shmids[shmid].name, _T("rom_f0_ppc"))) {
			// this is flash and also contains IO
			shmaddr=natmem_offset + 0xf00000;
			got = true;
			readonly = false;
		} else if (!_tcscmp(shmids[shmid].name, _T("rtarea"))) {
			shmaddr = natmem_offset + rtarea_base;
			got = true;
			readonly = true;
			/* Only the trap table portion is write-protected. */
			readonlysize = RTAREA_TRAPS;
		} else if (!_tcscmp(shmids[shmid].name, _T("fmv_rom"))) {
			got = true;
			shmaddr = natmem_offset + 0x200000;
		} else if (!_tcscmp(shmids[shmid].name, _T("fmv_ram"))) {
			got = true;
			shmaddr = natmem_offset + 0x280000;
		} else if(!_tcscmp (shmids[shmid].name, _T("fast"))) {
			got = true;
			if (size < 524288) {
				shmaddr=natmem_offset + 0xec0000;
			} else {
				shmaddr=natmem_offset + 0x200000;
				if (!(currprefs.rtgmem_size && gfxboard_get_configtype(currprefs.rtgmem_type) == 3))
					size += BARRIER;
			}
		} else if(!_tcscmp (shmids[shmid].name, _T("fast2"))) {
			got = true;
			if (size < 524288) {
				shmaddr=natmem_offset + 0xec0000;
			} else {
				shmaddr=natmem_offset + 0x200000;
				if (currprefs.fastmem_size >= 524288)
					shmaddr=natmem_offset + 0x200000 + currprefs.fastmem_size;
				if (!(currprefs.rtgmem_size && gfxboard_get_configtype(currprefs.rtgmem_type) == 3))
					size += BARRIER;
			}
		} else if(!_tcscmp (shmids[shmid].name, _T("fast2"))) {
			/* NOTE(review): duplicate "fast2" test — the identical case
			 * directly above always matches first, so this branch is
			 * unreachable dead code. */
			shmaddr=natmem_offset + 0x200000;
			got = true;
			if (!(currprefs.rtgmem_size && gfxboard_get_configtype(currprefs.rtgmem_type) == 3))
				size += BARRIER;
		} else if(!_tcscmp (shmids[shmid].name, _T("z2_gfx"))) {
			ULONG start = getz2rtgaddr (size);
			got = true;
			p96special = true;
			shmaddr = natmem_offset + start;
			gfxmem_bank.start = start;
			if (start + currprefs.rtgmem_size < 10 * 1024 * 1024)
				size += BARRIER;
		} else if(!_tcscmp (shmids[shmid].name, _T("ramsey_low"))) {
			shmaddr=natmem_offset + a3000lmem_bank.start;
			if (!a3000hmem_bank.start)
				size += BARRIER;
			got = true;
		} else if (!_tcscmp(shmids[shmid].name, _T("csmk1_maprom"))) {
			shmaddr = natmem_offset + 0x07f80000;
			got = true;
		} else if (!_tcscmp(shmids[shmid].name, _T("25bitram"))) {
			shmaddr = natmem_offset + 0x01000000;
			got = true;
		} else if (!_tcscmp(shmids[shmid].name, _T("ramsey_high"))) {
			shmaddr = natmem_offset + 0x08000000;
			got = true;
		} else if (!_tcscmp(shmids[shmid].name, _T("dkb"))) {
			shmaddr = natmem_offset + 0x10000000;
			got = true;
		} else if (!_tcscmp(shmids[shmid].name, _T("fusionforty"))) {
			shmaddr = natmem_offset + 0x11000000;
			got = true;
		} else if (!_tcscmp(shmids[shmid].name, _T("blizzard_40"))) {
			shmaddr = natmem_offset + 0x40000000;
			got = true;
		} else if (!_tcscmp(shmids[shmid].name, _T("blizzard_48"))) {
			shmaddr = natmem_offset + 0x48000000;
			got = true;
		} else if (!_tcscmp(shmids[shmid].name, _T("blizzard_68"))) {
			shmaddr = natmem_offset + 0x68000000;
			got = true;
		} else if (!_tcscmp(shmids[shmid].name, _T("blizzard_70"))) {
			shmaddr = natmem_offset + 0x70000000;
			got = true;
		} else if (!_tcscmp(shmids[shmid].name, _T("cyberstorm"))) {
			shmaddr = natmem_offset + 0x0c000000;
			got = true;
		} else if (!_tcscmp(shmids[shmid].name, _T("cyberstormmaprom"))) {
			shmaddr = natmem_offset + 0xfff00000;
			got = true;
		} else if (!_tcscmp(shmids[shmid].name, _T("z3"))) {
			shmaddr=natmem_offset + z3fastmem_bank.start;
			if (!currprefs.z3fastmem2_size)
				size += BARRIER;
			got = true;
		} else if(!_tcscmp (shmids[shmid].name, _T("z3_2"))) {
			shmaddr=natmem_offset + z3fastmem_bank.start + currprefs.z3fastmem_size;
			size += BARRIER;
			got = true;
		} else if(!_tcscmp (shmids[shmid].name, _T("z3_chip"))) {
			shmaddr=natmem_offset + z3chipmem_bank.start;
			size += BARRIER;
			got = true;
		} else if(!_tcscmp (shmids[shmid].name, _T("z3_gfx"))) {
			got = true;
			p96special = true;
			gfxmem_bank.start = p96mem_offset - natmem_offset;
			shmaddr = natmem_offset + gfxmem_bank.start;
			size += BARRIER;
		} else if(!_tcscmp (shmids[shmid].name, _T("bogo"))) {
			shmaddr=natmem_offset+0x00C00000;
			got = true;
			if (currprefs.bogomem_size <= 0x100000)
				size += BARRIER;
#if 0
		} else if(!_tcscmp (shmids[shmid].name, _T("filesys"))) {
			static uae_u8 *filesysptr;
			if (filesysptr == NULL)
				filesysptr = xcalloc (uae_u8, size);
			result = filesysptr;
			shmids[shmid].attached = result;
			shmids[shmid].fake = true;
			return result;
#endif
		} else if(!_tcscmp (shmids[shmid].name, _T("custmem1"))) {
			shmaddr=natmem_offset + currprefs.custom_memory_addrs[0];
			got = true;
		} else if(!_tcscmp (shmids[shmid].name, _T("custmem2"))) {
			shmaddr=natmem_offset + currprefs.custom_memory_addrs[1];
			got = true;
		} else if(!_tcscmp (shmids[shmid].name, _T("hrtmem"))) {
			shmaddr=natmem_offset + 0x00a10000;
			got = true;
		} else if(!_tcscmp (shmids[shmid].name, _T("arhrtmon"))) {
			shmaddr=natmem_offset + 0x00800000;
			size += BARRIER;
			got = true;
		} else if(!_tcscmp (shmids[shmid].name, _T("xpower_e2"))) {
			shmaddr=natmem_offset + 0x00e20000;
			size += BARRIER;
			got = true;
		} else if(!_tcscmp (shmids[shmid].name, _T("xpower_f2"))) {
			shmaddr=natmem_offset + 0x00f20000;
			size += BARRIER;
			got = true;
		} else if(!_tcscmp (shmids[shmid].name, _T("nordic_f0"))) {
			shmaddr=natmem_offset + 0x00f00000;
			size += BARRIER;
			got = true;
		} else if(!_tcscmp (shmids[shmid].name, _T("nordic_f4"))) {
			shmaddr=natmem_offset + 0x00f40000;
			size += BARRIER;
			got = true;
		} else if(!_tcscmp (shmids[shmid].name, _T("nordic_f6"))) {
			shmaddr=natmem_offset + 0x00f60000;
			size += BARRIER;
			got = true;
		} else if(!_tcscmp(shmids[shmid].name, _T("superiv_b0"))) {
			shmaddr=natmem_offset + 0x00b00000;
			size += BARRIER;
			got = true;
		} else if(!_tcscmp (shmids[shmid].name, _T("superiv_d0"))) {
			shmaddr=natmem_offset + 0x00d00000;
			size += BARRIER;
			got = true;
		} else if (!_tcscmp(shmids[shmid].name, _T("superiv_e0"))) {
			shmaddr = natmem_offset + 0x00e00000;
			size += BARRIER;
			got = true;
		} else if (!_tcscmp(shmids[shmid].name, _T("ram_a8"))) {
			shmaddr = natmem_offset + 0x00a80000;
			size += BARRIER;
			got = true;
		}
	}
#endif

	/* Clip a barrier that would extend past the end of the reservation. */
	uintptr_t natmem_end = (uintptr_t) natmem_reserved + natmem_reserved_size;
	if ((uintptr_t) shmaddr + size > natmem_end) {
		/* We cannot add a barrier beyond the end of the reserved memory. */
		assert((uintptr_t) shmaddr + size - natmem_end == BARRIER);
		write_log(_T("NATMEM: Removing barrier (%d bytes) beyond reserved memory\n"), BARRIER);
		size -= BARRIER;
	}

	if (shmids[shmid].key == shmid && shmids[shmid].size) {
		DWORD protect = readonly ? PAGE_READONLY : PAGE_READWRITE;
		shmids[shmid].mode = protect;
		shmids[shmid].rosize = readonlysize;
		shmids[shmid].natmembase = natmem_offset;
		shmids[shmid].maprom = maprom ? 1 : 0;
		/* Decommit any stale pages, then commit (read/write for now;
		 * see protect_roms() for the read-only pass). */
		if (shmaddr)
			virtualfreewithlock (shmaddr, size, MEM_DECOMMIT);
		result = virtualallocwithlock (shmaddr, size, MEM_COMMIT, PAGE_READWRITE);
		if (result == NULL)
			virtualfreewithlock (shmaddr, 0, MEM_DECOMMIT);
		/* NOTE(review): this second commit runs unconditionally, even when
		 * the first succeeded — presumably relying on re-commit of already
		 * committed pages being a harmless no-op; confirm against the
		 * VirtualAlloc documentation. */
		result = virtualallocwithlock (shmaddr, size, MEM_COMMIT, PAGE_READWRITE);
		if (result == NULL) {
			result = (void*)-1;
			error_log (_T("Memory %s failed to allocate %p: VA %08X - %08X %x (%dk). Error %d."),
				shmids[shmid].name, shmaddr,
				(uae_u8*)shmaddr - natmem_offset, (uae_u8*)shmaddr - natmem_offset + size,
				size, size >> 10, GetLastError ());
		} else {
			shmids[shmid].attached = result;
			write_log (_T("%p: VA %08lX - %08lX %x (%dk) ok (%p)%s\n"),
				shmaddr, (uae_u8*)shmaddr - natmem_offset, (uae_u8*)shmaddr - natmem_offset + size,
				size, size >> 10, shmaddr, p96special ? _T(" P96") : _T(""));
		}
	}
	return result;
}
944
945 void unprotect_maprom (void)
946 {
947 bool protect = false;
948 for (int i = 0; i < MAX_SHMID; i++) {
949 struct uae_shmid_ds *shm = &shmids[i];
950 if (shm->mode != PAGE_READONLY)
951 continue;
952 if (!shm->attached || !shm->rosize)
953 continue;
954 if (shm->maprom <= 0)
955 continue;
956 shm->maprom = -1;
957 DWORD old;
958 if (!VirtualProtect (shm->attached, shm->rosize, protect ? PAGE_READONLY : PAGE_READWRITE, &old)) {
959 write_log (_T("unprotect_maprom VP %08lX - %08lX %x (%dk) failed %d\n"),
960 (uae_u8*)shm->attached - natmem_offset, (uae_u8*)shm->attached - natmem_offset + shm->size,
961 shm->size, shm->size >> 10, GetLastError ());
962 }
963 }
964 }
965
966 void protect_roms (bool protect)
967 {
968 if (protect) {
969 // protect only if JIT enabled, always allow unprotect
970 if (!currprefs.cachesize || currprefs.comptrustbyte || currprefs.comptrustword || currprefs.comptrustlong)
971 return;
972 }
973 for (int i = 0; i < MAX_SHMID; i++) {
974 struct uae_shmid_ds *shm = &shmids[i];
975 if (shm->mode != PAGE_READONLY)
976 continue;
977 if (!shm->attached || !shm->rosize)
978 continue;
979 if (shm->maprom < 0 && protect)
980 continue;
981 DWORD old;
982 if (!VirtualProtect (shm->attached, shm->rosize, protect ? PAGE_READONLY : PAGE_READWRITE, &old)) {
983 write_log (_T("protect_roms VP %08lX - %08lX %x (%dk) failed %d\n"),
984 (uae_u8*)shm->attached - natmem_offset, (uae_u8*)shm->attached - natmem_offset + shm->size,
985 shm->size, shm->size >> 10, GetLastError ());
986 } else {
987 write_log(_T("ROM VP %08lX - %08lX %x (%dk) %s\n"),
988 (uae_u8*)shm->attached - natmem_offset, (uae_u8*)shm->attached - natmem_offset + shm->size,
989 shm->size, shm->size >> 10, protect ? _T("WPROT") : _T("UNPROT"));
990 }
991 }
992 }
993
/* shmdt() emulation. Detaching is a deliberate no-op here: pages are
 * decommitted by uae_shmctl(UAE_IPC_RMID) / resetmem() instead.
 * Always reports success. */
int uae_shmdt (const void *shmaddr)
{
	(void) shmaddr;
	return 0;
}
998
999 int uae_shmget (uae_key_t key, size_t size, int shmflg, const TCHAR *name)
1000 {
1001 int result = -1;
1002
1003 if ((key == UAE_IPC_PRIVATE) || ((shmflg & UAE_IPC_CREAT) && (find_shmkey (key) == -1))) {
1004 write_log (_T("shmget of size %zd (%zdk) for %s\n"), size, size >> 10, name);
1005 if ((result = get_next_shmkey ()) != -1) {
1006 shmids[result].size = size;
1007 _tcscpy (shmids[result].name, name);
1008 } else {
1009 result = -1;
1010 }
1011 }
1012 return result;
1013 }
1014
/* Minimal shmctl() emulation. UAE_IPC_STAT copies the descriptor into buf;
 * UAE_IPC_RMID decommits the segment's pages and frees the slot. Returns 0
 * on success, -1 for an unknown id, NULL buf, or unsupported cmd. */
int uae_shmctl (int shmid, int cmd, struct uae_shmid_ds *buf)
{
	int result = -1;

	if ((find_shmkey (shmid) != -1) && buf) {
		switch (cmd)
		{
		case UAE_IPC_STAT:
			*buf = shmids[shmid];
			result = 0;
			break;
		case UAE_IPC_RMID:
			/* Release the backing pages but keep the address reservation. */
			VirtualFree (shmids[shmid].attached, shmids[shmid].size, MEM_DECOMMIT);
			shmids[shmid].key = -1;
			shmids[shmid].name[0] = '\0';
			shmids[shmid].size = 0;
			shmids[shmid].attached = 0;
			shmids[shmid].mode = 0;
			result = 0;
			break;
		}
	}
	return result;
}
1039
1040 #endif
1041
1042 #ifdef FSUAE
1043 #else
1044 int isinf (double x)
1045 {
1046 const int nClass = _fpclass (x);
1047 int result;
1048 if (nClass == _FPCLASS_NINF || nClass == _FPCLASS_PINF)
1049 result = 1;
1050 else
1051 result = 0;
1052 return result;
1053 }
1054 #endif
1055