/* Memory mapping support. */

#if defined(HAVE_MMAP_ANON) || defined(HAVE_MMAP_DEVZERO) || defined(HAVE_MACH_VM) || defined(HAVE_WIN32_VM)

/* -------------------------- Specification ---------------------------- */

/* This adds support for mapping private memory pages at given addresses.
 If HAVE_MMAP is defined, it also supports private mappings of files
 at given addresses. */

/* The size of a physical page, as returned by the operating system.
 Initialized by mmap_init_pagesize(). */
local uintP system_pagesize;

/* The size of a single page. (This may be a multiple of the actual physical
 page size.) Always a power of two. Initialized by mmap_init_pagesize(). */
local uintP mmap_pagesize;
local void mmap_init_pagesize (void);

/* Initialize the mmap system.
 mmap_init()
 < returns 0 upon success, -1 upon failure */
local int mmap_init (void);

/* Reserves an address range for use with mmap_zeromap().
 It tries to reserve the range [*map_addr,*map_endaddr). If this is not
 possible and shrinkp is true, *map_addr is increased and *map_endaddr is
 reduced as necessary. */
local int mmap_prepare (uintP* map_addr, uintP* map_endaddr, bool shrinkp);

/* Fills a memory range [map_addr,map_addr+map_len-1] with empty pages.
 mmap_zeromap(map_addr,map_len)
 map_addr and map_len must be multiples of mmap_pagesize. */
local int mmap_zeromap (void* map_addr, uintM map_len);

#ifdef HAVE_MMAP
/* Fills a memory range [map_addr,map_addr+map_len-1] with pages mapped in from
 file fd starting at position offset.
 map_addr and map_len must be multiples of mmap_pagesize. */
local void* mmap_filemap (void* map_addr, uintM map_len, int fd, off_t offset);
#endif

/* Unmaps a memory range. */
#if defined(HAVE_MACH_VM) || defined(HAVE_WIN32_VM)
global int munmap (void* addr, size_t len);
#endif

/* Changes the access protection for a memory range. */
#if defined(HAVE_MACH_VM) || defined(HAVE_WIN32_VM)
global int mprotect (void* addr, size_t len, int prot);
#endif

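/* Illustrative sketch (not part of the build): the expected call sequence
   for the API above, with a hypothetical heap placement. The function name,
   the addresses and the error handling are assumptions for illustration
   only; real callers derive the range from the memory model in use. */
#if 0
local int example_reserve_and_commit (void)
{
  mmap_init_pagesize();
  if (mmap_init() < 0)
    return -1;
  /* Hypothetical address range, allowed to shrink if partially occupied. */
  var uintP start = 0x20000000;
  var uintP end   = 0x28000000;
  if (mmap_prepare(&start,&end,true) < 0)
    return -1;
  /* Commit the first pages. The length must be a multiple of mmap_pagesize. */
  var uintM len = 16*mmap_pagesize;
  if (mmap_zeromap((void*)start,len) < 0)
    return -1;
  return 0;
}
#endif
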
/* -------------------------- Implementation --------------------------- */

#if HAVE_MINCORE && !defined(UNIX_MACOSX)
/* mincore() is a system call that allows one to inquire about the status
   of a range of pages of virtual memory.  In particular, it allows one to
   inquire whether a page is mapped at all (except on Mac OS X, where mincore
   returns 0 even for unmapped addresses).
   As of 2006, mincore() is supported by:        possible bits:
     - Linux,   since Linux 2.4 and glibc 2.2,   1
     - Solaris, since Solaris 9,                 1
     - MacOS X, since MacOS X 10.3 (at least),   1
     - FreeBSD, since FreeBSD 6.0,               MINCORE_{INCORE,REFERENCED,MODIFIED}
     - NetBSD,  since NetBSD 3.0 (at least),     1
     - OpenBSD, since OpenBSD 2.6 (at least),    1
     - AIX,     since AIX 5.3,                   1
   However, while the API makes it easy to determine the bounds of mapped
   virtual memory, it does not make it easy to find the bounds of _unmapped_
   virtual memory ranges. */

/* The AIX declaration of mincore() uses 'caddr_t', whereas the other platforms
   use 'void *'. */
#ifdef UNIX_AIX
typedef caddr_t MINCORE_ADDR_T;
#else
typedef void* MINCORE_ADDR_T;
#endif

/* The glibc and musl declaration of mincore() uses 'unsigned char *', whereas
   the BSD declaration uses 'char *'. */
#if __GLIBC__ >= 2 || defined(UNIX_LINUX)
typedef unsigned char mincore_pageinfo_t;
#else
typedef char mincore_pageinfo_t;
#endif

/* The page size used by mincore(), that is, the physical page size. */
local uintP mincore_pagesize;

/* Whether mincore() can be used to detect mapped pages. */
local bool mincore_works;

local void mincore_init (void)
{
  /* Note: HAVE_MINCORE implies HAVE_GETPAGESIZE. */
  mincore_pagesize = getpagesize();
  /* FreeBSD 6.[01] does not allow one to distinguish unmapped pages from
     mapped but swapped-out pages.  Similarly, on DragonFly BSD 3.8, mincore
     succeeds for any page, even unmapped ones.
     Detect these unusable implementations: Test what mincore() reports for
     the page at address 0 (which is always unmapped, in order to catch NULL
     pointer accesses). */
  {
    mincore_pageinfo_t vec[1];
    mincore_works = (mincore ((MINCORE_ADDR_T) 0, mincore_pagesize, vec) < 0);
  }
}

/* Determines whether the memory range [map_addr,map_endaddr) is entirely
   unmapped.
   map_endaddr must be >= map_addr or == 0. */
local bool is_small_range_unmapped (uintP map_addr, uintP map_endaddr)
{
  mincore_pageinfo_t vec[1];
  map_addr = map_addr & ~(mincore_pagesize-1); /* round down */
  map_endaddr = ((map_endaddr-1) | (mincore_pagesize-1)) + 1; /* round up */
  for (; map_addr != map_endaddr; map_addr += mincore_pagesize) {
    if (mincore ((MINCORE_ADDR_T) map_addr, mincore_pagesize, vec) >= 0)
      /* The page [map_addr,map_addr+mincore_pagesize) is mapped. */
      return false;
  }
  return true;
}
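
/* Illustrative sketch (not part of the build): the same probing technique
   can scan forward to find the end of a mapped region, as remarked above
   (mincore() makes the bounds of _mapped_ memory easy to find). The function
   name and its use are assumptions for illustration only. */
#if 0
local uintP example_end_of_mapped_region (uintP addr)
{
  mincore_pageinfo_t vec[1];
  addr = addr & ~(mincore_pagesize-1); /* round down to a page boundary */
  /* Advance page by page while mincore() succeeds, i.e. the page is mapped. */
  while (mincore ((MINCORE_ADDR_T) addr, mincore_pagesize, vec) >= 0)
    addr += mincore_pagesize;
  return addr; /* first unmapped page boundary */
}
#endif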

/* Warn before invoking mmap on an address range [map_addr,map_endaddr).
   If this call will overwrite existing memory mappings, clisp is likely to
   crash afterwards. This is a debugging tool for systems with address space
   randomization. */
local void warn_before_mmap (uintP map_addr, uintP map_endaddr)
{
  if (mincore_works && !is_small_range_unmapped(map_addr,map_endaddr)) {
    fprintf(stderr,GETTEXTL("Warning: overwriting existing memory mappings in the address range 0x%lx...0x%lx. clisp will likely crash soon!!\n"),
            (unsigned long)map_addr,(unsigned long)(map_endaddr-1));
  }
}

#else
#define mincore_init()
#define warn_before_mmap(map_addr,map_endaddr)
#endif

/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

#if VMA_ITERATE_SUPPORTED

struct is_large_range_unmapped_locals {
  uintptr_t map_start;
  uintptr_t map_end;
  int intersect;
};

local int is_large_range_unmapped_callback (void *data,
                                            uintptr_t start, uintptr_t end,
                                            unsigned int flags)
{
  unused(flags);
  var struct is_large_range_unmapped_locals* locals =
    (struct is_large_range_unmapped_locals*) data;
  if (!(start > locals->map_end-1 || locals->map_start > end-1)) {
    locals->intersect = 1;
    return 1; /* terminate loop */
  } else
    return 0; /* continue */
}

/* Determines whether the memory range [map_addr,map_endaddr) is entirely
   unmapped.
   map_endaddr must be >= map_addr or == 0.
   This function is suitable for large ranges (unlike is_small_range_unmapped),
   but is significantly slower than is_small_range_unmapped for small ranges. */
local bool is_large_range_unmapped (uintP map_addr, uintP map_endaddr)
{
  /* Use the gnulib module 'vma-iter' to look for an intersection between
     the specified interval and the existing VMAs. */
  var struct is_large_range_unmapped_locals locals;
  locals.map_start = map_addr;
  locals.map_end = map_endaddr;
  locals.intersect = 0;
  return !(vma_iterate (&is_large_range_unmapped_callback, &locals) == 0
           && locals.intersect);
}

/* Warn before reserving an address range that contains existing memory
   mappings. */
local void warn_before_reserving_range (uintP map_addr, uintP map_endaddr)
{
  if (!is_large_range_unmapped(map_addr,map_endaddr)) {
    fprintf(stderr,GETTEXTL("Warning: reserving address range 0x%lx...0x%lx that contains memory mappings. clisp might crash later!\n"),
            (unsigned long)map_addr,(unsigned long)(map_endaddr-1));
    dump_process_memory_map(stderr);
  }
}

#else
#define warn_before_reserving_range(map_addr,map_endaddr)
#endif

/* -------------------- Implementation for Mac OS X -------------------- */

#if defined(HAVE_MACH_VM)

local void mmap_init_pagesize (void)
{
  system_pagesize = vm_page_size;
  mmap_pagesize = system_pagesize;
}

#define mmap_init()  0

local int mmap_prepare (uintP* map_addr, uintP* map_endaddr, bool shrinkp)
{
  unused(shrinkp);
  /* Warn before reserving an address range that contains existing memory
     mappings. We don't actually shrink the range [*map_addr,*map_endaddr)
     here. */
  warn_before_reserving_range(*map_addr,*map_endaddr);
  return 0;
}

local int mmap_zeromap (void* map_addr, uintM map_len)
{
  var int errcode;
  switch (vm_allocate(task_self(), (vm_address_t*) &map_addr, map_len, false)) {
    case KERN_SUCCESS:
      return 0;
    case KERN_NO_SPACE:
      errcode = ENOMEM;
      break;
    case KERN_INVALID_ADDRESS:
    default:
      errcode = EINVAL;
      break;
  }
  fprintf(stderr,GETTEXTL("Cannot map memory to address 0x%lx ."),
          (uintP)map_addr);
  errno_out(errcode);
  return -1;                  /* error */
}

local void* mmap_filemap (void* map_addr, uintM map_len, int fd, off_t offset)
{
  switch (vm_allocate(task_self(), (vm_address_t*) &map_addr, map_len, false)) {
    case KERN_SUCCESS:
      break;
    default:
      errno = EINVAL; return (void*)(-1);
  }
  switch (map_fd(fd, offset, (vm_address_t*) &map_addr, 0, map_len)) {
    case KERN_SUCCESS:
      return map_addr;
    case KERN_INVALID_ADDRESS:
    case KERN_INVALID_ARGUMENT:
    default:
      errno = EINVAL; return (void*)(-1);
  }
}

/* We need to implement munmap() ourselves. */
global int munmap (void* addr, size_t len)
{
  switch (vm_deallocate(task_self(),(vm_address_t)addr,len)) {
    case KERN_SUCCESS:
      return 0;
    case KERN_INVALID_ADDRESS:
    default:
      errno = EINVAL; return -1;
  }
}

/* We need to implement mprotect() ourselves. */
global int mprotect (void* addr, size_t len, int prot)
{
  switch (vm_protect(task_self(),(vm_address_t)addr,len,0,prot)) {
    case KERN_SUCCESS:
      return 0;
    case KERN_PROTECTION_FAILURE:
      errno = EACCES; return -1;
    case KERN_INVALID_ADDRESS:
    default:
      errno = EINVAL; return -1;
  }
}
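
/* Illustrative sketch (not part of the build): thanks to the emulations
   above, callers can use the POSIX names uniformly on Mach as well, e.g. to
   write-protect a committed page and release it again. The function name and
   the address are assumptions for illustration only, and PROT_READ is
   assumed to carry the appropriate protection encoding on this platform. */
#if 0
local int example_protect_and_release (void)
{
  var void* addr = (void*)0x20000000; /* hypothetical, page-aligned */
  if (mmap_zeromap(addr,mmap_pagesize) < 0)
    return -1;
  if (mprotect(addr,mmap_pagesize,PROT_READ) < 0) /* make it read-only */
    return -1;
  return munmap(addr,mmap_pagesize); /* release the page */
}
#endif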

#endif

/* -------------------- Implementation for Windows --------------------- */

#if defined(HAVE_WIN32_VM)

local void mmap_init_pagesize (void)
{
  system_pagesize = getpagesize();
  mmap_pagesize = system_pagesize;
}

#define mmap_init()  0

/* With Win32 VM, you cannot simply map a page of memory anywhere you want.
 You first have to reserve address space before you can do that.
 It's more programming, but it has the advantage that you cannot accidentally
 overwrite some of the shared libraries or malloc regions. (If you try that,
 VirtualAlloc(..,MEM_RESERVE,..) will return an error.)
 This function reserves an address range for use with mmap_zeromap().
 It tries to reserve the range [*map_addr,*map_endaddr). If this is not
 possible and shrinkp is true, *map_addr is increased and *map_endaddr is
 reduced as necessary. */
local int mmap_prepare (aint* map_addr, aint* map_endaddr, bool shrinkp)
{
  var uintM map_len = *map_endaddr - *map_addr;
  var aint start_addr = round_down(*map_addr,0x10000);
  var aint end_addr = round_up(*map_addr+map_len,0x10000);
  if (shrinkp) {
    /* Try to find the largest free address range subinterval of
       [start_addr,end_addr). */
    var MEMORY_BASIC_INFORMATION info;
    var aint largest_start_addr = start_addr;
    var uintM largest_len = 0;
    var aint addr = start_addr;
    while (VirtualQuery((void*)addr,&info,sizeof(info)) == sizeof(info)) {
      /* Always info.BaseAddress = addr. */
      addr = (aint)info.BaseAddress;
      var uintM len = (info.RegionSize >= end_addr-addr ? end_addr-addr : info.RegionSize);
      if ((info.State == MEM_FREE) && (len > largest_len)) {
        largest_start_addr = addr; largest_len = len;
      }
      if (info.RegionSize >= end_addr-addr)
        break;
      addr += info.RegionSize;
    }
    if (largest_len < 0x10000) {
      fprintf(stderr,GETTEXTL("Cannot reserve address range at 0x%lx ."),
              *map_addr);
      /* DumpProcessMemoryMap(stderr); */
      return -1;
    }
    *map_addr = start_addr = round_up(largest_start_addr,0x10000);
    *map_endaddr = end_addr = largest_start_addr + largest_len;
  }
  if (!VirtualAlloc((void*)start_addr,end_addr-start_addr,MEM_RESERVE,PAGE_NOACCESS/*dummy*/)) {
    var DWORD errcode = GetLastError();
    fprintf(stderr,GETTEXTL("Cannot reserve address range 0x%lx-0x%lx ."),
            start_addr,end_addr-1);
    errno_out(errcode);
    /* DumpProcessMemoryMap(stderr); */
    return -1;
  }
 #ifdef DEBUG_SPVW
  fprintf(stderr,"Reserved address range 0x%lx-0x%lx .\n",
          start_addr,end_addr-1);
 #endif
  return 0;
}

local int mmap_zeromap (void* map_addr, uintM map_len)
{
  if (!VirtualAlloc(map_addr,map_len,MEM_COMMIT,PAGE_READWRITE)) {
    var DWORD errcode = GetLastError();
    fprintf(stderr,GETTEXTL("Cannot map memory to address 0x%lx ."),
            map_addr);
    errno_out(errcode);
    return -1;                  /* error */
  }
  return 0;
}
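
/* Illustrative sketch (not part of the build): the reserve-then-commit
   discipline described above, condensed into one self-contained fragment.
   The function name and the base address are assumptions for illustration
   only; 0x20000000 is aligned to the 64 KB allocation granularity. */
#if 0
local int example_reserve_then_commit (void)
{
  var aint base = 0x20000000;
  /* Step 1: reserve address space only; no pages are committed yet,
     and foreign mappings in the range make this call fail. */
  if (!VirtualAlloc((void*)base,0x100000,MEM_RESERVE,PAGE_NOACCESS))
    return -1;
  /* Step 2: commit a piece of the reserved range as zero-filled pages. */
  if (!VirtualAlloc((void*)base,0x10000,MEM_COMMIT,PAGE_READWRITE))
    return -1;
  return 0;
}
#endif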

#if 0
/* This implementation, on top of MapViewOfFileEx(), has three severe flaws:
 - It forces `map_addr' and `offset' to be aligned to 64 KB (not to the
   pagesize, 4KB, as indicated in the documentation), thus the mem files for
   SINGLEMAP_MEMORY get big.
 - On an address range prepared with mmap_prepare(), MapViewOfFileEx()
   returns the error code ERROR_INVALID_ADDRESS. We would have to map the
   first part of each heap to the file and mmap_prepare() only the remainder
   of the heap. This would give problems once a heap shrinks too much:
   munmap() below wouldn't work.
 - It doesn't work on Win95: MapViewOfFileEx() on Win95 cannot guarantee
   that it will be able to map at the desired address. */
local void* mmap_filemap (void* map_addr, uintM map_len, Handle fd,
                          off_t offset) {
  if (map_len==0)
    return map_addr;
  var HANDLE maphandle = CreateFileMapping(fd,NULL,PAGE_WRITECOPY,0,0,NULL);
  if (maphandle == NULL) {
    var DWORD errcode = GetLastError();
    fprint(stderr,GETTEXTL("CreateFileMapping() failed."));
    errno_out(errcode);
    return (void*)(-1);
  }
  var void* resultaddr = MapViewOfFileEx(maphandle,FILE_MAP_COPY,0,
                                         (DWORD)offset,map_len,map_addr);
  if (resultaddr == NULL) {
    var DWORD errcode = GetLastError();
    fprintf(stderr,GETTEXTL("MapViewOfFileEx(addr=0x%x,off=0x%x) failed."),
            map_addr,offset);
    errno_out(errcode);
    return (void*)(-1);
  }
  if (resultaddr != map_addr) {
    fprintf(stderr,GETTEXTL("MapViewOfFileEx() returned 0x%x instead of 0x%x."),
            resultaddr,map_addr);
    fprint(stderr,"\n");
    UnmapViewOfFile(resultaddr);
    return (void*)(-1);
  }
  return map_addr;
}
#endif

/* We need to implement munmap() ourselves. */
global int munmap (void* addr, size_t len)
{
  if (!VirtualFree(addr,len,MEM_DECOMMIT)) {
    var DWORD errcode = GetLastError();
    fprint(stderr,GETTEXTL("VirtualFree() failed."));
    errno_out(errcode);
    return -1;
  }
  return 0;
}

#ifndef HAVE_MPROTECT
/* We need to implement mprotect() ourselves. */
global int mprotect (void* addr, size_t len, int prot)
{
  var DWORD oldprot;
  if (!VirtualProtect(addr,len,prot,&oldprot)) {
    var DWORD errcode = GetLastError();
    fprint(stderr,GETTEXTL("VirtualProtect() failed."));
    errno_out(errcode);
    return -1;
  }
  return 0;
}
#endif

#endif

/* -------------- Implementation for Unix except Mac OS X -------------- */

#if defined(HAVE_MMAP_ANON) || defined(HAVE_MMAP_DEVZERO)

/* We don't need both mmap() methods. One is sufficient. */
#ifdef HAVE_MMAP_ANON
  #undef HAVE_MMAP_DEVZERO
#endif

#ifdef HAVE_MMAP_DEVZERO
local int mmap_zero_fd;       /* open handle for /dev/zero */
  /* How to access /dev/zero: Sometimes /dev/zero has permissions 0644.
   Therefore we can OPEN() it only with O_RDONLY instead of O_RDWR.
   Hence, in the mmap() call, we use MAP_PRIVATE instead of MAP_SHARED. */
  #ifdef MAP_FILE
    #define map_flags  MAP_FILE | MAP_PRIVATE
  #else
    #define map_flags  MAP_PRIVATE
  #endif
#endif
#ifdef HAVE_MMAP_ANON
  #define mmap_zero_fd  -1      /* any invalid handle works! */
  #define map_flags  MAP_ANON | MAP_PRIVATE
#endif

local void mmap_init_pagesize (void)
{
  system_pagesize = getpagesize();
  mmap_pagesize =
   #if defined(SPARC) /* && (defined(UNIX_SUNOS5) || defined(UNIX_LINUX) || defined(UNIX_NETBSD) || ...) */
    /* Normal SPARCs have PAGESIZE=4096, UltraSPARCs have PAGESIZE=8192.
       For compatibility of the .mem files between the architectures,
       choose the same value for both here. */
    8192
   #elif defined(UNIX_LINUX) && defined(IA64)
    /* The pagesize can be 4, 8, 16 or 64 KB.
       For compatibility of the .mem files, always choose the same value. */
    65536
   #else
    system_pagesize
   #endif
    ;
}
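
/* Illustrative sketch (not part of the build): since mmap_pagesize is always
   a power of two (and may exceed system_pagesize, as above), callers can
   round addresses and lengths to the multiples that mmap_zeromap() requires
   with simple bit masks. The helper names are assumptions for illustration. */
#if 0
local uintP example_round_down_to_mmap_page (uintP addr)
{ return addr & ~(mmap_pagesize-1); }
local uintP example_round_up_to_mmap_page (uintP addr)
{ return (addr + mmap_pagesize-1) & ~(mmap_pagesize-1); }
#endif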

local int mmap_init (void)
{
 #ifdef HAVE_MMAP_DEVZERO
  {
    var int fd = OPEN("/dev/zero",O_RDONLY,my_open_mask);
    if (fd<0) {
      var int errcode = errno;
      fprintf(stderr,GETTEXTL("Cannot open <%s>."),"/dev/zero");
      errno_out(errcode);
      return -1; /* error */
    }
    mmap_zero_fd = fd;
  }
 #endif
  mincore_init();
  return 0;
}

local int mmap_prepare (uintP* map_addr, uintP* map_endaddr, bool shrinkp)
{
  unused(shrinkp);
  /* Warn before reserving an address range that contains existing memory
     mappings. We don't actually shrink the range [*map_addr,*map_endaddr)
     here. */
  warn_before_reserving_range(*map_addr,*map_endaddr);
  return 0;
}

local int mmap_zeromap (void* map_addr, uintM map_len)
{
  static void* last_addr;
  static uintM last_len;
  static int last_errcode;
  static int repeated;

  warn_before_mmap((uintP)map_addr,(uintP)map_addr+map_len);
  if ( (void*) mmap((void*)map_addr, /* desired address */
                    map_len, /* length */
                    PROT_READ_WRITE, /* access rights */
                    map_flags | MAP_FIXED, /* exactly at this address! */
                    mmap_zero_fd, 0) /* put empty pages */
       == (void*)(-1)) {
    var int errcode = errno;
    fprintf(stderr,GETTEXTL("Cannot map memory to address 0x%lx ."),
            (uintP)map_addr);
    errno_out(errcode);
    /* This error tends to repeat, leading to an endless loop.
       It's better to abort than to loop endlessly. */
    {
      if (map_addr==last_addr && map_len==last_len && errcode==last_errcode) {
        repeated++;
        if (repeated >= 10)
          abort();
      } else {
        last_addr = map_addr; last_len = map_len; last_errcode = errcode;
        repeated = 1;
      }
    }
    return -1; /* error */
  }
  last_addr = NULL; last_len = 0; last_errcode = 0; repeated = 0;
  return 0;
}

#ifdef HAVE_MMAP
local void* mmap_filemap (void* map_addr, uintM map_len, int fd, off_t offset)
{
  warn_before_mmap((uintP)map_addr,(uintP)map_addr+map_len);
  return (void*) mmap((void*)map_addr,
                      map_len,
                      PROT_READ_WRITE,
                      MAP_FIXED | MAP_PRIVATE,
                      fd, offset);
}
#endif

#endif

/* --------------------------------------------------------------------- */

#endif