1 /***********************************************************************
2 * *
3 * This software is part of the ast package *
4 * Copyright (c) 1985-2013 AT&T Intellectual Property *
5 * and is licensed under the *
6 * Eclipse Public License, Version 1.0 *
7 * by AT&T Intellectual Property *
8 * *
9 * A copy of the License is available at *
10 * http://www.eclipse.org/org/documents/epl-v10.html *
11 * (with md5 checksum b35adb5213ca9657e911e9befb180842) *
12 * *
13 * Information and Software Systems Research *
14 * AT&T Research *
15 * Florham Park NJ *
16 * *
17 * Glenn Fowler <glenn.s.fowler@gmail.com> *
18 * David Korn <dgkorn@gmail.com> *
19 * Phong Vo <phongvo@gmail.com> *
20 * *
21 ***********************************************************************/
22 #include "vmhdr.h"
23 #include <sys/types.h>
24 #include <sys/shm.h>
25 #include <sys/ipc.h>
26 #include <signal.h>
27 #include <setjmp.h>
28
29 #if _mem_mmap_anon
30 #include <sys/mman.h>
31 #ifndef MAP_ANON
32 #ifdef MAP_ANONYMOUS
33 #define MAP_ANON MAP_ANONYMOUS
34 #else
35 #define MAP_ANON 0
36 #endif /*MAP_ANONYMOUS*/
37 #endif /*MAP_ANON*/
38 #endif /*_mem_mmap_anon*/
39
40 /* Heuristic to suggest an address usable for mapping shared memory
41 **
42 ** Written by Kiem-Phong Vo, phongvo@gmail.com, 07/07/2012
43 */
44
45 /* see if a given range of address is available for mapping */
46 #define VMCHKMEM 1 /* set this to zero if signal&sigsetjmp don't work */
47
48 #if VMCHKMEM
49
50 /*
51 * NOTE: all (*_Vmchkmem)() calls are locked by _Vmsbrklock
52 * make sure future usage follows suit
53 */
54
55 typedef void (*Sighandler_f)_ARG_((int));
56
57 static volatile int peek;
58 static sigjmp_buf jmp;
59
sigsegv(int sig)60 static void sigsegv(int sig)
61 {
62 signal(sig, sigsegv);
63 siglongjmp(jmp, 1);
64 }
65
_vmchkmem(Vmuchar_t * area,size_t size)66 static int _vmchkmem(Vmuchar_t* area, size_t size)
67 {
68 Sighandler_f oldsigsegv;
69 int available;
70
71 if (!(_Vmassert & VM_check_seg))
72 return 1;
73 oldsigsegv = (Sighandler_f)signal(SIGSEGV, sigsegv);
74 if (!(available = sigsetjmp(jmp, 1)))
75 peek = area[0];
76 if (available && !(available = sigsetjmp(jmp, 1)))
77 peek = area[size-1];
78 if (available && !(available = sigsetjmp(jmp, 1)))
79 peek = area[size/2];
80 signal(SIGSEGV, oldsigsegv);
81 return available;
82 }
83
84 #else
85
86 #define _vmchkmem(a,z) (1) /* beware of unbounded optimism! */
87
88 #endif /*VMCHKMEM*/
89
90 /* return page size */
_vmpagesize(void)91 ssize_t _vmpagesize(void)
92 {
93 if (_Vmpagesize <= 0)
94 {
95 #if _lib_getpagesize
96 if ((_Vmpagesize = getpagesize()) <= 0)
97 #endif
98 _Vmpagesize = VM_PAGESIZE;
99 _Vmpagesize = (*_Vmlcm)(_Vmpagesize, MEM_ALIGN);
100 }
101 #if VMCHKMEM
102 _Vmchkmem = _vmchkmem; /* _vmchkmem() can check memory availability */
103 #endif
104 return _Vmpagesize;
105 }
106
_vmboundaries(void)107 int _vmboundaries(void)
108 {
109 ssize_t memz, z;
110 unsigned long left, rght, size;
111 int shmid; /* shared memory id */
112 Vmuchar_t *tmp, *shm, *min, *max;
113
114 VMPAGESIZE();
115 #if !_WINIX
116 /* try to get a shared memory segment, memz is the successful size */
117 memz = sizeof(void*) < 8 ? 1024*1024 : 64*1024*1024;
118 for(; memz >= _Vmpagesize; memz /= 2)
119 { z = ROUND(memz, _Vmpagesize);
120 if((shmid = shmget(IPC_PRIVATE, z, IPC_CREAT|0600)) >= 0 )
121 break;
122 }
123 if(memz >= _Vmpagesize) /* did get a shared segment */
124 memz = ROUND(memz, _Vmpagesize);
125 else
126 { /**/DEBUG_MESSAGE("shmget() failed");
127 return (int)_Vmpagesize;
128 }
129
130 /* the stack and the heap in Unix programs are conventionally set
131 ** at opposite ends of the available address space. So, we use them
132 ** as candidate boundaries for mappable memory.
133 */
134 min = (Vmuchar_t*)sbrk(0); min = (Vmuchar_t*)ROUND((unsigned long)min, _Vmpagesize); /* heap */
135 max = (Vmuchar_t*)(&max); max = (Vmuchar_t*)ROUND((unsigned long)max, _Vmpagesize); /* stack */
136 if(min > max)
137 { tmp = min; min = max; max = tmp; }
138
139 /* now attach a segment to see where it falls in the range */
140 if(!(shm = shmat(shmid, NIL(Void_t*), 0600)) || shm == (Vmuchar_t*)(-1) )
141 { /**/DEBUG_MESSAGE("shmat() failed first NULL attachment");
142 goto done;
143 }
144 else shmdt((Void_t*)shm);
145 if(shm < min || shm > max )
146 { /**/DEBUG_MESSAGE("shmat() got an out-of-range address");
147 goto done;
148 }
149
150 /* Heuristic: allocate address in the larger side */
151 left = shm - min;
152 rght = max - shm;
153
154 min = max = shm; /* compute bounds of known mappable memory */
155 for(size = 7*(left > rght ? left : rght)/8; size > memz; size /= 2 )
156 { size = ROUND(size, _Vmpagesize);
157 shm = left > rght ? max-size : min+size;
158 if((tmp = shmat(shmid, shm, 0600)) == shm )
159 { shmdt((Void_t*)tmp);
160 if(left > rght)
161 min = shm;
162 else max = shm;
163 break;
164 }
165 }
166
167 if((min+memz) >= max ) /* no mappable region of memory */
168 { /**/DEBUG_MESSAGE("vmmaddress: No mappable memory region found");
169 goto done;
170 }
171
172 /* search outward from last computed bound for a better bound */
173 for(z = memz; z < size; z *= 2 )
174 { shm = left > rght ? min-z : max+z;
175 if((tmp = shmat(shmid, shm, 0600)) == shm )
176 shmdt((Void_t*)tmp);
177 else /* failing to attach means at limit or close to it */
178 { if(left > rght)
179 min -= z/2;
180 else max += z/2;
181 break;
182 }
183 }
184
185 /* amount to offset from boundaries to avoid random collisions */
186 z = (max - min)/(sizeof(Void_t*) > 4 ? 4 : 8);
187 z = ROUND(z, _Vmpagesize);
188
189 /* these are the bounds that we can use */
190 _Vmmemmin = min;
191 _Vmmemmax = max;
192
193 _Vmmemaddr = max - z; /* address usable by vmmaddress() */
194 _Vmmemsbrk = NIL(Vmuchar_t*); /* address usable for sbrk() simulation */
195
196 #if _mem_mmap_anon /* see if we can simulate sbrk(): memory grows from low to high */
197 /* map two consecutive pages to see if they come out adjacent */
198 tmp = (Void_t*)mmap((Void_t*)(min+z), _Vmpagesize, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
199 shm = (Void_t*)mmap((Void_t*)(tmp+_Vmpagesize), _Vmpagesize, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
200 if(tmp && tmp != (Vmuchar_t*)(-1) )
201 munmap((Void_t*)tmp, _Vmpagesize);
202 if(shm && shm != (Vmuchar_t*)(-1) )
203 munmap((Void_t*)shm, _Vmpagesize);
204
205 if(tmp && tmp != (Vmuchar_t*)(-1) && shm && shm != (Vmuchar_t*)(-1) )
206 { _Vmmemsbrk = shm+_Vmpagesize; /* mmap starts from here */
207
208 if(tmp >= (_Vmmemmin + (_Vmmemmax - _Vmmemmin)/2) ||
209 shm >= (_Vmmemmin + (_Vmmemmax - _Vmmemmin)/2) ||
210 shm < tmp ) /* mmap can be used but needs MAP_FIXED! */
211 {
212 #if !VMCHKMEM
213 _Vmmemsbrk = NIL(Vmuchar_t*); /* no memory checking, must use sbrk() */
214 #endif /*VMCHKMEM*/
215 }
216 }
217 #endif /*_mem_mmap_anon_*/
218
219 #endif /*!_WINIX*/
220
221 done: (void)shmctl(shmid, IPC_RMID, 0);
222 return 0;
223 }
224
225 /* Function to suggest an address usable for mapping shared memory. */
vmmaddress(size_t size)226 Void_t* vmmaddress(size_t size)
227 {
228 Vmuchar_t *addr, *memaddr;
229
230 VMBOUNDARIES();
231 if(_Vmmemaddr)
232 for(size = ROUND(size, _Vmpagesize); (addr = (memaddr = _Vmmemaddr) - size) >= _Vmmemmin; )
233 if(asocasptr(&_Vmmemaddr, memaddr, addr) == memaddr && _vmchkmem(addr, size))
234 return addr;
235 return NIL(Void_t*);
236 }
237