1 /***********************************************************************
2 *                                                                      *
3 *               This software is part of the ast package               *
4 *          Copyright (c) 1985-2013 AT&T Intellectual Property          *
5 *                      and is licensed under the                       *
6 *                 Eclipse Public License, Version 1.0                  *
7 *                    by AT&T Intellectual Property                     *
8 *                                                                      *
9 *                A copy of the License is available at                 *
10 *          http://www.eclipse.org/org/documents/epl-v10.html           *
11 *         (with md5 checksum b35adb5213ca9657e911e9befb180842)         *
12 *                                                                      *
13 *              Information and Software Systems Research               *
14 *                            AT&T Research                             *
15 *                           Florham Park NJ                            *
16 *                                                                      *
17 *               Glenn Fowler <glenn.s.fowler@gmail.com>                *
18 *                    David Korn <dgkorn@gmail.com>                     *
19 *                     Phong Vo <phongvo@gmail.com>                     *
20 *                                                                      *
21 ***********************************************************************/
22 #if defined(_UWIN) && defined(_BLD_ast)
23 
/* Empty stub emitted on UWIN ast builds so the object file is not empty
** when the real implementation below is compiled out.
** `(void)' gives a proper prototype; a bare `()' declares unspecified
** parameters and disables argument type checking (pre-C23).
*/
void _STUB_vmlast(void){}
25 
26 #else
27 
28 #include	"vmhdr.h"
29 
30 /*	Allocation with freeing and reallocing of last allocated block only.
31 **
32 **	Written by Kiem-Phong Vo, phongvo@gmail.com, 01/16/1994, 03/30/2012.
33 */
34 
/* Key identifying this method's region lock to asolock(). */
#define KEY_LAST		1001
/* Acquire/release the region lock -- skipped when `lcl' is non-zero,
** meaning the caller is an internal (KPV*) entry that already holds it.
*/
#define LASTLOCK(lst,lcl)	((lcl) ? 0 : asolock(&(lst)->lock, KEY_LAST, ASO_LOCK) )
#define LASTOPEN(lst,lcl)	((lcl) ? 0 : asolock(&(lst)->lock, KEY_LAST, ASO_UNLOCK) )

/* Region-private data for the Vmlast method.  Only the most recently
** allocated block is tracked; it is the only one free/resize accept.
*/
typedef struct _vmlast_s
{	Vmdata_t	vmdt;	/* shared region data; presumably must be first, since vm->data is cast to Vmlast_t* -- TODO confirm */
	unsigned int	lock;	/* asolock() lock word		*/
	Block_t*	blk;	/* allocate from this	*/
	Vmuchar_t*	data;	/* start of free memory	*/
	ssize_t		size;	/* size of free memory	*/
	Vmuchar_t*	last;	/* last allocated block */
} Vmlast_t;
47 
/* Allocate `size' bytes from the tail of the region's current segment.
** vm:    region to allocate from (vm->data holds the Vmlast_t).
** size:  requested byte count; rounded up to a MEM_ALIGN multiple.
** local: non-zero when the caller already holds the region lock.
** Returns the new block, or NIL(Void_t*) on failure.  On success the
** block becomes the region's "last" block -- the only one that a later
** lastfree/lastresize will accept.
*/
#if __STD_C
static Void_t* lastalloc(Vmalloc_t* vm, size_t size, int local)
#else
static Void_t* lastalloc(vm, size, local)
Vmalloc_t*	vm;
size_t		size;
int		local;
#endif
{
	Block_t		*blk;
	size_t		sz, blksz;
	size_t		origsz = size;	/* unrounded size, reported to the tracer */
	Vmlast_t	*last = (Vmlast_t*)vm->data;

	LASTLOCK(last, local);

	size = size < MEM_ALIGN ? MEM_ALIGN : ROUND(size,MEM_ALIGN);

	last->last = NIL(Vmuchar_t*); /* wipe record of last allocation */

	if(last->size < size )
	{	if((blk = last->blk) ) /* try extending in place */
		{	blksz = SIZE(blk)&~BITS;
			sz = blksz + (size - last->size);
			if((blk = (*_Vmsegalloc)(vm, blk, sz, VM_SEGALL|VM_SEGEXTEND)) )
			{	/**/DEBUG_ASSERT(blk == last->blk);
				/**/DEBUG_ASSERT((SIZE(blk)&~BITS) > blksz);
				/* credit only the bytes actually gained */
				last->size += (SIZE(blk)&~BITS) - blksz;
			}
		}
		if(!blk ) /* try getting a new block */
		{	if((blk = (*_Vmsegalloc)(vm, NIL(Block_t*), size, VM_SEGALL|VM_SEGEXTEND)) )
			{	/**/DEBUG_ASSERT((SIZE(blk)&~BITS) >= size);
				/* free space left in the previous block is abandoned */
				last->data = DATA(blk);
				last->size = SIZE(blk)&~BITS;
				last->blk  = blk;
			}
		}
	}

	if(last->size >= size) /* allocate memory */
	{	last->last = last->data;
		last->data += size;
		last->size -= size;
	}

	/* last->last stays NIL on failure, so this trace fires only on success */
	if(last->last && !local && _Vmtrace)
		(*_Vmtrace)(vm, NIL(Vmuchar_t*), last->last, origsz, 0);

	LASTOPEN(last, local);

	return (Void_t*)last->last;
}
101 
102 #if __STD_C
lastfree(Vmalloc_t * vm,Void_t * data,int local)103 static int lastfree(Vmalloc_t* vm, Void_t* data, int local )
104 #else
105 static int lastfree(vm, data, local)
106 Vmalloc_t*	vm;
107 Void_t*		data;
108 int		local;
109 #endif
110 {
111 	ssize_t		size;
112 	Vmlast_t	*last = (Vmlast_t*)vm->data;
113 
114 	if(!data)
115 		return 0;
116 
117 	LASTLOCK(last, local);
118 
119 	if(data != (Void_t*)last->last )
120 		data = NIL(Void_t*);
121 	else
122 	{	size = last->data - last->last;	/**/DEBUG_ASSERT(size > 0 && size%MEM_ALIGN == 0);
123 		last->data -= size;
124 		last->size += size;
125 		last->last = NIL(Vmuchar_t*);
126 
127 		if(!local && _Vmtrace)
128 			(*_Vmtrace)(vm, (Vmuchar_t*)data, NIL(Vmuchar_t*), size, 0);
129 	}
130 
131 	LASTOPEN(last, local);
132 
133 	return data ? 0 : -1;
134 }
135 
/* Resize the last allocated block.
** vm:    region owning the block.
** data:  block to resize; NIL degenerates to lastalloc, size 0 to lastfree.
** size:  new byte count; rounded up to a MEM_ALIGN multiple.
** type:  VM_RSMOVE allows moving to a new segment; VM_RSCOPY copies the
**        old contents on a move; VM_RSZERO zeroes any added bytes.
** local: non-zero when the caller already holds the region lock.
** Returns the (possibly moved) block, or NIL on failure -- including
** when `data' is not the last allocated block.
*/
#if __STD_C
static Void_t* lastresize(Vmalloc_t* vm, Void_t* data, size_t size, int type, int local)
#else
static Void_t* lastresize(vm, data, size, type, local )
Vmalloc_t*	vm;
Void_t*		data;
size_t		size;
int		type;
int		local;
#endif
{
	Block_t		*blk;
	ssize_t		sz, oldz, blksz;
	Void_t		*origdt = data;		/* original address, for tracing */
	size_t		origsz = size;		/* unrounded size, for tracing	 */
	Vmlast_t	*last = (Vmlast_t*)vm->data;

	if(!data) /* no block: plain allocation, zero-filled if requested */
	{	data = lastalloc(vm, size, local);
		if(data && (type&VM_RSZERO) )
			memset(data, 0, size);
		return data;
	}
	else if(size <= 0) /* shrinking to nothing: plain free */
	{	(void)lastfree(vm, data, local);
		return NIL(Void_t*);
	}

	LASTLOCK(last, local);

	if(data != (Void_t*)last->last )
		data = NIL(Void_t*); /* only the last block can be resized */
	else
	{	oldz = last->data - last->last; /**/DEBUG_ASSERT(oldz > 0 && oldz%MEM_ALIGN == 0);
		size = ROUND(size, MEM_ALIGN);
		if(size <= oldz) /* getting smaller */
		{	sz = oldz - size;
			last->data -= sz;
			last->size += sz;
		}
		else /* getting larger */
		{	if((oldz + last->size) < size && (blk = last->blk) != NIL(Block_t*) )
			{	/* try to extend in place */
				blksz = SIZE(blk)&~BITS;
				sz = blksz + size - (oldz + last->size);
				if((blk = (*_Vmsegalloc)(vm, blk, sz, VM_SEGALL|VM_SEGEXTEND)) )
				{	/**/DEBUG_ASSERT((SIZE(blk)&~BITS) >= sz);
					/**/DEBUG_ASSERT(blk == last->blk);
					last->size += (SIZE(blk)&~BITS) - blksz;
				}
			}

			if((oldz + last->size) < size && (type&VM_RSMOVE) )
			{	/* try to get new memory; old segment's space is abandoned */
				if((blk = (*_Vmsegalloc)(vm, NIL(Block_t*), size, VM_SEGALL|VM_SEGEXTEND)) )
				{	/**/DEBUG_ASSERT((SIZE(blk)&~BITS) >= size);
					last->size = SIZE(blk)&~BITS;
					last->data = (Vmuchar_t*)DATA(blk);
					last->last = NIL(Vmuchar_t*);
					last->blk  = blk;
				}
			}

			if((oldz + last->size) < size)
				data = NIL(Void_t*); /* still too small: fail */
			else
			{	if(data != (Void_t*)last->last)
				{	/* block moved, reset location */
					last->last = last->data;
					last->data += oldz;
					last->size -= oldz;

					if(type&VM_RSCOPY)
						memcpy(last->last, data, oldz);

					data = (Void_t*)last->last;
				}

				/* zero only the newly added tail, then claim it */
				if(type&VM_RSZERO)
					memset(last->last+oldz, 0, size-oldz);

				last->data += size-oldz;
				last->size -= size-oldz;
			}
		}
	}

	if(data && !local && _Vmtrace)
		(*_Vmtrace)(vm, (Vmuchar_t*)origdt, (Vmuchar_t*)data, origsz, 0);

	LASTOPEN(last, local);

	return (Void_t*)data;
}
230 
231 
232 #if __STD_C
lastalign(Vmalloc_t * vm,size_t size,size_t align,int local)233 static Void_t* lastalign(Vmalloc_t* vm, size_t size, size_t align, int local)
234 #else
235 static Void_t* lastalign(vm, size, align, local)
236 Vmalloc_t*	vm;
237 size_t		size;
238 size_t		align;
239 int		local;
240 #endif
241 {
242 	Vmuchar_t	*data;
243 	size_t		algn;
244 	size_t		 orgsize = size, orgalign = align;
245 	Vmlast_t	*last = (Vmlast_t*)vm->data;
246 
247 	if(size <= 0 || align <= 0)
248 		return NIL(Void_t*);
249 
250 	LASTLOCK(last, local);
251 
252 	size = ROUND(size,MEM_ALIGN);
253 	align = (*_Vmlcm)(align, 2*sizeof(Block_t));
254 
255 	if((data = (Vmuchar_t*)KPVALLOC(vm, size + align, lastalloc)) )
256 	{	if((algn = (size_t)(VMLONG(data)%align)) != 0)
257 		{	/* move forward for required alignment */
258 			data += align - algn; /**/DEBUG_ASSERT((VMLONG(data)%align) == 0);
259 			last->last = data;
260 		}
261 	}
262 
263 	if(data && !local && _Vmtrace)
264 		(*_Vmtrace)(vm, NIL(Vmuchar_t*), data, orgsize, orgalign);
265 
266 	LASTOPEN(last, local);
267 
268 	return (Void_t*)data;
269 }
270 
/* Report region statistics into `st'.  With st == NIL, just report the
** lock state (1 if locked, 0 if not).  A last-region has at most one
** busy block and one free area, so the counts are 0 or 1.
** Always returns 0 when `st' is given.
*/
static int laststat(Vmalloc_t* vm, Vmstat_t* st, int local)
{
	Vmlast_t	*lst = (Vmlast_t*)vm->data;

	if(!st) /* no stat buffer: just return the lock state */
		return lst->lock ? 1 : 0;

	LASTLOCK(lst, local);

	if(lst->last) /* the single busy block, if any */
	{	st->n_busy = 1;
		st->s_busy = lst->data - lst->last;
	}
	if(lst->data) /* the single free area, if any */
	{	st->n_free = 1;
		st->s_free = lst->size;
	}

	LASTOPEN(lst, local);
	return 0;
}
292 
/* Handle region life-cycle events.
** VM_OPEN:    report the size of the method's private data via `arg'.
** VM_ENDOPEN: initialize the private data as if the region was cleared.
** Other events are accepted and ignored.
** Returns 0 on success, -1 on a missing argument or uninitialized region.
*/
static int lastevent(Vmalloc_t* vm, int event, Void_t* arg)
{
	Vmlast_t	*lst;

	switch(event)
	{ case VM_OPEN: /* return the size of Vmlast_t */
		if(!arg)
			return -1;
		*((ssize_t*)arg) = sizeof(Vmlast_t);
		break;
	  case VM_ENDOPEN: /* start as if region was cleared */
		if(!(lst = (Vmlast_t*)vm->data) )
			return -1;
		lst->lock = 0;
		lst->blk  = NIL(Block_t*);
		lst->data = NIL(Vmuchar_t*);
		lst->size = 0;
		lst->last = NIL(Vmuchar_t*);
		break;
	  default: /* all other events are no-ops */
		break;
	}

	return 0;
}
314 
/* Public method for free-1 allocation: only the most recently
** allocated block can be freed or resized.
*/
static Vmethod_t _Vmlast =
{	lastalloc,	/* allocf */
	lastresize,	/* resizef */
	lastfree,	/* freef */
	0,		/* unused method slot -- presumably addrf; TODO confirm against Vmethod_t */
	laststat,	/* statf */
	lastevent,	/* eventf */
	lastalign,	/* alignf */
	VM_MTLAST	/* method identity */
};

__DEFINE__(Vmethod_t*,Vmlast,&_Vmlast);
328 
329 #ifdef NoF
330 NoF(vmlast)
331 #endif
332 
333 #endif
334