/***********************************************************************
*                                                                      *
*               This software is part of the ast package               *
*          Copyright (c) 1985-2013 AT&T Intellectual Property          *
*                      and is licensed under the                       *
*                 Eclipse Public License, Version 1.0                  *
*                    by AT&T Intellectual Property                     *
*                                                                      *
*                A copy of the License is available at                 *
*          http://www.eclipse.org/org/documents/epl-v10.html           *
*         (with md5 checksum b35adb5213ca9657e911e9befb180842)         *
*                                                                      *
*              Information and Software Systems Research               *
*                            AT&T Research                             *
*                           Florham Park NJ                            *
*                                                                      *
*               Glenn Fowler <glenn.s.fowler@gmail.com>                *
*                    David Korn <dgkorn@gmail.com>                     *
*                     Phong Vo <phongvo@gmail.com>                     *
*                                                                      *
***********************************************************************/
#if defined(_UWIN) && defined(_BLD_ast)

void _STUB_vmpool(){}

#else

#include	"vmhdr.h"

/*	Method for pool allocation.
**	All elements in a pool have the same size.
**
**	Written by Kiem-Phong Vo, phongvo@gmail.com, 01/16/94, 06/22/2012.
*/

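/* A minimal usage sketch (not part of this file), assuming the standard
** vmalloc interface from <vmalloc.h>; the discipline and the element size
** below are illustrative only:
**
**	Vmalloc_t* vm = vmopen(Vmdcheap, Vmpool, 0);
**	void* a = vmalloc(vm, 64);	// first call fixes the element size
**	void* b = vmalloc(vm, 64);	// every later request must also be 64
**	vmfree(vm, a);			// element goes back on the free list
**	vmclose(vm);
*/
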
/* data structures to keep pool elements */
#define FOOBAR	0xf00ba5
typedef struct _pool_s
{	struct _pool_s*	next;	/* linked list		*/
	unsigned int	foo;	/* free indicator	*/
} Pool_t;

typedef struct _vmpool_s
{	Vmdata_t	vmdt;
	ssize_t		size;	/* size of a block	*/
	ssize_t		nblk;	/* total #blocks	*/
	Pool_t*		free;	/* list of free blocks	*/
} Vmpool_t;

#define POOLSIZE(sz)	ROUND(ROUND((sz), sizeof(Pool_t)), MEM_ALIGN)
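
/* Example of the rounding above: assuming sizeof(Pool_t) == 16 and
** MEM_ALIGN == 16 (both are platform-dependent), POOLSIZE(20) rounds 20
** up to 32, a multiple of sizeof(Pool_t) that is already MEM_ALIGN-aligned,
** so every element of that pool occupies 32 bytes.
*/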

#ifdef DEBUG
static int	N_pool;	/* counter for Vmpool calls	*/
#endif

#if __STD_C
static Void_t* poolalloc(Vmalloc_t* vm, size_t size, int local)
#else
static Void_t* poolalloc(vm, size, local )
Vmalloc_t*	vm;
size_t		size;
int		local;
#endif
{
	Pool_t		*pl, *last, *list, *free;
	Block_t		*blk;
	Vmuchar_t	*dt, *enddt;
	Vmpool_t	*pool = (Vmpool_t*)vm->data;

	if(size <= 0)
		return NIL(Void_t*);

	if(size != pool->size )
	{	if(pool->size <= 0) /* first time */
			pool->size = size;
		else	return NIL(Void_t*);
	}

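	/* the free list may be shared, so the strategy below is lock-free:
	** detach the entire free list with an atomic compare-and-swap, hand
	** one element to the caller, then push the remainder back
	*/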
	list = last = NIL(Pool_t*);
	for(;;) /* grab the free list */
	{	if(!(list = pool->free) )
			break;
		if(asocasptr(&pool->free, list, NIL(Block_t*)) == list)
			break;
	}

	if(!list) /* need new memory */
	{	size = POOLSIZE(pool->size);
		if(!(blk = (*_Vmsegalloc)(vm, NIL(Block_t*), ROUND(2*size, pool->vmdt.incr), VM_SEGALL|VM_SEGEXTEND)) )
			return NIL(Void_t*);

		dt = DATA(blk); enddt = dt + BDSZ(blk);
		list = NIL(Pool_t*); last = (Pool_t*)dt;
		for(; dt+size <= enddt; dt += size)
		{	pl = (Pool_t*)dt;
			pl->foo = FOOBAR;
			pl->next = list; list = pl;
		}
		asoaddsize(&pool->nblk, BDSZ(blk)/size);
	}

	pl = list; /* grab 1 then reinsert the rest */
	if((list = list->next) )
	{	if(asocasptr(&pool->free, NIL(Block_t*), list) != NIL(Block_t*))
		{	if(!last)
				for(last = list;; last = last->next)
					if(!last->next)
						break;
			for(;;)
			{	last->next = free = pool->free;
				if(asocasptr(&pool->free, free, list) == free)
					break;
			}
		}
	}

	if(!local && pl && _Vmtrace)
		(*_Vmtrace)(vm, NIL(Vmuchar_t*), (Vmuchar_t*)pl, pool->size, 0);

	return (Void_t*)pl;
}

#if __STD_C
static int poolfree(Vmalloc_t* vm, Void_t* data, int local )
#else
static int poolfree(vm, data, local)
Vmalloc_t*	vm;
Void_t*		data;
int		local;
#endif
{
	Pool_t		*pl, *free;
	Vmpool_t	*pool = (Vmpool_t*)vm->data;

	if(!data)
		return 0;
	if(pool->size <= 0)
		return -1;

	pl = (Pool_t*)data;
	pl->foo = FOOBAR;
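	/* lock-free push: link the element onto the shared free list and
	** retry the compare-and-swap until no other thread has changed the head
	*/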
	for(;;)
	{	pl->next = free = pool->free;
		if(asocasptr(&pool->free, free, pl) == free)
			break;
	}

	if(!local && _Vmtrace)
		(*_Vmtrace)(vm, (Vmuchar_t*)data, NIL(Vmuchar_t*), pool->size, 0);

	return 0;
}

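/* Resizing is essentially a no-op for a pool region: a NIL data argument
** behaves like poolalloc (zero-filled if VM_RSZERO is set), a zero size
** behaves like poolfree, and any true resize request fails because every
** element in the pool has the same fixed size.
*/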
#if __STD_C
static Void_t* poolresize(Vmalloc_t* vm, Void_t* data, size_t size, int type, int local )
#else
static Void_t* poolresize(vm, data, size, type, local )
Vmalloc_t*	vm;
Void_t*		data;
size_t		size;
int		type;
int		local;
#endif
{
	NOTUSED(type);

	if(!data)
	{	data = poolalloc(vm, size, local);
		if(data && (type&VM_RSZERO) )
			memset(data, 0, size);
		return data;
	}
	else if(size == 0)
	{	(void)poolfree(vm, data, local);
		return NIL(Void_t*);
	}
	else	return NIL(Void_t*);
}

#if __STD_C
static Void_t* poolalign(Vmalloc_t* vm, size_t size, size_t align, int local)
#else
static Void_t* poolalign(vm, size, align, local)
Vmalloc_t*	vm;
size_t		size;
size_t		align;
int		local;
#endif
{
	NOTUSED(vm);
	NOTUSED(size);
	NOTUSED(align);
	NOTUSED(local);
	return NIL(Void_t*);
}

/* get statistics */
static int poolstat(Vmalloc_t* vm, Vmstat_t* st, int local )
{
	size_t		size;
	Pool_t		*pl;
	Vmpool_t	*pool = (Vmpool_t*)vm->data;

	if(!st) /* just checking lock state */
		return 0;

	if(pool->size <= 0 )
		return -1;

	size = ROUND(pool->size, MEM_ALIGN);

	for(pl = pool->free; pl; pl = pl->next )
		st->n_free += 1;
	st->s_free = st->n_free * size;

	st->n_busy = pool->nblk - st->n_free;
	st->s_busy = st->n_busy * size;

	return 0;
}

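/* Region events: VM_OPEN reports the size of the method-private data
** (a Vmpool_t) that the core allocator must reserve, and VM_ENDOPEN
** initializes that data so the region starts out empty.
*/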
static int poolevent(Vmalloc_t* vm, int event, Void_t* arg)
{
	Vmpool_t	*pool;

	if(event == VM_OPEN ) /* return the size of Vmpool_t */
	{	if(!arg)
			return -1;
		*((ssize_t*)arg) = sizeof(Vmpool_t);
	}
	else if(event == VM_ENDOPEN) /* start as if region was cleared */
	{	if(!(pool = (Vmpool_t*)vm->data) )
			return -1;
		pool->size = 0;
		pool->free = NIL(Pool_t*);
	}
	return 0;
}

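/* Dispatch table wiring the pool method into the generic Vmalloc_t
** interface.  The zero entry leaves one handler (assumed here to be the
** address-query function of Vmethod_t) unimplemented for pool regions.
*/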
/* Public interface */
static Vmethod_t _Vmpool =
{	poolalloc,
	poolresize,
	poolfree,
	0,
	poolstat,
	poolevent,
	poolalign,
	VM_MTPOOL
};

__DEFINE__(Vmethod_t*,Vmpool,&_Vmpool);

#ifdef NoF
NoF(vmpool)
#endif

#endif