/*	$NetBSD: pool-fast.c,v 1.1.1.2 2009/12/02 00:26:09 haad Exp $	*/

/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This file is part of the device-mapper userspace tools.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include "dmlib.h"

struct chunk {
	char *begin, *end;
	struct chunk *prev;
};
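
/*
 * Each chunk is a single dm_malloc()ed block: the struct chunk header
 * sits at the front, the data area follows immediately after it,
 * 'begin' points at the next free byte and 'end' one past the block.
 * Chunks are linked newest-first through 'prev'.
 */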

struct dm_pool {
	struct dm_list list;
	struct chunk *chunk, *spare_chunk;	/* spare_chunk is a one-entry
						   free list that stops the
						   pool 'bobbling' between
						   freeing and reallocating
						   its newest chunk */
	size_t chunk_size;
	size_t object_len;
	unsigned object_alignment;
};

static void _align_chunk(struct chunk *c, unsigned alignment);
static struct chunk *_new_chunk(struct dm_pool *p, size_t s);

/* by default things come out aligned for doubles */
#define DEFAULT_ALIGNMENT __alignof__ (double)

struct dm_pool *dm_pool_create(const char *name, size_t chunk_hint)
{
	size_t new_size = 1024;
	struct dm_pool *p = dm_malloc(sizeof(*p));

	if (!p) {
		log_error("Couldn't create memory pool %s (size %"
			  PRIsize_t ")", name, sizeof(*p));
		return NULL;
	}
	memset(p, 0, sizeof(*p));

	/* round the hint plus chunk header up to the next power of 2,
	   with a floor of 1024 bytes */
	p->chunk_size = chunk_hint + sizeof(struct chunk);
	while (new_size < p->chunk_size)
		new_size <<= 1;
	p->chunk_size = new_size;
	dm_list_add(&_dm_pools, &p->list);
	return p;
}

void dm_pool_destroy(struct dm_pool *p)
{
	struct chunk *c, *pr;
	dm_free(p->spare_chunk);
	c = p->chunk;
	while (c) {
		pr = c->prev;
		dm_free(c);
		c = pr;
	}

	dm_list_del(&p->list);
	dm_free(p);
}
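
/*
 * Usage sketch (illustrative only, not part of this file): allocations
 * are carved from the pool without individual frees, and everything is
 * returned in one go by dm_pool_destroy().
 *
 *	struct dm_pool *mem = dm_pool_create("example", 1024);
 *	if (mem) {
 *		char *buf = dm_pool_alloc(mem, 64);
 *		if (buf)
 *			memset(buf, 0, 64);
 *		dm_pool_destroy(mem);
 *	}
 */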

void *dm_pool_alloc(struct dm_pool *p, size_t s)
{
	return dm_pool_alloc_aligned(p, s, DEFAULT_ALIGNMENT);
}

void *dm_pool_alloc_aligned(struct dm_pool *p, size_t s, unsigned alignment)
{
	struct chunk *c = p->chunk;
	void *r;

	/* realign begin */
	if (c)
		_align_chunk(c, alignment);

	/* have we got room? */
	if (!c || (c->begin > c->end) || (c->end - c->begin < s)) {
		/* allocate new chunk */
		size_t needed = s + alignment + sizeof(struct chunk);
		c = _new_chunk(p, (needed > p->chunk_size) ?
			       needed : p->chunk_size);

		if (!c)
			return NULL;

		_align_chunk(c, alignment);
	}

	r = c->begin;
	c->begin += s;
	return r;
}
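
/*
 * Sketch (illustrative): carving a 16-byte-aligned region out of the
 * pool from the earlier sketch; the alignment is assumed to be a power
 * of two.
 *
 *	void *data = dm_pool_alloc_aligned(mem, 4096, 16);
 */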

/* empty the pool: rewind to the start of the oldest chunk's data */
void dm_pool_empty(struct dm_pool *p)
{
	struct chunk *c;

	/* walk back to the oldest chunk */
	for (c = p->chunk; c && c->prev; c = c->prev)
		;

	if (c)
		dm_pool_free(p, (char *) (c + 1));
}

/*
 * Free 'ptr' and everything allocated after it: allocation is
 * stack-like, so chunks newer than the one holding 'ptr' are released
 * (one is kept back as spare_chunk).
 */
void dm_pool_free(struct dm_pool *p, void *ptr)
{
	struct chunk *c = p->chunk;

	while (c) {
		if (((char *) c < (char *) ptr) &&
		    ((char *) c->end > (char *) ptr)) {
			c->begin = ptr;
			break;
		}

		/* 'ptr' isn't in this chunk; retire the whole chunk */
		if (p->spare_chunk)
			dm_free(p->spare_chunk);
		p->spare_chunk = c;
		c = c->prev;
	}

	if (!c)
		log_error("Internal error: pool_free asked to free pointer "
			  "not in pool");
	else
		p->chunk = c;
}
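
/*
 * Sketch (illustrative): because allocation is stack-like, freeing an
 * earlier pointer also releases everything allocated after it.
 *
 *	void *mark = dm_pool_alloc(mem, 16);
 *	void *scratch = dm_pool_alloc(mem, 1024);
 *	dm_pool_free(mem, mark);	/+ both mark and scratch released +/
 */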

/*
 * Start building a variable-sized object; 'hint' is the expected size,
 * used to pick a large enough chunk up front.
 */
int dm_pool_begin_object(struct dm_pool *p, size_t hint)
{
	struct chunk *c = p->chunk;
	const size_t align = DEFAULT_ALIGNMENT;

	p->object_len = 0;
	p->object_alignment = align;

	if (c)
		_align_chunk(c, align);

	if (!c || (c->begin > c->end) || (c->end - c->begin < hint)) {
		/* allocate a new chunk */
		c = _new_chunk(p,
			       hint > (p->chunk_size - sizeof(struct chunk)) ?
			       hint + sizeof(struct chunk) + align :
			       p->chunk_size);

		if (!c)
			return 0;

		_align_chunk(c, align);
	}

	return 1;
}

/*
 * Append 'delta' bytes from 'extra' to the object being built; a delta
 * of 0 means 'extra' is a NUL-terminated string and strlen() is used.
 */
int dm_pool_grow_object(struct dm_pool *p, const void *extra, size_t delta)
{
	struct chunk *c = p->chunk, *nc;

	if (!delta)
		delta = strlen(extra);

	if (c->end - (c->begin + p->object_len) < delta) {
		/* move the partial object into a new chunk */
		if (p->object_len + delta > (p->chunk_size / 2))
			nc = _new_chunk(p, (p->object_len + delta) * 2);
		else
			nc = _new_chunk(p, p->chunk_size);

		if (!nc)
			return 0;

		_align_chunk(p->chunk, p->object_alignment);
		memcpy(p->chunk->begin, c->begin, p->object_len);
		c = p->chunk;
	}

	memcpy(c->begin + p->object_len, extra, delta);
	p->object_len += delta;
	return 1;
}

/* fix the object in place and return its address */
void *dm_pool_end_object(struct dm_pool *p)
{
	struct chunk *c = p->chunk;
	void *r = c->begin;
	c->begin += p->object_len;
	p->object_len = 0u;
	p->object_alignment = DEFAULT_ALIGNMENT;
	return r;
}

/* throw away the object being built */
void dm_pool_abandon_object(struct dm_pool *p)
{
	p->object_len = 0;
	p->object_alignment = DEFAULT_ALIGNMENT;
}
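
/*
 * Object-building sketch (illustrative): grow a string piece by piece
 * and fix it in place; a delta of 0 asks dm_pool_grow_object() to call
 * strlen() on its input.
 *
 *	char *str = NULL;
 *	if (dm_pool_begin_object(mem, 64)) {
 *		if (dm_pool_grow_object(mem, "hello ", 0) &&
 *		    dm_pool_grow_object(mem, "world", 0) &&
 *		    dm_pool_grow_object(mem, "", 1))	/+ trailing NUL +/
 *			str = dm_pool_end_object(mem);
 *		else
 *			dm_pool_abandon_object(mem);
 *	}
 */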

/*
 * Advance c->begin to the next 'alignment' boundary; 'alignment' must
 * be a power of two.  An already-aligned pointer still moves forward a
 * full step; the allocators budget an extra 'alignment' bytes when
 * sizing new chunks to allow for this.
 */
static void _align_chunk(struct chunk *c, unsigned alignment)
{
	c->begin += alignment - ((unsigned long) c->begin & (alignment - 1));
}
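
/*
 * Worked example of the arithmetic above with alignment == 8:
 * begin == 0x1003 -> 0x1003 + (8 - 3) == 0x1008;
 * begin == 0x1008 -> 0x1008 + 8 == 0x1010 (a full step, see above).
 */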

static struct chunk *_new_chunk(struct dm_pool *p, size_t s)
{
	struct chunk *c;

	if (p->spare_chunk &&
	    ((p->spare_chunk->end - (char *) p->spare_chunk) >= s)) {
		/* reuse old chunk */
		c = p->spare_chunk;
		p->spare_chunk = NULL;
	} else {
		if (!(c = dm_malloc(s))) {
			log_error("Out of memory.  Requested %" PRIsize_t
				  " bytes.", s);
			return NULL;
		}

		c->end = (char *) c + s;
	}

	/* link at the head of the chunk list; data starts after the header */
	c->prev = p->chunk;
	c->begin = (char *) (c + 1);
	p->chunk = c;

	return c;
}