1 /*
2 * Copyright (c) 1987, 1991 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * $Id: malloc.c,v 1.1 94/10/20 00:03:04 bill Exp $
34 */
35
36 #include "sys/param.h"
37 #include "sys/errno.h"
38 #include "proc.h"
39 #include "malloc.h"
40 #include "vm.h"
41 #include "kmem.h"
42
43 #include "prototypes.h"
44
struct kmembuckets bucket[MINBUCKET + 16];	/* free lists, one per power-of-2 size class */
struct kmemstats kmemstats[M_LAST];		/* per-type (M_*) usage accounting */
struct kmemusage *kmemusage;			/* per-page usage records for the kmem arena */
char *kmembase, *kmemlimit;			/* bounds of the kmem submap arena */
char *memname[] = INITKMEMNAMES;		/* printable names for the M_* types */
50
51 /*
52 * Allocate a block of memory
53 */
54 void *
malloc(u_long size,int type,int flags)55 malloc(u_long size, int type, int flags)
56 {
57 struct kmembuckets *kbp;
58 struct kmemusage *kup;
59 long indx, npg, alloc, allocsize;
60 int s;
61 caddr_t va, cp, savedlist;
62 #ifdef KMEMSTATS
63 struct kmemstats *ksp = &kmemstats[type];
64
65 if (((unsigned long)type) > M_LAST)
66 panic("malloc - bogus type");
67 #endif
68
69 indx = BUCKETINDX(size);
70 kbp = &bucket[indx];
71 s = splimp();
72 #ifdef KMEMSTATS
73 while (ksp->ks_memuse >= ksp->ks_limit) {
74 if (flags & M_NOWAIT) {
75 splx(s);
76 return ((void *) NULL);
77 }
78 if (ksp->ks_limblocks < 65535)
79 ksp->ks_limblocks++;
80 tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
81 }
82 #endif
83 if (kbp->kb_next == NULL) {
84 if (size >= MAXALLOCSAVE)
85 allocsize = roundup(size, CLBYTES);
86 else
87 allocsize = 1 << indx;
88 npg = clrnd(btoc(allocsize));
89 va = (caddr_t) kmem_alloc(kmem_map, (vm_size_t)ctob(npg),
90 flags);
91 if (va == NULL) {
92 splx(s);
93 return ((void *) NULL);
94 }
95 #ifdef KMEMSTATS
96 kbp->kb_total += kbp->kb_elmpercl;
97 #endif
98 kup = btokup(va);
99 kup->ku_indx = indx;
100 if (allocsize >= MAXALLOCSAVE) {
101 if (npg > 65535)
102 panic("malloc: allocation too large");
103 kup->ku_pagecnt = npg;
104 #ifdef KMEMSTATS
105 ksp->ks_memuse += allocsize;
106 #endif
107 goto out;
108 }
109 #ifdef KMEMSTATS
110 kup->ku_freecnt = kbp->kb_elmpercl;
111 kbp->kb_totalfree += kbp->kb_elmpercl;
112 #endif
113 /*
114 * Just in case we blocked while allocating memory,
115 * and someone else also allocated memory for this
116 * bucket, don't assume the list is still empty.
117 */
118 savedlist = kbp->kb_next;
119 kbp->kb_next = va + (npg * NBPG) - allocsize;
120 for (cp = kbp->kb_next; cp > va; cp -= allocsize)
121 *(caddr_t *)cp = cp - allocsize;
122 *(caddr_t *)cp = savedlist;
123 }
124 va = kbp->kb_next;
125 kbp->kb_next = *(caddr_t *)va;
126 #ifdef KMEMSTATS
127 kup = btokup(va);
128 if (kup->ku_indx != indx)
129 panic("malloc: wrong bucket");
130 if (kup->ku_freecnt == 0)
131 panic("malloc: lost data");
132 kup->ku_freecnt--;
133 kbp->kb_totalfree--;
134 ksp->ks_memuse += 1 << indx;
135 out:
136 kbp->kb_calls++;
137 ksp->ks_inuse++;
138 ksp->ks_calls++;
139 if (ksp->ks_memuse > ksp->ks_maxused)
140 ksp->ks_maxused = ksp->ks_memuse;
141 #else
142 out:
143 #endif /* KMEMSTATS */
144 splx(s);
145
146 /* clear memory? */
147 if (flags & M_ZERO_IT)
148 memset((void *)va, 0, size);
149
150 return ((void *) va);
151 }
152
#ifdef DIAGNOSTIC
/*
 * Low-order bit masks indexed by bucket exponent (ku_indx).  free()
 * uses these to verify that a freed address is aligned to its
 * bucket's element size; the table covers sizes up to 64KB.
 */
long addrmask[] = { 0x00000000,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};
#endif /* DIAGNOSTIC */
161
/*
 * Free a block of memory allocated by malloc.
 *
 * addr: pointer previously returned by malloc().
 * type: the same M_* usage class passed to malloc(); credits the
 *       kmemstats[] accounting under KMEMSTATS.
 *
 * Runs at splimp() internally.  Panics on addresses outside the
 * kmem arena and, under DIAGNOSTIC/KMEMSTATS, on misaligned or
 * doubly-freed blocks.
 */
void
free(void *addr, int type)
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	long alloc, size;
	int s;
	extern int end;		/* linker symbol: first address past the kernel image */
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];
#endif /* KMEMSTATS */

	/* Sanity: the address must lie inside the kmem submap. */
	if (addr < (void *)&end ||
	    (vm_offset_t)addr < vm_map_min(kmem_map) ||
	    (vm_offset_t)addr > vm_map_max(kmem_map)) {
		panic("free: outside arena");
	}

	/* Recover the element size from the page's usage record. */
	kup = btokup(addr);
	size = 1 << kup->ku_indx;
#ifdef DIAGNOSTIC
	/*
	 * A block must be aligned to its bucket's element size; the
	 * mask is clamped at one page-cluster for oversize blocks.
	 */
	if (size > NBPG * CLSIZE)
		alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr");
#endif /* DIAGNOSTIC */
	kbp = &bucket[kup->ku_indx];
	s = splimp();
	if (size >= MAXALLOCSAVE) {
		/* Oversize block: return its dedicated pages to the VM. */
		kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		/* Wake malloc() sleepers if this drops us below the limit. */
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif /* KMEMSTATS */
		splx(s);
		return;
	}
#ifdef KMEMSTATS
	kbp->kb_totalfree++;
	kup->ku_freecnt++;
	/*
	 * NB: the "else if" below binds to the INNER "if" — it runs only
	 * when the page's free count has reached exactly one full cluster
	 * (more than a cluster means a double free) and the bucket holds
	 * more free elements than its high-water mark.
	 */
	if (kup->ku_freecnt >= kbp->kb_elmpercl)
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat && type != M_MBUF) {
			/* Reclaim this page's elements back to the VM. */
			caddr_t freepage = kuptob(kup);
			register caddr_t *cpp, *nxt;
			int freesize = kup->ku_freecnt * size;

			ksp->ks_memuse -= size;
			kup->ku_indx = 0;
			if (ksp->ks_memuse + size >= ksp->ks_limit &&
			    ksp->ks_memuse < ksp->ks_limit)
				wakeup((caddr_t)ksp);
			ksp->ks_inuse--;
			kbp->kb_total -= kbp->kb_elmpercl;
			kbp->kb_totalfree -= kbp->kb_elmpercl;

			if (--kup->ku_freecnt != 0) {

				/* walk bucket list deleting entries in page freed */
				for (cpp = &kbp->kb_next; *cpp != NULL ; ) {
					nxt = *(caddr_t **)cpp;
					if ((caddr_t)nxt >= freepage
					    && (caddr_t)nxt < freepage + freesize) {
						/* unlink a run of entries inside the freed page */
						while (*nxt >= freepage
						    && *nxt < freepage + freesize) {
							nxt = *(caddr_t **)nxt;
							kup->ku_freecnt--;
						}
						*cpp = *(caddr_t *) nxt;
						kup->ku_freecnt--;
					} else if (nxt == NULL)
						break;
					else
						cpp = nxt;
				}
				/* every element of the page must have been accounted for */
				if (kup->ku_freecnt != 0)
					panic("free: missing a bucket");
			}
			kmem_free(kmem_map, (vm_offset_t)freepage, freesize);
			splx(s);
			return;
		}
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#endif /* KMEMSTATS */
	/* Push the block onto the front of its bucket's free list. */
	*(caddr_t *)addr = kbp->kb_next;
	kbp->kb_next = addr;
	splx(s);
}
267
268 /*
269 * Initialize the kernel memory allocator
270 */
271 void
kmeminit(void)272 kmeminit(void)
273 {
274 long indx;
275 int npg;
276
277 #if ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
278 ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
279 #endif
280 #if (MAXALLOCSAVE > MINALLOCSIZE * 32768)
281 ERROR!_kmeminit:_MAXALLOCSAVE_too_big
282 #endif
283 #if (MAXALLOCSAVE < CLBYTES)
284 ERROR!_kmeminit:_MAXALLOCSAVE_too_small
285 #endif
286 npg = VM_KMEM_SIZE / NBPG;
287 kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
288 (vm_size_t)(npg * sizeof(struct kmemusage)), 0);
289 memset((caddr_t)kmemusage, 0, npg * sizeof(struct kmemusage));
290 kmem_map = kmem_suballoc(/*kernel_map, (vm_offset_t *)&kmembase,
291 (vm_offset_t *)&kmemlimit,*/ (vm_size_t)(npg * NBPG), FALSE);
292 kmembase = (caddr_t) vm_map_min(kmem_map);
293 kmemlimit = (caddr_t) vm_map_max(kmem_map);
294 #ifdef KMEMSTATS
295 for (indx = 0; indx < MINBUCKET + 16; indx++) {
296 if (1 << indx >= CLBYTES)
297 bucket[indx].kb_elmpercl = 1;
298 else
299 bucket[indx].kb_elmpercl = CLBYTES / (1 << indx);
300 bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
301 }
302 for (indx = 0; indx < M_LAST; indx++)
303 kmemstats[indx].ks_limit = npg * NBPG * 6 / 10;
304 #endif
305 }
306