xref: /original-bsd/sys/kern/kern_malloc.c (revision dd262573)
1 /*
2  * Copyright (c) 1987 Regents of the University of California.
3  * All rights reserved.
4  *
5  * %sccs.include.redist.c%
6  *
7  *	@(#)kern_malloc.c	7.12.1.2 (Berkeley) 12/12/90
8  */
9 
10 #include "param.h"
11 #include "vm.h"
12 #include "cmap.h"
13 #include "time.h"
14 #include "proc.h"
15 #include "map.h"
16 #include "kernel.h"
17 #include "malloc.h"
18 
19 #include "machine/pte.h"
20 
/* Power-of-two free-list buckets; bucket[i] holds free blocks of size 1 << i. */
struct kmembuckets bucket[MINBUCKET + 16];
/* Per-type (M_*) usage statistics, indexed by allocation type. */
struct kmemstats kmemstats[M_LAST];
/* Per-page usage records for the kernel malloc arena (looked up via btokup). */
struct kmemusage *kmemusage;
/* Printable name for each allocation type; used as the tsleep wait message. */
char *memname[] = INITKMEMNAMES;
/* Nonzero while someone sleeps waiting for kmemmap space; cleared by free(). */
long wantkmemmap;
/*
 * Reentrancy guard: malloc/free are not reentrant, so IN panics if we are
 * already inside the allocator and OUT clears the flag.  The flag is
 * deliberately dropped (OUT) around operations that may block, e.g.
 * tsleep() and vmemall().
 */
long malloc_reentered;
#define IN { if (malloc_reentered) panic("malloc reentered");\
			else malloc_reentered = 1;}
#define OUT (malloc_reentered = 0)
30 
31 /*
32  * Allocate a block of memory
33  */
34 qaddr_t
35 malloc(size, type, flags)
36 	unsigned long size;
37 	int type, flags;
38 {
39 	register struct kmembuckets *kbp;
40 	register struct kmemusage *kup;
41 	long indx, npg, alloc, allocsize;
42 	int s;
43 	caddr_t va, cp, rp;
44 #ifdef KMEMSTATS
45 	register struct kmemstats *ksp = &kmemstats[type];
46 
47 	if (((unsigned long)type) > M_LAST)
48 		panic("malloc - bogus type");
49 #endif
50 
51 	indx = BUCKETINDX(size);
52 	kbp = &bucket[indx];
53 	s = splimp();
54 	IN;
55 again:
56 #ifdef KMEMSTATS
57 	while (ksp->ks_memuse >= ksp->ks_limit) {
58 		if (flags & M_NOWAIT) {
59 			OUT;
60 			splx(s);
61 			return (0);
62 		}
63 		if (ksp->ks_limblocks < 65535)
64 			ksp->ks_limblocks++;
65 		OUT;
66 		tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
67 		IN;
68 	}
69 #endif
70 	if (kbp->kb_next == NULL) {
71 		if (size > MAXALLOCSAVE)
72 			allocsize = roundup(size, CLBYTES);
73 		else
74 			allocsize = 1 << indx;
75 		npg = clrnd(btoc(allocsize));
76 		if ((flags & M_NOWAIT) && freemem < npg) {
77 			OUT;
78 			splx(s);
79 			return (0);
80 		}
81 		alloc = rmalloc(kmemmap, npg);
82 		if (alloc == 0) {
83 			if (flags & M_NOWAIT) {
84 				OUT;
85 				splx(s);
86 				return (0);
87 			}
88 #ifdef KMEMSTATS
89 			if (ksp->ks_mapblocks < 65535)
90 				ksp->ks_mapblocks++;
91 #endif
92 			wantkmemmap++;
93 			OUT;
94 			tsleep((caddr_t)&wantkmemmap, PSWP+2, memname[type], 0);
95 			IN;
96 			goto again;
97 		}
98 		alloc -= CLSIZE;		/* convert to base 0 */
99 		OUT;
100 		(void) vmemall(&kmempt[alloc], (int)npg, &proc[0], CSYS);
101 		IN;
102 		va = (caddr_t) kmemxtob(alloc);
103 		vmaccess(&kmempt[alloc], va, (int)npg);
104 #ifdef KMEMSTATS
105 		kbp->kb_total += kbp->kb_elmpercl;
106 #endif
107 		kup = btokup(va);
108 		kup->ku_indx = indx;
109 		if (allocsize > MAXALLOCSAVE) {
110 			if (npg > 65535)
111 				panic("malloc: allocation too large");
112 			kup->ku_pagecnt = npg;
113 #ifdef KMEMSTATS
114 			ksp->ks_memuse += allocsize;
115 #endif
116 			goto out;
117 		}
118 #ifdef KMEMSTATS
119 		kup->ku_freecnt = kbp->kb_elmpercl;
120 		kbp->kb_totalfree += kbp->kb_elmpercl;
121 #endif
122 		rp = kbp->kb_next; /* returned while blocked in vmemall */
123 		kbp->kb_next = va + (npg * NBPG) - allocsize;
124 		for (cp = kbp->kb_next; cp >= va; cp -= allocsize) {
125 			((caddr_t *)cp)[2] = (cp > va ? cp - allocsize : rp);
126 			if (indx == 7) {
127 				long *lp = (long *)cp;
128 				lp[0] = lp[1] = lp[3] = lp[4] = -1;
129 			}
130 		}
131 	}
132 	va = kbp->kb_next;
133 	kbp->kb_next = ((caddr_t *)va)[2];
134 	if (indx == 7) {
135 		long *lp = (long *)va;
136 		if (lp[0] != -1 || lp[1] != -1 || lp[3] != -1 || lp[4] != -1)
137 			panic("malloc meddled");
138 	}
139 #ifdef KMEMSTATS
140 	kup = btokup(va);
141 	if (kup->ku_indx != indx)
142 		panic("malloc: wrong bucket");
143 	if (kup->ku_freecnt == 0)
144 		panic("malloc: lost data");
145 	kup->ku_freecnt--;
146 	kbp->kb_totalfree--;
147 	ksp->ks_memuse += 1 << indx;
148 out:
149 	kbp->kb_calls++;
150 	ksp->ks_inuse++;
151 	ksp->ks_calls++;
152 	if (ksp->ks_memuse > ksp->ks_maxused)
153 		ksp->ks_maxused = ksp->ks_memuse;
154 #else
155 out:
156 #endif
157 	OUT;
158 	splx(s);
159 	return ((qaddr_t)va);
160 }
161 
#ifdef DIAGNOSTIC
/*
 * addrmask[i] == (1 << i) - 1: the low-order address bits that must be
 * zero for a block allocated from bucket i.  Used by free() to verify
 * that a freed address is aligned to its bucket size.
 */
long addrmask[] = { 0x00000000,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};
#endif /* DIAGNOSTIC */
170 
171 /*
172  * Free a block of memory allocated by malloc.
173  */
174 void
175 free(addr, type)
176 	caddr_t addr;
177 	int type;
178 {
179 	register struct kmembuckets *kbp;
180 	register struct kmemusage *kup;
181 	long alloc, size;
182 	int s;
183 #ifdef KMEMSTATS
184 	register struct kmemstats *ksp = &kmemstats[type];
185 #endif
186 
187 	kup = btokup(addr);
188 	size = 1 << kup->ku_indx;
189 #ifdef DIAGNOSTIC
190 	if (size > NBPG * CLSIZE)
191 		alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
192 	else
193 		alloc = addrmask[kup->ku_indx];
194 	if (((u_long)addr & alloc) != 0) {
195 		printf("free: unaligned addr 0x%x, size %d, type %d, mask %d\n",
196 			addr, size, type, alloc);
197 		panic("free: unaligned addr");
198 	}
199 #endif /* DIAGNOSTIC */
200 	kbp = &bucket[kup->ku_indx];
201 	s = splimp();
202 	IN;
203 	size = 1 << kup->ku_indx;
204 	if (size > MAXALLOCSAVE) {
205 		alloc = btokmemx(addr);
206 		(void) memfree(&kmempt[alloc], (int)kup->ku_pagecnt, 1);
207 		rmfree(kmemmap, (long)kup->ku_pagecnt, alloc + CLSIZE);
208 		OUT;
209 		if (wantkmemmap) {
210 			wakeup((caddr_t)&wantkmemmap);
211 			wantkmemmap = 0;
212 		}
213 #ifdef KMEMSTATS
214 		size = kup->ku_pagecnt << PGSHIFT;
215 		ksp->ks_memuse -= size;
216 		kup->ku_indx = 0;
217 		kup->ku_pagecnt = 0;
218 		if (ksp->ks_memuse + size >= ksp->ks_limit &&
219 		    ksp->ks_memuse < ksp->ks_limit)
220 			wakeup((caddr_t)ksp);
221 		ksp->ks_inuse--;
222 		kbp->kb_total -= 1;
223 #endif
224 		splx(s);
225 		return;
226 	}
227 	if (size == 128) {
228 		long *lp = (long *)addr;
229 		lp[0] = lp[1] = lp[3] = lp[4] = -1;
230 	}
231 #ifdef KMEMSTATS
232 	kup->ku_freecnt++;
233 	if (kup->ku_freecnt >= kbp->kb_elmpercl)
234 		if (kup->ku_freecnt > kbp->kb_elmpercl)
235 			panic("free: multiple frees");
236 		else if (kbp->kb_totalfree > kbp->kb_highwat)
237 			kbp->kb_couldfree++;
238 	kbp->kb_totalfree++;
239 	ksp->ks_memuse -= size;
240 	if (ksp->ks_memuse + size >= ksp->ks_limit &&
241 	    ksp->ks_memuse < ksp->ks_limit)
242 		wakeup((caddr_t)ksp);
243 	ksp->ks_inuse--;
244 #endif
245 	((caddr_t *)addr)[2] = kbp->kb_next;
246 	kbp->kb_next = addr;
247 	OUT;
248 	splx(s);
249 }
250 
251 /*
252  * Initialize the kernel memory allocator
253  */
254 kmeminit()
255 {
256 	register long indx;
257 	int npg;
258 
259 #if	((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
260 		ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
261 #endif
262 #if	(MAXALLOCSAVE > MINALLOCSIZE * 32768)
263 		ERROR!_kmeminit:_MAXALLOCSAVE_too_big
264 #endif
265 #if	(MAXALLOCSAVE < CLBYTES)
266 		ERROR!_kmeminit:_MAXALLOCSAVE_too_small
267 #endif
268 	npg = ekmempt - kmempt;
269 	rminit(kmemmap, (long)npg, (long)CLSIZE, "malloc map", npg);
270 #ifdef KMEMSTATS
271 	for (indx = 0; indx < MINBUCKET + 16; indx++) {
272 		if (1 << indx >= CLBYTES)
273 			bucket[indx].kb_elmpercl = 1;
274 		else
275 			bucket[indx].kb_elmpercl = CLBYTES / (1 << indx);
276 		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
277 	}
278 	for (indx = 0; indx < M_LAST; indx++)
279 		kmemstats[indx].ks_limit = npg * NBPG * 6 / 10;
280 #endif
281 }
282