xref: /original-bsd/sys/kern/kern_malloc.c (revision 9a35f7df)
1 /*
2  * Copyright (c) 1987, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * %sccs.include.redist.c%
6  *
7  *	@(#)kern_malloc.c	8.4 (Berkeley) 05/20/95
8  */
9 
10 #include <sys/param.h>
11 #include <sys/proc.h>
12 #include <sys/map.h>
13 #include <sys/kernel.h>
14 #include <sys/malloc.h>
15 
16 #include <vm/vm.h>
17 #include <vm/vm_kern.h>
18 
/* Power-of-2 freelist buckets; bucket[BUCKETINDX(size)] serves a request. */
19 struct kmembuckets bucket[MINBUCKET + 16];
/* Per-type allocation statistics, indexed by the M_* allocation type. */
20 struct kmemstats kmemstats[M_LAST];
/* One usage record per kmem_map page; array allocated in kmeminit(). */
21 struct kmemusage *kmemusage;
/* Virtual-address bounds of the kmem_map submap (set by kmeminit()). */
22 char *kmembase, *kmemlimit;
/* Printable names for the M_* types (used in panics and diagnostics). */
23 char *memname[] = INITKMEMNAMES;
24 
25 #ifdef DIAGNOSTIC
26 /*
27  * This structure provides a set of masks to catch unaligned frees.
28  */
/* addrmask[i] has the low i bits set: a bucket-i object's address must
 * have all of those bits clear (objects are 1 << i bytes, aligned). */
29 long addrmask[] = { 0,
30 	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
31 	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
32 	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
33 	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
34 };
35 
36 /*
37  * The WEIRD_ADDR is used as known text to copy into free objects so
38  * that modifications after frees can be detected.
39  */
40 #define WEIRD_ADDR	0xdeadbeef
/* At most this many leading bytes of each free object are poisoned. */
41 #define MAX_COPY	32
42 
43 /*
44  * Normally the first word of the structure is used to hold the list
45  * pointer for free objects. However, when running with diagnostics,
46  * we use the third and fourth fields, so as to catch modifications
47  * in the most commonly trashed first two words.
48  */
49 struct freelist {
50 	long	spare0;
	/* M_* type last freed here; lets malloc() name a likely culprit. */
51 	short	type;
52 	long	spare1;
53 	caddr_t	next;
54 };
55 #else /* !DIAGNOSTIC */
/* Without diagnostics the link pointer overlays the object's first word. */
56 struct freelist {
57 	caddr_t	next;
58 };
59 #endif /* DIAGNOSTIC */
60 
61 /*
62  * Allocate a block of memory
 *
 * size:  bytes requested; served from a power-of-2 bucket, or from
 *	  whole clusters when larger than MAXALLOCSAVE.
 * type:  M_* tag identifying the requesting subsystem (statistics and
 *	  diagnostics only).
 * flags: M_NOWAIT to fail with NULL rather than sleep when memory or
 *	  the type's quota is short.
 *
 * Returns a pointer to the block; NULL only when M_NOWAIT is set.
63  */
64 void *
65 malloc(size, type, flags)
66 	unsigned long size;
67 	int type, flags;
68 {
69 	register struct kmembuckets *kbp;
70 	register struct kmemusage *kup;
71 	register struct freelist *freep;
72 	long indx, npg, allocsize;
73 	int s;
74 	caddr_t va, cp, savedlist;
75 #ifdef DIAGNOSTIC
76 	long *end, *lp;
77 	int copysize;
78 	char *savedtype;
79 #endif
80 #ifdef DEBUG
81 	extern int simplelockrecurse;
82 #endif
83 #ifdef KMEMSTATS
84 	register struct kmemstats *ksp = &kmemstats[type];
85 
	/*
	 * Bounds-check the type.  kmemstats[] and memname[] each have
	 * exactly M_LAST entries, so type == M_LAST is already one past
	 * the end; the previous ">" test let it through (off-by-one).
	 */
86 	if (((unsigned long)type) >= M_LAST)
87 		panic("malloc - bogus type");
88 #endif
89 	indx = BUCKETINDX(size);
90 	kbp = &bucket[indx];
	/* Mask interrupt-level malloc/free while bucket lists are in flux. */
91 	s = splimp();
92 #ifdef KMEMSTATS
	/* Sleep (or fail, for M_NOWAIT) while this type is over its limit. */
93 	while (ksp->ks_memuse >= ksp->ks_limit) {
94 		if (flags & M_NOWAIT) {
95 			splx(s);
96 			return ((void *) NULL);
97 		}
98 		if (ksp->ks_limblocks < 65535)
99 			ksp->ks_limblocks++;
100 		tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
101 	}
102 	ksp->ks_size |= 1 << indx;
103 #endif
104 #ifdef DIAGNOSTIC
	/* Only the first MAX_COPY bytes of a free object hold known text. */
105 	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
106 #endif
107 #ifdef DEBUG
108 	if (flags & M_NOWAIT)
109 		simplelockrecurse++;
110 #endif
	/* Bucket empty: get fresh pages from kmem_map and carve them up. */
111 	if (kbp->kb_next == NULL) {
112 		kbp->kb_last = NULL;
113 		if (size > MAXALLOCSAVE)
114 			allocsize = roundup(size, CLBYTES);
115 		else
116 			allocsize = 1 << indx;
117 		npg = clrnd(btoc(allocsize));
118 		va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg),
119 					   !(flags & M_NOWAIT));
120 		if (va == NULL) {
121 			splx(s);
122 #ifdef DEBUG
123 			if (flags & M_NOWAIT)
124 				simplelockrecurse--;
125 #endif
126 			return ((void *) NULL);
127 		}
128 #ifdef KMEMSTATS
129 		kbp->kb_total += kbp->kb_elmpercl;
130 #endif
131 		kup = btokup(va);
132 		kup->ku_indx = indx;
		/* Oversize allocations use whole pages; no freelist carving. */
133 		if (allocsize > MAXALLOCSAVE) {
134 			if (npg > 65535)
135 				panic("malloc: allocation too large");
136 			kup->ku_pagecnt = npg;
137 #ifdef KMEMSTATS
138 			ksp->ks_memuse += allocsize;
139 #endif
140 			goto out;
141 		}
142 #ifdef KMEMSTATS
143 		kup->ku_freecnt = kbp->kb_elmpercl;
144 		kbp->kb_totalfree += kbp->kb_elmpercl;
145 #endif
146 		/*
147 		 * Just in case we blocked while allocating memory,
148 		 * and someone else also allocated memory for this
149 		 * bucket, don't assume the list is still empty.
150 		 */
151 		savedlist = kbp->kb_next;
		/* Thread the new objects onto the freelist, back to front. */
152 		kbp->kb_next = cp = va + (npg * NBPG) - allocsize;
153 		for (;;) {
154 			freep = (struct freelist *)cp;
155 #ifdef DIAGNOSTIC
156 			/*
157 			 * Copy in known text to detect modification
158 			 * after freeing.
159 			 */
160 			end = (long *)&cp[copysize];
161 			for (lp = (long *)cp; lp < end; lp++)
162 				*lp = WEIRD_ADDR;
163 			freep->type = M_FREE;
164 #endif /* DIAGNOSTIC */
165 			if (cp <= va)
166 				break;
167 			cp -= allocsize;
168 			freep->next = cp;
169 		}
		/* Splice any list built concurrently (see comment above). */
170 		freep->next = savedlist;
171 		if (kbp->kb_last == NULL)
172 			kbp->kb_last = (caddr_t)freep;
173 	}
	/* Take the first object off the bucket's freelist. */
174 	va = kbp->kb_next;
175 	kbp->kb_next = ((struct freelist *)va)->next;
176 #ifdef DIAGNOSTIC
177 	freep = (struct freelist *)va;
178 	savedtype = (unsigned)freep->type < M_LAST ?
179 		memname[freep->type] : "???";
	/* A trashed next pointer would crash us later; catch it now. */
180 	if (kbp->kb_next &&
181 	    !kernacc(kbp->kb_next, sizeof(struct freelist), 0)) {
182 		printf("%s of object 0x%x size %d %s %s (invalid addr 0x%x)\n",
183 			"Data modified on freelist: word 2.5", va, size,
184 			"previous type", savedtype, kbp->kb_next);
185 		kbp->kb_next = NULL;
186 	}
	/* Re-poison the words the freelist bookkeeping itself consumed. */
187 #if BYTE_ORDER == BIG_ENDIAN
188 	freep->type = WEIRD_ADDR >> 16;
189 #endif
190 #if BYTE_ORDER == LITTLE_ENDIAN
191 	freep->type = (short)WEIRD_ADDR;
192 #endif
193 	if (((long)(&freep->next)) & 0x2)
194 		freep->next = (caddr_t)((WEIRD_ADDR >> 16)|(WEIRD_ADDR << 16));
195 	else
196 		freep->next = (caddr_t)WEIRD_ADDR;
	/* Verify the known text survived while the object sat free. */
197 	end = (long *)&va[copysize];
198 	for (lp = (long *)va; lp < end; lp++) {
199 		if (*lp == WEIRD_ADDR)
200 			continue;
201 		printf("%s %d of object 0x%x size %d %s %s (0x%x != 0x%x)\n",
202 			"Data modified on freelist: word", lp - (long *)va,
203 			va, size, "previous type", savedtype, *lp, WEIRD_ADDR);
204 		break;
205 	}
206 	freep->spare0 = 0;
207 #endif /* DIAGNOSTIC */
208 #ifdef KMEMSTATS
209 	kup = btokup(va);
210 	if (kup->ku_indx != indx)
211 		panic("malloc: wrong bucket");
212 	if (kup->ku_freecnt == 0)
213 		panic("malloc: lost data");
214 	kup->ku_freecnt--;
215 	kbp->kb_totalfree--;
216 	ksp->ks_memuse += 1 << indx;
217 out:
218 	kbp->kb_calls++;
219 	ksp->ks_inuse++;
220 	ksp->ks_calls++;
221 	if (ksp->ks_memuse > ksp->ks_maxused)
222 		ksp->ks_maxused = ksp->ks_memuse;
223 #else
224 out:
225 #endif
226 	splx(s);
227 #ifdef DEBUG
228 	if (flags & M_NOWAIT)
229 		simplelockrecurse--;
230 #endif
231 	return ((void *) va);
232 }
233 
234 /*
235  * Free a block of memory allocated by malloc.
236  */
237 void
238 free(addr, type)
239 	void *addr;
240 	int type;
241 {
242 	register struct kmembuckets *kbp;
243 	register struct kmemusage *kup;
244 	register struct freelist *freep;
245 	long size;
246 	int s;
247 #ifdef DIAGNOSTIC
248 	caddr_t cp;
249 	long *end, *lp, alloc, copysize;
250 #endif
251 #ifdef KMEMSTATS
252 	register struct kmemstats *ksp = &kmemstats[type];
253 #endif
254 
	/* Look up the per-page record to recover the block's bucket index. */
255 	kup = btokup(addr);
256 	size = 1 << kup->ku_indx;
257 	kbp = &bucket[kup->ku_indx];
	/* Mask interrupt-level malloc/free while bucket lists are in flux. */
258 	s = splimp();
259 #ifdef DIAGNOSTIC
260 	/*
261 	 * Check for returns of data that do not point to the
262 	 * beginning of the allocation.
263 	 */
264 	if (size > NBPG * CLSIZE)
265 		alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
266 	else
267 		alloc = addrmask[kup->ku_indx];
268 	if (((u_long)addr & alloc) != 0)
269 		panic("free: unaligned addr 0x%x, size %d, type %s, mask %d\n",
270 			addr, size, memname[type], alloc);
271 #endif /* DIAGNOSTIC */
	/* Oversize blocks came straight from kmem_map; return them whole. */
272 	if (size > MAXALLOCSAVE) {
273 		kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
274 #ifdef KMEMSTATS
275 		size = kup->ku_pagecnt << PGSHIFT;
276 		ksp->ks_memuse -= size;
277 		kup->ku_indx = 0;
278 		kup->ku_pagecnt = 0;
		/* Wake malloc() sleepers only when this free crosses the limit. */
279 		if (ksp->ks_memuse + size >= ksp->ks_limit &&
280 		    ksp->ks_memuse < ksp->ks_limit)
281 			wakeup((caddr_t)ksp);
282 		ksp->ks_inuse--;
283 		kbp->kb_total -= 1;
284 #endif
285 		splx(s);
286 		return;
287 	}
288 	freep = (struct freelist *)addr;
289 #ifdef DIAGNOSTIC
290 	/*
291 	 * Check for multiple frees. Use a quick check to see if
292 	 * it looks free before laboriously searching the freelist.
293 	 */
294 	if (freep->spare0 == WEIRD_ADDR) {
295 		for (cp = kbp->kb_next; cp; cp = *(caddr_t *)cp) {
296 			if (addr != cp)
297 				continue;
298 			printf("multiply freed item 0x%x\n", addr);
299 			panic("free: duplicated free");
300 		}
301 	}
302 	/*
303 	 * Copy in known text to detect modification after freeing
304 	 * and to make it look free. Also, save the type being freed
305 	 * so we can list likely culprit if modification is detected
306 	 * when the object is reallocated.
307 	 */
308 	copysize = size < MAX_COPY ? size : MAX_COPY;
309 	end = (long *)&((caddr_t)addr)[copysize];
310 	for (lp = (long *)addr; lp < end; lp++)
311 		*lp = WEIRD_ADDR;
312 	freep->type = type;
313 #endif /* DIAGNOSTIC */
314 #ifdef KMEMSTATS
315 	kup->ku_freecnt++;
	/* NB: the "else" below binds to the inner "if", as intended. */
316 	if (kup->ku_freecnt >= kbp->kb_elmpercl)
317 		if (kup->ku_freecnt > kbp->kb_elmpercl)
318 			panic("free: multiple frees");
319 		else if (kbp->kb_totalfree > kbp->kb_highwat)
320 			kbp->kb_couldfree++;
321 	kbp->kb_totalfree++;
322 	ksp->ks_memuse -= size;
	/* Wake malloc() sleepers only when this free crosses the limit. */
323 	if (ksp->ks_memuse + size >= ksp->ks_limit &&
324 	    ksp->ks_memuse < ksp->ks_limit)
325 		wakeup((caddr_t)ksp);
326 	ksp->ks_inuse--;
327 #endif
	/* Append at the tail; malloc() takes from the head, so reuse is FIFO. */
328 	if (kbp->kb_next == NULL)
329 		kbp->kb_next = addr;
330 	else
331 		((struct freelist *)kbp->kb_last)->next = addr;
332 	freep->next = NULL;
333 	kbp->kb_last = addr;
334 	splx(s);
335 }
336 
337 /*
338  * Initialize the kernel memory allocator
339  */
340 kmeminit()
341 {
342 	register long indx;
343 	int npg;
344 
	/*
	 * Compile-time sanity checks: when an #if condition holds, the
	 * bogus identifier below is compiled in and forces a build error.
	 */
345 #if	((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
346 		ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
347 #endif
348 #if	(MAXALLOCSAVE > MINALLOCSIZE * 32768)
349 		ERROR!_kmeminit:_MAXALLOCSAVE_too_big
350 #endif
351 #if	(MAXALLOCSAVE < CLBYTES)
352 		ERROR!_kmeminit:_MAXALLOCSAVE_too_small
353 #endif
	/* Size the arena in pages and allocate one usage record per page. */
354 	npg = VM_KMEM_SIZE/ NBPG;
355 	kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
356 		(vm_size_t)(npg * sizeof(struct kmemusage)));
	/* Carve the allocator's submap (kmembase..kmemlimit) out of kernel_map. */
357 	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
358 		(vm_offset_t *)&kmemlimit, (vm_size_t)(npg * NBPG), FALSE);
359 #ifdef KMEMSTATS
360 	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		/* Buckets smaller than a cluster pack several objects per cluster. */
361 		if (1 << indx >= CLBYTES)
362 			bucket[indx].kb_elmpercl = 1;
363 		else
364 			bucket[indx].kb_elmpercl = CLBYTES / (1 << indx);
365 		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
366 	}
	/* Cap each allocation type at 60% of the whole arena. */
367 	for (indx = 0; indx < M_LAST; indx++)
368 		kmemstats[indx].ks_limit = npg * NBPG * 6 / 10;
369 #endif
370 }
371