/*
 * Copyright (c) 1987, 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)kern_malloc.c	7.35 (Berkeley) 10/11/92
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/map.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>

struct kmembuckets bucket[MINBUCKET + 16];
struct kmemstats kmemstats[M_LAST];
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;
char *memname[] = INITKMEMNAMES;

#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};
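
/*
 * Illustrative note (not part of the original source): free() below
 * selects addrmask[kup->ku_indx] and requires (addr & mask) == 0.
 * For example, objects from the 16-byte bucket (index 4) are 16-byte
 * aligned, so a pointer with (addr & 0xf) != 0 cannot be the start of
 * an allocation and triggers the "unaligned addr" panic.
 */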

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define WEIRD_ADDR	0xdeadbeef
#define MAX_COPY	32
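
/*
 * Illustrative note (not part of the original source): free() fills up
 * to MAX_COPY bytes of each released object with WEIRD_ADDR, and
 * malloc() verifies the pattern is still intact when the object is
 * handed out again.  A word that no longer reads 0xdeadbeef means
 * something wrote through a stale pointer between the free and the
 * reallocation, and the "Data modified on freelist" message is printed.
 */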

/*
 * Normally the first word of the structure is used to hold the list
 * pointer for free objects. However, when running with diagnostics,
 * we use the third and fourth fields, so as to catch modifications
 * in the most commonly trashed first two words.
 */
struct freelist {
	long	spare0;
	short	type;
	long	spare1;
	caddr_t	next;
};
#else /* !DIAGNOSTIC */
struct freelist {
	caddr_t	next;
};
#endif /* DIAGNOSTIC */
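
/*
 * Illustrative note (not part of the original source): the poison fill
 * in free() writes WEIRD_ADDR over spare0 as a side effect, so spare0
 * doubles as a cheap "probably already free" flag; free() only walks
 * the whole bucket freelist looking for a duplicate entry when spare0
 * already holds that pattern.
 */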

/*
 * Allocate a block of memory
 */
void *
malloc(size, type, flags)
	unsigned long size;
	int type, flags;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long indx, npg, alloc, allocsize;
	int s;
	caddr_t va, cp, savedlist;
#ifdef DIAGNOSTIC
	long *end, *lp;
	int copysize;
	char *savedtype;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];

	if (((unsigned long)type) > M_LAST)
		panic("malloc - bogus type");
#endif
	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
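	/*
	 * Illustrative note (not part of the original source): BUCKETINDX()
	 * maps the request to the smallest power-of-two bucket that can hold
	 * it, never smaller than MINBUCKET (machine-dependent).  With the
	 * usual MINBUCKET of 4 (16 bytes), a 100-byte request is served from
	 * the 128-byte bucket, so the caller pays 28 bytes of internal
	 * fragmentation.
	 */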
	s = splimp();
#ifdef KMEMSTATS
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			splx(s);
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
	}
#endif
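	/*
	 * Illustrative note (not part of the original source): ks_limit is a
	 * per-type ceiling on outstanding bytes (set in kmeminit() below to
	 * 60% of the kmem submap).  A type at its limit either fails
	 * immediately with M_NOWAIT or sleeps on ksp until free() drops the
	 * usage back under the limit and issues the matching wakeup().
	 */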
#ifdef DIAGNOSTIC
	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
#endif
	if (kbp->kb_next == NULL) {
		kbp->kb_last = NULL;
		if (size > MAXALLOCSAVE)
			allocsize = roundup(size, CLBYTES);
		else
			allocsize = 1 << indx;
		npg = clrnd(btoc(allocsize));
		va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg),
					   !(flags & M_NOWAIT));
		if (va == NULL) {
			splx(s);
			return ((void *) NULL);
		}
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * bucket, don't assume the list is still empty.
		 */
		savedlist = kbp->kb_next;
		kbp->kb_next = cp = va + (npg * NBPG) - allocsize;
		for (;;) {
			freep = (struct freelist *)cp;
#ifdef DIAGNOSTIC
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			end = (long *)&cp[copysize];
			for (lp = (long *)cp; lp < end; lp++)
				*lp = WEIRD_ADDR;
			freep->type = M_FREE;
#endif /* DIAGNOSTIC */
			if (cp <= va)
				break;
			cp -= allocsize;
			freep->next = cp;
		}
		freep->next = savedlist;
		if (kbp->kb_last == NULL)
			kbp->kb_last = (caddr_t)freep;
	}
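	/*
	 * Illustrative note (not part of the original source): the refill
	 * code above carves the new pages into allocsize-byte chunks,
	 * threading them from the highest address down to va, and splices
	 * any entries that appeared while we slept (savedlist) onto the
	 * tail.  kb_next therefore points at the highest-addressed new
	 * chunk, which is the one returned just below.
	 */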
	va = kbp->kb_next;
	kbp->kb_next = ((struct freelist *)va)->next;
#ifdef DIAGNOSTIC
	freep = (struct freelist *)va;
	savedtype = (unsigned)freep->type < M_LAST ?
		memname[freep->type] : "???";
#if BYTE_ORDER == BIG_ENDIAN
	freep->type = WEIRD_ADDR >> 16;
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
	freep->type = WEIRD_ADDR;
#endif
	if (((long)(&freep->next)) & 0x2)
		freep->next = (caddr_t)((WEIRD_ADDR >> 16)|(WEIRD_ADDR << 16));
	else
		freep->next = (caddr_t)WEIRD_ADDR;
	end = (long *)&va[copysize];
	for (lp = (long *)va; lp < end; lp++) {
		if (*lp == WEIRD_ADDR)
			continue;
		printf("%s %d of object 0x%x size %d %s %s (0x%x != 0x%x)\n",
			"Data modified on freelist: word", lp - (long *)va,
			va, size, "previous type", savedtype, *lp, WEIRD_ADDR);
		break;
	}
	freep->spare0 = 0;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
	splx(s);
	return ((void *) va);
}
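
/*
 * Usage sketch (not part of the original source; M_TEMP is one of the
 * type tags from <sys/malloc.h>, and struct foo stands for any caller
 * structure):
 *
 *	struct foo *fp;
 *
 *	fp = (struct foo *)malloc(sizeof(*fp), M_TEMP, M_NOWAIT);
 *	if (fp == NULL)
 *		return (ENOMEM);
 *	...
 *	free(fp, M_TEMP);
 *
 * With M_NOWAIT the allocation may fail and must be checked; callers
 * that are allowed to sleep pass M_WAITOK instead.
 */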

/*
 * Free a block of memory allocated by malloc.
 */
void
free(addr, type)
	void *addr;
	int type;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	caddr_t cp;
	long *end, *lp, alloc, copysize;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];
#endif

	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	s = splimp();
#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > NBPG * CLSIZE)
		alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr 0x%x, size %d, type %s, mask %d\n",
			addr, size, memname[type], alloc);
#endif /* DIAGNOSTIC */
	if (size > MAXALLOCSAVE) {
		kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		splx(s);
		return;
	}
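	/*
	 * Illustrative note (not part of the original source): requests
	 * larger than MAXALLOCSAVE bypass the buckets entirely; malloc()
	 * takes whole pages from kmem_map and records the page count in
	 * ku_pagecnt, and the branch above hands those pages straight back
	 * to the VM system instead of threading them onto a freelist.
	 */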
	freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees. Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (freep->spare0 == WEIRD_ADDR) {
		for (cp = kbp->kb_next; cp;
		    cp = ((struct freelist *)cp)->next) {
			if (addr != cp)
				continue;
			printf("multiply freed item 0x%x\n", addr);
			panic("free: duplicated free");
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free. Also, save the type being freed
	 * so we can list likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (long *)&((caddr_t)addr)[copysize];
	for (lp = (long *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = type;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl)
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#endif
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
	splx(s);
}
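
/*
 * Illustrative note (not part of the original source): free() appends
 * to the tail of the bucket list (kb_last) while malloc() consumes from
 * the head (kb_next), so free objects are recycled FIFO.  That keeps an
 * object on the list as long as possible, which gives the DIAGNOSTIC
 * pattern checks the best chance of catching a write through a stale
 * pointer.
 */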

/*
 * Initialize the kernel memory allocator
 */
kmeminit()
{
	register long indx;
	int npg;

#if	((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
		ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
#endif
#if	(MAXALLOCSAVE > MINALLOCSIZE * 32768)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_big
#endif
#if	(MAXALLOCSAVE < CLBYTES)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_small
#endif
	npg = VM_KMEM_SIZE / NBPG;
	kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
		(vm_size_t)(npg * sizeof(struct kmemusage)));
	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
		(vm_offset_t *)&kmemlimit, (vm_size_t)(npg * NBPG), FALSE);
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= CLBYTES)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = CLBYTES / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit = npg * NBPG * 6 / 10;
#endif
}
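
/*
 * Illustrative note (not part of the original source): kmeminit() sizes
 * the kmem submap to VM_KMEM_SIZE, allocates one struct kmemusage per
 * page so btokup() can map any address back to its usage record, and,
 * under KMEMSTATS, seeds each bucket's elements-per-cluster and
 * high-water mark and caps every type at 60% of the submap so no single
 * type can exhaust it.
 */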