/*	$OpenBSD: t8.2,v 1.1 2003/07/17 21:04:04 otto Exp $	*/
/*	$NetBSD: kern_malloc.c,v 1.15.4.2 1996/06/13 17:10:56 cgd Exp $	*/

/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <uvm/uvm_extern.h>

static struct vm_map_intrsafe kmem_map_store;
struct vm_map *kmem_map = NULL;

#ifdef NKMEMCLUSTERS
#error NKMEMCLUSTERS is obsolete; remove it from your kernel config file and use NKMEMPAGES instead or let the kernel auto-size
#endif

/*
 * Default number of pages in kmem_map.  We attempt to calculate this
 * at run-time, but allow it to be either patched or set in the kernel
 * config file.
 */
#ifndef NKMEMPAGES
#define	NKMEMPAGES	0
#endif
int	nkmempages = NKMEMPAGES;

/*
 * Default lower and upper bounds for the kmem_map page count.
 * Can be overridden by kernel config options.
 */
#ifndef	NKMEMPAGES_MIN
#define	NKMEMPAGES_MIN	NKMEMPAGES_MIN_DEFAULT
#endif

#ifndef NKMEMPAGES_MAX
#define	NKMEMPAGES_MAX	NKMEMPAGES_MAX_DEFAULT
#endif

struct kmembuckets bucket[MINBUCKET + 16];
struct kmemstats kmemstats[M_LAST];
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;
char buckstring[16 * sizeof("123456,")];
int buckstring_init = 0;
#if defined(KMEMSTATS) || defined(DIAGNOSTIC) || defined(FFS_SOFTUPDATES)
char *memname[] = INITKMEMNAMES;
char *memall = NULL;
extern struct lock sysctl_kmemlock;
#endif

#ifdef DIAGNOSTIC
/*
 * This array provides a set of masks to catch unaligned frees.
 */
const long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};
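
/*
 * For example, a 16-byte bucket uses addrmask[4] == 0xf, so free()
 * will panic on any pointer into that bucket that does not point at
 * the start of a 16-byte element.
 */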

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define WEIRD_ADDR	((unsigned) 0xdeadbeef)
#define MAX_COPY	32

/*
 * Normally the freelist structure is used only to hold the list pointer
 * for free objects.  However, when running with diagnostics, the first
 * 8 bytes of the structure are unused except for diagnostic information,
 * and the free list pointer is at offset 8 in the structure.  Since the
 * first 8 bytes are the portion of the structure most often modified, this
 * helps to detect memory reuse problems and avoid free list corruption.
 */
struct freelist {
	int32_t	spare0;
	int16_t	type;
	int16_t	spare1;
	caddr_t	next;
};
#else /* !DIAGNOSTIC */
struct freelist {
	caddr_t	next;
};
#endif /* DIAGNOSTIC */
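
/*
 * Under DIAGNOSTIC the layout matches the comment above: spare0 at
 * offset 0 is poisoned with WEIRD_ADDR while the element is free,
 * the malloc type recorded by free() sits at offset 4, and the
 * freelist link `next' is at offset 8.
 */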

/*
 * Allocate a block of memory
 */
void *
malloc(size, type, flags)
	unsigned long size;
	int type, flags;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long indx, npg, allocsize;
	int s;
	caddr_t va, cp, savedlist;
#ifdef DIAGNOSTIC
	int32_t *end, *lp;
	int copysize;
	char *savedtype;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];

	if (((unsigned long)type) >= M_LAST)
		panic("malloc - bogus type");
#endif

#ifdef MALLOC_DEBUG
	if (debug_malloc(size, type, flags, (void **)&va))
		return ((void *) va);
#endif

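	/*
	 * Map the request onto the smallest power-of-two bucket that
	 * can hold it.  Requests larger than MAXALLOCSAVE are rounded
	 * up to whole pages below and are handed back to kmem_map
	 * directly when freed, rather than being cached in a bucket.
	 */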
	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
	s = splvm();
#ifdef KMEMSTATS
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			splx(s);
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
	}
	ksp->ks_size |= 1 << indx;
#endif
#ifdef DIAGNOSTIC
	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
#endif
	if (kbp->kb_next == NULL) {
		kbp->kb_last = NULL;
		if (size > MAXALLOCSAVE)
			allocsize = round_page(size);
		else
			allocsize = 1 << indx;
		npg = btoc(allocsize);
		va = (caddr_t) uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
				(vsize_t)ctob(npg),
				(flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0);
		if (va == NULL) {
			/*
			 * uvm_km_kmemalloc() can return NULL, even if it
			 * can wait, if there is no map space available,
			 * because it can't fix that problem.  Neither can
			 * we, right now.  (We should release pages which
			 * are completely free and which are in buckets
			 * with too many free elements.)
			 */
			if ((flags & M_NOWAIT) == 0)
				panic("malloc: out of space in kmem_map");
			splx(s);
			return ((void *) NULL);
		}
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * bucket, don't assume the list is still empty.
		 */
		savedlist = kbp->kb_next;
		kbp->kb_next = cp = va + (npg * PAGE_SIZE) - allocsize;
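		/*
		 * Carve the new pages into allocsize-sized elements,
		 * starting with the element at the end of the region
		 * and linking each one to the element below it, so the
		 * bucket's freelist runs from high to low addresses.
		 */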
		for (;;) {
			freep = (struct freelist *)cp;
#ifdef DIAGNOSTIC
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			end = (int32_t *)&cp[copysize];
			for (lp = (int32_t *)cp; lp < end; lp++)
				*lp = WEIRD_ADDR;
			freep->type = M_FREE;
#endif /* DIAGNOSTIC */
			if (cp <= va)
				break;
			cp -= allocsize;
			freep->next = cp;
		}
		freep->next = savedlist;
		if (kbp->kb_last == NULL)
			kbp->kb_last = (caddr_t)freep;
	}
	va = kbp->kb_next;
	kbp->kb_next = ((struct freelist *)va)->next;
#ifdef DIAGNOSTIC
	freep = (struct freelist *)va;
	savedtype = (unsigned)freep->type < M_LAST ?
		memname[freep->type] : "???";
	if (kbp->kb_next) {
		int rv;
		vaddr_t addr = (vaddr_t)kbp->kb_next;

		vm_map_lock(kmem_map);
		rv = uvm_map_checkprot(kmem_map, addr,
		    addr + sizeof(struct freelist), VM_PROT_WRITE);
		vm_map_unlock(kmem_map);

		if (!rv) {
			printf("%s %d of object %p size 0x%lx %s %s (invalid addr %p)\n",
				"Data modified on freelist: word",
				(int32_t *)&kbp->kb_next - (int32_t *)kbp, va, size,
				"previous type", savedtype, kbp->kb_next);
			kbp->kb_next = NULL;
		}
	}

	/* Fill the fields that we've used with WEIRD_ADDR */
#if BYTE_ORDER == BIG_ENDIAN
	freep->type = WEIRD_ADDR >> 16;
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
	freep->type = (short)WEIRD_ADDR;
#endif
	end = (int32_t *)&freep->next +
	    (sizeof(freep->next) / sizeof(int32_t));
	for (lp = (int32_t *)&freep->next; lp < end; lp++)
		*lp = WEIRD_ADDR;

	/* and check that the data hasn't been modified. */
	end = (int32_t *)&va[copysize];
	for (lp = (int32_t *)va; lp < end; lp++) {
		if (*lp == WEIRD_ADDR)
			continue;
		printf("%s %d of object %p size 0x%lx %s %s (0x%x != 0x%x)\n",
			"Data modified on freelist: word", lp - (int32_t *)va,
			va, size, "previous type", savedtype, *lp, WEIRD_ADDR);
		break;
	}

	freep->spare0 = 0;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
	splx(s);
	return ((void *) va);
}
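
/*
 * A minimal usage sketch (the M_TEMP type and `len' are purely
 * illustrative): callers pass a malloc type for accounting and must
 * handle a NULL return when M_NOWAIT is given, e.g.
 *
 *	buf = malloc(len, M_TEMP, M_NOWAIT);
 *	if (buf == NULL)
 *		return (ENOMEM);
 *	...
 *	free(buf, M_TEMP);
 */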

/*
 * Free a block of memory allocated by malloc.
 */
void
free(addr, type)
	void *addr;
	int type;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	caddr_t cp;
	int32_t *end, *lp;
	long alloc, copysize;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];
#endif

#ifdef MALLOC_DEBUG
	if (debug_free(addr, type))
		return;
#endif

#ifdef DIAGNOSTIC
	if (addr < (void *)kmembase || addr >= (void *)kmemlimit)
		panic("free: non-malloced addr %p type %s", addr,
		    memname[type]);
#endif

	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	s = splvm();
#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
			addr, size, memname[type], alloc);
#endif /* DIAGNOSTIC */
	if (size > MAXALLOCSAVE) {
		uvm_km_free(kmem_map, (vaddr_t)addr, ctob(kup->ku_pagecnt));
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		splx(s);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees. Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (freep->spare0 == WEIRD_ADDR) {
		for (cp = kbp->kb_next; cp;
		    cp = ((struct freelist *)cp)->next) {
			if (addr != cp)
				continue;
			printf("multiply freed item %p\n", addr);
			panic("free: duplicated free");
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free.  Also, save the type being freed
	 * so we can list the likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (int32_t *)&((caddr_t)addr)[copysize];
	for (lp = (int32_t *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = type;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	}
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#endif
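	/*
	 * Append the element at the tail of the bucket's freelist, so
	 * recently freed memory is reused last and the poisoned
	 * contents above survive as long as possible.
	 */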
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
	splx(s);
}

/*
 * Compute the number of pages that kmem_map will map, that is,
 * the size of the kernel malloc arena.
 */
void
kmeminit_nkmempages()
{
	int npages;

	if (nkmempages != 0) {
		/*
		 * It's already been set (by us being here before, or
		 * by patching or kernel config options); bail out now.
		 */
		return;
	}

	/*
	 * We use the following (simple) formula:
	 *
	 *	- Starting point is physical memory / 4.
	 *
	 *	- Clamp it down to NKMEMPAGES_MAX.
	 *
	 *	- Round it up to NKMEMPAGES_MIN.
	 */
	npages = physmem / 4;

	if (npages > NKMEMPAGES_MAX)
		npages = NKMEMPAGES_MAX;

	if (npages < NKMEMPAGES_MIN)
		npages = NKMEMPAGES_MIN;

	nkmempages = npages;
}
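
/*
 * A worked example of the formula above, assuming 4 KB pages and
 * 128 MB of RAM: physmem is 32768 pages, so the starting point is
 * 8192 pages (a 32 MB kmem_map), which is then clamped between
 * NKMEMPAGES_MIN and NKMEMPAGES_MAX for the platform.
 */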

/*
 * Initialize the kernel memory allocator
 */
void
kmeminit()
{
	vaddr_t base, limit;
#ifdef KMEMSTATS
	long indx;
#endif

#ifdef DIAGNOSTIC
	if (sizeof(struct freelist) > (1 << MINBUCKET))
		panic("kmeminit: minbucket too small/struct freelist too big");
#endif

	/*
	 * Compute the number of kmem_map pages, if we have not
	 * done so already.
	 */
	kmeminit_nkmempages();
	base = vm_map_min(kernel_map);
	kmem_map = uvm_km_suballoc(kernel_map, &base, &limit,
	    (vsize_t)(nkmempages * PAGE_SIZE), VM_MAP_INTRSAFE, FALSE,
	    &kmem_map_store.vmi_map);
	kmembase = (char *)base;
	kmemlimit = (char *)limit;
	kmemusage = (struct kmemusage *) uvm_km_zalloc(kernel_map,
		(vsize_t)(nkmempages * sizeof(struct kmemusage)));
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= PAGE_SIZE)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit = nkmempages * PAGE_SIZE * 6 / 10;
#endif
#ifdef MALLOC_DEBUG
	debug_malloc_init();
#endif
}

/*
 * Return kernel malloc statistics information.
 */
int
sysctl_malloc(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	struct kmembuckets kb;
	int i, siz;

	if (namelen != 2 && name[0] != KERN_MALLOC_BUCKETS &&
	    name[0] != KERN_MALLOC_KMEMNAMES)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case KERN_MALLOC_BUCKETS:
		/* Initialize the first time */
		if (buckstring_init == 0) {
			buckstring_init = 1;
			bzero(buckstring, sizeof(buckstring));
			for (siz = 0, i = MINBUCKET; i < MINBUCKET + 16; i++) {
				snprintf(buckstring + siz,
				    sizeof buckstring - siz,
				    "%d,", (u_int)(1<<i));
				siz += strlen(buckstring + siz);
			}
			/* Remove trailing comma */
			if (siz)
				buckstring[siz - 1] = '\0';
		}
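		/*
		 * The result is a comma-separated list of the bucket
		 * sizes, e.g. "16,32,64,...,524288" when MINBUCKET is 4;
		 * the exact contents depend on MINBUCKET.
		 */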
		return (sysctl_rdstring(oldp, oldlenp, newp, buckstring));

	case KERN_MALLOC_BUCKET:
		bcopy(&bucket[BUCKETINDX(name[1])], &kb, sizeof(kb));
		kb.kb_next = kb.kb_last = 0;
		return (sysctl_rdstruct(oldp, oldlenp, newp, &kb, sizeof(kb)));
	case KERN_MALLOC_KMEMSTATS:
#ifdef KMEMSTATS
		if ((name[1] < 0) || (name[1] >= M_LAST))
			return (EINVAL);
		return (sysctl_rdstruct(oldp, oldlenp, newp,
		    &kmemstats[name[1]], sizeof(struct kmemstats)));
#else
		return (EOPNOTSUPP);
#endif
	case KERN_MALLOC_KMEMNAMES:
#if defined(KMEMSTATS) || defined(DIAGNOSTIC) || defined(FFS_SOFTUPDATES)
		if (memall == NULL) {
			int totlen;

			i = lockmgr(&sysctl_kmemlock, LK_EXCLUSIVE, NULL, p);
			if (i)
				return (i);

			/* Figure out how large a buffer we need */
			for (totlen = 0, i = 0; i < M_LAST; i++) {
				if (memname[i])
					totlen += strlen(memname[i]);
				totlen++;
			}
			memall = malloc(totlen + M_LAST, M_SYSCTL, M_WAITOK);
			bzero(memall, totlen + M_LAST);
			for (siz = 0, i = 0; i < M_LAST; i++) {
				snprintf(memall + siz,
				    totlen + M_LAST - siz,
				    "%s,", memname[i] ? memname[i] : "");
				siz += strlen(memall + siz);
			}
			/* Remove trailing comma */
			if (siz)
				memall[siz - 1] = '\0';

			/* Now, convert all spaces to underscores */
			for (i = 0; i < totlen; i++)
				if (memall[i] == ' ')
					memall[i] = '_';
			lockmgr(&sysctl_kmemlock, LK_RELEASE, NULL, p);
		}
		return (sysctl_rdstring(oldp, oldlenp, newp, memall));
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * Round up a size to how much malloc would actually allocate.
 */
size_t
malloc_roundup(size_t sz)
{
	if (sz > MAXALLOCSAVE)
		return round_page(sz);

	return (1 << BUCKETINDX(sz));
}
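
/*
 * For example, malloc_roundup(100) returns 128, the smallest
 * power-of-two bucket that can hold 100 bytes, while a request
 * larger than MAXALLOCSAVE is simply rounded up to a whole number
 * of pages.
 */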
617