/*
 * Copyright (c) 1997, 1998 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *	notice immediately at the beginning of the file, without modification,
 *	this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *	John S. Dyson.
 *
 * $FreeBSD: src/sys/vm/vm_zone.c,v 1.30.2.6 2002/10/10 19:50:16 dillon Exp $
 * $DragonFly: src/sys/vm/vm_zone.c,v 1.17 2004/10/26 04:33:11 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");

#define	ZONE_ERROR_INVALID 0
#define	ZONE_ERROR_NOTFREE 1
#define	ZONE_ERROR_ALREADYFREE 2

#define	ZONE_ROUNDING	32

#define	ZENTRY_FREE	0x12342378

static void *zget(vm_zone_t z);

/*
 * Return an item from the specified zone.  This function is interrupt/MP
 * thread safe, but might block.
 */
void *
zalloc(vm_zone_t z)
{
	void *item;
	lwkt_tokref ilock;

#ifdef INVARIANTS
	if (z == NULL)
		zerror(ZONE_ERROR_INVALID);
#endif
	lwkt_gettoken(&ilock, &z->zlock);
	if (z->zfreecnt <= z->zfreemin) {
		item = zget(z);
		/*
		 * PANICFAIL allows the caller to assume that the zalloc()
		 * will always succeed.  If it doesn't, we panic here.
		 */
		if (item == NULL && (z->zflags & ZONE_PANICFAIL))
			panic("zalloc(%s) failed", z->zname);
	} else {
		item = z->zitems;
#ifdef INVARIANTS
		KASSERT(item != NULL, ("zitems unexpectedly NULL"));
		if (((void **) item)[1] != (void *) ZENTRY_FREE)
			zerror(ZONE_ERROR_NOTFREE);
		((void **) item)[1] = 0;
#endif
		z->zitems = ((void **) item)[0];
		z->zfreecnt--;
		z->znalloc++;
	}
	lwkt_reltoken(&ilock);
	return item;
}

/*
 * Free an item to the specified zone.  This function is interrupt/MP
 * thread safe, but might block.
 */
void
zfree(vm_zone_t z, void *item)
{
	lwkt_tokref ilock;

	lwkt_gettoken(&ilock, &z->zlock);
	((void **) item)[0] = z->zitems;
#ifdef INVARIANTS
	if (((void **) item)[1] == (void *) ZENTRY_FREE)
		zerror(ZONE_ERROR_ALREADYFREE);
	((void **) item)[1] = (void *) ZENTRY_FREE;
#endif
	z->zitems = item;
	z->zfreecnt++;
	lwkt_reltoken(&ilock);
}
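
/*
 * Free items are threaded onto the zone's singly-linked free list using
 * the first two longwords of the item itself, which is why those words
 * are not type stable.  A minimal sketch of the overlay that zalloc()
 * and zfree() impose on a free item (illustrative only -- the code above
 * simply uses a void ** cast rather than a named structure):
 *
 *	struct zfree_item {
 *		void	*zf_next;	-- ((void **)item)[0]: next free item
 *		void	*zf_magic;	-- ((void **)item)[1]: ZENTRY_FREE,
 *					--   only when INVARIANTS is enabled
 *	};
 */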

/*
 * This file implements a very simple zone allocator.  It is used in
 * lieu of the malloc allocator where it is needed or more optimal.
 *
 * Note that the initial implementation of this code had cache coloring,
 * and it produced no improvement at all (actually a performance
 * degradation).
 *
 * Note also that the zones are type stable.  The only restriction is
 * that the first two longwords of a data structure may be modified
 * between allocations (they hold the free list linkage).  Any data that
 * must remain stable between allocations must reside after the first
 * two longwords.
 *
 * zinitna, zinit, and zbootinit are the initialization routines.
 * zalloc and zfree are the allocation/free routines.
 */
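
/*
 * A minimal sketch of a structure that respects the type-stability rule
 * above (the struct and field names here are hypothetical, not part of
 * this file):
 *
 *	struct myobj {
 *		void	*mo_reserved[2];  -- clobbered while on free list
 *		int	mo_state;	  -- survives zfree()/zalloc()
 *		int	mo_refs;	  -- survives zfree()/zalloc()
 *	};
 *
 * Fields after the first two longwords keep their contents from the
 * time an item is zfree()'d until it is zalloc()'d again.
 */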

static struct vm_zone *zlist;
static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
static int zone_kmem_pages, zone_kern_pages, zone_kmem_kvaspace;

/*
 * Create a zone, but don't allocate the zone structure.  If the
 * zone was previously created by the zone boot code, initialize
 * various parts of the zone code.
 *
 * If waits are not allowed during allocation (e.g. during interrupt
 * code), allocate the kernel virtual space up front and allocate
 * pages only as they are needed.
 *
 * Arguments:
 * z		pointer to zone structure.
 * obj		pointer to VM object (opt).
 * name		name of zone.
 * size		size of zone entries.
 * nentries	number of zone entries allocated (ZONE_INTERRUPT only).
 * flags	ZONE_INTERRUPT -- items can be allocated at interrupt time.
 * zalloc	number of pages allocated when memory is needed.
 *
 * Note that when using ZONE_INTERRUPT, the size of the zone is limited
 * by the nentries argument.  If ZONE_INTERRUPT is not set, the amount
 * of allocatable memory is unlimited.
 */
int
zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
	int nentries, int flags, int zalloc)
{
	int totsize;

	if ((z->zflags & ZONE_BOOT) == 0) {
		z->zsize = (size + ZONE_ROUNDING - 1) & ~(ZONE_ROUNDING - 1);
		lwkt_token_init(&z->zlock);
		z->zfreecnt = 0;
		z->ztotal = 0;
		z->zmax = 0;
		z->zname = name;
		z->znalloc = 0;
		z->zitems = NULL;

		z->znext = zlist;
		zlist = z;
	}

	z->zflags |= flags;

	/*
	 * If we cannot wait, allocate KVA space up front, and we will fill
	 * in pages as needed.  This is particularly required when creating
	 * an allocation space for map entries in kernel_map, because we
	 * do not want to go into a recursion deadlock with
	 * vm_map_entry_reserve().
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		totsize = round_page(z->zsize * nentries);
		zone_kmem_kvaspace += totsize;

		z->zkva = kmem_alloc_pageable(kernel_map, totsize);
		if (z->zkva == 0) {
			/*
			 * Unlink the zone, but only if we linked it onto
			 * the head of zlist above.  A boot zone may sit
			 * deeper in the list and must stay there.
			 */
			if (zlist == z)
				zlist = z->znext;
			return 0;
		}

		z->zpagemax = totsize / PAGE_SIZE;
		if (obj == NULL) {
			z->zobj = vm_object_allocate(OBJT_DEFAULT, z->zpagemax);
		} else {
			z->zobj = obj;
			_vm_object_allocate(OBJT_DEFAULT, z->zpagemax, obj);
		}
		z->zallocflag = VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT;
		z->zmax += nentries;
	} else {
		z->zallocflag = VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM;
		z->zmax = 0;
	}

	if (z->zsize > PAGE_SIZE)
		z->zfreemin = 1;
	else
		z->zfreemin = PAGE_SIZE / z->zsize;

	z->zpagecount = 0;
	if (zalloc)
		z->zalloc = zalloc;
	else
		z->zalloc = 1;

	return 1;
}
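
/*
 * Illustrative use of zinitna() to create an interrupt-safe zone over a
 * caller-supplied zone structure (the names here are hypothetical, not
 * taken from the kernel):
 *
 *	static struct vm_zone myzone;
 *	static struct vm_object myzone_obj;
 *
 *	myzone.zflags = 0;
 *	if (zinitna(&myzone, &myzone_obj, "mydata", sizeof(struct mydata),
 *		    1024, ZONE_INTERRUPT, 1) == 0)
 *		panic("could not create mydata zone");
 *
 * Because ZONE_INTERRUPT is set, KVA for all 1024 entries is reserved
 * up front and the zone is hard-limited to those entries; pages are
 * wired in only as zget() needs them.
 */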

/*
 * Same as zinitna(), except the zone data structure is allocated
 * automatically by malloc.  This routine should normally be used,
 * except in certain tricky startup conditions in the VM system, where
 * zbootinit and zinitna can be used instead.  zinit is the standard
 * zone initialization call.
 */
vm_zone_t
zinit(char *name, int size, int nentries, int flags, int zalloc)
{
	vm_zone_t z;

	z = (vm_zone_t) malloc(sizeof (struct vm_zone), M_ZONE, M_NOWAIT);
	if (z == NULL)
		return NULL;

	z->zflags = 0;
	if (zinitna(z, NULL, name, size, nentries, flags, zalloc) == 0) {
		free(z, M_ZONE);
		return NULL;
	}

	return z;
}
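
/*
 * Typical use of zinit() (a sketch; "mydata" and the sizing are
 * hypothetical):
 *
 *	vm_zone_t mdzone;
 *	struct mydata *md;
 *
 *	mdzone = zinit("mydata", sizeof(struct mydata), 0, 0, 4);
 *	if (mdzone == NULL)
 *		return (ENOMEM);
 *	md = zalloc(mdzone);	-- may block, must not be at interrupt time
 *	...
 *	zfree(mdzone, md);
 *
 * Without ZONE_INTERRUPT the zone grows on demand, four pages (the
 * zalloc argument) at a time.
 */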

/*
 * Initialize a zone before the system is fully up.  This routine should
 * only be called before full VM startup.
 */
void
zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems)
{
	int i;

	z->zname = name;
	z->zsize = size;
	z->zpagemax = 0;
	z->zobj = NULL;
	z->zflags = ZONE_BOOT;
	z->zfreemin = 0;
	z->zallocflag = 0;
	z->zpagecount = 0;
	z->zalloc = 0;
	z->znalloc = 0;
	lwkt_token_init(&z->zlock);

	bzero(item, nitems * z->zsize);
	z->zitems = NULL;
	for (i = 0; i < nitems; i++) {
		((void **) item)[0] = z->zitems;
#ifdef INVARIANTS
		((void **) item)[1] = (void *) ZENTRY_FREE;
#endif
		z->zitems = item;
		item = (uint8_t *)item + z->zsize;
	}
	z->zfreecnt = nitems;
	z->zmax = nitems;
	z->ztotal = nitems;

	z->znext = zlist;
	zlist = z;
}
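
/*
 * Illustrative early-boot use of zbootinit() (hypothetical names; the
 * real callers live in the VM startup code):
 *
 *	static struct vm_zone mapzone;
 *	static struct mydata bootitems[64];
 *
 *	zbootinit(&mapzone, "mydata", sizeof(struct mydata),
 *		  bootitems, 64);
 *
 * The zone hands out items from the static bootitems[] array until full
 * VM startup, after which zinitna() can be called on the same zone
 * (which zinitna() detects via ZONE_BOOT) to let it grow dynamically.
 */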

/*
 * void *zalloc(vm_zone_t zone) --
 *	Returns an item from a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 *
 * void zfree(vm_zone_t zone, void *item) --
 *	Frees an item back to a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 */

/*
 * Internal zone routine.  Not to be called from external (non vm_zone) code.
 */
static void *
zget(vm_zone_t z)
{
	int i;
	vm_page_t m;
	int nitems, nbytes;
	void *item;

	if (z == NULL)
		panic("zget: null zone");

	if (z->zflags & ZONE_INTERRUPT) {
		/*
		 * Interrupt zones do not mess with the kernel_map, they
		 * simply populate an existing mapping.
		 */
		nbytes = z->zpagecount * PAGE_SIZE;
		nbytes -= nbytes % z->zsize;
		item = (char *) z->zkva + nbytes;
		for (i = 0; ((i < z->zalloc) && (z->zpagecount < z->zpagemax));
		     i++) {
			vm_offset_t zkva;

			m = vm_page_alloc(z->zobj, z->zpagecount,
					  z->zallocflag);
			/* note: z might be modified due to blocking */
			if (m == NULL)
				break;

			zkva = z->zkva + z->zpagecount * PAGE_SIZE;
			pmap_kenter(zkva, VM_PAGE_TO_PHYS(m)); /* YYY */
			bzero((caddr_t) zkva, PAGE_SIZE);
			z->zpagecount++;
			zone_kmem_pages++;
			vmstats.v_wire_count++;
		}
		nitems = ((z->zpagecount * PAGE_SIZE) - nbytes) / z->zsize;
	} else if (z->zflags & ZONE_SPECIAL) {
		/*
		 * The special zone is the one used for vm_map_entry_t's.
		 * We have to avoid an infinite recursion in
		 * vm_map_entry_reserve() by using vm_map_entry_kreserve()
		 * instead.  The map entries are pre-reserved by the kernel
		 * by vm_map_entry_reserve_cpu_init().
		 */
		nbytes = z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(kernel_map, nbytes, KM_KRESERVE);

		/* note: z might be modified due to blocking */
		if (item != NULL) {
			zone_kern_pages += z->zalloc;
			bzero(item, nbytes);
		} else {
			nbytes = 0;
		}
		nitems = nbytes / z->zsize;
	} else {
		/*
		 * Otherwise allocate KVA from the kernel_map.
		 */
		nbytes = z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(kernel_map, nbytes, 0);

		/* note: z might be modified due to blocking */
		if (item != NULL) {
			zone_kern_pages += z->zalloc;
			bzero(item, nbytes);
		} else {
			nbytes = 0;
		}
		nitems = nbytes / z->zsize;
	}
	z->ztotal += nitems;

	/*
	 * Save one for immediate allocation
	 */
	if (nitems != 0) {
		nitems -= 1;
		for (i = 0; i < nitems; i++) {
			((void **) item)[0] = z->zitems;
#ifdef INVARIANTS
			((void **) item)[1] = (void *) ZENTRY_FREE;
#endif
			z->zitems = item;
			item = (uint8_t *)item + z->zsize;
		}
		z->zfreecnt += nitems;
		z->znalloc++;
	} else if (z->zfreecnt > 0) {
		item = z->zitems;
		z->zitems = ((void **) item)[0];
#ifdef INVARIANTS
		if (((void **) item)[1] != (void *) ZENTRY_FREE)
			zerror(ZONE_ERROR_NOTFREE);
		((void **) item)[1] = 0;
#endif
		z->zfreecnt--;
		z->znalloc++;
	} else {
		item = NULL;
	}

	/*
	 * A special zone may have used a kernel-reserved vm_map_entry.  If
	 * so we have to be sure to recover our reserve so we don't run out.
	 * We will panic if we run out.
	 */
	if (z->zflags & ZONE_SPECIAL)
		vm_map_entry_reserve(0);

	return item;
}

static int
sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	vm_zone_t curzone, nextzone;
	char tmpbuf[128];
	char tmpname[14];

	snprintf(tmpbuf, sizeof(tmpbuf),
	    "\nITEM            SIZE     LIMIT    USED    FREE  REQUESTS\n");
	error = SYSCTL_OUT(req, tmpbuf, strlen(tmpbuf));
	if (error)
		return (error);

	for (curzone = zlist; curzone; curzone = nextzone) {
		int i;
		int len;
		int offset;

		nextzone = curzone->znext;
		len = strlen(curzone->zname);
		if (len >= (sizeof(tmpname) - 1))
			len = (sizeof(tmpname) - 1);
		for (i = 0; i < sizeof(tmpname) - 1; i++)
			tmpname[i] = ' ';
		tmpname[i] = 0;
		memcpy(tmpname, curzone->zname, len);
		tmpname[len] = ':';
		offset = 0;
		if (curzone == zlist) {
			offset = 1;
			tmpbuf[0] = '\n';
		}

		snprintf(tmpbuf + offset, sizeof(tmpbuf) - offset,
			"%s %6.6u, %8.8u, %6.6u, %6.6u, %8.8u\n",
			tmpname, curzone->zsize, curzone->zmax,
			(curzone->ztotal - curzone->zfreecnt),
			curzone->zfreecnt, curzone->znalloc);

		len = strlen(tmpbuf);
		if (nextzone == NULL)
			tmpbuf[len - 1] = 0;

		error = SYSCTL_OUT(req, tmpbuf, len);

		if (error)
			return (error);
	}
	return (0);
}
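
/*
 * The handler above produces one row per zone via the vm.zone sysctl,
 * following the header and format strings used in the code.  A sketch
 * of the output shape (the zone name and numbers here are made up for
 * illustration):
 *
 *	ITEM            SIZE     LIMIT    USED    FREE  REQUESTS
 *	MAP ENTRY:    000064, 00000000, 000512, 000128, 00004096
 */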

#if defined(INVARIANTS)
void
zerror(int error)
{
	const char *msg;

	switch (error) {
	case ZONE_ERROR_INVALID:
		msg = "zone: invalid zone";
		break;
	case ZONE_ERROR_NOTFREE:
		msg = "zone: entry not free";
		break;
	case ZONE_ERROR_ALREADYFREE:
		msg = "zone: freeing free entry";
		break;
	default:
		msg = "zone: invalid error";
		break;
	}
	panic("%s", msg);
}
#endif

SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD,
	NULL, 0, sysctl_vm_zone, "A", "Zone Info");

SYSCTL_INT(_vm, OID_AUTO, zone_kmem_pages,
	CTLFLAG_RD, &zone_kmem_pages, 0,
	"Number of interrupt safe pages allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kmem_kvaspace,
	CTLFLAG_RD, &zone_kmem_kvaspace, 0,
	"KVA space allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kern_pages,
	CTLFLAG_RD, &zone_kern_pages, 0,
	"Number of non-interrupt safe pages allocated by zone");