/*
 * Copyright (c) 1997, 1998 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *	notice immediately at the beginning of the file, without modification,
 *	this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *	John S. Dyson.
 *
 * $FreeBSD: src/sys/vm/vm_zone.c,v 1.30.2.6 2002/10/10 19:50:16 dillon Exp $
 * $DragonFly: src/sys/vm/vm_zone.c,v 1.10 2003/09/26 19:23:34 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");

#define	ZONE_ERROR_INVALID 0
#define	ZONE_ERROR_NOTFREE 1
#define	ZONE_ERROR_ALREADYFREE 2

#define	ZONE_ROUNDING	32

#define	ZENTRY_FREE	0x12342378

static void *zget(vm_zone_t z);

/*
 * Return an item from the specified zone.  This function is interrupt/MP
 * thread safe, but might block.
 */
void *
zalloc(vm_zone_t z)
{
	void *item;

#ifdef INVARIANTS
	if (z == NULL)
		zerror(ZONE_ERROR_INVALID);
#endif
	lwkt_gettoken(&z->zlock);
	if (z->zfreecnt <= z->zfreemin) {
		item = zget(z);
		/*
		 * PANICFAIL allows the caller to assume that the zalloc()
		 * will always succeed.  If it doesn't, we panic here.
		 */
		if (item == NULL && (z->zflags & ZONE_PANICFAIL))
			panic("zalloc(%s) failed", z->zname);
	} else {
		item = z->zitems;
		z->zitems = ((void **) item)[0];
#ifdef INVARIANTS
		KASSERT(item != NULL, ("zitems unexpectedly NULL"));
		if (((void **) item)[1] != (void *) ZENTRY_FREE)
			zerror(ZONE_ERROR_NOTFREE);
		((void **) item)[1] = 0;
#endif
		z->zfreecnt--;
		z->znalloc++;
	}
	lwkt_reltoken(&z->zlock);
	return item;
}

/*
 * Free an item to the specified zone.  This function is interrupt/MP
 * thread safe, but might block.
 */
void
zfree(vm_zone_t z, void *item)
{
	lwkt_gettoken(&z->zlock);
	((void **) item)[0] = z->zitems;
#ifdef INVARIANTS
	if (((void **) item)[1] == (void *) ZENTRY_FREE)
		zerror(ZONE_ERROR_ALREADYFREE);
	((void **) item)[1] = (void *) ZENTRY_FREE;
#endif
	z->zitems = item;
	z->zfreecnt++;
	lwkt_reltoken(&z->zlock);
}

/*
 * This file implements a very simple zone allocator, used in lieu of
 * the malloc allocator where that is needed or more optimal.
 *
 * Note that an early implementation used coloring, which produced no
 * improvement (actually a performance degradation), so coloring was
 * dropped.
 *
 * Note also that the zones are type stable.  The only restriction is
 * that the allocator may overwrite the first two longwords of an item
 * between allocations.  Any data that must remain stable across
 * allocations must reside after the first two longwords.
 *
 * zinitna, zinit, and zbootinit are the initialization routines.
 * zalloc and zfree are the allocation/free routines.
 */
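
/*
 * A minimal sketch of how the embedded free list works; illustrative
 * only -- this file manipulates the two words directly rather than
 * declaring a structure for them:
 */
#if 0	/* illustration only, never compiled */
struct zitem_free {
	struct zitem_free *zf_next;	/* ((void **) item)[0]: z->zitems chain */
#ifdef INVARIANTS
	void *zf_magic;			/* ((void **) item)[1]: ZENTRY_FREE tag */
#endif
};
#endif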

static struct vm_zone *zlist;
static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
static int zone_kmem_pages, zone_kern_pages, zone_kmem_kvaspace;

/*
 * Create a zone, but don't allocate the zone structure.  If the
 * zone was previously created by the zone boot code (zbootinit),
 * initialize only the remaining parts of the zone.
 *
 * If waits are not allowed during allocation (e.g. during interrupt
 * code), allocate the kernel virtual space a priori and fill in
 * physical pages only as needed.
 *
 * Arguments:
 * z		pointer to zone structure.
 * obj		pointer to VM object (optional).
 * name		name of zone.
 * size		size of zone entries.
 * nentries	number of zone entries allocated (ZONE_INTERRUPT only).
 * flags	ZONE_INTERRUPT -- items can be allocated at interrupt time.
 * zalloc	number of pages allocated when memory is needed.
 *
 * Note that when using ZONE_INTERRUPT, the size of the zone is limited
 * by the nentries argument.  If ZONE_INTERRUPT is not set, the amount
 * of allocatable memory is unlimited.
 */
int
zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
	int nentries, int flags, int zalloc)
{
	int totsize;

	if ((z->zflags & ZONE_BOOT) == 0) {
		z->zsize = (size + ZONE_ROUNDING - 1) & ~(ZONE_ROUNDING - 1);
		lwkt_inittoken(&z->zlock);
		z->zfreecnt = 0;
		z->ztotal = 0;
		z->zmax = 0;
		z->zname = name;
		z->znalloc = 0;
		z->zitems = NULL;

		z->znext = zlist;
		zlist = z;
	}

	z->zflags |= flags;

	/*
	 * If we cannot wait, allocate KVA space up front, and we will fill
	 * in pages as needed.  This is particularly required when creating
	 * an allocation space for map entries in kernel_map, because we
	 * do not want to go into a recursion deadlock with
	 * vm_map_entry_reserve().
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		totsize = round_page(z->zsize * nentries);
		zone_kmem_kvaspace += totsize;

		z->zkva = kmem_alloc_pageable(kernel_map, totsize);
		if (z->zkva == 0) {
			zlist = z->znext;
			return 0;
		}

		z->zpagemax = totsize / PAGE_SIZE;
		if (obj == NULL) {
			z->zobj = vm_object_allocate(OBJT_DEFAULT, z->zpagemax);
		} else {
			z->zobj = obj;
			_vm_object_allocate(OBJT_DEFAULT, z->zpagemax, obj);
		}
		z->zallocflag = VM_ALLOC_INTERRUPT;
		z->zmax += nentries;
	} else {
		z->zallocflag = VM_ALLOC_SYSTEM;
		z->zmax = 0;
	}

	if (z->zsize > PAGE_SIZE)
		z->zfreemin = 1;
	else
		z->zfreemin = PAGE_SIZE / z->zsize;

	z->zpagecount = 0;
	if (zalloc)
		z->zalloc = zalloc;
	else
		z->zalloc = 1;

	return 1;
}
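
/*
 * Hypothetical ZONE_INTERRUPT usage of zinitna() (the zone and entry
 * names below are illustrative, not declarations from this file):
 *
 *	static struct vm_zone pvzone;
 *
 *	zinitna(&pvzone, NULL, "PV ENTRY", sizeof(struct pv_entry),
 *		10000, ZONE_INTERRUPT, 1);
 *
 * KVA for all 10000 entries is reserved up front; physical pages are
 * wired in by zget() only as the zone actually grows.
 */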

/*
 * The same as zinitna, except the zone data structure is allocated
 * automatically by malloc.  This routine should normally be used,
 * except in certain tricky startup conditions in the VM system -- then
 * zbootinit and zinitna can be used.  zinit is the standard zone
 * initialization call.
 */
vm_zone_t
zinit(char *name, int size, int nentries, int flags, int zalloc)
{
	vm_zone_t z;

	z = (vm_zone_t) malloc(sizeof (struct vm_zone), M_ZONE, M_NOWAIT);
	if (z == NULL)
		return NULL;

	z->zflags = 0;
	if (zinitna(z, NULL, name, size, nentries, flags, zalloc) == 0) {
		free(z, M_ZONE);
		return NULL;
	}

	return z;
}
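
/*
 * Hypothetical zinit() usage (the names below are illustrative only):
 *
 *	static vm_zone_t thing_zone;
 *	struct thing *thing;
 *
 *	thing_zone = zinit("THINGS", sizeof(struct thing), 0, 0, 4);
 *	thing = zalloc(thing_zone);
 *	...
 *	zfree(thing_zone, thing);
 *
 * With flags 0 the zone is not size-limited; nentries is ignored and
 * zget() grows the zone 4 pages (the zalloc argument) at a time.
 */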

/*
 * Initialize a zone before the system is fully up.  This routine should
 * only be called before full VM startup.
 */
void
zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems)
{
	int i;

	z->zname = name;
	z->zsize = size;
	z->zpagemax = 0;
	z->zobj = NULL;
	z->zflags = ZONE_BOOT;
	z->zfreemin = 0;
	z->zallocflag = 0;
	z->zpagecount = 0;
	z->zalloc = 0;
	z->znalloc = 0;
	lwkt_inittoken(&z->zlock);

	bzero(item, nitems * z->zsize);
	z->zitems = NULL;
	for (i = 0; i < nitems; i++) {
		((void **) item)[0] = z->zitems;
#ifdef INVARIANTS
		((void **) item)[1] = (void *) ZENTRY_FREE;
#endif
		z->zitems = item;
		item = (char *) item + z->zsize;
	}
	z->zfreecnt = nitems;
	z->zmax = nitems;
	z->ztotal = nitems;

	if (zlist == NULL) {
		zlist = z;
		z->znext = NULL;
	} else {
		z->znext = zlist;
		zlist = z;
	}
}
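
/*
 * Hypothetical early-boot usage (the names below are illustrative and
 * not part of this file).  The caller provides both the zone header and
 * the initial backing store, so nothing needs to be allocated:
 *
 *	static struct vm_zone mapzone;
 *	static struct vm_map map_init[MAX_KMAP];
 *
 *	zbootinit(&mapzone, "MAP", sizeof(struct vm_map),
 *		  map_init, MAX_KMAP);
 *
 * Later, once the VM system is up, zinitna() can be called on the same
 * zone; it sees ZONE_BOOT and preserves the existing free list.
 */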

/*
 * void *zalloc(vm_zone_t zone) --
 *	Returns an item from a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 *
 * void zfree(vm_zone_t zone, void *item) --
 *	Frees an item back to a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 */

/*
 * Internal zone routine.  Not to be called from external (non vm_zone) code.
 */
static void *
zget(vm_zone_t z)
{
	int i;
	vm_page_t m;
	int nitems, nbytes;
	void *item;

	if (z == NULL)
		panic("zget: null zone");

	if (z->zflags & ZONE_INTERRUPT) {
		nbytes = z->zpagecount * PAGE_SIZE;
		nbytes -= nbytes % z->zsize;
		item = (char *) z->zkva + nbytes;
		for (i = 0; ((i < z->zalloc) && (z->zpagecount < z->zpagemax));
		     i++) {
			vm_offset_t zkva;

			m = vm_page_alloc(z->zobj, z->zpagecount,
					  z->zallocflag);
			if (m == NULL)
				break;
			lwkt_regettoken(&z->zlock);

			zkva = z->zkva + z->zpagecount * PAGE_SIZE;
			pmap_kenter(zkva, VM_PAGE_TO_PHYS(m)); /* YYY */
			bzero((caddr_t) zkva, PAGE_SIZE);
			z->zpagecount++;
			zone_kmem_pages++;
			vmstats.v_wire_count++;
		}
		nitems = ((z->zpagecount * PAGE_SIZE) - nbytes) / z->zsize;
	} else {
		nbytes = z->zalloc * PAGE_SIZE;

#if defined(USE_KMEM_MAP)
		/*
		 * Check to see if the kernel map is already locked.
		 * We could allow for recursive locks, but that eliminates
		 * a valuable debugging mechanism, and opens up the kernel
		 * map for potential corruption by inconsistent data structure
		 * manipulation.  We could also use the interrupt allocation
		 * mechanism, but that has size limitations.  Luckily, we
		 * have kmem_map, a submap of the kernel map, available for
		 * memory allocation, and manipulating that map does not
		 * affect the kernel map structures themselves.
		 *
		 * We can wait, so just do normal map allocation in the
		 * appropriate map.
		 */
		if (lockstatus(&kernel_map->lock, NULL)) {
			int s;
			s = splvm();
			item = (void *) kmem_malloc(kmem_map, nbytes, M_WAITOK);
			lwkt_regettoken(&z->zlock);
			if (item != NULL)
				zone_kmem_pages += z->zalloc;
			splx(s);
		} else
#endif
		{
			item = (void *) kmem_alloc(kernel_map, nbytes);
			lwkt_regettoken(&z->zlock);
			if (item != NULL)
				zone_kern_pages += z->zalloc;
		}
		if (item != NULL) {
			bzero(item, nbytes);
		} else {
			nbytes = 0;
		}
		nitems = nbytes / z->zsize;
	}
	z->ztotal += nitems;

	/*
	 * Save one item for immediate allocation.
	 */
	if (nitems != 0) {
		nitems -= 1;
		for (i = 0; i < nitems; i++) {
			((void **) item)[0] = z->zitems;
#ifdef INVARIANTS
			((void **) item)[1] = (void *) ZENTRY_FREE;
#endif
			z->zitems = item;
			item = (char *) item + z->zsize;
		}
		z->zfreecnt += nitems;
		z->znalloc++;
	} else if (z->zfreecnt > 0) {
		item = z->zitems;
		z->zitems = ((void **) item)[0];
#ifdef INVARIANTS
		if (((void **) item)[1] != (void *) ZENTRY_FREE)
			zerror(ZONE_ERROR_NOTFREE);
		((void **) item)[1] = 0;
#endif
		z->zfreecnt--;
		z->znalloc++;
	} else {
		item = NULL;
	}

	/*
	 * Recover any reserve missing due to a zalloc/kreserve/krelease
	 * recursion.
	 */
	vm_map_entry_reserve(0);

	return item;
}

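/*
 * vm.zone sysctl handler.  The output has the following shape (the zone
 * name and values below are invented for illustration):
 *
 *	ITEM            SIZE     LIMIT    USED    FREE  REQUESTS
 *	PROC:           000832, 00000000, 000042, 000022, 00000064
 *	...
 */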
static int
sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	vm_zone_t curzone, nextzone;
	char tmpbuf[128];
	char tmpname[14];

	snprintf(tmpbuf, sizeof(tmpbuf),
	    "\nITEM            SIZE     LIMIT    USED    FREE  REQUESTS\n");
	error = SYSCTL_OUT(req, tmpbuf, strlen(tmpbuf));
	if (error)
		return (error);

	for (curzone = zlist; curzone; curzone = nextzone) {
		int i;
		int len;
		int offset;

		nextzone = curzone->znext;
		len = strlen(curzone->zname);
		if (len >= (sizeof(tmpname) - 1))
			len = (sizeof(tmpname) - 1);
		for (i = 0; i < sizeof(tmpname) - 1; i++)
			tmpname[i] = ' ';
		tmpname[i] = 0;
		memcpy(tmpname, curzone->zname, len);
		tmpname[len] = ':';
		offset = 0;
		if (curzone == zlist) {
			offset = 1;
			tmpbuf[0] = '\n';
		}

		snprintf(tmpbuf + offset, sizeof(tmpbuf) - offset,
			"%s %6.6u, %8.8u, %6.6u, %6.6u, %8.8u\n",
			tmpname, curzone->zsize, curzone->zmax,
			(curzone->ztotal - curzone->zfreecnt),
			curzone->zfreecnt, curzone->znalloc);

		len = strlen(tmpbuf);
		if (nextzone == NULL)
			tmpbuf[len - 1] = 0;

		error = SYSCTL_OUT(req, tmpbuf, len);
		if (error)
			return (error);
	}
	return (0);
}

#if defined(INVARIANTS)
void
zerror(int error)
{
	char *msg;

	switch (error) {
	case ZONE_ERROR_INVALID:
		msg = "zone: invalid zone";
		break;
	case ZONE_ERROR_NOTFREE:
		msg = "zone: entry not free";
		break;
	case ZONE_ERROR_ALREADYFREE:
		msg = "zone: freeing free entry";
		break;
	default:
		msg = "zone: invalid error";
		break;
	}
	panic("%s", msg);
}
#endif

SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD,
	NULL, 0, sysctl_vm_zone, "A", "Zone Info");

SYSCTL_INT(_vm, OID_AUTO, zone_kmem_pages,
	CTLFLAG_RD, &zone_kmem_pages, 0,
	"Number of interrupt-safe pages allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kmem_kvaspace,
	CTLFLAG_RD, &zone_kmem_kvaspace, 0,
	"KVA space allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kern_pages,
	CTLFLAG_RD, &zone_kern_pages, 0,
	"Number of non-interrupt-safe pages allocated by zone");
495