xref: /dragonfly/test/debug/vmpageinfo.c (revision 2b7dbe20)
1 /*
2  * VMPAGEINFO.C
3  *
4  * cc -I/usr/src/sys vmpageinfo.c -o ~/bin/vmpageinfo -lkvm
5  *
6  * vmpageinfo
7  *
8  * Scan the vm_page_array, validate pages that have associated objects,
9  * and report a breakdown of how physical memory is being used.
10  *
11  * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
12  *
13  * This code is derived from software contributed to The DragonFly Project
14  * by Matthew Dillon <dillon@backplane.com>
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  *
20  * 1. Redistributions of source code must retain the above copyright
21  *    notice, this list of conditions and the following disclaimer.
22  * 2. Redistributions in binary form must reproduce the above copyright
23  *    notice, this list of conditions and the following disclaimer in
24  *    the documentation and/or other materials provided with the
25  *    distribution.
26  * 3. Neither the name of The DragonFly Project nor the names of its
27  *    contributors may be used to endorse or promote products derived
28  *    from this software without specific, prior written permission.
29  *
30  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
31  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
32  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
33  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
34  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
35  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
36  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
37  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
38  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
39  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
40  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
41  * SUCH DAMAGE.
42  */
43 
44 #define _KERNEL_STRUCTURES_
45 #include <sys/param.h>
46 #include <sys/user.h>
47 #include <sys/malloc.h>
48 #include <sys/signalvar.h>
49 #include <sys/vnode.h>
50 #include <sys/namecache.h>
51 #include <sys/slaballoc.h>
52 
53 #include <vm/vm.h>
54 #include <vm/vm_page.h>
55 #include <vm/vm_kern.h>
56 #include <vm/vm_object.h>
57 #include <vm/swap_pager.h>
58 #include <vm/vnode_pager.h>
59 
60 #include <stdio.h>
61 #include <stdlib.h>
62 #include <string.h>
63 #include <fcntl.h>
64 #include <kvm.h>
65 #include <nlist.h>
66 #include <getopt.h>
67 
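/*
 * Kernel symbols resolved with kvm_nlist().  main() reads these by index,
 * so the order here must match the Nl[n] references below.
 */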
68 struct nlist Nl[] = {
69     { "_vm_page_array" },
70     { "_vm_page_array_size" },
71     { "_kernel_object" },
72     { "_nbuf" },
73     { "_nswbuf_mem" },
74     { "_nswbuf_kva" },
75     { "_nswbuf_raw" },
76     { "_kernbase" },
77     { "__end" },
78     { NULL }
79 };
80 
81 int debugopt;
82 int verboseopt;
83 #if 0
84 struct vm_page **vm_page_buckets;
85 int vm_page_hash_mask;
86 #endif
87 struct vm_page *vm_page_array;
88 struct vm_object *kernel_object_ptr;
89 int vm_page_array_size;
90 long nbuf;
91 long nswbuf_mem;
92 long nswbuf_kva;
93 long nswbuf_raw;
94 long kern_size;
95 
96 void checkpage(kvm_t *kd, vm_page_t mptr, vm_page_t m, struct vm_object *obj);
97 static void kkread_vmpage(kvm_t *kd, u_long addr, vm_page_t m);
98 static void kkread(kvm_t *kd, u_long addr, void *buf, size_t nbytes);
99 static int kkread_err(kvm_t *kd, u_long addr, void *buf, size_t nbytes);
100 
101 #if 0
102 static void addsltrack(vm_page_t m);
103 static void dumpsltrack(kvm_t *kd);
104 #endif
105 static int unique_object(void *ptr);
106 
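/*
 * Page accounting counters, in 4KB pages, accumulated while scanning the
 * vm_page_array and reported in megabytes at the end of main().
 */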
107 long count_free;
108 long count_wired;		/* total */
109 long count_wired_vnode;
110 long count_wired_anon;
111 long count_wired_in_pmap;
112 long count_wired_pgtable;
113 long count_wired_other;
114 long count_wired_kernel;
115 long count_wired_obj_other;
116 
117 long count_anon;
118 long count_anon_in_pmap;
119 long count_vnode;
120 long count_device;
121 long count_phys;
122 long count_kernel;
123 long count_unknown;
124 long count_noobj_offqueue;
125 long count_noobj_onqueue;
126 
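/*
 * Parse options, open the kernel (or core) with libkvm, resolve the
 * symbols in Nl[], then scan and classify every page in the vm_page_array.
 */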
127 int
128 main(int ac, char **av)
129 {
130     const char *corefile = NULL;
131     const char *sysfile = NULL;
132     struct vm_page m;
133     struct vm_object obj;
134     kvm_t *kd;
135     int ch;
136 #if 0
137     vm_page_t mptr;
138     int hv;
139 #endif
140     int i;
141     const char *qstr;
142     const char *ostr;
143 
144     while ((ch = getopt(ac, av, "M:N:dv")) != -1) {
145 	switch(ch) {
146 	case 'd':
147 	    ++debugopt;
148 	    break;
149 	case 'v':
150 	    ++verboseopt;
151 	    break;
152 	case 'M':
153 	    corefile = optarg;
154 	    break;
155 	case 'N':
156 	    sysfile = optarg;
157 	    break;
158 	default:
159 	    fprintf(stderr, "%s [-M core] [-N system]\n", av[0]);
160 	    exit(1);
161 	}
162     }
163     ac -= optind;
164     av += optind;
165 
166     if ((kd = kvm_open(sysfile, corefile, NULL, O_RDONLY, "kvm:")) == NULL) {
167 	perror("kvm_open");
168 	exit(1);
169     }
170     if (kvm_nlist(kd, Nl) != 0) {
171 	perror("kvm_nlist");
172 	exit(1);
173     }
174 
175     kkread(kd, Nl[0].n_value, &vm_page_array, sizeof(vm_page_array));
176     kkread(kd, Nl[1].n_value, &vm_page_array_size, sizeof(vm_page_array_size));
177     kernel_object_ptr = (void *)Nl[2].n_value;
178     kkread(kd, Nl[3].n_value, &nbuf, sizeof(nbuf));
179     kkread(kd, Nl[4].n_value, &nswbuf_mem, sizeof(nswbuf_mem));
180     kkread(kd, Nl[5].n_value, &nswbuf_kva, sizeof(nswbuf_kva));
181     kkread(kd, Nl[6].n_value, &nswbuf_raw, sizeof(nswbuf_raw));
182     kern_size = Nl[8].n_value - Nl[7].n_value;
183 
184     /*
185      * Scan the vm_page_array validating all pages with associated objects
186      */
187     for (i = 0; i < vm_page_array_size; ++i) {
188 	if (debugopt) {
189 	    printf("page %d\r", i);
190 	    fflush(stdout);
191 	}
192 	kkread_vmpage(kd, (u_long)&vm_page_array[i], &m);
193 	if (m.object) {
194 	    kkread(kd, (u_long)m.object, &obj, sizeof(obj));
195 	    checkpage(kd, &vm_page_array[i], &m, &obj);
196 	}
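	/*
	 * Classify the page by queue.  The PQ_* constants are the low
	 * bounds of each queue range, so test them from highest to lowest.
	 */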
197 	if (m.queue >= PQ_HOLD) {
198 	    qstr = "HOLD";
199 	} else if (m.queue >= PQ_CACHE) {
200 	    qstr = "CACHE";
201 	} else if (m.queue >= PQ_ACTIVE) {
202 	    qstr = "ACTIVE";
203 	} else if (m.queue >= PQ_INACTIVE) {
204 	    qstr = "INACTIVE";
205 	} else if (m.queue >= PQ_FREE) {
206 	    qstr = "FREE";
207 	    ++count_free;
208 	} else {
209 	    qstr = "NONE";
210 	}
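	/*
	 * Break down wired pages: pages with no object are page-table pages
	 * or other kernel wirings, otherwise classify by the owning object.
	 */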
211 	if (m.wire_count) {
212 		++count_wired;
213 		if (m.object == NULL) {
214 			if ((m.flags & PG_MAPPED) &&
215 			    (m.flags & PG_WRITEABLE) &&
216 			    (m.flags & PG_UNQUEUED)) {
217 				++count_wired_pgtable;
218 			} else {
219 				++count_wired_other;
220 			}
221 		} else if (m.object == kernel_object_ptr) {
222 			++count_wired_kernel;
223 		} else {
224 			switch(obj.type) {
225 			case OBJT_VNODE:
226 				++count_wired_vnode;
227 				break;
228 			case OBJT_DEFAULT:
229 			case OBJT_SWAP:
230 				if (m.md.pmap_count)
231 					++count_wired_in_pmap;
232 				else
233 					++count_wired_anon;
234 				break;
235 			default:
236 				++count_wired_obj_other;
237 				break;
238 			}
239 		}
240 	} else if (m.md.pmap_count) {
241 		if (m.object && m.object != kernel_object_ptr) {
242 			switch(obj.type) {
243 			case OBJT_DEFAULT:
244 			case OBJT_SWAP:
245 				++count_anon_in_pmap;
246 				break;
247 			default:
248 				break;
249 			}
250 		}
251 	}
252 
253 	if (verboseopt) {
254 	    printf("page %p obj %p/%-8ju(%016jx) val=%02x dty=%02x hold=%d "
255 		   "wire=%-2d act=%-3d busy=%d w/pmapcnt=%d/%d %8s",
256 		&vm_page_array[i],
257 		m.object,
258 		(uintmax_t)m.pindex,
259 		(uintmax_t)m.pindex * PAGE_SIZE,
260 		m.valid,
261 		m.dirty,
262 		m.hold_count,
263 		m.wire_count,
264 		m.act_count,
265 		m.busy_count,
266 		m.md.writeable_count,
267 		m.md.pmap_count,
268 		qstr
269 	    );
270 	}
271 
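	/*
	 * Tally resident pages by object type.  unique_object() ensures
	 * each object's resident_page_count is added only once.
	 */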
272 	if (m.object == kernel_object_ptr) {
273 		ostr = "kernel";
274 		if (unique_object(m.object))
275 			count_kernel += obj.resident_page_count;
276 	} else if (m.object) {
277 	    switch(obj.type) {
278 	    case OBJT_DEFAULT:
279 		ostr = "default";
280 		if (unique_object(m.object))
281 			count_anon += obj.resident_page_count;
282 		break;
283 	    case OBJT_SWAP:
284 		ostr = "swap";
285 		if (unique_object(m.object))
286 			count_anon += obj.resident_page_count;
287 		break;
288 	    case OBJT_VNODE:
289 		ostr = "vnode";
290 		if (unique_object(m.object))
291 			count_vnode += obj.resident_page_count;
292 		break;
293 	    case OBJT_DEVICE:
294 		ostr = "device";
295 		if (unique_object(m.object))
296 			count_device += obj.resident_page_count;
297 		break;
298 	    case OBJT_PHYS:
299 		ostr = "phys";
300 		if (unique_object(m.object))
301 			count_phys += obj.resident_page_count;
302 		break;
303 	    case OBJT_DEAD:
304 		ostr = "dead";
305 		if (unique_object(m.object))
306 			count_unknown += obj.resident_page_count;
307 		break;
308 	    default:
309 		if (unique_object(m.object))
310 			count_unknown += obj.resident_page_count;
311 		ostr = "unknown";
312 		break;
313 	    }
314 	} else {
315 	    ostr = "-";
316 	    if (m.queue == PQ_NONE)
317 		    ++count_noobj_offqueue;
318 	    else if (m.queue - m.pc != PQ_FREE)
319 		    ++count_noobj_onqueue;
320 	}
321 
322 	if (verboseopt) {
323 	    printf(" %-7s", ostr);
324 	    if (m.busy_count & PBUSY_LOCKED)
325 		printf(" BUSY");
326 	    if (m.busy_count & PBUSY_WANTED)
327 		printf(" WANTED");
328 	    if (m.flags & PG_WINATCFLS)
329 		printf(" WINATCFLS");
330 	    if (m.flags & PG_FICTITIOUS)
331 		printf(" FICTITIOUS");
332 	    if (m.flags & PG_WRITEABLE)
333 		printf(" WRITEABLE");
334 	    if (m.flags & PG_MAPPED)
335 		printf(" MAPPED");
336 	    if (m.flags & PG_NEED_COMMIT)
337 		printf(" NEED_COMMIT");
338 	    if (m.flags & PG_REFERENCED)
339 		printf(" REFERENCED");
340 	    if (m.flags & PG_CLEANCHK)
341 		printf(" CLEANCHK");
342 	    if (m.busy_count & PBUSY_SWAPINPROG)
343 		printf(" SWAPINPROG");
344 	    if (m.flags & PG_NOSYNC)
345 		printf(" NOSYNC");
346 	    if (m.flags & PG_UNQUEUED)
347 		printf(" UNQUEUED");
348 	    if (m.flags & PG_MARKER)
349 		printf(" MARKER");
350 	    if (m.flags & PG_RAM)
351 		printf(" RAM");
352 	    if (m.flags & PG_SWAPPED)
353 		printf(" SWAPPED");
354 #if 0
355 	    if (m.flags & PG_SLAB)
356 		printf(" SLAB");
357 #endif
358 	    printf("\n");
359 #if 0
360 	    if (m.flags & PG_SLAB)
361 		addsltrack(&m);
362 #endif
363 	}
364     }
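    /*
     * Summary.  All counters are in 4KB pages and are reported in megabytes.
     */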
365     if (debugopt || verboseopt)
366 	printf("\n");
367     printf("%8.2fM free\n", count_free * 4096.0 / 1048576.0);
368 
369     printf("%8.2fM wired vnode (in buffer cache)\n",
370 	count_wired_vnode * 4096.0 / 1048576.0);
371     printf("%8.2fM wired in-pmap (probably vnode pages also in buffer cache)\n",
372 	count_wired_in_pmap * 4096.0 / 1048576.0);
373     printf("%8.2fM wired pgtable\n",
374 	count_wired_pgtable * 4096.0 / 1048576.0);
375     printf("%8.2fM wired anon\n",
376 	count_wired_anon * 4096.0 / 1048576.0);
377     printf("%8.2fM wired kernel_object\n",
378 	count_wired_kernel * 4096.0 / 1048576.0);
379 
380 	printf("\t%8.2fM vm_page_array\n",
381 	    vm_page_array_size * sizeof(struct vm_page) / 1048576.0);
382 	printf("\t%8.2fM buf, swbuf_mem, swbuf_kva, swbuf_raw\n",
383 	    (nbuf + nswbuf_mem + nswbuf_kva + nswbuf_raw) *
384 	    sizeof(struct buf) / 1048576.0);
385 	printf("\t%8.2fM kernel binary\n", kern_size / 1048576.0);
386 	printf("\t(also add in KMALLOC id kmapinfo, or loosely, vmstat -m)\n");
387 
388     printf("%8.2fM wired other (unknown object)\n",
389 	count_wired_obj_other * 4096.0 / 1048576.0);
390     printf("%8.2fM wired other (no object, probably kernel)\n",
391 	count_wired_other * 4096.0 / 1048576.0);
392 
393     printf("%8.2fM WIRED TOTAL\n",
394 	count_wired * 4096.0 / 1048576.0);
395 
396     printf("\n");
397     printf("%8.2fM anonymous (total, includes in-pmap)\n",
398 	count_anon * 4096.0 / 1048576.0);
399     printf("%8.2fM anonymous memory in-pmap\n",
400 	count_anon_in_pmap * 4096.0 / 1048576.0);
401     printf("%8.2fM vnode (includes wired)\n",
402 	count_vnode * 4096.0 / 1048576.0);
403     printf("%8.2fM device\n", count_device * 4096.0 / 1048576.0);
404     printf("%8.2fM phys\n", count_phys * 4096.0 / 1048576.0);
405     printf("%8.2fM kernel (includes wired)\n",
406 	count_kernel * 4096.0 / 1048576.0);
407     printf("%8.2fM unknown\n", count_unknown * 4096.0 / 1048576.0);
408     printf("%8.2fM no_object, off queue (includes wired w/o object)\n",
409 	count_noobj_offqueue * 4096.0 / 1048576.0);
410     printf("%8.2fM no_object, on non-free queue (includes wired w/o object)\n",
411 	count_noobj_onqueue * 4096.0 / 1048576.0);
412 
413 #if 0
414     /*
415      * Scan the vm_page_buckets array validating all pages found
416      */
417     for (i = 0; i <= vm_page_hash_mask; ++i) {
418 	if (debugopt) {
419 	    printf("index %d\r", i);
420 	    fflush(stdout);
421 	}
422 	kkread(kd, (u_long)&vm_page_buckets[i], &mptr, sizeof(mptr));
423 	while (mptr) {
424 	    kkread(kd, (u_long)mptr, &m, sizeof(m));
425 	    if (m.object) {
426 		kkread(kd, (u_long)m.object, &obj, sizeof(obj));
427 		hv = ((uintptr_t)m.object + m.pindex) ^ obj.hash_rand;
428 		hv &= vm_page_hash_mask;
429 		if (i != hv)
430 		    printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
431 			" should be in bucket %d\n", i, mptr, hv);
432 		checkpage(kd, mptr, &m, &obj);
433 	    } else {
434 		printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
435 			" has no object\n", i, mptr);
436 	    }
437 	    mptr = m.hnext;
438 	}
439     }
440 #endif
441     if (debugopt)
442 	printf("\n");
443 #if 0
444     dumpsltrack(kd);
445 #endif
446     return(0);
447 }
448 
449 /*
450  * Sanity-check a page that has an associated object (checks currently disabled).
451  */
452 void
453 checkpage(kvm_t *kd, vm_page_t mptr, vm_page_t m, struct vm_object *obj)
454 {
455 #if 0
456     struct vm_page scan;
457     vm_page_t scanptr;
458     int hv;
459 
460     hv = ((uintptr_t)m->object + m->pindex) ^ obj->hash_rand;
461     hv &= vm_page_hash_mask;
462     kkread(kd, (u_long)&vm_page_buckets[hv], &scanptr, sizeof(scanptr));
463     while (scanptr) {
464 	if (scanptr == mptr)
465 	    break;
466 	kkread(kd, (u_long)scanptr, &scan, sizeof(scan));
467 	scanptr = scan.hnext;
468     }
469     if (scanptr) {
470 	if (debugopt > 1)
471 	    printf("good checkpage %p bucket %d\n", mptr, hv);
472     } else {
473 	printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
474 		" page not found in bucket list\n", hv, mptr);
475     }
476 #endif
477 }
478 
479 /*
480  * Accelerate the reading of VM pages
481  */
482 static void
483 kkread_vmpage(kvm_t *kd, u_long addr, vm_page_t m)
484 {
485     static struct vm_page vpcache[1024];
486     static u_long vpbeg;
487     static u_long vpend;
488 
489     if (addr < vpbeg || addr >= vpend) {
490 	vpbeg = addr;
491 	vpend = addr + 1024 * sizeof(*m);
492 	if (vpend > (u_long)(uintptr_t)vm_page_array +
493 		    vm_page_array_size * sizeof(*m)) {
494 	    vpend = (u_long)(uintptr_t)vm_page_array +
495 		    vm_page_array_size * sizeof(*m);
496 	}
497 	kkread(kd, vpbeg, vpcache, vpend - vpbeg);
498     }
499     *m = vpcache[(addr - vpbeg) / sizeof(*m)];
500 }
501 
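/*
 * Read kernel memory at the given address; any short read is fatal.
 */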
502 static void
503 kkread(kvm_t *kd, u_long addr, void *buf, size_t nbytes)
504 {
505     if (kvm_read(kd, addr, buf, nbytes) != nbytes) {
506         perror("kvm_read");
507         exit(1);
508     }
509 }
510 
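/*
 * As kkread(), but return non-zero on failure instead of exiting, for
 * addresses that may legitimately be unmapped.
 */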
511 static int
512 kkread_err(kvm_t *kd, u_long addr, void *buf, size_t nbytes)
513 {
514     if (kvm_read(kd, addr, buf, nbytes) != nbytes) {
515 	return 1;
516     }
517     return 0;
518 }
519 
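/*
 * Hash of 128KB slab zone addresses recorded by the (disabled) PG_SLAB
 * tracking code.
 */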
520 struct SLTrack {
521         struct SLTrack *next;
522         u_long addr;
523 };
524 
525 #define SLHSIZE 1024
526 #define SLHMASK (SLHSIZE - 1)
527 
528 struct SLTrack *SLHash[SLHSIZE];
529 
530 #if 0
531 static
532 void
533 addsltrack(vm_page_t m)
534 {
535 	struct SLTrack *slt;
536 	u_long addr = (m->pindex * PAGE_SIZE) & ~131071L;
537 	int i;
538 
539 	if (m->wire_count == 0 || (m->flags & PG_MAPPED) == 0 ||
540 	    m->object == NULL)
541 		return;
542 
543 	i = (addr / 131072) & SLHMASK;
544 	for (slt = SLHash[i]; slt; slt = slt->next) {
545 		if (slt->addr == addr)
546 			break;
547 	}
548 	if (slt == NULL) {
549 		slt = malloc(sizeof(*slt));
550 		slt->addr = addr;
551 		slt->next = SLHash[i];
552 		SLHash[i] = slt;
553 	}
554 }
555 #endif
556 
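/*
 * Report the slab zones recorded by addsltrack() and how many are full
 * (its caller in main() is currently compiled out).
 */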
557 static
558 void
559 dumpsltrack(kvm_t *kd)
560 {
561 	struct SLTrack *slt;
562 	int i;
563 	long total_zones = 0;
564 	long full_zones = 0;
565 
566 	for (i = 0; i < SLHSIZE; ++i) {
567 		for (slt = SLHash[i]; slt; slt = slt->next) {
568 			SLZone z;
569 
570 			if (kkread_err(kd, slt->addr, &z, sizeof(z))) {
571 				printf("SLZone 0x%016lx not mapped\n",
572 					slt->addr);
573 				continue;
574 			}
575 			printf("SLZone 0x%016lx { mag=%08x cpu=%-2d NFree=%-3d "
576 			       "chunksz=%-5d }\n",
577 			       slt->addr,
578 			       z.z_Magic,
579 			       z.z_Cpu,
580 			       z.z_NFree,
581 			       z.z_ChunkSize
582 			);
583 			++total_zones;
584 			if (z.z_NFree == 0)
585 				++full_zones;
586 		}
587 	}
588 	printf("FullZones/TotalZones: %ld/%ld\n", full_zones, total_zones);
589 }
590 
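/*
 * Pointer hash used by unique_object() to ensure each VM object is
 * counted only once.
 */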
591 #define HASH_SIZE	(1024*1024)
592 #define HASH_MASK	(HASH_SIZE - 1)
593 
594 struct dup_entry {
595 	struct dup_entry *next;
596 	void	*ptr;
597 };
598 
599 struct dup_entry *dup_hash[HASH_SIZE];
600 
601 static int
602 unique_object(void *ptr)
603 {
604 	struct dup_entry *hen;
605 	int hv;
606 
607 	hv = (intptr_t)ptr ^ ((intptr_t)ptr >> 20);
608 	hv &= HASH_MASK;
609 	for (hen = dup_hash[hv]; hen; hen = hen->next) {
610 		if (hen->ptr == ptr)
611 			return 0;
612 	}
613 	hen = malloc(sizeof(*hen));
614 	hen->next = dup_hash[hv];
615 	hen->ptr = ptr;
616 	dup_hash[hv] = hen;
617 
618 	return 1;
619 }
620