xref: /dragonfly/test/debug/vmpageinfo.c (revision b3f5eba6)
1 /*
2  * VMPAGEINFO.C
3  *
4  * cc -I/usr/src/sys vmpageinfo.c -o ~/bin/vmpageinfo -lkvm
5  *
6  * vmpageinfo
7  *
8  * Validate the vm_page_buckets[] hash array against the vm_page_array
9  *
10  *
11  * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
12  *
13  * This code is derived from software contributed to The DragonFly Project
14  * by Matthew Dillon <dillon@backplane.com>
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  *
20  * 1. Redistributions of source code must retain the above copyright
21  *    notice, this list of conditions and the following disclaimer.
22  * 2. Redistributions in binary form must reproduce the above copyright
23  *    notice, this list of conditions and the following disclaimer in
24  *    the documentation and/or other materials provided with the
25  *    distribution.
26  * 3. Neither the name of The DragonFly Project nor the names of its
27  *    contributors may be used to endorse or promote products derived
28  *    from this software without specific, prior written permission.
29  *
30  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
31  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
32  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
33  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
34  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
35  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
36  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
37  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
38  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
39  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
40  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
41  * SUCH DAMAGE.
42  *
43  * $DragonFly: src/test/debug/vmpageinfo.c,v 1.2 2006/05/23 01:00:05 dillon Exp $
44  */
45 
46 #define _KERNEL_STRUCTURES_
47 #include <sys/param.h>
48 #include <sys/user.h>
49 #include <sys/malloc.h>
50 #include <sys/signalvar.h>
51 #include <sys/vnode.h>
52 #include <sys/namecache.h>
53 #include <sys/slaballoc.h>
54 
55 #include <vm/vm.h>
56 #include <vm/vm_page.h>
57 #include <vm/vm_kern.h>
58 #include <vm/vm_page.h>
59 #include <vm/vm_object.h>
60 #include <vm/swap_pager.h>
61 #include <vm/vnode_pager.h>
62 
63 #include <stdio.h>
64 #include <stdlib.h>
65 #include <string.h>
66 #include <fcntl.h>
67 #include <kvm.h>
68 #include <nlist.h>
69 #include <getopt.h>
70 
/*
 * Kernel symbols resolved via kvm_nlist().  Order matters: main()
 * indexes this array numerically (Nl[0] through Nl[8]).
 */
struct nlist Nl[] = {
    { "_vm_page_array" },	/* [0] base of the vm_page array */
    { "_vm_page_array_size" },	/* [1] number of entries in the array */
    { "_kernel_object" },	/* [2] address only; never dereferenced */
    { "_nbuf" },		/* [3] buffer cache sizing */
    { "_nswbuf_mem" },		/* [4] swap buffer counts */
    { "_nswbuf_kva" },		/* [5] */
    { "_nswbuf_raw" },		/* [6] */
    { "_kernbase" },		/* [7] kernel binary start */
    { "__end" },		/* [8] kernel binary end */
    { NULL }
};
83 
int debugopt;		/* -d: progress/debug output */
int verboseopt;		/* -v: one detail line per vm_page */
#if 0
struct vm_page **vm_page_buckets;
int vm_page_hash_mask;
#endif
struct vm_page *vm_page_array;		/* kernel address of the page array */
struct vm_object *kernel_object_ptr;	/* kernel address of kernel_object */
int vm_page_array_size;			/* number of vm_page entries */
long nbuf;				/* buffer cache / swap buffer */
long nswbuf_mem;			/* sizing globals read from the */
long nswbuf_kva;			/* kernel image */
long nswbuf_raw;
long kern_size;				/* _end - kernbase (kernel binary) */

void checkpage(kvm_t *kd, vm_page_t mptr, vm_page_t m, struct vm_object *obj);
static void kkread_vmpage(kvm_t *kd, u_long addr, vm_page_t m);
static void kkread(kvm_t *kd, u_long addr, void *buf, size_t nbytes);
static int kkread_err(kvm_t *kd, u_long addr, void *buf, size_t nbytes);

#if 0
static void addsltrack(vm_page_t m);
static void dumpsltrack(kvm_t *kd);
#endif
static int unique_object(void *ptr);

/*
 * Statistics accumulators filled in by the scan loop in main().
 * All counts are in pages (reported as MB assuming 4096-byte pages).
 */
long count_free;
long count_wired;		/* total */
long count_wired_vnode;
long count_wired_anon;
long count_wired_in_pmap;
long count_wired_pgtable;
long count_wired_other;
long count_wired_kernel;
long count_wired_obj_other;

long count_anon;
long count_anon_in_pmap;
long count_vnode;
long count_device;
long count_phys;
long count_kernel;
long count_unknown;
long count_noobj_offqueue;
long count_noobj_onqueue;
129 
/*
 * Scan the entire vm_page_array from a kernel core (or the live kernel)
 * and print classification statistics: free, wired (by sub-category),
 * anonymous, vnode, device, phys, kernel, unknown.
 *
 * Options:
 *	-M core		core file / memory image to read from
 *	-N system	kernel image for symbol lookup
 *	-d		debug: show scan progress
 *	-v		verbose: one line per vm_page with flags decoded
 */
int
main(int ac, char **av)
{
    const char *corefile = NULL;
    const char *sysfile = NULL;
    struct vm_page m;
    struct vm_object obj;
    kvm_t *kd;
    int ch;
#if 0
    vm_page_t mptr;
    int hv;
#endif
    int i;
    const char *qstr;		/* queue classification for -v output */
    const char *ostr;		/* object classification for -v output */

    while ((ch = getopt(ac, av, "M:N:dv")) != -1) {
	switch(ch) {
	case 'd':
	    ++debugopt;
	    break;
	case 'v':
	    ++verboseopt;
	    break;
	case 'M':
	    corefile = optarg;
	    break;
	case 'N':
	    sysfile = optarg;
	    break;
	default:
	    fprintf(stderr, "%s [-M core] [-N system]\n", av[0]);
	    exit(1);
	}
    }
    ac -= optind;
    av += optind;

    /*
     * Open the kvm interface and resolve the symbols in Nl[].  Either
     * failure is fatal.
     */
    if ((kd = kvm_open(sysfile, corefile, NULL, O_RDONLY, "kvm:")) == NULL) {
	perror("kvm_open");
	exit(1);
    }
    if (kvm_nlist(kd, Nl) != 0) {
	perror("kvm_nlist");
	exit(1);
    }

    /*
     * Snapshot the globals we need.  kernel_object is compared by
     * address only, so just record where it lives.
     */
    kkread(kd, Nl[0].n_value, &vm_page_array, sizeof(vm_page_array));
    kkread(kd, Nl[1].n_value, &vm_page_array_size, sizeof(vm_page_array_size));
    kernel_object_ptr = (void *)Nl[2].n_value;
    kkread(kd, Nl[3].n_value, &nbuf, sizeof(nbuf));
    kkread(kd, Nl[4].n_value, &nswbuf_mem, sizeof(nswbuf_mem));
    kkread(kd, Nl[5].n_value, &nswbuf_kva, sizeof(nswbuf_kva));
    kkread(kd, Nl[6].n_value, &nswbuf_raw, sizeof(nswbuf_raw));
    kern_size = Nl[8].n_value - Nl[7].n_value;	/* _end - kernbase */

    /*
     * Scan the vm_page_array validating all pages with associated objects
     */
    for (i = 0; i < vm_page_array_size; ++i) {
	if (debugopt) {
	    printf("page %d\r", i);
	    fflush(stdout);
	}
	kkread_vmpage(kd, (u_long)&vm_page_array[i], &m);
	if (m.object) {
	    kkread(kd, (u_long)m.object, &obj, sizeof(obj));
	    checkpage(kd, &vm_page_array[i], &m, &obj);
	}
	/*
	 * Classify by page queue.  The PQ_* constants are queue base
	 * indices, so test from the highest base downward.
	 */
	if (m.queue >= PQ_HOLD) {
	    qstr = "HOLD";
	} else if (m.queue >= PQ_CACHE) {
	    qstr = "CACHE";
	} else if (m.queue >= PQ_ACTIVE) {
	    qstr = "ACTIVE";
	} else if (m.queue >= PQ_INACTIVE) {
	    qstr = "INACTIVE";
	} else if (m.queue >= PQ_FREE) {
	    qstr = "FREE";
	    ++count_free;
	} else {
	    qstr = "NONE";
	}
	/*
	 * Wired-page accounting.  Note: obj is only valid here when
	 * m.object was non-NULL (filled in above).
	 */
	if (m.wire_count) {
		++count_wired;
		if (m.object == NULL) {
			if ((m.flags & PG_MAPPED) &&
			    (m.flags & PG_WRITEABLE) &&
			    (m.flags & PG_UNQUEUED)) {
				++count_wired_pgtable;
			} else {
				++count_wired_other;
			}
		} else if (m.object == kernel_object_ptr) {
			++count_wired_kernel;
		} else {
			switch(obj.type) {
			case OBJT_VNODE:
				++count_wired_vnode;
				break;
			case OBJT_DEFAULT:
			case OBJT_SWAP:
				if (m.md.pmap_count)
					++count_wired_in_pmap;
				else
					++count_wired_anon;
				break;
			default:
				++count_wired_obj_other;
				break;
			}
		}
	} else if (m.md.pmap_count) {
		/* unwired but mapped: count anonymous in-pmap pages */
		if (m.object && m.object != kernel_object_ptr) {
			switch(obj.type) {
			case OBJT_DEFAULT:
			case OBJT_SWAP:
				++count_anon_in_pmap;
				break;
			default:
				break;
			}
		}
	}

	if (verboseopt) {
	    printf("page %p obj %p/%-8ju(%016jx) val=%02x dty=%02x hold=%d "
		   "wire=%-2d act=%-3d busy=%d %8s",
		&vm_page_array[i],
		m.object,
		(intmax_t)m.pindex,
		(intmax_t)m.pindex * PAGE_SIZE,
		m.valid,
		m.dirty,
		m.hold_count,
		m.wire_count,
		m.act_count,
		m.busy_count,
		qstr
	    );
	}

	/*
	 * Classify by owning object.  unique_object() ensures each
	 * object's resident_page_count is added only once even though
	 * many pages point at the same object.
	 */
	if (m.object == kernel_object_ptr) {
		ostr = "kernel";
		if (unique_object(m.object))
			count_kernel += obj.resident_page_count;
	} else if (m.object) {
	    switch(obj.type) {
	    case OBJT_DEFAULT:
		ostr = "default";
		if (unique_object(m.object))
			count_anon += obj.resident_page_count;
		break;
	    case OBJT_SWAP:
		ostr = "swap";
		if (unique_object(m.object))
			count_anon += obj.resident_page_count;
		break;
	    case OBJT_VNODE:
		ostr = "vnode";
		if (unique_object(m.object))
			count_vnode += obj.resident_page_count;
		break;
	    case OBJT_DEVICE:
		ostr = "device";
		if (unique_object(m.object))
			count_device += obj.resident_page_count;
		break;
	    case OBJT_PHYS:
		ostr = "phys";
		if (unique_object(m.object))
			count_phys += obj.resident_page_count;
		break;
	    case OBJT_DEAD:
		ostr = "dead";
		if (unique_object(m.object))
			count_unknown += obj.resident_page_count;
		break;
	    default:
		if (unique_object(m.object))
			count_unknown += obj.resident_page_count;
		ostr = "unknown";
		break;
	    }
	} else {
	    ostr = "-";
	    if (m.queue == PQ_NONE)
		    ++count_noobj_offqueue;
	    else if (m.queue - m.pc != PQ_FREE)
		    ++count_noobj_onqueue;
	}

	if (verboseopt) {
	    /* decode flag/busy bits into readable tokens */
	    printf(" %-7s", ostr);
	    if (m.busy_count & PBUSY_LOCKED)
		printf(" BUSY");
	    if (m.busy_count & PBUSY_WANTED)
		printf(" WANTED");
	    if (m.flags & PG_WINATCFLS)
		printf(" WINATCFLS");
	    if (m.flags & PG_FICTITIOUS)
		printf(" FICTITIOUS");
	    if (m.flags & PG_WRITEABLE)
		printf(" WRITEABLE");
	    if (m.flags & PG_MAPPED)
		printf(" MAPPED");
	    if (m.flags & PG_NEED_COMMIT)
		printf(" NEED_COMMIT");
	    if (m.flags & PG_REFERENCED)
		printf(" REFERENCED");
	    if (m.flags & PG_CLEANCHK)
		printf(" CLEANCHK");
	    if (m.busy_count & PBUSY_SWAPINPROG)
		printf(" SWAPINPROG");
	    if (m.flags & PG_NOSYNC)
		printf(" NOSYNC");
	    if (m.flags & PG_UNQUEUED)
		printf(" UNQUEUED");
	    if (m.flags & PG_MARKER)
		printf(" MARKER");
	    if (m.flags & PG_RAM)
		printf(" RAM");
	    if (m.flags & PG_SWAPPED)
		printf(" SWAPPED");
#if 0
	    if (m.flags & PG_SLAB)
		printf(" SLAB");
#endif
	    printf("\n");
#if 0
	    if (m.flags & PG_SLAB)
		addsltrack(&m);
#endif
	}
    }
    if (debugopt || verboseopt)
	printf("\n");

    /*
     * Summary report.  All page counts assume 4096-byte pages and are
     * converted to megabytes.
     */
    printf("%8.2fM free\n", count_free * 4096.0 / 1048576.0);

    printf("%8.2fM wired vnode (in buffer cache)\n",
	count_wired_vnode * 4096.0 / 1048576.0);
    printf("%8.2fM wired in-pmap (probably vnode pages also in buffer cache)\n",
	count_wired_in_pmap * 4096.0 / 1048576.0);
    printf("%8.2fM wired pgtable\n",
	count_wired_pgtable * 4096.0 / 1048576.0);
    printf("%8.2fM wired anon\n",
	count_wired_anon * 4096.0 / 1048576.0);
    printf("%8.2fM wired kernel_object\n",
	count_wired_kernel * 4096.0 / 1048576.0);

	printf("\t%8.2fM vm_page_array\n",
	    vm_page_array_size * sizeof(struct vm_page) / 1048576.0);
	printf("\t%8.2fM buf, swbuf_mem, swbuf_kva, swbuf_raw\n",
	    (nbuf + nswbuf_mem + nswbuf_kva + nswbuf_raw) *
	    sizeof(struct buf) / 1048576.0);
	printf("\t%8.2fM kernel binary\n", kern_size / 1048576.0);
	printf("\t(also add in KMALLOC id kmapinfo, or loosely, vmstat -m)\n");

    printf("%8.2fM wired other (unknown object)\n",
	count_wired_obj_other * 4096.0 / 1048576.0);
    printf("%8.2fM wired other (no object, probably kernel)\n",
	count_wired_other * 4096.0 / 1048576.0);

    printf("%8.2fM WIRED TOTAL\n",
	count_wired * 4096.0 / 1048576.0);

    printf("\n");
    printf("%8.2fM anonymous (total, includes in-pmap)\n",
	count_anon * 4096.0 / 1048576.0);
    printf("%8.2fM anonymous memory in-pmap\n",
	count_anon_in_pmap * 4096.0 / 1048576.0);
    printf("%8.2fM vnode (includes wired)\n",
	count_vnode * 4096.0 / 1048576.0);
    printf("%8.2fM device\n", count_device * 4096.0 / 1048576.0);
    printf("%8.2fM phys\n", count_phys * 4096.0 / 1048576.0);
    printf("%8.2fM kernel (includes wired)\n",
	count_kernel * 4096.0 / 1048576.0);
    printf("%8.2fM unknown\n", count_unknown * 4096.0 / 1048576.0);
    printf("%8.2fM no_object, off queue (includes wired w/o object)\n",
	count_noobj_offqueue * 4096.0 / 1048576.0);
    printf("%8.2fM no_object, on non-free queue (includes wired w/o object)\n",
	count_noobj_onqueue * 4096.0 / 1048576.0);

#if 0
    /*
     * Scan the vm_page_buckets array validating all pages found
     */
    for (i = 0; i <= vm_page_hash_mask; ++i) {
	if (debugopt) {
	    printf("index %d\r", i);
	    fflush(stdout);
	}
	kkread(kd, (u_long)&vm_page_buckets[i], &mptr, sizeof(mptr));
	while (mptr) {
	    kkread(kd, (u_long)mptr, &m, sizeof(m));
	    if (m.object) {
		kkread(kd, (u_long)m.object, &obj, sizeof(obj));
		hv = ((uintptr_t)m.object + m.pindex) ^ obj.hash_rand;
		hv &= vm_page_hash_mask;
		if (i != hv)
		    printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
			" should be in bucket %d\n", i, mptr, hv);
		checkpage(kd, mptr, &m, &obj);
	    } else {
		printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
			" has no object\n", i, mptr);
	    }
	    mptr = m.hnext;
	}
    }
#endif
    if (debugopt)
	printf("\n");
#if 0
    dumpsltrack(kd);
#endif
    return(0);
}
449 
/*
 * A page with an object.
 *
 * Hash-bucket validation used to live here; the vm_page hash was
 * removed from the kernel, so the entire body is compiled out and this
 * function is currently a no-op.  The call sites in main() remain so
 * the check can be revived easily.
 */
void
checkpage(kvm_t *kd, vm_page_t mptr, vm_page_t m, struct vm_object *obj)
{
#if 0
    struct vm_page scan;
    vm_page_t scanptr;
    int hv;

    hv = ((uintptr_t)m->object + m->pindex) ^ obj->hash_rand;
    hv &= vm_page_hash_mask;
    kkread(kd, (u_long)&vm_page_buckets[hv], &scanptr, sizeof(scanptr));
    while (scanptr) {
	if (scanptr == mptr)
	    break;
	kkread(kd, (u_long)scanptr, &scan, sizeof(scan));
	scanptr = scan.hnext;
    }
    if (scanptr) {
	if (debugopt > 1)
	    printf("good checkpage %p bucket %d\n", mptr, hv);
    } else {
	printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
		" page not found in bucket list\n", hv, mptr);
    }
#endif
}
479 
/*
 * Accelerate the reading of VM pages
 *
 * Reads are performed in windows of up to 1024 vm_page structures and
 * cached, so a sequential scan of vm_page_array hits the kvm layer once
 * per window instead of once per page.  The window is clamped so it
 * never reads past the end of the array.
 */
static void
kkread_vmpage(kvm_t *kd, u_long addr, vm_page_t m)
{
    static struct vm_page vpcache[1024];	/* cached window contents */
    static u_long vpbeg;			/* window start (kernel va) */
    static u_long vpend;			/* window end (kernel va) */

    if (addr < vpbeg || addr >= vpend) {
	/* cache miss: refill the window starting at addr */
	vpbeg = addr;
	vpend = addr + 1024 * sizeof(*m);
	if (vpend > (u_long)(uintptr_t)vm_page_array +
		    vm_page_array_size * sizeof(*m)) {
	    vpend = (u_long)(uintptr_t)vm_page_array +
		    vm_page_array_size * sizeof(*m);
	}
	kkread(kd, vpbeg, vpcache, vpend - vpbeg);
    }
    *m = vpcache[(addr - vpbeg) / sizeof(*m)];
}
502 
503 static void
504 kkread(kvm_t *kd, u_long addr, void *buf, size_t nbytes)
505 {
506     if (kvm_read(kd, addr, buf, nbytes) != nbytes) {
507         perror("kvm_read");
508         exit(1);
509     }
510 }
511 
512 static int
513 kkread_err(kvm_t *kd, u_long addr, void *buf, size_t nbytes)
514 {
515     if (kvm_read(kd, addr, buf, nbytes) != nbytes) {
516 	return 1;
517     }
518     return 0;
519 }
520 
/*
 * Slab-zone tracking: a small chained hash of zone base addresses,
 * filled by the (compiled-out) addsltrack() and reported by
 * dumpsltrack().
 */
struct SLTrack {
        struct SLTrack *next;	/* hash chain */
        u_long addr;		/* 128KB-aligned zone address */
};

#define SLHSIZE 1024
#define SLHMASK (SLHSIZE - 1)

struct SLTrack *SLHash[SLHSIZE];
530 
#if 0
/*
 * Record the 128KB-aligned slab zone containing page m in SLHash[],
 * once per zone.  Only wired, mapped pages with an object are tracked.
 * Compiled out along with the PG_SLAB flag it depended on.
 */
static
void
addsltrack(vm_page_t m)
{
	struct SLTrack *slt;
	u_long addr = (m->pindex * PAGE_SIZE) & ~131071L;
	int i;

	if (m->wire_count == 0 || (m->flags & PG_MAPPED) == 0 ||
	    m->object == NULL)
		return;

	/* search the chain; insert at head if the zone is new */
	i = (addr / 131072) & SLHMASK;
	for (slt = SLHash[i]; slt; slt = slt->next) {
		if (slt->addr == addr)
			break;
	}
	if (slt == NULL) {
		slt = malloc(sizeof(*slt));
		slt->addr = addr;
		slt->next = SLHash[i];
		SLHash[i] = slt;
	}
}
#endif
557 
/*
 * Dump every slab zone recorded in SLHash[], reading each SLZone header
 * from the kernel and reporting free-chunk statistics.
 *
 * NOTE(review): the only caller and the addsltrack() filler are both
 * under #if 0, so SLHash[] is always empty and this currently prints
 * only the final totals line (0/0).
 */
static
void
dumpsltrack(kvm_t *kd)
{
	struct SLTrack *slt;
	int i;
	long total_zones = 0;
	long full_zones = 0;

	for (i = 0; i < SLHSIZE; ++i) {
		for (slt = SLHash[i]; slt; slt = slt->next) {
			SLZone z;

			/* zone may have been freed; skip unmapped reads */
			if (kkread_err(kd, slt->addr, &z, sizeof(z))) {
				printf("SLZone 0x%016lx not mapped\n",
					slt->addr);
				continue;
			}
			printf("SLZone 0x%016lx { mag=%08x cpu=%-2d NFree=%-3d "
			       "chunksz=%-5d }\n",
			       slt->addr,
			       z.z_Magic,
			       z.z_Cpu,
			       z.z_NFree,
			       z.z_ChunkSize
			);
			++total_zones;
			if (z.z_NFree == 0)
				++full_zones;
		}
	}
	printf("FullZones/TotalZones: %ld/%ld\n", full_zones, total_zones);
}
591 
#define HASH_SIZE	(1024*1024)
#define HASH_MASK	(HASH_SIZE - 1)

/*
 * Duplicate-object detection table: chained hash keyed on the kernel
 * object pointer.  Entries are never freed (short-lived report tool).
 */
struct dup_entry {
	struct dup_entry *next;	/* hash chain */
	void	*ptr;		/* kernel address of the vm_object */
};

struct dup_entry *dup_hash[HASH_SIZE];

/*
 * Return 1 the first time a given object pointer is seen, 0 on every
 * subsequent call with the same pointer.  main() uses this so each
 * object's resident_page_count is added to the statistics only once
 * even though many pages reference the same object.
 */
static int
unique_object(void *ptr)
{
	struct dup_entry *hen;
	int hv;

	/* fold high bits down so nearby kernel addresses spread out */
	hv = (intptr_t)ptr ^ ((intptr_t)ptr >> 20);
	hv &= HASH_MASK;
	for (hen = dup_hash[hv]; hen; hen = hen->next) {
		if (hen->ptr == ptr)
			return 0;
	}
	hen = malloc(sizeof(*hen));
	if (hen == NULL) {
		/* previously unchecked: a NULL dereference followed */
		perror("malloc");
		exit(1);
	}
	hen->next = dup_hash[hv];
	hen->ptr = ptr;
	dup_hash[hv] = hen;

	return 1;
}
621