/*
 * VMPAGEINFO.C
 *
 * cc -I/usr/src/sys vmpageinfo.c -o /usr/local/bin/vmpageinfo -lkvm
 *
 * vmpageinfo
 *
 * Scan the vm_page_array in a kernel image or crash dump, validating
 * pages which have an associated object and optionally dumping per-page
 * state.  (The original vm_page_buckets[] hash validation is compiled out.)
 *
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/test/debug/vmpageinfo.c,v 1.2 2006/05/23 01:00:05 dillon Exp $
 */

#define _KERNEL_STRUCTURES_
#include <sys/param.h>
#include <sys/user.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/namecache.h>
#include <sys/slaballoc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/swap_pager.h>
#include <vm/vnode_pager.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <inttypes.h>
#include <kvm.h>
#include <nlist.h>
#include <getopt.h>

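/*
 * Kernel symbols resolved with kvm_nlist().  Only vm_page_array and
 * vm_page_array_size are needed; the hash-bucket symbols stay under
 * #if 0 along with the disabled bucket validation code.
 */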
struct nlist Nl[] = {
#if 0
    { "_vm_page_buckets" },
    { "_vm_page_hash_mask" },
#endif
    { "_vm_page_array" },
    { "_vm_page_array_size" },
    { NULL }
};

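/*
 * Option flags and userland copies of the kernel globals read out of the
 * image.
 */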
int debugopt;
int verboseopt;
#if 0
struct vm_page **vm_page_buckets;
int vm_page_hash_mask;
#endif
struct vm_page *vm_page_array;
int vm_page_array_size;

void checkpage(kvm_t *kd, vm_page_t mptr, vm_page_t m, struct vm_object *obj);
static void kkread(kvm_t *kd, u_long addr, void *buf, size_t nbytes);
static int kkread_err(kvm_t *kd, u_long addr, void *buf, size_t nbytes);

#if 0
static void addsltrack(vm_page_t m);
#endif
static void dumpsltrack(kvm_t *kd);

int
main(int ac, char **av)
{
    const char *corefile = NULL;
    const char *sysfile = NULL;
    struct vm_page m;
    struct vm_object obj;
    kvm_t *kd;
    int ch;
#if 0
    vm_page_t mptr;
    int hv;
#endif
    int i;
    const char *qstr;
    const char *ostr;

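    /*
     * -M selects the core file and -N the matching kernel image for
     * kvm_open(); -d enables per-page progress output and -v the verbose
     * per-page dump.
     */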
    while ((ch = getopt(ac, av, "M:N:dv")) != -1) {
        switch(ch) {
        case 'd':
            ++debugopt;
            break;
        case 'v':
            ++verboseopt;
            break;
        case 'M':
            corefile = optarg;
            break;
        case 'N':
            sysfile = optarg;
            break;
        default:
            fprintf(stderr, "%s [-M core] [-N system]\n", av[0]);
            exit(1);
        }
    }
    ac -= optind;
    av += optind;

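    /*
     * Attach to the live kernel or to the specified crash dump and
     * resolve the symbol addresses listed in Nl[].
     */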
    if ((kd = kvm_open(sysfile, corefile, NULL, O_RDONLY, "kvm:")) == NULL) {
        perror("kvm_open");
        exit(1);
    }
    if (kvm_nlist(kd, Nl) != 0) {
        perror("kvm_nlist");
        exit(1);
    }

#if 0
    kkread(kd, Nl[0].n_value, &vm_page_buckets, sizeof(vm_page_buckets));
    kkread(kd, Nl[1].n_value, &vm_page_hash_mask, sizeof(vm_page_hash_mask));
#endif
    kkread(kd, Nl[0].n_value, &vm_page_array, sizeof(vm_page_array));
    kkread(kd, Nl[1].n_value, &vm_page_array_size, sizeof(vm_page_array_size));

    /*
     * Scan the vm_page_array validating all pages with associated objects
     */
    for (i = 0; i < vm_page_array_size; ++i) {
        if (debugopt) {
            printf("page %d\r", i);
            fflush(stdout);
        }
        kkread(kd, (u_long)&vm_page_array[i], &m, sizeof(m));
        if (m.object) {
            kkread(kd, (u_long)m.object, &obj, sizeof(obj));
            checkpage(kd, &vm_page_array[i], &m, &obj);
        }
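        /*
         * In verbose mode map m.queue onto a queue name by comparing
         * against the PQ_* base indices from highest to lowest, then
         * print the page's state, flags, and object type.
         */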
        if (verboseopt) {
            if (m.queue >= PQ_HOLD) {
                qstr = "HOLD";
            } else if (m.queue >= PQ_CACHE) {
                qstr = "CACHE";
            } else if (m.queue >= PQ_ACTIVE) {
                qstr = "ACTIVE";
            } else if (m.queue >= PQ_INACTIVE) {
                qstr = "INACTIVE";
            } else if (m.queue >= PQ_FREE) {
                qstr = "FREE";
            } else {
                qstr = "NONE";
            }
            printf("page %p obj %p/%-8ju(%016jx) val=%02x dty=%02x hold=%d "
                   "wire=%-2d act=%-3d busy=%d %8s",
                &vm_page_array[i],
                m.object,
                (uintmax_t)m.pindex,
                (uintmax_t)m.pindex * PAGE_SIZE,
                m.valid,
                m.dirty,
                m.hold_count,
                m.wire_count,
                m.act_count,
                m.busy,
                qstr
            );
            if (m.object) {
                switch(obj.type) {
                case OBJT_DEFAULT:
                    ostr = "default";
                    break;
                case OBJT_SWAP:
                    ostr = "swap";
                    break;
                case OBJT_VNODE:
                    ostr = "vnode";
                    break;
                case OBJT_DEVICE:
                    ostr = "device";
                    break;
                case OBJT_PHYS:
                    ostr = "phys";
                    break;
                case OBJT_DEAD:
                    ostr = "dead";
                    break;
                default:
                    ostr = "unknown";
                    break;
                }
            } else {
                ostr = "-";
            }
            printf(" %-7s", ostr);
            if (m.flags & PG_BUSY)
                printf(" BUSY");
            if (m.flags & PG_WANTED)
                printf(" WANTED");
            if (m.flags & PG_WINATCFLS)
                printf(" WINATCFLS");
            if (m.flags & PG_FICTITIOUS)
                printf(" FICTITIOUS");
            if (m.flags & PG_WRITEABLE)
                printf(" WRITEABLE");
            if (m.flags & PG_MAPPED)
                printf(" MAPPED");
            if (m.flags & PG_NEED_COMMIT)
                printf(" NEED_COMMIT");
            if (m.flags & PG_ZERO)
                printf(" ZERO");
            if (m.flags & PG_REFERENCED)
                printf(" REFERENCED");
            if (m.flags & PG_CLEANCHK)
                printf(" CLEANCHK");
            if (m.flags & PG_SWAPINPROG)
                printf(" SWAPINPROG");
            if (m.flags & PG_NOSYNC)
                printf(" NOSYNC");
            if (m.flags & PG_UNMANAGED)
                printf(" UNMANAGED");
            if (m.flags & PG_MARKER)
                printf(" MARKER");
            if (m.flags & PG_RAM)
                printf(" RAM");
            if (m.flags & PG_SWAPPED)
                printf(" SWAPPED");
#if 0
            if (m.flags & PG_SLAB)
                printf(" SLAB");
#endif
            printf("\n");
#if 0
            if (m.flags & PG_SLAB)
                addsltrack(&m);
#endif
        }
    }
    if (debugopt || verboseopt)
        printf("\n");

#if 0
    /*
     * Scan the vm_page_buckets array validating all pages found
     */
    for (i = 0; i <= vm_page_hash_mask; ++i) {
        if (debugopt) {
            printf("index %d\r", i);
            fflush(stdout);
        }
        kkread(kd, (u_long)&vm_page_buckets[i], &mptr, sizeof(mptr));
        while (mptr) {
            kkread(kd, (u_long)mptr, &m, sizeof(m));
            if (m.object) {
                kkread(kd, (u_long)m.object, &obj, sizeof(obj));
                hv = ((uintptr_t)m.object + m.pindex) ^ obj.hash_rand;
                hv &= vm_page_hash_mask;
                if (i != hv)
                    printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
                        " should be in bucket %d\n", i, mptr, hv);
                checkpage(kd, mptr, &m, &obj);
            } else {
                printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
                        " has no object\n", i, mptr);
            }
            mptr = m.hnext;
        }
    }
#endif
    if (debugopt)
        printf("\n");
    dumpsltrack(kd);
    return(0);
}

/*
 * Validate a page which has an associated object.  The vm_page_buckets[]
 * hash cross-check below is currently compiled out.
 */
void
checkpage(kvm_t *kd, vm_page_t mptr, vm_page_t m, struct vm_object *obj)
{
#if 0
    struct vm_page scan;
    vm_page_t scanptr;
    int hv;

    hv = ((uintptr_t)m->object + m->pindex) ^ obj->hash_rand;
    hv &= vm_page_hash_mask;
    kkread(kd, (u_long)&vm_page_buckets[hv], &scanptr, sizeof(scanptr));
    while (scanptr) {
        if (scanptr == mptr)
            break;
        kkread(kd, (u_long)scanptr, &scan, sizeof(scan));
        scanptr = scan.hnext;
    }
    if (scanptr) {
        if (debugopt > 1)
            printf("good checkpage %p bucket %d\n", mptr, hv);
    } else {
        printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
                " page not found in bucket list\n", hv, mptr);
    }
#endif
}

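/*
 * Read nbytes from the kernel image at addr into buf, exiting on any
 * failure.
 */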
static void
kkread(kvm_t *kd, u_long addr, void *buf, size_t nbytes)
{
    if (kvm_read(kd, addr, buf, nbytes) != nbytes) {
        perror("kvm_read");
        exit(1);
    }
}

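/*
 * Non-fatal variant of kkread(): returns nonzero if the read fails so the
 * caller can skip unmapped addresses.
 */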
static int
kkread_err(kvm_t *kd, u_long addr, void *buf, size_t nbytes)
{
    if (kvm_read(kd, addr, buf, nbytes) != nbytes) {
        return 1;
    }
    return 0;
}

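/*
 * Simple hash of slab zone base addresses, filled in by addsltrack()
 * (currently compiled out) and reported by dumpsltrack().
 */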
struct SLTrack {
        struct SLTrack *next;
        u_long addr;
};

#define SLHSIZE 1024
#define SLHMASK (SLHSIZE - 1)

struct SLTrack *SLHash[SLHSIZE];

#if 0
static
void
addsltrack(vm_page_t m)
{
        struct SLTrack *slt;
        u_long addr = (m->pindex * PAGE_SIZE) & ~131071L;
        int i;

        if (m->wire_count == 0 || (m->flags & PG_MAPPED) == 0 ||
            m->object == NULL)
                return;

        i = (addr / 131072) & SLHMASK;
        for (slt = SLHash[i]; slt; slt = slt->next) {
                if (slt->addr == addr)
                        break;
        }
        if (slt == NULL) {
                slt = malloc(sizeof(*slt));
                slt->addr = addr;
                slt->next = SLHash[i];
                SLHash[i] = slt;
        }
}
#endif

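/*
 * Dump each tracked SLZone header from the kernel image and report how
 * many zones are completely allocated (z_NFree == 0).
 */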
static
void
dumpsltrack(kvm_t *kd)
{
        struct SLTrack *slt;
        int i;
        long total_zones = 0;
        long full_zones = 0;

        for (i = 0; i < SLHSIZE; ++i) {
                for (slt = SLHash[i]; slt; slt = slt->next) {
                        SLZone z;

                        if (kkread_err(kd, slt->addr, &z, sizeof(z))) {
                                printf("SLZone 0x%016lx not mapped\n",
                                        slt->addr);
                                continue;
                        }
                        printf("SLZone 0x%016lx { mag=%08x cpu=%-2d NFree=%-3d "
                               "chunksz=%-5d }\n",
                               slt->addr,
                               z.z_Magic,
                               z.z_Cpu,
                               z.z_NFree,
                               z.z_ChunkSize
                        );
                        ++total_zones;
                        if (z.z_NFree == 0)
                                ++full_zones;
                }
        }
        printf("FullZones/TotalZones: %ld/%ld\n", full_zones, total_zones);
}