1 /*
2 * VMPAGEHASH.C
3 *
4 * cc -I/usr/src/sys vmpagehash.c -o ~/bin/vmpagehash -lkvm
5 *
6 * vmpageinfo
7 *
8 * Validate the vm_page_buckets[] hash array against the vm_page_array
9 *
10 * Copyright (c) 2019 The DragonFly Project. All rights reserved.
11 *
12 * This code is derived from software contributed to The DragonFly Project
13 * by Matthew Dillon <dillon@backplane.com>
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 *
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in
23 * the documentation and/or other materials provided with the
24 * distribution.
25 * 3. Neither the name of The DragonFly Project nor the names of its
26 * contributors may be used to endorse or promote products derived
27 * from this software without specific, prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
32 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
33 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
34 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
35 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
36 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
37 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
38 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
39 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40 * SUCH DAMAGE.
41 */
42
43 #define _KERNEL_STRUCTURES_
44 #include <sys/param.h>
45 #include <sys/user.h>
46 #include <sys/malloc.h>
47 #include <sys/signalvar.h>
48 #include <sys/vnode.h>
49 #include <sys/namecache.h>
50 #include <sys/slaballoc.h>
51
52 #include <vm/vm.h>
53 #include <vm/vm_page.h>
54 #include <vm/vm_kern.h>
55 #include <vm/vm_object.h>
56 #include <vm/swap_pager.h>
57 #include <vm/vnode_pager.h>
58
59 #include <stdio.h>
60 #include <stdlib.h>
61 #include <string.h>
62 #include <fcntl.h>
63 #include <kvm.h>
64 #include <nlist.h>
65 #include <getopt.h>
66
/*
 * Local mirror of one element of the kernel's vm_page_hash[] array.
 * NOTE(review): field layout must match the kernel's definition exactly
 * since we bulk-copy the array with kvm_read() — verify against the
 * running kernel's sys/vm/vm_page.c.
 */
struct vm_page_hash_elm {
	vm_page_t m;		/* kernel address of hashed page, NULL if slot empty */
	int ticks;		/* tick value the kernel recorded for this entry */
	int unused01;		/* padding */
};
72
73
/*
 * Kernel symbols resolved via kvm_nlist().  Order matters: main()
 * indexes this table positionally as Nl[0]..Nl[4].
 */
struct nlist Nl[] = {
	{ "_vm_page_hash" },		/* Nl[0]: address of hash array */
	{ "_vm_page_hash_size" },	/* Nl[1]: element count of hash array */
	{ "_ticks" },			/* Nl[2]: kernel tick counter */
	{ "_vm_page_array" },		/* Nl[3]: address of vm_page_array[] */
	{ "_vm_page_array_size" },	/* Nl[4]: element count of vm_page_array[] */
	{ NULL }
};
82
int debugopt;				/* -d: debug verbosity level */
int verboseopt;				/* -v: output verbosity level */
struct vm_page *vm_page_array_ptr;	/* kernel address of vm_page_array[] */
struct vm_page_hash_elm *vm_page_hash_ptr; /* kernel address of vm_page_hash[] */
struct vm_page_hash_elm *vm_page_hash;	/* userland snapshot of the hash */
int vm_page_hash_size;			/* number of vm_page_hash[] elements */
int vm_page_array_size;			/* number of vm_page_array[] elements */
int ticks;				/* kernel tick counter at snapshot time */

void checkpage(kvm_t *kd, vm_page_t mptr, vm_page_t m, struct vm_object *obj);
static void kkread_vmpage(kvm_t *kd, u_long addr, vm_page_t m);
static void kkread(kvm_t *kd, u_long addr, void *buf, size_t nbytes);
static int kkread_err(kvm_t *kd, u_long addr, void *buf, size_t nbytes);
96
97 int
main(int ac,char ** av)98 main(int ac, char **av)
99 {
100 const char *corefile = NULL;
101 const char *sysfile = NULL;
102 struct vm_page m;
103 struct vm_object obj;
104 kvm_t *kd;
105 int ch;
106 #if 0
107 vm_page_t mptr;
108 int hv;
109 #endif
110 int i;
111 const char *qstr;
112 const char *ostr;
113
114 while ((ch = getopt(ac, av, "M:N:dv")) != -1) {
115 switch(ch) {
116 case 'd':
117 ++debugopt;
118 break;
119 case 'v':
120 ++verboseopt;
121 break;
122 case 'M':
123 corefile = optarg;
124 break;
125 case 'N':
126 sysfile = optarg;
127 break;
128 default:
129 fprintf(stderr, "%s [-M core] [-N system]\n", av[0]);
130 exit(1);
131 }
132 }
133 ac -= optind;
134 av += optind;
135
136 if ((kd = kvm_open(sysfile, corefile, NULL, O_RDONLY, "kvm:")) == NULL) {
137 perror("kvm_open");
138 exit(1);
139 }
140 if (kvm_nlist(kd, Nl) != 0) {
141 perror("kvm_nlist");
142 exit(1);
143 }
144
145 kkread(kd, Nl[0].n_value, &vm_page_hash_ptr, sizeof(vm_page_hash_ptr));
146 kkread(kd, Nl[1].n_value, &vm_page_hash_size, sizeof(vm_page_hash_size));
147 kkread(kd, Nl[2].n_value, &ticks, sizeof(ticks));
148 kkread(kd, Nl[3].n_value, &vm_page_array_ptr, sizeof(vm_page_array_ptr));
149 kkread(kd, Nl[4].n_value, &vm_page_array_size, sizeof(vm_page_array_size));
150
151 vm_page_hash = malloc(vm_page_hash_size * sizeof(*vm_page_hash));
152 kkread(kd, (intptr_t)vm_page_hash_ptr, vm_page_hash,
153 vm_page_hash_size * sizeof(*vm_page_hash));
154
155 /*
156 * Scan the vm_page_hash validating all pages with associated objects
157 */
158 printf("vm_page_hash[%d]\n", vm_page_hash_size);
159 for (i = 0; i < vm_page_hash_size; ++i) {
160 struct vm_page_hash_elm *elm;
161
162 elm = &vm_page_hash[i];
163 if ((i & 3) == 0) {
164 printf(" group(%d-%d) ", i, i + 3);
165 if (elm[0].m && elm[1].m && elm[2].m && elm[3].m)
166 printf("FULL ");
167 printf("\n");
168 }
169 printf(" %016jx %9d ", elm->m, elm->ticks);
170
171 if (elm->m) {
172 kkread_vmpage(kd, (u_long)elm->m, &m);
173 if (m.object) {
174 kkread(kd, (u_long)m.object, &obj, sizeof(obj));
175 checkpage(kd, elm->m, &m, &obj);
176 }
177 if (m.queue >= PQ_HOLD) {
178 qstr = "HOLD";
179 } else if (m.queue >= PQ_CACHE) {
180 qstr = "CACHE";
181 } else if (m.queue >= PQ_ACTIVE) {
182 qstr = "ACTIVE";
183 } else if (m.queue >= PQ_INACTIVE) {
184 qstr = "INACTIVE";
185 } else if (m.queue >= PQ_FREE) {
186 qstr = "FREE";
187 } else {
188 qstr = "NONE";
189 }
190 printf("obj %p/%016jx\n\t\t\t\tval=%02x dty=%02x hold=%d "
191 "wire=%-2d act=%-3d busy=%d %8s",
192 m.object,
193 (intmax_t)m.pindex,
194 m.valid,
195 m.dirty,
196 m.hold_count,
197 m.wire_count,
198 m.act_count,
199 m.busy_count,
200 qstr
201 );
202
203 if (m.object) {
204 switch(obj.type) {
205 case OBJT_DEFAULT:
206 ostr = "default";
207 break;
208 case OBJT_SWAP:
209 ostr = "swap";
210 break;
211 case OBJT_VNODE:
212 ostr = "vnode";
213 break;
214 case OBJT_DEVICE:
215 ostr = "device";
216 break;
217 case OBJT_PHYS:
218 ostr = "phys";
219 break;
220 case OBJT_DEAD:
221 ostr = "dead";
222 break;
223 default:
224 ostr = "unknown";
225 break;
226 }
227 } else {
228 ostr = "-";
229 }
230 printf(" %-7s", ostr);
231 if (m.busy_count & PBUSY_LOCKED)
232 printf(" BUSY");
233 if (m.busy_count & PBUSY_WANTED)
234 printf(" WANTED");
235 if (m.flags & PG_WINATCFLS)
236 printf(" WINATCFLS");
237 if (m.flags & PG_FICTITIOUS)
238 printf(" FICTITIOUS");
239 if (m.flags & PG_WRITEABLE)
240 printf(" WRITEABLE");
241 if (m.flags & PG_MAPPED)
242 printf(" MAPPED");
243 if (m.flags & PG_NEED_COMMIT)
244 printf(" NEED_COMMIT");
245 if (m.flags & PG_REFERENCED)
246 printf(" REFERENCED");
247 if (m.flags & PG_CLEANCHK)
248 printf(" CLEANCHK");
249 if (m.busy_count & PBUSY_SWAPINPROG)
250 printf(" SWAPINPROG");
251 if (m.flags & PG_NOSYNC)
252 printf(" NOSYNC");
253 if (m.flags & PG_UNMANAGED)
254 printf(" UNMANAGED");
255 if (m.flags & PG_MARKER)
256 printf(" MARKER");
257 if (m.flags & PG_RAM)
258 printf(" RAM");
259 if (m.flags & PG_SWAPPED)
260 printf(" SWAPPED");
261 }
262 printf("\n");
263 }
264 return(0);
265 }
266
267 /*
268 * A page with an object.
269 */
270 void
checkpage(kvm_t * kd,vm_page_t mptr,vm_page_t m,struct vm_object * obj)271 checkpage(kvm_t *kd, vm_page_t mptr, vm_page_t m, struct vm_object *obj)
272 {
273 #if 0
274 struct vm_page scan;
275 vm_page_t scanptr;
276 int hv;
277
278 hv = ((uintptr_t)m->object + m->pindex) ^ obj->hash_rand;
279 hv &= vm_page_hash_mask;
280 kkread(kd, (u_long)&vm_page_buckets[hv], &scanptr, sizeof(scanptr));
281 while (scanptr) {
282 if (scanptr == mptr)
283 break;
284 kkread(kd, (u_long)scanptr, &scan, sizeof(scan));
285 scanptr = scan.hnext;
286 }
287 if (scanptr) {
288 if (debugopt > 1)
289 printf("good checkpage %p bucket %d\n", mptr, hv);
290 } else {
291 printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
292 " page not found in bucket list\n", hv, mptr);
293 }
294 #endif
295 }
296
297 /*
298 * Acclerate the reading of VM pages
299 */
300 static void
kkread_vmpage(kvm_t * kd,u_long addr,vm_page_t m)301 kkread_vmpage(kvm_t *kd, u_long addr, vm_page_t m)
302 {
303 static struct vm_page vpcache[1024];
304 static u_long vpbeg;
305 static u_long vpend;
306
307 if (addr < vpbeg || addr >= vpend) {
308 vpbeg = addr;
309 vpend = addr + 1024 * sizeof(*m);
310 if (vpend > (u_long)(uintptr_t)vm_page_array_ptr +
311 vm_page_array_size * sizeof(*m)) {
312 vpend = (u_long)(uintptr_t)vm_page_array_ptr +
313 vm_page_array_size * sizeof(*m);
314 }
315 kkread(kd, vpbeg, vpcache, vpend - vpbeg);
316 }
317 *m = vpcache[(addr - vpbeg) / sizeof(*m)];
318 }
319
320 static void
kkread(kvm_t * kd,u_long addr,void * buf,size_t nbytes)321 kkread(kvm_t *kd, u_long addr, void *buf, size_t nbytes)
322 {
323 if (kvm_read(kd, addr, buf, nbytes) != nbytes) {
324 perror("kvm_read");
325 exit(1);
326 }
327 }
328
329 static int
kkread_err(kvm_t * kd,u_long addr,void * buf,size_t nbytes)330 kkread_err(kvm_t *kd, u_long addr, void *buf, size_t nbytes)
331 {
332 if (kvm_read(kd, addr, buf, nbytes) != nbytes) {
333 return 1;
334 }
335 return 0;
336 }
337
/*
 * Per-chunk address tracking used by the (currently disabled)
 * addsltrack() below.
 */
struct SLTrack {
	struct SLTrack *next;	/* hash-chain link */
	u_long addr;		/* tracked chunk base address */
};

#define SLHSIZE 1024		/* hash table size, power of 2 */
#define SLHMASK (SLHSIZE - 1)

struct SLTrack *SLHash[SLHSIZE];
347
#if 0
/*
 * Record the 128KB-aligned chunk derived from page m's pindex, once per
 * distinct chunk address.  Pages that are unwired, unmapped, or have no
 * object are skipped.  (Currently compiled out.)
 */
static
void
addsltrack(vm_page_t m)
{
	struct SLTrack *slt;
	u_long addr = (m->pindex * PAGE_SIZE) & ~131071L;	/* 128KB chunk base */
	int i;

	if (m->wire_count == 0 || (m->flags & PG_MAPPED) == 0 ||
	    m->object == NULL)
		return;

	/* hash by chunk index; insert only when not already tracked */
	i = (addr / 131072) & SLHMASK;
	for (slt = SLHash[i]; slt; slt = slt->next) {
		if (slt->addr == addr)
			break;
	}
	if (slt == NULL) {
		slt = malloc(sizeof(*slt));
		slt->addr = addr;
		slt->next = SLHash[i];
		SLHash[i] = slt;
	}
}
#endif
374