/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * This part of the file contains the mdb support for dcmds:
 *	::memseg_list
 *	::page_num2pp
 * and walkers for:
 *	memseg - a memseg list walker for ::memseg_list
 *
 * It also contains the HAT-related dcmds and their helpers, which decode
 * page tables and mappings (see "Now HAT related dcmds" below).
 */

#include <sys/types.h>
#include <sys/machparam.h>
#include <sys/controlregs.h>
#include <sys/mach_mmu.h>
#include <vm/as.h>

#include <mdb/mdb_modapi.h>
#include <mdb/mdb_target.h>

#include <vm/page.h>
#include <vm/hat_i86.h>

/*
 * Argument/result pair for the memseg walk done by ::page_num2pp:
 * pfn is the page frame number to look up, pp is set to the matching
 * page_t (or left NULL if no memseg contains the pfn).
 */
struct pfn2pp {
	pfn_t pfn;
	page_t *pp;
};

static int do_va2pa(uintptr_t, struct as *, int, physaddr_t *, pfn_t *);
static void get_mmu(void);
int
platform_vtop(uintptr_t addr, struct as *asp, physaddr_t *pap)
{
	if (asp == NULL)
		return (DCMD_ERR);

	/*
	 * The kernel has to at least have made it through mmu_init()
	 */
	get_mmu();
	if (mmu.num_level == 0)
		return (DCMD_ERR);

	return (do_va2pa(addr, asp, 0, pap, NULL));
}


/*ARGSUSED*/
int
page_num2pp_cb(uintptr_t addr, void *ignored, uintptr_t *data)
{
	struct memseg ms, *msp = &ms;
	struct pfn2pp *p = (struct pfn2pp *)data;

	if (mdb_vread(msp, sizeof (struct memseg), addr) == -1) {
		mdb_warn("can't read memseg at %#lx", addr);
		return (WALK_ERR);
	}

	if (p->pfn >= msp->pages_base && p->pfn < msp->pages_end) {
		p->pp = msp->pages + (p->pfn - msp->pages_base);
		return (WALK_DONE);
	}

	return (WALK_NEXT);
}

/*
 * ::page_num2pp dcmd
 */
/*ARGSUSED*/
int
page_num2pp(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	struct pfn2pp pfn2pp;
	page_t page;

	if ((flags & DCMD_ADDRSPEC) == 0) {
		mdb_warn("page frame number missing\n");
		return (DCMD_USAGE);
	}

	pfn2pp.pfn = (pfn_t)addr;
	pfn2pp.pp = NULL;

	if (mdb_walk("memseg", (mdb_walk_cb_t)page_num2pp_cb,
	    (void *)&pfn2pp) == -1) {
		mdb_warn("can't walk memseg");
		return (DCMD_ERR);
	}

	if (pfn2pp.pp == NULL)
		return (DCMD_ERR);

	mdb_printf("%x has page at %p\n", pfn2pp.pfn, pfn2pp.pp);

	if (mdb_vread(&page, sizeof (page_t),
	    (uintptr_t)pfn2pp.pp) == -1) {
		mdb_warn("can't read page at %p", pfn2pp.pp);
		return (DCMD_ERR);
	}

	if (page.p_pagenum != pfn2pp.pfn) {
		mdb_warn("WARNING! Found page structure contains "
		    "different page number %x\n", page.p_pagenum);
	}

	return (DCMD_OK);
}
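
/*
 * A minimal usage sketch for the dcmd above.  The pfn and the returned
 * page_t address below are hypothetical, shown only to illustrate the
 * expected output format:
 *
 *	> 3000::page_num2pp
 *	3000 has page at fffffffffbd20280
 */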


/*
 * ::memseg_list dcmd and walker to implement it.
 */
/*ARGSUSED*/
int
memseg_list(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	struct memseg ms;

	if (!(flags & DCMD_ADDRSPEC)) {
		if (mdb_pwalk_dcmd("memseg", "memseg_list",
		    0, NULL, 0) == -1) {
			mdb_warn("can't walk memseg");
			return (DCMD_ERR);
		}
		return (DCMD_OK);
	}

	if (DCMD_HDRSPEC(flags))
		mdb_printf("%<u>%?s %?s %?s %?s %?s%</u>\n", "ADDR",
		    "PAGES", "EPAGES", "BASE", "END");

	if (mdb_vread(&ms, sizeof (struct memseg), addr) == -1) {
		mdb_warn("can't read memseg at %#lx", addr);
		return (DCMD_ERR);
	}

	mdb_printf("%0?lx %0?lx %0?lx %0?lx %0?lx\n", addr,
	    ms.pages, ms.epages, ms.pages_base, ms.pages_end);

	return (DCMD_OK);
}
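
/*
 * A minimal usage sketch for ::memseg_list; with no address it walks every
 * memseg.  The column values below are hypothetical:
 *
 *	> ::memseg_list
 *	ADDR             PAGES            EPAGES           BASE             END
 *	fffffffffbd3ff48 fffffffffbd40000 fffffffffbe40000 0000000000001000 0000000000101000
 */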

/*
 * walk the memseg structures
 */
int
memseg_walk_init(mdb_walk_state_t *wsp)
{
	if (wsp->walk_addr != 0) {
		mdb_warn("memseg only supports global walks\n");
		return (WALK_ERR);
	}

	if (mdb_readvar(&wsp->walk_addr, "memsegs") == -1) {
		mdb_warn("symbol 'memsegs' not found");
		return (WALK_ERR);
	}

	wsp->walk_data = mdb_alloc(sizeof (struct memseg), UM_SLEEP);
	return (WALK_NEXT);
}

int
memseg_walk_step(mdb_walk_state_t *wsp)
{
	int status;

	if (wsp->walk_addr == 0) {
		return (WALK_DONE);
	}

	if (mdb_vread(wsp->walk_data, sizeof (struct memseg),
	    wsp->walk_addr) == -1) {
		mdb_warn("failed to read struct memseg at %p", wsp->walk_addr);
		return (WALK_DONE);
	}

	status = wsp->walk_callback(wsp->walk_addr, wsp->walk_data,
	    wsp->walk_cbdata);

	wsp->walk_addr = (uintptr_t)(((struct memseg *)wsp->walk_data)->next);

	return (status);
}

void
memseg_walk_fini(mdb_walk_state_t *wsp)
{
	mdb_free(wsp->walk_data, sizeof (struct memseg));
}
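
/*
 * The walker above can also be used directly; a minimal sketch (the memseg
 * address shown is hypothetical):
 *
 *	> ::walk memseg
 *	fffffffffbd3ff48
 */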

/*
 * Now HAT related dcmds.
 */

struct hat *khat;		/* value of kas.a_hat */
struct hat_mmu_info mmu;
uintptr_t kernelbase;

/*
 * read mmu parameters from kernel
 */
static void
get_mmu(void)
{
	struct as kas;

	if (mmu.num_level != 0)
		return;

	if (mdb_readsym(&mmu, sizeof (mmu), "mmu") == -1)
		mdb_warn("Can't use HAT information before mmu_init()\n");
	if (mdb_readsym(&kas, sizeof (kas), "kas") == -1)
		mdb_warn("Couldn't find kas - kernel's struct as\n");
	if (mdb_readsym(&kernelbase, sizeof (kernelbase), "kernelbase") == -1)
		mdb_warn("Couldn't find kernelbase\n");
	khat = kas.a_hat;
}

/*
 * Machine addresses and machine frame numbers are the same as physical
 * addresses and pfns here, so these conversions are identity macros.
 */
#define	mdb_ma_to_pa(ma) (ma)
#define	mdb_mfn_to_pfn(mfn) (mfn)
#define	mdb_pfn_to_mfn(pfn) (pfn)

static pfn_t
pte2mfn(x86pte_t pte, uint_t level)
{
	pfn_t mfn;

	if (level > 0 && (pte & PT_PAGESIZE))
		mfn = mmu_btop(pte & PT_PADDR_LGPG);
	else
		mfn = mmu_btop(pte & PT_PADDR);
	return (mfn);
}

/*
 * Print a PTE in a more human friendly way.  The level argument is the
 * page table level the PTE was taken from, which affects how the large
 * page, mod, ref and PAT bits are interpreted.
 */
static int
do_pte_dcmd(int level, uint64_t pte)
{
	static char *attr[] = {
	    "wrback", "wrthru", "uncached", "uncached",
	    "wrback", "wrthru", "wrcombine", "uncached"};
	int pat_index = 0;
	pfn_t mfn;

	mdb_printf("pte=%llr: ", pte);
	if (PTE_GET(pte, mmu.pt_nx))
		mdb_printf("noexec ");

	mfn = pte2mfn(pte, level);
	mdb_printf("%s=0x%lr ", "pfn", mfn);

	if (PTE_GET(pte, PT_NOCONSIST))
		mdb_printf("noconsist ");

	if (PTE_GET(pte, PT_NOSYNC))
		mdb_printf("nosync ");

	if (PTE_GET(pte, mmu.pt_global))
		mdb_printf("global ");

	if (level > 0 && PTE_GET(pte, PT_PAGESIZE))
		mdb_printf("largepage ");

	if (level > 0 && PTE_GET(pte, PT_MOD))
		mdb_printf("mod ");

	if (level > 0 && PTE_GET(pte, PT_REF))
		mdb_printf("ref ");

	if (PTE_GET(pte, PT_USER))
		mdb_printf("user ");

	if (PTE_GET(pte, PT_WRITABLE))
		mdb_printf("write ");

	/*
	 * Report non-standard cacheability
	 */
	pat_index = 0;
	if (level > 0) {
		if (PTE_GET(pte, PT_PAGESIZE) && PTE_GET(pte, PT_PAT_LARGE))
			pat_index += 4;
	} else {
		if (PTE_GET(pte, PT_PAT_4K))
			pat_index += 4;
	}

	if (PTE_GET(pte, PT_NOCACHE))
		pat_index += 2;

	if (PTE_GET(pte, PT_WRITETHRU))
		pat_index += 1;

	if (pat_index != 0)
		mdb_printf("%s", attr[pat_index]);

	if (PTE_GET(pte, PT_VALID) == 0)
		mdb_printf(" !VALID ");

	mdb_printf("\n");
	return (DCMD_OK);
}

/*
 * Print a PTE in a more human friendly way.  The PTE is assumed to be in
 * a level 0 page table, unless -l specifies another level.
 *
 * The PTE value can be specified with the -p option, since on a 32 bit
 * kernel running with PAE it is larger than a uintptr_t.
 */
/*ARGSUSED*/
int
pte_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	int level = 0;
	uint64_t pte = 0;
	char *level_str = NULL;
	char *pte_str = NULL;

	/*
	 * The kernel has to at least have made it through mmu_init()
	 */
	get_mmu();
	if (mmu.num_level == 0)
		return (DCMD_ERR);

	if (mdb_getopts(argc, argv,
	    'p', MDB_OPT_STR, &pte_str,
	    'l', MDB_OPT_STR, &level_str, NULL) != argc)
		return (DCMD_USAGE);

	/*
	 * parse the PTE to decode; if it's 0, we don't do anything
	 */
	if (pte_str != NULL) {
		pte = mdb_strtoull(pte_str);
	} else {
		if ((flags & DCMD_ADDRSPEC) == 0)
			return (DCMD_USAGE);
		pte = addr;
	}
	if (pte == 0)
		return (DCMD_OK);

	/*
	 * parse the level if supplied
	 */
	if (level_str != NULL) {
		level = mdb_strtoull(level_str);
		if (level < 0 || level > mmu.max_level)
			return (DCMD_ERR);
	}

	return (do_pte_dcmd(level, pte));
}
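
/*
 * Usage sketch, assuming this dcmd is registered as ::pte (the PTE value,
 * level and address below are hypothetical).  The PTE may be given either
 * with -p or as the dcmd address:
 *
 *	> ::pte -p 12345225 -l 0
 *	> fffffffffbc00000::pte
 */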

static size_t
va2entry(htable_t *htable, uintptr_t addr)
{
	size_t entry = (addr - htable->ht_vaddr);

	entry >>= mmu.level_shift[htable->ht_level];
	return (entry & (HTABLE_NUM_PTES(htable) - 1));
}
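
/*
 * For example, with 4K pages and 512-entry tables, a level 0 htable whose
 * ht_vaddr is 0xfec00000 maps addr 0xfec03000 at entry
 * (0xfec03000 - 0xfec00000) >> 12 == 3.  (The example values are
 * hypothetical; the shift and table size come from the mmu and htable
 * data read from the kernel.)
 */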

static x86pte_t
get_pte(hat_t *hat, htable_t *htable, uintptr_t addr)
{
	x86pte_t buf;
	x86pte32_t *pte32 = (x86pte32_t *)&buf;
	size_t len;

	if (htable->ht_flags & HTABLE_VLP) {
		uintptr_t ptr = (uintptr_t)hat->hat_vlp_ptes;
		ptr += va2entry(htable, addr) << mmu.pte_size_shift;
		len = mdb_vread(&buf, mmu.pte_size, ptr);
	} else {
		paddr_t paddr = mmu_ptob((paddr_t)htable->ht_pfn);
		paddr += va2entry(htable, addr) << mmu.pte_size_shift;
		len = mdb_pread(&buf, mmu.pte_size, paddr);
	}

	if (len != mmu.pte_size)
		return (0);

	if (mmu.pte_size == sizeof (x86pte_t))
		return (buf);
	return (*pte32);
}

static int
do_va2pa(uintptr_t addr, struct as *asp, int print_level, physaddr_t *pap,
    pfn_t *mfnp)
{
	struct as as;
	struct hat *hatp;
	struct hat hat;
	htable_t *ht;
	htable_t htable;
	uintptr_t base;
	int h;
	int level;
	int found = 0;
	x86pte_t pte;
	physaddr_t paddr;

	if (asp != NULL) {
		if (mdb_vread(&as, sizeof (as), (uintptr_t)asp) == -1) {
			mdb_warn("Couldn't read struct as\n");
			return (DCMD_ERR);
		}
		hatp = as.a_hat;
	} else {
		hatp = khat;
	}

	/*
	 * read the hat and its hash table
	 */
	if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
		mdb_warn("Couldn't read struct hat\n");
		return (DCMD_ERR);
	}

	/*
	 * read the htable hashtable
	 */
	for (level = 0; level <= mmu.max_level; ++level) {
		if (level == TOP_LEVEL(&hat))
			base = 0;
		else
			base = addr & mmu.level_mask[level + 1];

		for (h = 0; h < hat.hat_num_hash; ++h) {
			if (mdb_vread(&ht, sizeof (htable_t *),
			    (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
				mdb_warn("Couldn't read htable\n");
				return (DCMD_ERR);
			}
			for (; ht != NULL; ht = htable.ht_next) {
				if (mdb_vread(&htable, sizeof (htable_t),
				    (uintptr_t)ht) == -1) {
					mdb_warn("Couldn't read htable\n");
					return (DCMD_ERR);
				}

				if (htable.ht_vaddr != base ||
				    htable.ht_level != level)
					continue;

				pte = get_pte(&hat, &htable, addr);

				if (print_level) {
					mdb_printf("\tlevel=%d htable=%p "
					    "pte=%llr\n", level, ht, pte);
				}

				if (!PTE_ISVALID(pte)) {
					mdb_printf("Address %p is unmapped.\n",
					    addr);
					return (DCMD_ERR);
				}

				if (found)
					continue;

				if (PTE_IS_LGPG(pte, level))
					paddr = mdb_ma_to_pa(pte &
					    PT_PADDR_LGPG);
				else
					paddr = mdb_ma_to_pa(pte & PT_PADDR);
				paddr += addr & mmu.level_offset[level];
				if (pap != NULL)
					*pap = paddr;
				if (mfnp != NULL)
					*mfnp = pte2mfn(pte, level);
				found = 1;
			}
		}
	}

	if (!found)
		return (DCMD_ERR);
	return (DCMD_OK);
}

int
va2pfn_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	uintptr_t addrspace;
	char *addrspace_str = NULL;
	int piped = flags & DCMD_PIPE_OUT;
	pfn_t pfn;
	pfn_t mfn;
	int rc;

	/*
	 * The kernel has to at least have made it through mmu_init()
	 */
	get_mmu();
	if (mmu.num_level == 0)
		return (DCMD_ERR);

	if (mdb_getopts(argc, argv,
	    'a', MDB_OPT_STR, &addrspace_str, NULL) != argc)
		return (DCMD_USAGE);

	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	/*
	 * parse the address space
	 */
	if (addrspace_str != NULL)
		addrspace = mdb_strtoull(addrspace_str);
	else
		addrspace = 0;

	rc = do_va2pa(addr, (struct as *)addrspace, !piped, NULL, &mfn);

	if (rc != DCMD_OK)
		return (rc);

	if ((pfn = mdb_mfn_to_pfn(mfn)) == -(pfn_t)1) {
		mdb_warn("Invalid mfn %lr\n", mfn);
		return (DCMD_ERR);
	}

	if (piped) {
		mdb_printf("0x%lr\n", pfn);
		return (DCMD_OK);
	}

	mdb_printf("Virtual address 0x%p maps pfn 0x%lr\n", addr, pfn);

	return (DCMD_OK);
}
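
/*
 * Usage sketch, assuming this dcmd is registered as ::vatopfn (addresses
 * shown are hypothetical); -a takes a struct as pointer for non-kernel
 * address spaces, and piped output prints just the pfn:
 *
 *	> fffffffffbc00000::vatopfn
 *	        level=0 htable=ffffff014e60c560 pte=...
 *	Virtual address 0xfffffffffbc00000 maps pfn 0x...
 */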

/*
 * Report all hats that either use PFN as a page table or that map the page.
 */
static int
do_report_maps(pfn_t pfn)
{
	struct hat *hatp;
	struct hat hat;
	htable_t *ht;
	htable_t htable;
	uintptr_t base;
	int h;
	int level;
	int entry;
	x86pte_t pte;
	x86pte_t buf;
	x86pte32_t *pte32 = (x86pte32_t *)&buf;
	physaddr_t paddr;
	size_t len;

	/*
	 * The hats are kept in a list with khat at the head.
	 */
	for (hatp = khat; hatp != NULL; hatp = hat.hat_next) {
		/*
		 * read the hat and its hash table
		 */
		if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
			mdb_warn("Couldn't read struct hat\n");
			return (DCMD_ERR);
		}

		/*
		 * read the htable hashtable
		 */
		paddr = 0;
		for (h = 0; h < hat.hat_num_hash; ++h) {
			if (mdb_vread(&ht, sizeof (htable_t *),
			    (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
				mdb_warn("Couldn't read htable\n");
				return (DCMD_ERR);
			}
			for (; ht != NULL; ht = htable.ht_next) {
				if (mdb_vread(&htable, sizeof (htable_t),
				    (uintptr_t)ht) == -1) {
					mdb_warn("Couldn't read htable\n");
					return (DCMD_ERR);
				}

				/*
				 * only report kernel addresses once
				 */
				if (hatp != khat &&
				    htable.ht_vaddr >= kernelbase)
					continue;

				/*
				 * Is the PFN a pagetable itself?
				 */
				if (htable.ht_pfn == pfn) {
					mdb_printf("Pagetable for "
					    "hat=%p htable=%p\n", hatp, ht);
					continue;
				}

				/*
				 * otherwise, examine page mappings
				 */
				level = htable.ht_level;
				if (level > mmu.max_page_level)
					continue;
				paddr = mmu_ptob((physaddr_t)htable.ht_pfn);
				for (entry = 0;
				    entry < HTABLE_NUM_PTES(&htable);
				    ++entry) {

					base = htable.ht_vaddr + entry *
					    mmu.level_size[level];

					/*
					 * only report kernel addresses once
					 */
					if (hatp != khat &&
					    base >= kernelbase)
						continue;

					len = mdb_pread(&buf, mmu.pte_size,
					    paddr + entry * mmu.pte_size);
					if (len != mmu.pte_size)
						return (DCMD_ERR);
					if (mmu.pte_size == sizeof (x86pte_t))
						pte = buf;
					else
						pte = *pte32;

					if ((pte & PT_VALID) == 0)
						continue;
					if (level == 0 || !(pte & PT_PAGESIZE))
						pte &= PT_PADDR;
					else
						pte &= PT_PADDR_LGPG;
					if (mmu_btop(mdb_ma_to_pa(pte)) != pfn)
						continue;
					mdb_printf("hat=%p maps addr=%p\n",
					    hatp, (caddr_t)base);
				}
			}
		}
	}

	return (DCMD_OK);
}

/*
 * Given a PFN as its address argument, print every use of it: hats that
 * use it as a page table and hats that map the page.
 */
/*ARGSUSED*/
int
report_maps_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	pfn_t pfn;
	uint_t mflag = 0;

	/*
	 * The kernel has to at least have made it through mmu_init()
	 */
	get_mmu();
	if (mmu.num_level == 0)
		return (DCMD_ERR);

	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	if (mdb_getopts(argc, argv,
	    'm', MDB_OPT_SETBITS, TRUE, &mflag, NULL) != argc)
		return (DCMD_USAGE);

	pfn = (pfn_t)addr;
	if (mflag)
		pfn = mdb_mfn_to_pfn(pfn);

	return (do_report_maps(pfn));
}
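
/*
 * Usage sketch, assuming this dcmd is registered as ::report_maps (the pfn
 * and the output values below are hypothetical):
 *
 *	> 3000::report_maps
 *	hat=fffffffffbc68f78 maps addr=fffffffffbd20000
 */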

static int
do_ptable_dcmd(pfn_t pfn)
{
	struct hat *hatp;
	struct hat hat;
	htable_t *ht;
	htable_t htable;
	uintptr_t base;
	int h;
	int level;
	int entry;
	uintptr_t pagesize;
	x86pte_t pte;
	x86pte_t buf;
	x86pte32_t *pte32 = (x86pte32_t *)&buf;
	physaddr_t paddr;
	size_t len;

	/*
	 * The hats are kept in a list with khat at the head.
	 */
	for (hatp = khat; hatp != NULL; hatp = hat.hat_next) {
		/*
		 * read the hat and its hash table
		 */
		if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
			mdb_warn("Couldn't read struct hat\n");
			return (DCMD_ERR);
		}

		/*
		 * read the htable hashtable
		 */
		paddr = 0;
		for (h = 0; h < hat.hat_num_hash; ++h) {
			if (mdb_vread(&ht, sizeof (htable_t *),
			    (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
				mdb_warn("Couldn't read htable\n");
				return (DCMD_ERR);
			}
			for (; ht != NULL; ht = htable.ht_next) {
				if (mdb_vread(&htable, sizeof (htable_t),
				    (uintptr_t)ht) == -1) {
					mdb_warn("Couldn't read htable\n");
					return (DCMD_ERR);
				}

				/*
				 * Is this the PFN for this htable?
				 */
				if (htable.ht_pfn == pfn)
					goto found_it;
			}
		}
	}

found_it:
	if (htable.ht_pfn == pfn) {
		mdb_printf("htable=%p\n", ht);
		level = htable.ht_level;
		base = htable.ht_vaddr;
		pagesize = mmu.level_size[level];
	} else {
		mdb_printf("Unknown pagetable - assuming level/addr 0\n");
		level = 0;	/* assume level == 0 for PFN */
		base = 0;
		pagesize = MMU_PAGESIZE;
	}

	paddr = mmu_ptob((physaddr_t)pfn);
	for (entry = 0; entry < mmu.ptes_per_table; ++entry) {
		len = mdb_pread(&buf, mmu.pte_size,
		    paddr + entry * mmu.pte_size);
		if (len != mmu.pte_size)
			return (DCMD_ERR);
		if (mmu.pte_size == sizeof (x86pte_t))
			pte = buf;
		else
			pte = *pte32;

		if (pte == 0)
			continue;

		mdb_printf("[%3d] va=%p ", entry, base + entry * pagesize);
		do_pte_dcmd(level, pte);
	}

	return (DCMD_OK);
}

/*
 * Dump the page table at the given PFN
 */
/*ARGSUSED*/
int
ptable_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	pfn_t pfn;
	uint_t mflag = 0;

	/*
	 * The kernel has to at least have made it through mmu_init()
	 */
	get_mmu();
	if (mmu.num_level == 0)
		return (DCMD_ERR);

	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	if (mdb_getopts(argc, argv,
	    'm', MDB_OPT_SETBITS, TRUE, &mflag, NULL) != argc)
		return (DCMD_USAGE);

	pfn = (pfn_t)addr;
	if (mflag)
		pfn = mdb_mfn_to_pfn(pfn);

	return (do_ptable_dcmd(pfn));
}
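
/*
 * Usage sketch, assuming this dcmd is registered as ::ptable (the pfn and
 * output values are hypothetical); each non-zero entry is decoded with
 * do_pte_dcmd():
 *
 *	> 1d2e4::ptable
 *	htable=ffffff014e60c560
 *	[  0] va=fffffffffbc00000 pte=...: ...
 */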

static int
do_htables_dcmd(hat_t *hatp)
{
	struct hat hat;
	htable_t *ht;
	htable_t htable;
	int h;

	/*
	 * read the hat and its hash table
	 */
	if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
		mdb_warn("Couldn't read struct hat\n");
		return (DCMD_ERR);
	}

	/*
	 * read the htable hashtable
	 */
	for (h = 0; h < hat.hat_num_hash; ++h) {
		if (mdb_vread(&ht, sizeof (htable_t *),
		    (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
			mdb_warn("Couldn't read htable ptr\n");
			return (DCMD_ERR);
		}
		for (; ht != NULL; ht = htable.ht_next) {
			mdb_printf("%p\n", ht);
			if (mdb_vread(&htable, sizeof (htable_t),
			    (uintptr_t)ht) == -1) {
				mdb_warn("Couldn't read htable\n");
				return (DCMD_ERR);
			}
		}
	}
	return (DCMD_OK);
}

/*
 * Dump the htables for the given hat
 */
/*ARGSUSED*/
int
htables_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	hat_t *hat;

	/*
	 * The kernel has to at least have made it through mmu_init()
	 */
	get_mmu();
	if (mmu.num_level == 0)
		return (DCMD_ERR);

	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	hat = (hat_t *)addr;

	return (do_htables_dcmd(hat));
}
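
/*
 * Usage sketch, assuming this dcmd is registered as ::htables (the hat
 * address and htable pointers are hypothetical); one htable pointer is
 * printed per line:
 *
 *	> fffffffffbc68f78::htables
 *	ffffff014e60c560
 *	ffffff014e60c4e8
 */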