/* $OpenBSD: kvm_i386.c,v 1.26 2015/04/07 05:50:40 guenther Exp $ */
/* $NetBSD: kvm_i386.c,v 1.9 1996/03/18 22:33:38 thorpej Exp $ */

/*-
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * i386 machine dependent routines for kvm.  Hopefully, the forthcoming
 * vm code will one day obsolete this module.
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <stdlib.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <uvm/uvm_extern.h>
#include <machine/vmparam.h>
#include <machine/pmap.h>

#include <limits.h>
#include <db.h>

#include "kvm_private.h"

#include <machine/pte.h>

/*
 * We access both normal and PAE entries in 32bit chunks.
 * Use a local name to avoid conflicting with the kernel's maybe-public,
 * maybe-not p[td]_entry_t typedefs.
 */
typedef u_long ptd_entry_t;

/*
 * These must match the values in pmap.c/pmapae.c
 * First the non-PAE versions
 */
#define PD_MASK		0xffc00000	/* page directory address bits */
#define PT_MASK		0x003ff000	/* page table address bits */

/*
 * PAE versions
 *
 * paddr_t is still 32bits, so the top 32bits of PDEs and PTEs only
 * matter for the NX bit...which libkvm doesn't care about
 */
#define PAE_PDSHIFT	21
#define PAE_PD_MASK	0xffe00000	/* page directory address bits */
#define PAE_PT_MASK	0x001ff000	/* page table address bits */

#define PG_FRAME	0xfffff000

static int cpu_pae;

struct vmstate {
        ptd_entry_t *PTD;
        ptd_entry_t PD_mask;
        ptd_entry_t PT_mask;
        int PD_shift;
        int PG_shift;
};

#define pdei(vm,VA)	(((VA) & (vm)->PD_mask) >> (vm)->PD_shift)
#define ptei(vm,VA)	(((VA) & (vm)->PT_mask) >> (vm)->PG_shift)
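
/*
 * Worked example (illustrative, non-PAE): with the masks above and the
 * non-PAE shifts (PDSHIFT == 22, PAGE_SHIFT == 12), va 0xd0201234 gives
 * pdei() == 0x340 (top 10 bits), ptei() == 0x201 (next 10 bits), and the
 * low 12 bits (0x234) are the byte offset within the page.
 */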

void
_kvm_freevtop(kvm_t *kd)
{
        if (kd->vmst != NULL) {
                if (kd->vmst->PTD != NULL)
                        free(kd->vmst->PTD);

                free(kd->vmst);
                kd->vmst = NULL;
        }
}

/*
 * Initialize the virtual-to-physical translation state: locate the
 * kernel's page directory in the dump and read it into vm->PTD.
 */
int
_kvm_initvtop(kvm_t *kd)
{
        struct nlist nl[4];
        struct vmstate *vm;
        u_long pa, PTDsize;

        vm = _kvm_malloc(kd, sizeof(*vm));
        if (vm == NULL)
                return (-1);
        kd->vmst = vm;

        vm->PTD = NULL;

        nl[0].n_name = "_PTDpaddr";
        nl[1].n_name = "_PTDsize";
        nl[2].n_name = "_cpu_pae";
        nl[3].n_name = NULL;

        if (kvm_nlist(kd, nl) != 0) {
                _kvm_err(kd, kd->program, "bad namelist");
                return (-1);
        }

        if (_kvm_pread(kd, kd->pmfd, &cpu_pae, sizeof cpu_pae,
            _kvm_pa2off(kd, nl[2].n_value - KERNBASE)) != sizeof cpu_pae)
                goto invalid;

        if (_kvm_pread(kd, kd->pmfd, &PTDsize, sizeof PTDsize,
            _kvm_pa2off(kd, nl[1].n_value - KERNBASE)) != sizeof PTDsize)
                goto invalid;

        if (_kvm_pread(kd, kd->pmfd, &pa, sizeof pa,
            _kvm_pa2off(kd, nl[0].n_value - KERNBASE)) != sizeof pa)
                goto invalid;

        vm->PTD = _kvm_malloc(kd, PTDsize);
        if (vm->PTD == NULL)
                goto invalid;

        if (_kvm_pread(kd, kd->pmfd, vm->PTD, PTDsize,
            _kvm_pa2off(kd, pa)) != PTDsize)
                goto invalid;

        if (cpu_pae) {
                vm->PD_mask = PAE_PD_MASK;
                vm->PT_mask = PAE_PT_MASK;
                /* -1 here because entries are twice as large */
                vm->PD_shift = PAE_PDSHIFT - 1;
                vm->PG_shift = PAGE_SHIFT - 1;
        } else {
                vm->PD_mask = PD_MASK;
                vm->PT_mask = PT_MASK;
                vm->PD_shift = PDSHIFT;
                vm->PG_shift = PAGE_SHIFT;
        }

        return (0);

invalid:
        if (vm->PTD != NULL) {
                free(vm->PTD);
                vm->PTD = NULL;
        }
        return (-1);
}
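
/*
 * Note on the PAE case above (illustrative): PDEs and PTEs are 8 bytes
 * there, but PTD is indexed as an array of 32-bit words.  Halving the
 * shifts (PAE_PDSHIFT - 1, PAGE_SHIFT - 1) doubles the computed index,
 * so pdei()/ptei() land on the low word of each 8-byte entry, which
 * holds the valid bit and the frame address bits this code needs.
 */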

/*
 * Translate a kernel virtual address to a physical address.
 * Returns the number of contiguous bytes valid from *pa to the end of
 * the page, or 0 on error.
 */
int
_kvm_kvatop(kvm_t *kd, u_long va, paddr_t *pa)
{
        u_long offset, pte_pa;
        struct vmstate *vm;
        ptd_entry_t pte;

        if (!kd->vmst) {
                _kvm_err(kd, 0, "vatop called before initvtop");
                return (0);
        }

        if (ISALIVE(kd)) {
                _kvm_err(kd, 0, "vatop called in live kernel!");
                return (0);
        }

        vm = kd->vmst;
        offset = va & (kd->nbpg - 1);

        /*
         * If we are initializing (kernel page table descriptor pointer
         * not yet set) then return pa == va to avoid infinite recursion.
         */
        if (vm->PTD == NULL) {
                *pa = va;
                return (kd->nbpg - (int)offset);
        }
        if ((vm->PTD[pdei(vm,va)] & PG_V) == 0)
                goto invalid;

        pte_pa = (vm->PTD[pdei(vm,va)] & PG_FRAME) +
            (ptei(vm,va) * sizeof(ptd_entry_t));

        /* XXX READ PHYSICAL XXX */
        if (_kvm_pread(kd, kd->pmfd, &pte, sizeof pte,
            _kvm_pa2off(kd, pte_pa)) != sizeof pte)
                goto invalid;

        if ((pte & PG_V) == 0)
                goto invalid;
        *pa = (pte & PG_FRAME) + offset;
        return (kd->nbpg - (int)offset);

invalid:
        _kvm_err(kd, 0, "invalid address (%lx)", va);
        return (0);
}

/*
 * Translate a physical address to a file-offset in the crash-dump.
 */
off_t
_kvm_pa2off(kvm_t *kd, paddr_t pa)
{
        return ((off_t)(kd->dump_off + pa));
}
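
#if 0
/*
 * Illustrative sketch only (not compiled in): roughly how a libkvm
 * consumer reaches the routines above.  Opening a crash dump with
 * kvm_openfiles() and calling kvm_read() on a kernel virtual address
 * leads libkvm to call _kvm_kvatop() to translate the address and
 * _kvm_pa2off() to locate it in the dump file.  The function and the
 * symbol name below are placeholders, not real kernel names.
 */
#include <fcntl.h>
#include <limits.h>
#include <kvm.h>
#include <nlist.h>

int
read_kernel_var(const char *execfile, const char *corefile, u_long *valp)
{
        char errbuf[_POSIX2_LINE_MAX];
        struct nlist nl[2];
        kvm_t *kd;
        int error = -1;

        kd = kvm_openfiles(execfile, corefile, NULL, O_RDONLY, errbuf);
        if (kd == NULL)
                return (-1);

        nl[0].n_name = "_example_symbol";	/* placeholder symbol */
        nl[1].n_name = NULL;

        if (kvm_nlist(kd, nl) == 0 &&
            kvm_read(kd, nl[0].n_value, valp, sizeof(*valp)) == sizeof(*valp))
                error = 0;

        kvm_close(kd);
        return (error);
}
#endif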