xref: /original-bsd/lib/libkvm/kvm_hp300.c (revision c0290416)
1 /*-
2  * Copyright (c) 1989, 1992 The Regents of the University of California.
3  * All rights reserved.
4  *
5  * This code is derived from software developed by the Computer Systems
6  * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
7  * BG 91-66 and contributed to Berkeley.
8  *
9  * %sccs.include.redist.c%
10  */
11 
12 #if defined(LIBC_SCCS) && !defined(lint)
13 static char sccsid[] = "@(#)kvm_hp300.c	5.28 (Berkeley) 06/05/92";
14 #endif /* LIBC_SCCS and not lint */
15 
16 /*
17  * Hp300 machine dependent routines for kvm.  Hopefully, the forthcoming
18  * vm code will one day obsolete this module.
19  */
20 
21 #include <sys/param.h>
22 #include <sys/user.h>
23 #include <sys/proc.h>
24 #include <sys/stat.h>
25 #include <unistd.h>
26 #include <nlist.h>
27 #include <kvm.h>
28 
29 #include <vm/vm.h>
30 #include <vm/vm_param.h>
31 
32 #include <limits.h>
33 #include <db.h>
34 
35 #include "kvm_private.h"
36 
37 #include <hp300/hp300/pte.h>
38 
#ifndef btop
/*
 * Fallbacks when the system headers do not already provide them:
 * convert a byte address to a page number (btop) and back (ptob).
 */
#define	btop(x)		(((unsigned)(x)) >> PGSHIFT)	/* XXX */
#define	ptob(x)		((caddr_t)((x) << PGSHIFT))	/* XXX */
#endif
43 
/*
 * Cached kernel MMU state used by the address translation routines.
 */
struct vmstate {
	u_long lowram;		/* first physical address of RAM; seeks into
				   the dump file are biased by -lowram */
	int mmutype;		/* kernel MMU type code; selects the table
				   walk format in _kvm_vatop() */
	struct ste *Sysseg;	/* kernel segment table pointer; 0 until
				   _kvm_initvtop() has read it */
};
49 
/*
 * Read sizeof(*p) bytes from kernel virtual address ADDR into *P.
 * Evaluates nonzero (true) on failure, zero on success.
 */
#define KREAD(kd, addr, p)\
	(kvm_read(kd, addr, (char *)(p), sizeof(*(p))) != sizeof(*(p)))
52 
53 void
54 _kvm_freevtop(kd)
55 	kvm_t *kd;
56 {
57 	if (kd->vmst != 0)
58 		free(kd->vmst);
59 }
60 
/*
 * Initialize the virtual-to-physical translation state for KD.
 * Looks up the kernel symbols needed for translation and caches their
 * values in a freshly allocated vmstate attached to the descriptor.
 * Returns 0 on success, -1 on failure (with an error condition set).
 */
int
_kvm_initvtop(kd)
	kvm_t *kd;
{
	struct vmstate *vm;
	struct nlist nlist[4];

	vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
	if (vm == 0)
		return (-1);
	/* Attach before reading: _kvm_freevtop() cleans up on failure. */
	kd->vmst = vm;

	nlist[0].n_name = "_lowram";
	nlist[1].n_name = "_mmutype";
	nlist[2].n_name = "_Sysseg";
	nlist[3].n_name = 0;

	if (kvm_nlist(kd, nlist) != 0) {
		_kvm_err(kd, kd->program, "bad namelist");
		return (-1);
	}
	/*
	 * Clear Sysseg before the KREADs below: they go through
	 * kvm_read() -> _kvm_kvatop(), and _kvm_vatop() treats a zero
	 * Sysseg as "still initializing" and maps va == pa, which
	 * avoids infinite recursion during this bootstrap.
	 */
	vm->Sysseg = 0;
	if (KREAD(kd, (u_long)nlist[0].n_value, &vm->lowram)) {
		_kvm_err(kd, kd->program, "cannot read lowram");
		return (-1);
	}
	if (KREAD(kd, (u_long)nlist[1].n_value, &vm->mmutype)) {
		_kvm_err(kd, kd->program, "cannot read mmutype");
		return (-1);
	}
	if (KREAD(kd, (u_long)nlist[2].n_value, &vm->Sysseg)) {
		_kvm_err(kd, kd->program, "cannot read segment table");
		return (-1);
	}
	return (0);
}
97 
/*
 * Translate virtual address VA to a physical address by walking the
 * segment/page tables rooted at STA, storing the result through PA.
 * Returns the number of bytes remaining in the page containing VA
 * (so the caller knows how much is contiguously readable), or 0 on
 * failure.  Dead-kernel (crash dump) path only: the tables are read
 * straight out of the memory file via kd->pmfd.
 */
static int
_kvm_vatop(kd, sta, va, pa)
	kvm_t *kd;
	struct ste *sta;
	u_long va;
	u_long *pa;
{
	register struct vmstate *vm;
	register u_long lowram;
	register u_long addr;
	int p, ste, pte;
	int offset;

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "vatop called in live kernel!");
		return((off_t)0);
	}
	vm = kd->vmst;
	offset = va & PGOFSET;
	/*
	 * If we are initializing (kernel segment table pointer not yet set)
	 * then return pa == va to avoid infinite recursion.
	 */
	if (vm->Sysseg == 0) {
		*pa = va;
		return (NBPG - offset);
	}
	/*
	 * The dump file holds physical memory starting at lowram, so
	 * physical addresses are biased by -lowram when seeking below.
	 */
	lowram = vm->lowram;
	if (vm->mmutype == -2) {
		/*
		 * Three-level (SG4_*) table walk; -2 appears to be the
		 * 68040 MMU type code -- confirm against machine headers.
		 */
		struct ste *sta2;

		addr = (u_long)&sta[va >> SG4_SHIFT1];
		/*
		 * Can't use KREAD to read kernel segment table entries.
		 * Fortunately it is 1-to-1 mapped so we don't have to.
		 */
		if (sta == vm->Sysseg) {
			if (lseek(kd->pmfd, (off_t)addr, 0) == -1 ||
			    read(kd->pmfd, (char *)&ste, sizeof(ste)) < 0)
				goto invalid;
		} else if (KREAD(kd, addr, &ste))
			goto invalid;
		if ((ste & SG_V) == 0) {
			_kvm_err(kd, 0, "invalid level 1 descriptor (%x)",
				 ste);
			return((off_t)0);
		}
		sta2 = (struct ste *)(ste & SG4_ADDR1);
		addr = (u_long)&sta2[(va & SG4_MASK2) >> SG4_SHIFT2];
		/*
		 * Address from level 1 STE is a physical address,
		 * so don't use kvm_read.
		 */
		if (lseek(kd->pmfd, (off_t)(addr - lowram), 0) == -1 ||
		    read(kd->pmfd, (char *)&ste, sizeof(ste)) < 0)
			goto invalid;
		if ((ste & SG_V) == 0) {
			_kvm_err(kd, 0, "invalid level 2 descriptor (%x)",
				 ste);
			return((off_t)0);
		}
		sta2 = (struct ste *)(ste & SG4_ADDR2);
		addr = (u_long)&sta2[(va & SG4_MASK3) >> SG4_SHIFT3];
	} else {
		/* Two-level walk for the other (non-SG4) MMU formats. */
		addr = (u_long)&sta[va >> SEGSHIFT];
		/*
		 * Can't use KREAD to read kernel segment table entries.
		 * Fortunately it is 1-to-1 mapped so we don't have to.
		 */
		if (sta == vm->Sysseg) {
			if (lseek(kd->pmfd, (off_t)addr, 0) == -1 ||
			    read(kd->pmfd, (char *)&ste, sizeof(ste)) < 0)
				goto invalid;
		} else if (KREAD(kd, addr, &ste))
			goto invalid;
		if ((ste & SG_V) == 0) {
			_kvm_err(kd, 0, "invalid segment (%x)", ste);
			return((off_t)0);
		}
		/* Index of VA's PTE within this segment's page table. */
		p = btop(va & SG_PMASK);
		addr = (ste & SG_FRAME) + (p * sizeof(struct pte));
	}
	/*
	 * Address from STE is a physical address so don't use kvm_read.
	 * NOTE(review): the read checks only for error (< 0), so a short
	 * read would be accepted silently -- verify intent.
	 */
	if (lseek(kd->pmfd, (off_t)(addr - lowram), 0) == -1 ||
	    read(kd->pmfd, (char *)&pte, sizeof(pte)) < 0)
		goto invalid;
	addr = pte & PG_FRAME;
	if (pte == PG_NV) {
		_kvm_err(kd, 0, "page not valid");
		return (0);
	}
	*pa = addr - lowram + offset;

	return (NBPG - offset);
invalid:
	_kvm_err(kd, 0, "invalid address (%x)", va);
	return (0);
}
198 
199 int
200 _kvm_kvatop(kd, va, pa)
201 	kvm_t *kd;
202 	u_long va;
203 	u_long *pa;
204 {
205 	return (_kvm_vatop(kd, (u_long)kd->vmst->Sysseg, va, pa));
206 }
207 
/*
 * Translate a user virtual address to a physical address.
 * Returns the number of bytes remaining in the page containing VA,
 * or 0 on failure (with an error condition set).
 */
int
_kvm_uvatop(kd, p, va, pa)
	kvm_t *kd;
	const struct proc *p;
	u_long va;
	u_long *pa;
{
	register struct vmspace *vms = p->p_vmspace;
	int kva;

	/*
	 * If this is a live kernel we just look it up in the kernel
	 * virtually allocated flat 4mb page table (i.e. let the kernel
	 * do the table walk).  In this way, we avoid needing to know
	 * the MMU type.
	 */
	if (ISALIVE(kd)) {
		struct pte *ptab;
		int pte, offset;

		/* Fetch the process's page table pointer... */
		kva = (int)&vms->vm_pmap.pm_ptab;
		if (KREAD(kd, kva, &ptab)) {
			_kvm_err(kd, 0, "invalid address (%x)", va);
			return (0);
		}
		/* ...then read VA's PTE out of that (kernel virtual) table. */
		kva = (int)&ptab[btop(va)];
		if (KREAD(kd, kva, &pte) || (pte & PG_V) == 0) {
			_kvm_err(kd, 0, "invalid address (%x)", va);
			return (0);
		}
		offset = va & PGOFSET;
		*pa = (pte & PG_FRAME) | offset;
		return (NBPG - offset);
	}
	/*
	 * Otherwise, we just walk the table ourself.
	 * Read the process segment table pointer into kva and hand it to
	 * _kvm_vatop().  Note kva is an int passed where a struct ste *
	 * is declared -- relies on old-style (K&R) argument passing.
	 */
	kva = (int)&vms->vm_pmap.pm_stab;
	if (KREAD(kd, kva, &kva)) {
		_kvm_err(kd, 0, "invalid address (%x)", va);
		return (0);
	}
	return (_kvm_vatop(kd, kva, va, pa));
}
255