/*-
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * %sccs.include.redist.c%
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)kvm_hp300.c	8.1 (Berkeley) 06/04/93";
#endif /* LIBC_SCCS and not lint */

/*
 * Hp300 machine dependent routines for kvm.  Hopefully, the forthcoming
 * vm code will one day obsolete this module.
 */
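
/*
 * Editor's sketch (not in the original file): a minimal libkvm client
 * that exercises this backend.  kvm_read() on a dead kernel funnels
 * every kernel virtual address through _kvm_kvatop() below.  The
 * "/bsd.core" core file path is illustrative only.
 */
#if 0
#include <fcntl.h>
#include <kvm.h>
#include <nlist.h>
#include <stdio.h>

int
main()
{
	kvm_t *kd;
	struct nlist nl[2];
	int hz;

	nl[0].n_name = "_hz";
	nl[1].n_name = 0;
	kd = kvm_open("/vmunix", "/bsd.core", NULL, O_RDONLY, "example");
	if (kd == NULL)
		return (1);
	if (kvm_nlist(kd, nl) == 0 &&
	    kvm_read(kd, (u_long)nl[0].n_value, (char *)&hz,
	    sizeof(hz)) == sizeof(hz))
		printf("hz = %d\n", hz);
	kvm_close(kd);
	return (0);
}
#endif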

#include <sys/param.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <stdlib.h>		/* editor's addition: declares free() */
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>

#include <limits.h>
#include <db.h>

#include "kvm_private.h"

#if defined(hp300)
#include <hp300/hp300/pte.h>
#endif

#if defined(luna68k)
#include <luna68k/luna68k/pte.h>
#endif

#ifndef btop
#define	btop(x)		(((unsigned)(x)) >> PGSHIFT)	/* XXX */
#define	ptob(x)		((caddr_t)((x) << PGSHIFT))	/* XXX */
#endif
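
/*
 * Editor's worked example (assuming the hp300's historical 4K pages,
 * i.e. PGSHIFT == 12):
 *	btop(0x12345) == 0x12		(byte address -> page number)
 *	ptob(0x12) == (caddr_t)0x12000	(page number -> byte address)
 */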

struct vmstate {
	u_long lowram;
	int mmutype;
	struct ste *Sysseg;
};

#define KREAD(kd, addr, p)\
	(kvm_read(kd, addr, (char *)(p), sizeof(*(p))) != sizeof(*(p)))
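
/*
 * Editor's note: KREAD evaluates non-zero on failure, so the callers
 * below read naturally as "if (KREAD(...)) goto invalid;".
 */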

void
_kvm_freevtop(kd)
	kvm_t *kd;
{
	if (kd->vmst != 0) {
		free(kd->vmst);
		kd->vmst = 0;	/* don't leave a dangling pointer behind */
	}
}

int
_kvm_initvtop(kd)
	kvm_t *kd;
{
	struct vmstate *vm;
	struct nlist nlist[4];

	vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
	if (vm == 0)
		return (-1);
	kd->vmst = vm;

	nlist[0].n_name = "_lowram";
	nlist[1].n_name = "_mmutype";
	nlist[2].n_name = "_Sysseg";
	nlist[3].n_name = 0;
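	/*
	 * Editor's note: the leading underscores follow the a.out symbol
	 * naming convention of 4.4BSD hp300 kernels; the null n_name
	 * terminates the list for kvm_nlist().
	 */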

	if (kvm_nlist(kd, nlist) != 0) {
		_kvm_err(kd, kd->program, "bad namelist");
		return (-1);
	}
	vm->Sysseg = 0;
	if (KREAD(kd, (u_long)nlist[0].n_value, &vm->lowram)) {
		_kvm_err(kd, kd->program, "cannot read lowram");
		return (-1);
	}
	if (KREAD(kd, (u_long)nlist[1].n_value, &vm->mmutype)) {
		_kvm_err(kd, kd->program, "cannot read mmutype");
		return (-1);
	}
	if (KREAD(kd, (u_long)nlist[2].n_value, &vm->Sysseg)) {
		_kvm_err(kd, kd->program, "cannot read segment table");
		return (-1);
	}
	return (0);
}

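/*
 * Editor's summary comment: translate virtual address va through the
 * segment table at sta and return the physical address via pa.  On
 * success the number of bytes valid from *pa to the end of the page
 * (NBPG - offset) is returned; 0 indicates failure.
 */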
static int
_kvm_vatop(kd, sta, va, pa)
	kvm_t *kd;
	struct ste *sta;
	u_long va;
	u_long *pa;
{
	register struct vmstate *vm;
	register u_long lowram;
	register u_long addr;
	int p, ste, pte;
	int offset;

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "vatop called in live kernel!");
		return (0);
	}
	vm = kd->vmst;
	offset = va & PGOFSET;
	/*
	 * If we are initializing (kernel segment table pointer not yet
	 * set) then return pa == va to avoid infinite recursion: the
	 * KREADs in _kvm_initvtop() come back through this routine via
	 * kvm_read().
	 */
	if (vm->Sysseg == 0) {
		*pa = va;
		return (NBPG - offset);
	}
	lowram = vm->lowram;
	if (vm->mmutype == -2) {
		struct ste *sta2;

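		/*
		 * Editor's note: mmutype -2 is MMU_68040 in the hp300
		 * headers; the 68040 uses the three-level table walked
		 * below with the SG4_* shift and mask constants.
		 */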
		addr = (u_long)&sta[va >> SG4_SHIFT1];
		/*
		 * Can't use KREAD to read kernel segment table entries.
		 * Fortunately it is 1-to-1 mapped so we don't have to.
		 */
		if (sta == vm->Sysseg) {
			if (lseek(kd->pmfd, (off_t)addr, SEEK_SET) == -1 ||
			    read(kd->pmfd, (char *)&ste,
			    sizeof(ste)) != sizeof(ste))
				goto invalid;
		} else if (KREAD(kd, addr, &ste))
			goto invalid;
		if ((ste & SG_V) == 0) {
			_kvm_err(kd, 0, "invalid level 1 descriptor (%x)",
				 ste);
			return (0);
		}
		sta2 = (struct ste *)(ste & SG4_ADDR1);
		addr = (u_long)&sta2[(va & SG4_MASK2) >> SG4_SHIFT2];
		/*
		 * The address from the level 1 STE is a physical address,
		 * so don't use kvm_read.
		 */
		if (lseek(kd->pmfd, (off_t)(addr - lowram), SEEK_SET) == -1 ||
		    read(kd->pmfd, (char *)&ste, sizeof(ste)) != sizeof(ste))
			goto invalid;
		if ((ste & SG_V) == 0) {
			_kvm_err(kd, 0, "invalid level 2 descriptor (%x)",
				 ste);
			return (0);
		}
		sta2 = (struct ste *)(ste & SG4_ADDR2);
		addr = (u_long)&sta2[(va & SG4_MASK3) >> SG4_SHIFT3];
	} else {
		addr = (u_long)&sta[va >> SEGSHIFT];
		/*
		 * Can't use KREAD to read kernel segment table entries.
		 * Fortunately it is 1-to-1 mapped so we don't have to.
		 */
		if (sta == vm->Sysseg) {
			if (lseek(kd->pmfd, (off_t)addr, SEEK_SET) == -1 ||
			    read(kd->pmfd, (char *)&ste,
			    sizeof(ste)) != sizeof(ste))
				goto invalid;
		} else if (KREAD(kd, addr, &ste))
			goto invalid;
		if ((ste & SG_V) == 0) {
			_kvm_err(kd, 0, "invalid segment (%x)", ste);
			return (0);
		}
		p = btop(va & SG_PMASK);
		addr = (ste & SG_FRAME) + (p * sizeof(struct pte));
	}
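	/*
	 * Editor's note: on the two-level (pre-68040) tables the STE
	 * frame names a page-table page, and btop(va & SG_PMASK) selects
	 * the PTE index within it.
	 */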
	/*
	 * The address from the STE is a physical address, so don't use
	 * kvm_read.
	 */
	if (lseek(kd->pmfd, (off_t)(addr - lowram), SEEK_SET) == -1 ||
	    read(kd->pmfd, (char *)&pte, sizeof(pte)) != sizeof(pte))
		goto invalid;
	if (pte == PG_NV) {
		_kvm_err(kd, 0, "page not valid");
		return (0);
	}
	addr = pte & PG_FRAME;
	*pa = addr - lowram + offset;

	return (NBPG - offset);
invalid:
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}

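/*
 * Editor's note: _kvm_kvatop() is the machine-dependent hook kvm_read()
 * uses on dead kernels to turn kernel virtual addresses into physical
 * offsets in the core file.
 */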
int
_kvm_kvatop(kd, va, pa)
	kvm_t *kd;
	u_long va;
	u_long *pa;
{
	return (_kvm_vatop(kd, kd->vmst->Sysseg, va, pa));
}

/*
 * Translate a user virtual address to a physical address.
 */
int
_kvm_uvatop(kd, p, va, pa)
	kvm_t *kd;
	const struct proc *p;
	u_long va;
	u_long *pa;
{
	register struct vmspace *vms = p->p_vmspace;
	int kva;

	/*
	 * If this is a live kernel we just look it up in the kernel
	 * virtually allocated flat 4mb page table (i.e. let the kernel
	 * do the table walk).  In this way, we avoid needing to know
	 * the MMU type.
	 */
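	/*
	 * Editor's arithmetic note: with 4-byte PTEs and 4K pages, a
	 * flat 4MB table holds 1M entries and so maps the full 4GB
	 * 32-bit address space, letting btop(va) index it directly.
	 */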
	if (ISALIVE(kd)) {
		struct pte *ptab;
		int pte, offset;

		kva = (int)&vms->vm_pmap.pm_ptab;
		if (KREAD(kd, kva, &ptab)) {
			_kvm_err(kd, 0, "invalid address (%lx)", va);
			return (0);
		}
		kva = (int)&ptab[btop(va)];
		if (KREAD(kd, kva, &pte) || (pte & PG_V) == 0) {
			_kvm_err(kd, 0, "invalid address (%lx)", va);
			return (0);
		}
		offset = va & PGOFSET;
		*pa = (pte & PG_FRAME) | offset;
		return (NBPG - offset);
	}
	/*
	 * Otherwise, we just walk the table ourselves.
	 */
	kva = (int)&vms->vm_pmap.pm_stab;
	if (KREAD(kd, kva, &kva)) {
		_kvm_err(kd, 0, "invalid address (%lx)", va);
		return (0);
	}
	return (_kvm_vatop(kd, (struct ste *)kva, va, pa));
}