xref: /netbsd/lib/libkvm/kvm_m68k.c (revision b331e636)
/*-
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
/* from: static char sccsid[] = "@(#)kvm_hp300.c	8.1 (Berkeley) 6/4/93"; */
static char *rcsid = "$Id: kvm_m68k.c,v 1.4 1995/04/02 20:45:26 chopps Exp $";
#endif /* LIBC_SCCS and not lint */

/*
 * m68k machine dependent routines for kvm.  Hopefully, the forthcoming
 * vm code will one day obsolete this module.
 */

#include <sys/param.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <unistd.h>
#include <stdlib.h>
#include <nlist.h>
#include <kvm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>

#include <limits.h>
#include <db.h>

#include "kvm_private.h"

#include <machine/pte.h>

#ifndef btop
#define	btop(x)		(((unsigned)(x)) >> PGSHIFT)	/* XXX */
#define	ptob(x)		((caddr_t)((x) << PGSHIFT))	/* XXX */
#endif

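/*
 * Cached translation state for a dead kernel:
 *	lowram	- physical address where RAM begins; physical reads from
 *		  the dump use file offsets of the form (pa - lowram).
 *	mmutype	- MMU flavour read from the kernel (_mmutype, or _cpu040
 *		  on mac68k).
 *	Sysseg	- location of the kernel segment table, which is assumed
 *		  to be 1-to-1 mapped (see _kvm_vatop below).
 */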
struct vmstate {
	u_long lowram;
	int mmutype;
	st_entry_t *Sysseg;
};

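/*
 * Read one object of the given size from the kernel image or dump via
 * kvm_read().  Evaluates to non-zero on a failed or short read.
 */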
#define KREAD(kd, addr, p)\
	(kvm_read(kd, addr, (char *)(p), sizeof(*(p))) != sizeof(*(p)))

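/*
 * Release the translation state allocated by _kvm_initvtop().
 */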
void
_kvm_freevtop(kd)
	kvm_t *kd;
{
	if (kd->vmst != 0)
		free(kd->vmst);
}

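/*
 * Set up the virtual-to-physical translation state for a dead kernel:
 * look up the lowram, MMU type and kernel segment table symbols and
 * cache their values.  Returns 0 on success, -1 on error.
 */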
int
_kvm_initvtop(kd)
	kvm_t *kd;
{
	struct vmstate *vm;
	struct nlist nlist[4];

	vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
	if (vm == 0)
		return (-1);
	kd->vmst = vm;

	nlist[0].n_name = "_lowram";
#if defined(mac68k)
	nlist[1].n_name = "_cpu040";
#else
	nlist[1].n_name = "_mmutype";
#endif
	nlist[2].n_name = "_Sysseg";
	nlist[3].n_name = 0;

	if (kvm_nlist(kd, nlist) != 0) {
		_kvm_err(kd, kd->program, "bad namelist");
		return (-1);
	}
	vm->Sysseg = 0;
	if (KREAD(kd, (u_long)nlist[0].n_value, &vm->lowram)) {
		_kvm_err(kd, kd->program, "cannot read lowram");
		return (-1);
	}
	if (KREAD(kd, (u_long)nlist[1].n_value, &vm->mmutype)) {
		_kvm_err(kd, kd->program, "cannot read mmutype");
		return (-1);
	}
	if (KREAD(kd, (u_long)nlist[2].n_value, &vm->Sysseg)) {
		_kvm_err(kd, kd->program, "cannot read segment table");
		return (-1);
	}
	return (0);
}

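/*
 * Translate the virtual address va to a physical address by walking the
 * segment and page tables found in the dump, starting at the segment
 * table sta.  On success the physical address is stored in *pa and the
 * number of bytes remaining in the page is returned; on failure 0 is
 * returned.
 */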
static int
_kvm_vatop(kd, sta, va, pa)
	kvm_t *kd;
	st_entry_t *sta;
	u_long va;
	u_long *pa;
{
	register struct vmstate *vm;
	register u_long lowram;
	register u_long addr;
	int p, ste, pte;
	int offset;

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "vatop called in live kernel!");
		return (0);
	}
	vm = kd->vmst;
	offset = va & PGOFSET;
	/*
	 * If we are initializing (kernel segment table pointer not yet set)
	 * then return pa == va to avoid infinite recursion.
	 */
	if (vm->Sysseg == 0) {
		*pa = va;
		return (NBPG - offset);
	}
	lowram = vm->lowram;
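	/*
	 * Select the table walk format.  An mmutype of -2 (MMU_68040)
	 * indicates the 68040 MMU and its three-level tables; anything
	 * else uses the two-level 68851/68030 layout.  On mac68k the
	 * value read above is _cpu040, but it is not consulted here and
	 * the two-level walk is always taken.
	 */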
#if defined(mac68k)
	{ int cpu040 = vm->mmutype;
#else
	if (vm->mmutype == -2) {
		st_entry_t *sta2;

		addr = (u_long)&sta[va >> SG4_SHIFT1];
		/*
		 * Can't use KREAD to read kernel segment table entries.
		 * Fortunately it is 1-to-1 mapped so we don't have to.
		 */
		if (sta == vm->Sysseg) {
			if (lseek(kd->pmfd, (off_t)addr, 0) == -1 ||
			    read(kd->pmfd, (char *)&ste, sizeof(ste)) < 0)
				goto invalid;
		} else if (KREAD(kd, addr, &ste))
			goto invalid;
		if ((ste & SG_V) == 0) {
			_kvm_err(kd, 0, "invalid level 1 descriptor (%x)",
				 ste);
			return (0);
		}
		sta2 = (st_entry_t *)(ste & SG4_ADDR1);
		addr = (u_long)&sta2[(va & SG4_MASK2) >> SG4_SHIFT2];
		/*
		 * Address from level 1 STE is a physical address,
		 * so don't use kvm_read.
		 */
		if (lseek(kd->pmfd, (off_t)(addr - lowram), 0) == -1 ||
		    read(kd->pmfd, (char *)&ste, sizeof(ste)) < 0)
			goto invalid;
		if ((ste & SG_V) == 0) {
			_kvm_err(kd, 0, "invalid level 2 descriptor (%x)",
				 ste);
			return (0);
		}
		sta2 = (st_entry_t *)(ste & SG4_ADDR2);
		addr = (u_long)&sta2[(va & SG4_MASK3) >> SG4_SHIFT3];
	} else {
#endif
		addr = (u_long)&sta[va >> SEGSHIFT];
		/*
		 * Can't use KREAD to read kernel segment table entries.
		 * Fortunately it is 1-to-1 mapped so we don't have to.
		 */
		if (sta == vm->Sysseg) {
			if (lseek(kd->pmfd, (off_t)addr, 0) == -1 ||
			    read(kd->pmfd, (char *)&ste, sizeof(ste)) < 0)
				goto invalid;
		} else if (KREAD(kd, addr, &ste))
			goto invalid;
		if ((ste & SG_V) == 0) {
			_kvm_err(kd, 0, "invalid segment (%x)", ste);
			return (0);
		}
		p = btop(va & SG_PMASK);
		addr = (ste & SG_FRAME) + (p * sizeof(pt_entry_t));
	}
	/*
	 * Address from STE is a physical address so don't use kvm_read.
	 */
	if (lseek(kd->pmfd, (off_t)(addr - lowram), 0) == -1 ||
	    read(kd->pmfd, (char *)&pte, sizeof(pte)) < 0)
		goto invalid;
	addr = pte & PG_FRAME;
	if (pte == PG_NV) {
		_kvm_err(kd, 0, "page not valid");
		return (0);
	}
	*pa = addr - lowram + offset;

	return (NBPG - offset);
invalid:
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}

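/*
 * Translate a kernel virtual address to a physical address using the
 * kernel segment table cached by _kvm_initvtop().
 */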
int
_kvm_kvatop(kd, va, pa)
	kvm_t *kd;
	u_long va;
	u_long *pa;
{
	return (_kvm_vatop(kd, kd->vmst->Sysseg, va, pa));
}
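
/*
 * Usage sketch (not part of this file): the routines above are reached
 * indirectly when a crash-dump consumer reads kernel virtual addresses
 * through libkvm.  The paths and the "_boottime" symbol below are only
 * examples.
 *
 *	char errbuf[_POSIX2_LINE_MAX];
 *	struct nlist nl[2];
 *	struct timeval boottime;
 *	kvm_t *kd;
 *
 *	kd = kvm_openfiles("/netbsd", "/var/crash/netbsd.0.core", NULL,
 *	    O_RDONLY, errbuf);
 *	if (kd == NULL)
 *		errx(1, "%s", errbuf);
 *	nl[0].n_name = "_boottime";
 *	nl[1].n_name = NULL;
 *	if (kvm_nlist(kd, nl) != 0)
 *		errx(1, "kvm_nlist failed");
 *	if (kvm_read(kd, nl[0].n_value, &boottime, sizeof(boottime)) !=
 *	    sizeof(boottime))
 *		errx(1, "%s", kvm_geterr(kd));
 *	kvm_close(kd);
 *
 * For a dead kernel, kvm_read() uses _kvm_kvatop() to turn each kernel
 * virtual address into a physical address before seeking into the dump.
 */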