1 /* $NetBSD: mips_machdep.c,v 1.273 2016/07/25 22:10:03 macallan Exp $ */
2
3 /*
4 * Copyright 2002 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Simon Burge for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright 2000, 2001
40 * Broadcom Corporation. All rights reserved.
41 *
42 * This software is furnished under license and may be used and copied only
43 * in accordance with the following terms and conditions. Subject to these
44 * conditions, you may download, copy, install, use, modify and distribute
45 * modified or unmodified copies of this software in source and/or binary
46 * form. No title or ownership is transferred hereby.
47 *
48 * 1) Any source code used, modified or distributed must reproduce and
49 * retain this copyright notice and list of conditions as they appear in
50 * the source file.
51 *
52 * 2) No right is granted to use any trade name, trademark, or logo of
53 * Broadcom Corporation. The "Broadcom Corporation" name may not be
54 * used to endorse or promote products derived from this software
55 * without the prior written permission of Broadcom Corporation.
56 *
57 * 3) THIS SOFTWARE IS PROVIDED "AS-IS" AND ANY EXPRESS OR IMPLIED
58 * WARRANTIES, INCLUDING BUT NOT LIMITED TO, ANY IMPLIED WARRANTIES OF
59 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
60 * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM BE LIABLE
61 * FOR ANY DAMAGES WHATSOEVER, AND IN PARTICULAR, BROADCOM SHALL NOT BE
62 * LIABLE FOR DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
63 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
64 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
65 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
66 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
67 * OR OTHERWISE), EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
68 */
69
70 /*-
71 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
72 * All rights reserved.
73 *
74 * This code is derived from software contributed to The NetBSD Foundation
75 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
76 * NASA Ames Research Center and by Chris Demetriou.
77 *
78 * Redistribution and use in source and binary forms, with or without
79 * modification, are permitted provided that the following conditions
80 * are met:
81 * 1. Redistributions of source code must retain the above copyright
82 * notice, this list of conditions and the following disclaimer.
83 * 2. Redistributions in binary form must reproduce the above copyright
84 * notice, this list of conditions and the following disclaimer in the
85 * documentation and/or other materials provided with the distribution.
86 *
87 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
88 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
89 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
90 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
91 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
92 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
93 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
94 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
95 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
96 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
97 * POSSIBILITY OF SUCH DAMAGE.
98 */
99
100 /*
101 * Copyright 1996 The Board of Trustees of The Leland Stanford
102 * Junior University. All Rights Reserved.
103 *
104 * Permission to use, copy, modify, and distribute this
105 * software and its documentation for any purpose and without
106 * fee is hereby granted, provided that the above copyright
107 * notice appear in all copies. Stanford University
108 * makes no representations about the suitability of this
109 * software for any purpose. It is provided "as is" without
110 * express or implied warranty.
111 */
112
113 #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
114 __KERNEL_RCSID(0, "$NetBSD: mips_machdep.c,v 1.273 2016/07/25 22:10:03 macallan Exp $");
115
116 #define __INTR_PRIVATE
117 #include "opt_cputype.h"
118 #include "opt_compat_netbsd32.h"
119
120 #include <sys/param.h>
121 #include <sys/systm.h>
122 #include <sys/proc.h>
123 #include <sys/intr.h>
124 #include <sys/exec.h>
125 #include <sys/reboot.h>
126 #include <sys/mount.h> /* fsid_t for syscallargs */
127 #include <sys/lwp.h>
128 #include <sys/sysctl.h>
129 #include <sys/msgbuf.h>
130 #include <sys/conf.h>
131 #include <sys/core.h>
132 #include <sys/device.h>
133 #include <sys/kcore.h>
134 #include <sys/kmem.h>
135 #include <sys/ras.h>
136 #include <sys/cpu.h>
137 #include <sys/atomic.h>
138 #include <sys/ucontext.h>
139 #include <sys/bitops.h>
140
141 #include <mips/kcore.h>
142
143 #ifdef COMPAT_NETBSD32
144 #include <compat/netbsd32/netbsd32.h>
145 #endif
146
147 #include <uvm/uvm.h>
148
149 #include <dev/cons.h>
150 #include <dev/mm.h>
151
152 #include <mips/pcb.h>
153 #include <mips/cache.h>
154 #include <mips/frame.h>
155 #include <mips/regnum.h>
156 #include <mips/mips_opcode.h>
157
158 #include <mips/cpu.h>
159 #include <mips/locore.h>
160 #include <mips/psl.h>
161 #include <mips/pte.h>
162 #include <mips/userret.h>
163
164 #ifdef __HAVE_BOOTINFO_H
165 #include <machine/bootinfo.h>
166 #endif
167
#if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
#include <mips/mipsNN.h>	/* MIPS32/MIPS64 registers */

/*
 * Assemble a 32-bit MIPS instruction word from its encoding fields:
 * a = major opcode (6 bits), b/c/d = the three 5-bit register/sub-op
 * fields, e = the remaining low bits (immediate/sel/function field).
 */
#define _MKINSN(a,b,c,d,e) ((uint32_t)(((a) << 26)|((b) << 21)|((c) << 16)|((d) << 11)|(e)))

#ifdef _LP64
/* "ld v0, offsetof(lwp_t, l_private)(a0)" -- fetch l_private from the lwp in a0 */
#define _LOAD_V0_L_PRIVATE_A0 _MKINSN(OP_LD, _R_A0, _R_V0, 0, offsetof(lwp_t, l_private))
/* "dmtc0 v0, $4, 2" -- write v0 to the UserLocal CP0 register (reg 4, sel 2) */
#define _MTC0_V0_USERLOCAL _MKINSN(OP_COP0, OP_DMT, _R_V0, MIPS_COP_0_TLB_CONTEXT, 2)
#else
/* 32-bit (ILP32) variants of the above: lw / mtc0 instead of ld / dmtc0 */
#define _LOAD_V0_L_PRIVATE_A0 _MKINSN(OP_LW, _R_A0, _R_V0, 0, offsetof(lwp_t, l_private))
#define _MTC0_V0_USERLOCAL _MKINSN(OP_COP0, OP_MT, _R_V0, MIPS_COP_0_TLB_CONTEXT, 2)
#endif
/* "jr ra" -- return from the generated stub */
#define JR_RA _MKINSN(OP_SPECIAL, _R_RA, 0, 0, OP_JR)

#endif
183
/* Internal routines. */
int	cpu_dumpsize(void);		/* size of the crash-dump header */
u_long	cpu_dump_mempagecnt(void);	/* number of physical pages to dump */
int	cpu_dump(void);			/* write the dump header */

#if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
/* Set up the CP0 watchpoint registers (MIPS32/MIPS64 only). */
static void mips_watchpoint_init(void);
#endif

#if defined(_LP64) && defined(ENABLE_MIPS_16KB_PAGE)
/* Top of user virtual address space; a variable here because the
 * 16KB-page configuration can change the layout. */
vaddr_t mips_vm_maxuser_address = MIPS_VM_MAXUSER_ADDRESS;
#endif

#if defined(MIPS3_PLUS)
/*
 * Probe stubs for the TLB-related CP0 registers (defined elsewhere,
 * in assembly); used to discover supported page-mask bits and the
 * writable widths of EntryHi/EntryLo.
 */
uint32_t mips3_cp0_tlb_page_mask_probe(void);
uint64_t mips3_cp0_tlb_entry_hi_probe(void);
uint64_t mips3_cp0_tlb_entry_lo_probe(void);

static void mips3_tlb_probe(void);
#endif
204
/*
 * Per-ISA locore hooks.  Each CPU family that can be configured in
 * supplies a "locoresw" (software function switch) and a locore jump
 * vector from locore, plus a *_vector_init() routine below that
 * installs the family's exception vectors.  (MIPS64R2 declares no
 * init routine of its own here; presumably it shares another
 * variant's -- confirm at the call site.)
 */
#if defined(MIPS1)
static void mips1_vector_init(const struct splsw *);
extern const struct locoresw mips1_locoresw;
extern const mips_locore_jumpvec_t mips1_locore_vec;
#endif

#if defined(MIPS3)
static void mips3_vector_init(const struct splsw *);
extern const struct locoresw mips3_locoresw;
extern const mips_locore_jumpvec_t mips3_locore_vec;
#endif

#if defined(MIPS3_LOONGSON2)
static void loongson2_vector_init(const struct splsw *);
extern const struct locoresw loongson2_locoresw;
extern const mips_locore_jumpvec_t loongson2_locore_vec;
#endif

#if defined(MIPS32)
static void mips32_vector_init(const struct splsw *);
extern const struct locoresw mips32_locoresw;
extern const mips_locore_jumpvec_t mips32_locore_vec;
#endif

#if defined(MIPS32R2)
static void mips32r2_vector_init(const struct splsw *);
extern const struct locoresw mips32r2_locoresw;
extern const mips_locore_jumpvec_t mips32r2_locore_vec;
#endif

#if defined(MIPS64)
static void mips64_vector_init(const struct splsw *);
extern const struct locoresw mips64_locoresw;
extern const mips_locore_jumpvec_t mips64_locore_vec;
#endif

#if defined(MIPS64R2)
extern const struct locoresw mips64r2_locoresw;
extern const mips_locore_jumpvec_t mips64r2_locore_vec;
#endif

#if defined(PARANOIA)
void std_splsw_test(void);	/* self-test of the spl switch */
#endif
249
/* Active locore jump vector; filled in by the *_vector_init() routines. */
mips_locore_jumpvec_t mips_locore_jumpvec;

/* Active locore software-function switch for the CPU we run on. */
struct locoresw mips_locoresw;

extern const struct splsw std_splsw;
struct splsw mips_splsw;	/* active spl switch */

/*
 * CPU/FPU identification and derived option flags.  The IDs start as
 * 0xffffffff -- presumably an "unprobed" sentinel; verify against the
 * probe code.
 */
struct mips_options mips_options = {
	.mips_cpu_id = 0xffffffff,
	.mips_fpu_id = 0xffffffff,
};

void *	msgbufaddr;		/* kernel message buffer address */

/* the following is used externally (sysctl_hw) */
char	machine[] = MACHINE;		/* from <machine/param.h> */
char	machine_arch[] = MACHINE_ARCH;	/* from <machine/param.h> */


/*
 * Assumptions:
 *  - All MIPS3+ have an r4k-style MMU.  _Many_ assumptions throughout
 *    much of the mips code about this.  Includes overloaded usage of
 *    MIPS3_PLUS.
 *  - All MIPS3+ use the same exception model (cp0 status, cause bits,
 *    etc).  _Many_ assumptions throughout much of the mips code about
 *    this.  Includes overloaded usage of MIPS3_PLUS.
 *  - All MIPS3+ have a count register.  MIPS_HAS_CLOCK in <mips/cpu.h>
 *    will need to be revised if this is false.
 */

/* Flags common to every MIPS32/MIPS64 part in the table below. */
#define	MIPS32_FLAGS	CPU_MIPS_R4K_MMU | CPU_MIPS_CAUSE_IV | CPU_MIPS_USE_WAIT
#define	MIPS64_FLAGS	MIPS32_FLAGS	/* same as MIPS32 flags (for now) */
282
static const struct pridtab cputab[] = {
	/*
	 * One entry per known processor, matched against the CP0 PRId
	 * register.  Field order (inferred from the initializers --
	 * NOTE(review): confirm against struct pridtab in <mips/cpu.h>):
	 * company id, processor id, revision (-1 = any), company option
	 * (-1 = any), ISA level, TLB entry count, CPU flags, CP0 feature
	 * flags, company-id flags, name.
	 */
	{ 0, MIPS_R2000, -1, -1, CPU_ARCH_MIPS1, 64,
	  CPU_MIPS_NO_LLSC, 0, 0, "MIPS R2000 CPU" },
	{ 0, MIPS_R3000, MIPS_REV_R2000A, -1, CPU_ARCH_MIPS1, 64,
	  CPU_MIPS_NO_LLSC, 0, 0, "MIPS R2000A CPU" },
	{ 0, MIPS_R3000, MIPS_REV_R3000, -1, CPU_ARCH_MIPS1, 64,
	  CPU_MIPS_NO_LLSC, 0, 0, "MIPS R3000 CPU" },
	{ 0, MIPS_R3000, MIPS_REV_R3000A, -1, CPU_ARCH_MIPS1, 64,
	  CPU_MIPS_NO_LLSC, 0, 0, "MIPS R3000A CPU" },
	{ 0, MIPS_R6000, -1, -1, CPU_ARCH_MIPS2, 32,
	  MIPS_NOT_SUPP, 0, 0, "MIPS R6000 CPU" },

	/*
	 * rev 0x00, 0x22 and 0x30 are R4000, 0x40, 0x50 and 0x60 are R4400.
	 * should we allow ranges and use 0x00 - 0x3f for R4000 and
	 * 0x40 - 0xff for R4400?
	 */
	{ 0, MIPS_R4000, MIPS_REV_R4000_A, -1, CPU_ARCH_MIPS3, 48,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
	  "MIPS R4000 CPU" },
	{ 0, MIPS_R4000, MIPS_REV_R4000_B, -1, CPU_ARCH_MIPS3, 48,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
	  "MIPS R4000 CPU" },
	{ 0, MIPS_R4000, MIPS_REV_R4000_C, -1, CPU_ARCH_MIPS3, 48,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
	  "MIPS R4000 CPU" },
	{ 0, MIPS_R4000, MIPS_REV_R4400_A, -1, CPU_ARCH_MIPS3, 48,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
	  "MIPS R4400 CPU" },
	{ 0, MIPS_R4000, MIPS_REV_R4400_B, -1, CPU_ARCH_MIPS3, 48,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
	  "MIPS R4400 CPU" },
	{ 0, MIPS_R4000, MIPS_REV_R4400_C, -1, CPU_ARCH_MIPS3, 48,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
	  "MIPS R4400 CPU" },

	{ 0, MIPS_R3LSI, -1, -1, CPU_ARCH_MIPS1, -1,
	  MIPS_NOT_SUPP, 0, 0, "LSI Logic R3000 derivative" },
	{ 0, MIPS_R6000A, -1, -1, CPU_ARCH_MIPS2, 32,
	  MIPS_NOT_SUPP, 0, 0, "MIPS R6000A CPU" },
	{ 0, MIPS_R3IDT, -1, -1, CPU_ARCH_MIPS1, -1,
	  MIPS_NOT_SUPP, 0, 0, "IDT R3041 or RC36100 CPU" },
	{ 0, MIPS_R4100, -1, -1, CPU_ARCH_MIPS3, 32,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_NO_LLSC, 0, 0,
	  "NEC VR4100 CPU" },
	{ 0, MIPS_R4200, -1, -1, CPU_ARCH_MIPS3, -1,
	  MIPS_NOT_SUPP | CPU_MIPS_R4K_MMU, 0, 0,
	  "NEC VR4200 CPU" },
	{ 0, MIPS_R4300, -1, -1, CPU_ARCH_MIPS3, 32,
	  CPU_MIPS_R4K_MMU, 0, 0, "NEC VR4300 CPU" },
	{ 0, MIPS_R4600, -1, -1, CPU_ARCH_MIPS3, 48,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
	  "QED R4600 Orion CPU" },
	{ 0, MIPS_R4700, -1, -1, CPU_ARCH_MIPS3, 48,
	  CPU_MIPS_R4K_MMU, 0, 0, "QED R4700 Orion CPU" },

	{ 0, MIPS_R8000, -1, -1, CPU_ARCH_MIPS4, 384,
	  MIPS_NOT_SUPP | CPU_MIPS_R4K_MMU, 0, 0,
	  "MIPS R8000 Blackbird/TFP CPU" },
	{ 0, MIPS_R10000, -1, -1, CPU_ARCH_MIPS4, 64,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
	  "MIPS R10000 CPU" },
	{ 0, MIPS_R12000, -1, -1, CPU_ARCH_MIPS4, 64,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
	  "MIPS R12000 CPU" },
	{ 0, MIPS_R14000, -1, -1, CPU_ARCH_MIPS4, 64,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
	  "MIPS R14000 CPU" },

	/* XXX
	 * If the Processor Revision ID of the 4650 isn't 0, the following
	 * entry needs to be adjusted.  Can't use a wildcard match because
	 * the TX39 series processors share the same Processor ID value.
	 * Or maybe put TX39 CPUs first if the revid doesn't overlap with
	 * the 4650...
	 */
	{ 0, MIPS_R4650, 0, -1, CPU_ARCH_MIPS3, -1,
	  MIPS_NOT_SUPP /* no MMU! */, 0, 0, "QED R4650 CPU" },
	{ 0, MIPS_TX3900, MIPS_REV_TX3912, -1, CPU_ARCH_MIPS1, 32,
	  CPU_MIPS_NO_LLSC, 0, 0, "Toshiba TX3912 CPU" },
	{ 0, MIPS_TX3900, MIPS_REV_TX3922, -1, CPU_ARCH_MIPS1, 64,
	  CPU_MIPS_NO_LLSC, 0, 0, "Toshiba TX3922 CPU" },
	{ 0, MIPS_TX3900, MIPS_REV_TX3927, -1, CPU_ARCH_MIPS1, 64,
	  CPU_MIPS_NO_LLSC, 0, 0, "Toshiba TX3927 CPU" },
	{ 0, MIPS_R5000, -1, -1, CPU_ARCH_MIPS4, 48,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
	  "MIPS R5000 CPU" },
	{ 0, MIPS_RM5200, -1, -1, CPU_ARCH_MIPS4, 48,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_CAUSE_IV | CPU_MIPS_DOUBLE_COUNT |
	  CPU_MIPS_USE_WAIT, 0, 0, "QED RM5200 CPU" },

	/* XXX
	 * The rm7000 rev 2.0 can have 64 tlbs, and has 6 extra interrupts.  See
	 * "Migrating to the RM7000 from other MIPS Microprocessors"
	 * for more details.
	 */
	{ 0, MIPS_RM7000, -1, -1, CPU_ARCH_MIPS4, 48,
	  MIPS_NOT_SUPP | CPU_MIPS_CAUSE_IV | CPU_MIPS_DOUBLE_COUNT |
	  CPU_MIPS_USE_WAIT, 0, 0, "QED RM7000 CPU" },

	/*
	 * IDT RC32300 core is a 32 bit MIPS2 processor with
	 * MIPS3/MIPS4 extensions.  It has an R4000-style TLB,
	 * while all registers are 32 bits and any 64 bit
	 * instructions like ld/sd/dmfc0/dmtc0 are not allowed.
	 *
	 * note that the Config register has a non-standard base
	 * for IC and DC (2^9 instead of 2^12).
	 *
	 */
	{ 0, MIPS_RC32300, -1, -1, CPU_ARCH_MIPS3, 16,
	  MIPS_NOT_SUPP | CPU_MIPS_R4K_MMU, 0, 0,
	  "IDT RC32300 CPU" },
	{ 0, MIPS_RC32364, -1, -1, CPU_ARCH_MIPS3, 16,
	  MIPS_NOT_SUPP | CPU_MIPS_R4K_MMU, 0, 0,
	  "IDT RC32364 CPU" },
	{ 0, MIPS_RC64470, -1, -1, CPU_ARCH_MIPSx, -1,
	  MIPS_NOT_SUPP | CPU_MIPS_R4K_MMU, 0, 0,
	  "IDT RC64474/RC64475 CPU" },

	{ 0, MIPS_R5400, -1, -1, CPU_ARCH_MIPSx, -1,
	  MIPS_NOT_SUPP | CPU_MIPS_R4K_MMU, 0, 0,
	  "NEC VR5400 CPU" },
	{ 0, MIPS_R5900, -1, -1, CPU_ARCH_MIPS3, 48,
	  CPU_MIPS_NO_LLSC | CPU_MIPS_R4K_MMU, 0, 0,
	  "Toshiba R5900 CPU" },

	{ 0, MIPS_TX4900, MIPS_REV_TX4927, -1, CPU_ARCH_MIPS3, 48,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
	  "Toshiba TX4927 CPU" },

	{ 0, MIPS_TX4900, -1, -1, CPU_ARCH_MIPS3, 48,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
	  "Toshiba TX4900 CPU" },

	/*
	 * ICT Loongson2 is a MIPS64 CPU with a few quirks.  For some reason
	 * the virtual aliases present with 4KB pages make the caches misbehave
	 * so we make all accesses uncached.  With 16KB pages, no virtual
	 * aliases are possible so we can use caching.
	 */
#ifdef ENABLE_MIPS_16KB_PAGE
#define	MIPS_LOONGSON2_CCA	0
#else
#define	MIPS_LOONGSON2_CCA	(CPU_MIPS_HAVE_SPECIAL_CCA | \
				(2 << CPU_MIPS_CACHED_CCA_SHIFT))
#endif
	{ 0, MIPS_LOONGSON2, MIPS_REV_LOONGSON2E, -1, CPU_ARCH_MIPS3, 64,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT | CPU_MIPS_LOONGSON2
	  | MIPS_LOONGSON2_CCA, 0, 0, "ICT Loongson 2E CPU" },
	{ 0, MIPS_LOONGSON2, MIPS_REV_LOONGSON2F, -1, CPU_ARCH_MIPS3, 64,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT | CPU_MIPS_LOONGSON2
	  | MIPS_LOONGSON2_CCA, 0, 0, "ICT Loongson 2F CPU" },

#if 0 /* ID collisions : can we use a CU1 test or similar? */
	{ 0, MIPS_R3SONY, -1, -1, CPU_ARCH_MIPS1, -1,
	  MIPS_NOT_SUPP, 0, 0, "SONY R3000 derivative" },	/* 0x21; crash R4700? */
	{ 0, MIPS_R3NKK, -1, -1, CPU_ARCH_MIPS1, -1,
	  MIPS_NOT_SUPP, 0, 0, "NKK R3000 derivative" },	/* 0x23; crash R5000? */
#endif

	{ MIPS_PRID_CID_MTI, MIPS_4Kc, -1, -1, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT, 0, 0, "4Kc" },
	{ MIPS_PRID_CID_MTI, MIPS_4KEc, -1, -1, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT, 0, 0, "4KEc" },
	{ MIPS_PRID_CID_MTI, MIPS_4KEc_R2, -1, -1, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT, 0, 0, "4KEc (Rev 2)" },
	{ MIPS_PRID_CID_MTI, MIPS_4KSc, -1, -1, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT, 0, 0, "4KSc" },
	{ MIPS_PRID_CID_MTI, MIPS_5Kc, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_DOUBLE_COUNT, 0, 0, "5Kc" },
	{ MIPS_PRID_CID_MTI, MIPS_20Kc, -1, -1, -1, 0,
	  MIPS64_FLAGS, 0, 0, "20Kc" },
	{ MIPS_PRID_CID_MTI, MIPS_24K, -1, -1, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EBASE | MIPS_CP0FL_USERLOCAL | MIPS_CP0FL_HWRENA |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 |
	  MIPS_CP0FL_CONFIG3 | MIPS_CP0FL_CONFIG7,
	  0, "24K" },
	{ MIPS_PRID_CID_MTI, MIPS_24KE, -1, -1, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EBASE | MIPS_CP0FL_USERLOCAL | MIPS_CP0FL_HWRENA |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 |
	  MIPS_CP0FL_CONFIG3 | MIPS_CP0FL_CONFIG7,
	  0, "24KE" },
	{ MIPS_PRID_CID_MTI, MIPS_34K, -1, -1, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EBASE | MIPS_CP0FL_USERLOCAL | MIPS_CP0FL_HWRENA |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 |
	  MIPS_CP0FL_CONFIG3 | MIPS_CP0FL_CONFIG7,
	  0, "34K" },
	{ MIPS_PRID_CID_MTI, MIPS_74K, -1, -1, -1, 0,
	  CPU_MIPS_HAVE_SPECIAL_CCA | (0 << CPU_MIPS_CACHED_CCA_SHIFT) |
	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EBASE | MIPS_CP0FL_USERLOCAL | MIPS_CP0FL_HWRENA |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 |
	  MIPS_CP0FL_CONFIG3 | MIPS_CP0FL_CONFIG6 | MIPS_CP0FL_CONFIG7,
	  0, "74K" },
	{ MIPS_PRID_CID_MTI, MIPS_1004K, -1, -1, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EBASE | MIPS_CP0FL_USERLOCAL | MIPS_CP0FL_HWRENA |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 |
	  MIPS_CP0FL_CONFIG3 | MIPS_CP0FL_CONFIG6 | MIPS_CP0FL_CONFIG7,
	  0, "1004K" },
	{ MIPS_PRID_CID_MTI, MIPS_1074K, -1, -1, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EBASE | MIPS_CP0FL_USERLOCAL | MIPS_CP0FL_HWRENA |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 |
	  MIPS_CP0FL_CONFIG3 | MIPS_CP0FL_CONFIG6 | MIPS_CP0FL_CONFIG7,
	  0, "1074K" },

	{ MIPS_PRID_CID_BROADCOM, MIPS_BCM3302, -1, -1, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT, 0, 0, "BCM3302" },

	{ MIPS_PRID_CID_ALCHEMY, MIPS_AU_REV1, -1, MIPS_AU1000, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_NO_WAIT | CPU_MIPS_I_D_CACHE_COHERENT, 0, 0,
	  "Au1000 (Rev 1 core)" },
	{ MIPS_PRID_CID_ALCHEMY, MIPS_AU_REV2, -1, MIPS_AU1000, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_NO_WAIT | CPU_MIPS_I_D_CACHE_COHERENT, 0, 0,
	  "Au1000 (Rev 2 core)" },

	{ MIPS_PRID_CID_ALCHEMY, MIPS_AU_REV1, -1, MIPS_AU1100, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_NO_WAIT | CPU_MIPS_I_D_CACHE_COHERENT, 0, 0,
	  "Au1100 (Rev 1 core)" },
	{ MIPS_PRID_CID_ALCHEMY, MIPS_AU_REV2, -1, MIPS_AU1100, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_NO_WAIT | CPU_MIPS_I_D_CACHE_COHERENT, 0, 0,
	  "Au1100 (Rev 2 core)" },

	{ MIPS_PRID_CID_ALCHEMY, MIPS_AU_REV1, -1, MIPS_AU1500, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_NO_WAIT | CPU_MIPS_I_D_CACHE_COHERENT, 0, 0,
	  "Au1500 (Rev 1 core)" },
	{ MIPS_PRID_CID_ALCHEMY, MIPS_AU_REV2, -1, MIPS_AU1500, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_NO_WAIT | CPU_MIPS_I_D_CACHE_COHERENT, 0, 0,
	  "Au1500 (Rev 2 core)" },

	{ MIPS_PRID_CID_ALCHEMY, MIPS_AU_REV2, -1, MIPS_AU1550, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_NO_WAIT | CPU_MIPS_I_D_CACHE_COHERENT, 0, 0,
	  "Au1550 (Rev 2 core)" },

	/* The SB-1 CPU uses a CCA of 5 - "Cacheable Coherent Shareable" */
	{ MIPS_PRID_CID_SIBYTE, MIPS_SB1, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT |
	  CPU_MIPS_HAVE_SPECIAL_CCA | (5 << CPU_MIPS_CACHED_CCA_SHIFT), 0, 0,
	  "SB-1" },

	{ MIPS_PRID_CID_RMI, MIPS_XLR732B, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
	  CIDFL_RMI_TYPE_XLR|MIPS_CIDFL_RMI_CPUS(8,4)|MIPS_CIDFL_RMI_L2(2MB),
	  "XLR732B" },

	{ MIPS_PRID_CID_RMI, MIPS_XLR732C, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
	  CIDFL_RMI_TYPE_XLR|MIPS_CIDFL_RMI_CPUS(8,4)|MIPS_CIDFL_RMI_L2(2MB),
	  "XLR732C" },

	{ MIPS_PRID_CID_RMI, MIPS_XLS616, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
	  CIDFL_RMI_TYPE_XLS|MIPS_CIDFL_RMI_CPUS(4,4)|MIPS_CIDFL_RMI_L2(1MB),
	  "XLS616" },

	{ MIPS_PRID_CID_RMI, MIPS_XLS416, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
	  CIDFL_RMI_TYPE_XLS|MIPS_CIDFL_RMI_CPUS(4,4)|MIPS_CIDFL_RMI_L2(1MB),
	  "XLS416" },

	{ MIPS_PRID_CID_RMI, MIPS_XLS408, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
	  CIDFL_RMI_TYPE_XLS|MIPS_CIDFL_RMI_CPUS(2,4)|MIPS_CIDFL_RMI_L2(1MB),
	  "XLS408" },

	{ MIPS_PRID_CID_RMI, MIPS_XLS408LITE, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
	  CIDFL_RMI_TYPE_XLS|MIPS_CIDFL_RMI_CPUS(2,4)|MIPS_CIDFL_RMI_L2(1MB),
	  "XLS408lite" },

	{ MIPS_PRID_CID_RMI, MIPS_XLS404LITE, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
	  CIDFL_RMI_TYPE_XLS|MIPS_CIDFL_RMI_CPUS(1,4)|MIPS_CIDFL_RMI_L2(512KB),
	  "XLS404lite" },

	{ MIPS_PRID_CID_RMI, MIPS_XLS208, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
	  CIDFL_RMI_TYPE_XLS|MIPS_CIDFL_RMI_CPUS(2,4)|MIPS_CIDFL_RMI_L2(512KB),
	  "XLS208" },

	{ MIPS_PRID_CID_RMI, MIPS_XLS204, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
	  CIDFL_RMI_TYPE_XLS|MIPS_CIDFL_RMI_CPUS(1,4)|MIPS_CIDFL_RMI_L2(256KB),
	  "XLS204" },

	/*
	 * NOTE(review): XLS108 below carries the same CPUS(2,4)/512KB
	 * parameters as the XLS208 entry -- confirm these are the
	 * intended values and not a copy/paste remnant.
	 */
	{ MIPS_PRID_CID_RMI, MIPS_XLS108, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
	  CIDFL_RMI_TYPE_XLS|MIPS_CIDFL_RMI_CPUS(2,4)|MIPS_CIDFL_RMI_L2(512KB),
	  "XLS108" },

	{ MIPS_PRID_CID_RMI, MIPS_XLS104, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
	  CIDFL_RMI_TYPE_XLS|MIPS_CIDFL_RMI_CPUS(1,4)|MIPS_CIDFL_RMI_L2(256KB),
	  "XLS104" },

	{ MIPS_PRID_CID_CAVIUM, MIPS_CN31XX, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EBASE | MIPS_CP0FL_CONFIG |
	  MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 | MIPS_CP0FL_CONFIG3,
	  0,
	  "CN31xx" },

	{ MIPS_PRID_CID_CAVIUM, MIPS_CN30XX, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EBASE | MIPS_CP0FL_CONFIG |
	  MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 | MIPS_CP0FL_CONFIG3,
	  0,
	  "CN30xx" },

	{ MIPS_PRID_CID_CAVIUM, MIPS_CN50XX, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EBASE | MIPS_CP0FL_CONFIG | MIPS_CP0FL_HWRENA |
	  MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 | MIPS_CP0FL_CONFIG3,
	  0,
	  "CN50xx" },

	/* Microsoft Research' extensible MIPS */
	{ MIPS_PRID_CID_MICROSOFT, MIPS_eMIPS, 1, -1, CPU_ARCH_MIPS1, 64,
	  CPU_MIPS_NO_WAIT, 0, 0, "eMIPS CPU" },

	/* Ingenic XBurst */
	{ MIPS_PRID_CID_INGENIC, MIPS_XBURST, -1, -1, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_DOUBLE_COUNT,
	  0, 0, "XBurst" },

	/* End-of-table sentinel: NULL name terminates the lookup. */
	{ 0, 0, 0, 0, 0, 0,
	  0, 0, 0, NULL }
};
669
/*
 * FPU (CP1) implementation table, matched against the FPU revision ID.
 * Reuses struct pridtab; only id and name are meaningful here (the
 * other fields are all zero).  NOTE(review): unlike cputab there is no
 * NULL sentinel, so lookups must bound iteration by __arraycount --
 * confirm at the lookup site.
 */
static const struct pridtab fputab[] = {
	{ 0, MIPS_SOFT, -1, 0, 0, 0, 0, 0, 0, "software emulated floating point" },
	{ 0, MIPS_R2360, -1, 0, 0, 0, 0, 0, 0, "MIPS R2360 Floating Point Board" },
	{ 0, MIPS_R2010, -1, 0, 0, 0, 0, 0, 0, "MIPS R2010 FPC" },
	{ 0, MIPS_R3010, -1, 0, 0, 0, 0, 0, 0, "MIPS R3010 FPC" },
	{ 0, MIPS_R6010, -1, 0, 0, 0, 0, 0, 0, "MIPS R6010 FPC" },
	{ 0, MIPS_R4010, -1, 0, 0, 0, 0, 0, 0, "MIPS R4010 FPC" },
};
678
679 /*
680 * Company ID's are not sparse (yet), this array is indexed directly
681 * by pridtab->cpu_cid.
682 */
683 static const char * const cidnames[] = {
684 "Prehistoric",
685 "MIPS", /* or "MIPS Technologies, Inc. */
686 "Broadcom", /* or "Broadcom Corp." */
687 "Alchemy", /* or "Alchemy Semiconductor" */
688 "SiByte", /* or "Broadcom Corp. (SiByte)" */
689 "SandCraft",
690 "Phillips",
691 "Toshiba or Microsoft",
692 "LSI",
693 "(unannounced)",
694 "(unannounced)",
695 "Lexra",
696 "RMI",
697 "Cavium",
698 };
699 #define ncidnames __arraycount(cidnames)
700
#if defined(MIPS1)
/*
 * MIPS-I locore function vector
 */

/*
 * Install the MIPS-I (r3k-style) exception handlers and switch the
 * locore jump vector to the MIPS-I implementations, then flush the
 * caches so the freshly copied handler code is visible to instruction
 * fetch.  The splsw argument is unused here; the parameter only keeps
 * all *_vector_init() routines signature-compatible.
 */
static void
mips1_vector_init(const struct splsw *splsw)
{
	extern char mips1_utlb_miss[], mips1_utlb_miss_end[];
	extern char mips1_exception[], mips1_exception_end[];

	/*
	 * Copy down exception vector code.  Each handler must fit in
	 * its 128-byte vector slot.
	 */
	if (mips1_utlb_miss_end - mips1_utlb_miss > 0x80)
		panic("startup: UTLB vector code too large");
	if (mips1_exception_end - mips1_exception > 0x80)
		panic("startup: general exception vector code too large");
	/*
	 * A single copy covers both handlers: the length runs from the
	 * start of the UTLB miss code to the end of the general
	 * exception code, i.e. the two are assumed contiguous in the
	 * kernel image.  NOTE(review): nothing checks that
	 * mips1_exception actually sits 0x80 bytes past
	 * mips1_utlb_miss -- confirm the linkage guarantees this.
	 */
	memcpy((void *)MIPS_UTLB_MISS_EXC_VEC, mips1_utlb_miss,
	    mips1_exception_end - mips1_utlb_miss);

	/*
	 * Copy locore-function vector.
	 */
	mips_locore_jumpvec = mips1_locore_vec;

	/*
	 * Clear out the I and D caches.
	 */
	mips_icache_sync_all();
	mips_dcache_wbinv_all();
}
#endif /* MIPS1 */
734
#if defined(MIPS3)
/*
 * Install the r4000-style exception vectors (TLB refill, XTLB refill,
 * cache error, general exception), switch the locore jump vector to
 * the MIPS3 implementations, flush the caches, and finally clear
 * SR[BEV] so the CPU dispatches exceptions through our handlers
 * instead of the boot (ROM) vectors.  The splsw argument is unused;
 * it only keeps the *_vector_init() signatures uniform.
 */
static void
mips3_vector_init(const struct splsw *splsw)
{
	/* r4000 exception handler address and end */
	extern char mips3_exception[], mips3_exception_end[];

	/* TLB miss handler address and end */
	extern char mips3_tlb_miss[];
	extern char mips3_xtlb_miss[];

	/* Cache error handler */
	extern char mips3_cache[];

	/*
	 * Copy down exception vector code.  The four handlers are laid
	 * out back-to-back; each of the first three must be exactly one
	 * 128-byte vector slot so its successor lands on the next
	 * vector, and the last merely has to fit within its slot.
	 */
	if (mips3_xtlb_miss - mips3_tlb_miss != 0x80)
		panic("startup: %s vector code not 128 bytes in length",
		    "UTLB");
	if (mips3_cache - mips3_xtlb_miss != 0x80)
		panic("startup: %s vector code not 128 bytes in length",
		    "XTLB");
	if (mips3_exception - mips3_cache != 0x80)
		panic("startup: %s vector code not 128 bytes in length",
		    "Cache error");
	if (mips3_exception_end - mips3_exception > 0x80)
		panic("startup: %s vector code too large",
		    "General exception");

	/* One copy installs all four handlers (layout verified above). */
	memcpy((void *)MIPS_UTLB_MISS_EXC_VEC, mips3_tlb_miss,
	    mips3_exception_end - mips3_tlb_miss);

	/*
	 * Copy locore-function vector.
	 */
	mips_locore_jumpvec = mips3_locore_vec;

	/* Make the copied handler code visible to instruction fetch. */
	mips_icache_sync_all();
	mips_dcache_wbinv_all();

	/* Clear BEV in SR so we start handling our own exceptions */
	mips_cp0_status_write(mips_cp0_status_read() & ~MIPS_SR_BEV);
}
#endif /* MIPS3 */
780
#if defined(MIPS3_LOONGSON2)
/*
 * Loongson2 variant of mips3_vector_init(): installs the Loongson2
 * builds of the four r4000-style exception handlers (TLB refill,
 * XTLB refill, cache error, general exception), switches the locore
 * jump vector, flushes the caches, and clears SR[BEV] so exceptions
 * are handled by the kernel rather than the boot vectors.  splsw is
 * unused (signature uniformity only).
 */
static void
loongson2_vector_init(const struct splsw *splsw)
{
	/* r4000 exception handler address and end */
	extern char loongson2_exception[], loongson2_exception_end[];

	/* TLB miss handler address and end */
	extern char loongson2_tlb_miss[];
	extern char loongson2_xtlb_miss[];

	/* Cache error handler */
	extern char loongson2_cache[];

	/*
	 * Copy down exception vector code.  The handlers are laid out
	 * back-to-back; the first three must each be exactly one
	 * 128-byte vector slot, the last must simply fit in its slot.
	 */
	if (loongson2_xtlb_miss - loongson2_tlb_miss != 0x80)
		panic("startup: %s vector code not 128 bytes in length",
		    "UTLB");
	if (loongson2_cache - loongson2_xtlb_miss != 0x80)
		panic("startup: %s vector code not 128 bytes in length",
		    "XTLB");
	if (loongson2_exception - loongson2_cache != 0x80)
		panic("startup: %s vector code not 128 bytes in length",
		    "Cache error");
	if (loongson2_exception_end - loongson2_exception > 0x80)
		panic("startup: %s vector code too large",
		    "General exception");

	/* One copy installs all four handlers (layout verified above). */
	memcpy((void *)MIPS_UTLB_MISS_EXC_VEC, loongson2_tlb_miss,
	    loongson2_exception_end - loongson2_tlb_miss);

	/*
	 * Copy locore-function vector.
	 */
	mips_locore_jumpvec = loongson2_locore_vec;

	/* Make the copied handler code visible to instruction fetch. */
	mips_icache_sync_all();
	mips_dcache_wbinv_all();

	/* Clear BEV in SR so we start handling our own exceptions */
	mips_cp0_status_write(mips_cp0_status_read() & ~MIPS_SR_BEV);
}
#endif /* MIPS3_LOONGSON2 */
827
828 #if defined(MIPS32)
829 static void
mips32_vector_init(const struct splsw * splsw)830 mips32_vector_init(const struct splsw *splsw)
831 {
832 /* r4000 exception handler address */
833 extern char mips32_exception[];
834
835 /* TLB miss handler addresses */
836 extern char mips32_tlb_miss[];
837
838 /* Cache error handler */
839 extern char mips32_cache[];
840
841 /* MIPS32 interrupt exception handler */
842 extern char mips32_intr[], mips32_intr_end[];
843
844 /*
845 * Copy down exception vector code.
846 */
847
848 if (mips32_cache - mips32_tlb_miss != 0x100)
849 panic("startup: %s vector code not 128 bytes in length",
850 "UTLB");
851 if (mips32_exception - mips32_cache != 0x80)
852 panic("startup: %s vector code not 128 bytes in length",
853 "Cache error");
854 if (mips32_intr - mips32_exception != 0x80)
855 panic("startup: %s vector code not 128 bytes in length",
856 "General exception");
857 if (mips32_intr_end - mips32_intr > 0x80)
858 panic("startup: %s vector code too large",
859 "interrupt exception");
860
861 memcpy((void *)MIPS_UTLB_MISS_EXC_VEC, mips32_tlb_miss,
862 mips32_intr_end - mips32_tlb_miss);
863
864 /*
865 * Copy locore-function vector.
866 */
867 mips_locore_jumpvec = mips32_locore_vec;
868
869 mips_icache_sync_all();
870 mips_dcache_wbinv_all();
871
872 /* Clear BEV in SR so we start handling our own exceptions */
873 mips_cp0_status_write(mips_cp0_status_read() & ~MIPS_SR_BEV);
874
875 mips_watchpoint_init();
876 }
877 #endif /* MIPS32 */
878
879 #if defined(MIPS32R2)
880 static void
mips32r2_vector_init(const struct splsw * splsw)881 mips32r2_vector_init(const struct splsw *splsw)
882 {
883 /* r4000 exception handler address */
884 extern char mips32r2_exception[];
885
886 /* TLB miss handler addresses */
887 extern char mips32r2_tlb_miss[];
888
889 /* Cache error handler */
890 extern char mips32r2_cache[];
891
892 /* MIPS32 interrupt exception handler */
893 extern char mips32r2_intr[], mips32r2_intr_end[];
894
895 /*
896 * Copy down exception vector code.
897 */
898 if (mips32r2_cache - mips32r2_tlb_miss != 0x100)
899 panic("startup: %s vector code not 128 bytes in length",
900 "UTLB");
901 if (mips32r2_exception - mips32r2_cache != 0x80)
902 panic("startup: %s vector code not 128 bytes in length",
903 "Cache error");
904 if (mips32r2_intr - mips32r2_exception != 0x80)
905 panic("startup: %s vector code not 128 bytes in length",
906 "General exception");
907 if (mips32r2_intr_end - mips32r2_intr > 0x80)
908 panic("startup: %s vector code too large",
909 "interrupt exception");
910
911 memcpy((void *)MIPS_UTLB_MISS_EXC_VEC, mips32r2_tlb_miss,
912 mips32r2_intr_end - mips32r2_tlb_miss);
913
914 /*
915 * Let see if this cpu has DSP V2 ASE...
916 */
917 uint32_t cp0flags = mips_options.mips_cpu->cpu_cp0flags;
918 if (mipsNN_cp0_config2_read() & MIPSNN_CFG2_M) {
919 const uint32_t cfg3 = mipsNN_cp0_config3_read();
920 if (cfg3 & MIPSNN_CFG3_ULRP) {
921 cp0flags |= MIPS_CP0FL_USERLOCAL;
922 }
923 if (cfg3 & MIPSNN_CFG3_DSP2P) {
924 mips_options.mips_cpu_flags |= CPU_MIPS_HAVE_DSP;
925 }
926 }
927 /*
928 * If this CPU doesn't have a COP0 USERLOCAL register, at the end
929 * of cpu_switch resume overwrite the instructions which update it.
930 */
931 if (!(cp0flags & MIPS_CP0FL_USERLOCAL)) {
932 extern uint32_t mips32r2_cpu_switch_resume[];
933 for (uint32_t *insnp = mips32r2_cpu_switch_resume;; insnp++) {
934 KASSERT(insnp[0] != JR_RA);
935 if (insnp[0] == _LOAD_V0_L_PRIVATE_A0
936 && insnp[1] == _MTC0_V0_USERLOCAL) {
937 insnp[0] = JR_RA;
938 insnp[1] = 0; /* NOP */
939 break;
940 }
941 }
942 }
943
944 /*
945 * Copy locore-function vector.
946 */
947 mips_locore_jumpvec = mips32r2_locore_vec;
948
949 mips_icache_sync_all();
950 mips_dcache_wbinv_all();
951
952 /* Clear BEV in SR so we start handling our own exceptions */
953 mips_cp0_status_write(mips_cp0_status_read() & ~MIPS_SR_BEV);
954
955 mips_watchpoint_init();
956 }
957 #endif /* MIPS32R2 */
958
959 #if defined(MIPS64)
static void
mips64_vector_init(const struct splsw *splsw)
{
	/* r4000 exception handler address */
	extern char mips64_exception[];

	/* TLB miss handler addresses */
	extern char mips64_tlb_miss[];
	extern char mips64_xtlb_miss[];

	/* Cache error handler */
	extern char mips64_cache[];

	/* MIPS64 interrupt exception handler */
	extern char mips64_intr[], mips64_intr_end[];

	/*
	 * Copy down exception vector code.
	 *
	 * The UTLB-miss, XTLB-miss, cache-error, and general-exception
	 * handlers must each be exactly one 128-byte (0x80) vector
	 * slot; the interrupt handler must fit within its slot.
	 */

	if (mips64_xtlb_miss - mips64_tlb_miss != 0x80)
		panic("startup: %s vector code not 128 bytes in length",
		    "UTLB");
	if (mips64_cache - mips64_xtlb_miss != 0x80)
		panic("startup: %s vector code not 128 bytes in length",
		    "XTLB");
	if (mips64_exception - mips64_cache != 0x80)
		panic("startup: %s vector code not 128 bytes in length",
		    "Cache error");
	if (mips64_intr - mips64_exception != 0x80)
		panic("startup: %s vector code not 128 bytes in length",
		    "General exception");
	if (mips64_intr_end - mips64_intr > 0x80)
		panic("startup: %s vector code too large",
		    "interrupt exception");

	/* Copy all handlers as one contiguous block to the vector base. */
	memcpy((void *)MIPS_UTLB_MISS_EXC_VEC, mips64_tlb_miss,
	    mips64_intr_end - mips64_tlb_miss);

	/*
	 * Copy locore-function vector.
	 */
	mips_locore_jumpvec = mips64_locore_vec;

	/* Make the copied vector code visible to instruction fetch. */
	mips_icache_sync_all();
	mips_dcache_wbinv_all();

	/* Clear BEV in SR so we start handling our own exceptions */
	mips_cp0_status_write(mips_cp0_status_read() & ~MIPS_SR_BEV);

	mips_watchpoint_init();
}
1012 #endif /* MIPS64 */
1013
1014 #if defined(MIPS64R2)
void
mips64r2_vector_init(const struct splsw *splsw)
{
	/* r4000 exception handler address */
	extern char mips64r2_exception[];

	/* TLB miss handler addresses */
	extern char mips64r2_tlb_miss[];
	extern char mips64r2_xtlb_miss[];

	/* Cache error handler */
	extern char mips64r2_cache[];

	/* MIPS64 interrupt exception handler */
	extern char mips64r2_intr[], mips64r2_intr_end[];

	/*
	 * Copy down exception vector code.
	 *
	 * The UTLB-miss, XTLB-miss, cache-error, and general-exception
	 * handlers must each be exactly one 128-byte (0x80) vector
	 * slot; the interrupt handler must fit within its slot.
	 */

	if (mips64r2_xtlb_miss - mips64r2_tlb_miss != 0x80)
		panic("startup: %s vector code not 128 bytes in length",
		    "UTLB");
	if (mips64r2_cache - mips64r2_xtlb_miss != 0x80)
		panic("startup: %s vector code not 128 bytes in length",
		    "XTLB");
	if (mips64r2_exception - mips64r2_cache != 0x80)
		panic("startup: %s vector code not 128 bytes in length",
		    "Cache error");
	if (mips64r2_intr - mips64r2_exception != 0x80)
		panic("startup: %s vector code not 128 bytes in length",
		    "General exception");
	if (mips64r2_intr_end - mips64r2_intr > 0x80)
		panic("startup: %s vector code too large",
		    "interrupt exception");

	const intptr_t ebase = (intptr_t)mipsNN_cp0_ebase_read();
	const int cpunum = ebase & MIPS_EBASE_CPUNUM;

	// This may need to run on CPUs other than CPU0, so use EBASE to
	// fetch the appropriate base address for the exception code.
	// EBASE also contains the cpunum, so mask that out first.
	memcpy((void *)(intptr_t)(ebase & ~MIPS_EBASE_CPUNUM), mips64r2_tlb_miss,
	    mips64r2_intr_end - mips64r2_tlb_miss);

	/*
	 * See whether this CPU has a USERLOCAL register and/or the
	 * DSP v2 ASE, probed via the CP0 Config2/Config3 registers.
	 */
	uint32_t cp0flags = mips_options.mips_cpu->cpu_cp0flags;
	if (mipsNN_cp0_config2_read() & MIPSNN_CFG2_M) {
		const uint32_t cfg3 = mipsNN_cp0_config3_read();
		if (cfg3 & MIPSNN_CFG3_ULRP) {
			cp0flags |= MIPS_CP0FL_USERLOCAL;
		}
		if (cfg3 & MIPSNN_CFG3_DSP2P) {
			mips_options.mips_cpu_flags |= CPU_MIPS_HAVE_DSP;
		}
	}

	/*
	 * If this CPU doesn't have a COP0 USERLOCAL register, at the end
	 * of cpu_switch resume overwrite the instructions which update it.
	 * (Patch once, from the boot CPU only.)
	 */
	if (!(cp0flags & MIPS_CP0FL_USERLOCAL) && cpunum == 0) {
		extern uint32_t mips64r2_cpu_switch_resume[];
		for (uint32_t *insnp = mips64r2_cpu_switch_resume;; insnp++) {
			/* must find the load/mtc0 pair before the return */
			KASSERT(insnp[0] != JR_RA);
			if (insnp[0] == _LOAD_V0_L_PRIVATE_A0
			    && insnp[1] == _MTC0_V0_USERLOCAL) {
				insnp[0] = JR_RA;
				insnp[1] = 0;	/* NOP */
				break;
			}
		}
	}

	/*
	 * Copy locore-function vector (boot CPU only).
	 */
	if (cpunum == 0)
		mips_locore_jumpvec = mips64r2_locore_vec;

	/* Make the copied/patched code visible to instruction fetch. */
	mips_icache_sync_all();
	mips_dcache_wbinv_all();

	/* Clear BEV in SR so we start handling our own exceptions */
	mips_cp0_status_write(mips_cp0_status_read() & ~MIPS_SR_BEV);

	mips_watchpoint_init();
}
1105 #endif /* MIPS64R2 */
1106
1107 /*
1108 * Do all the stuff that locore normally does before calling main(),
1109 * that is common to all mips-CPU NetBSD ports.
1110 *
1111 * The principal purpose of this function is to examine the
1112 * variable cpu_id, into which the kernel locore start code
1113 * writes the CPU ID register, and to then copy appropriate
1114 * code into the CPU exception-vector entries and the jump tables
1115 * used to hide the differences in cache and TLB handling in
1116 * different MIPS CPUs.
1117 *
1118 * This should be the very first thing called by each port's
1119 * init_main() function.
1120 */
1121
1122 /*
1123 * Initialize the hardware exception vectors, and the jump table used to
1124 * call locore cache and TLB management functions, based on the kind
1125 * of CPU the kernel is running on.
1126 */
void
mips_vector_init(const struct splsw *splsw, bool multicpu_p)
{
	struct mips_options * const opts = &mips_options;
	const struct pridtab *ct;
	const mips_prid_t cpu_id = opts->mips_cpu_id;

	/*
	 * Match this CPU's PRId against cputab by company and
	 * implementation ID; revision and company-option fields are
	 * only compared when the table entry specifies them (>= 0).
	 */
	for (ct = cputab; ct->cpu_name != NULL; ct++) {
		if (MIPS_PRID_CID(cpu_id) != ct->cpu_cid ||
		    MIPS_PRID_IMPL(cpu_id) != ct->cpu_pid)
			continue;
		if (ct->cpu_rev >= 0 &&
		    MIPS_PRID_REV(cpu_id) != ct->cpu_rev)
			continue;
		if (ct->cpu_copts >= 0 &&
		    MIPS_PRID_COPTS(cpu_id) != ct->cpu_copts)
			continue;

		opts->mips_cpu = ct;
		opts->mips_cpu_arch = ct->cpu_isa;
		opts->mips_num_tlb_entries = ct->cpu_ntlb;
		break;
	}

	if (opts->mips_cpu == NULL)
		panic("CPU type (0x%x) not supported", cpu_id);

#if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
	if (MIPS_PRID_CID(cpu_id) != 0) {
		/* MIPS32/MIPS64, use coprocessor 0 config registers */
		uint32_t cfg, cfg1;

		cfg = mips3_cp0_config_read();
		cfg1 = mipsNN_cp0_config1_read();

		/* pick CPU type */
		switch (MIPSNN_GET(CFG_AT, cfg)) {
		case MIPSNN_CFG_AT_MIPS32:
			opts->mips_cpu_arch = CPU_ARCH_MIPS32;
			break;
		case MIPSNN_CFG_AT_MIPS64:
			opts->mips_cpu_arch = CPU_ARCH_MIPS64;
			break;
		case MIPSNN_CFG_AT_MIPS64S:
		default:
			panic("MIPS32/64 architecture type %d not supported",
			    MIPSNN_GET(CFG_AT, cfg));
		}

		/* upgrade the arch to R2 if Config.AR reports revision 2 */
		switch (MIPSNN_GET(CFG_AR, cfg)) {
		case MIPSNN_CFG_AR_REV1:
			break;
		case MIPSNN_CFG_AR_REV2:
			switch (opts->mips_cpu_arch) {
			case CPU_ARCH_MIPS32:
				opts->mips_cpu_arch = CPU_ARCH_MIPS32R2;
				break;
			case CPU_ARCH_MIPS64:
				opts->mips_cpu_arch = CPU_ARCH_MIPS64R2;
				break;
			default:
				printf("WARNING: MIPS32/64 arch %d revision %d "
				    "unknown!\n", opts->mips_cpu_arch,
				    MIPSNN_GET(CFG_AR, cfg));
				break;
			}
			break;
		default:
			printf("WARNING: MIPS32/64 arch revision %d "
			    "unknown!\n", MIPSNN_GET(CFG_AR, cfg));
			break;
		}

		/* figure out MMU type (and number of TLB entries) */
		switch (MIPSNN_GET(CFG_MT, cfg)) {
		case MIPSNN_CFG_MT_TLB:
			opts->mips_num_tlb_entries = MIPSNN_CFG1_MS(cfg1);
			break;
		case MIPSNN_CFG_MT_NONE:
		case MIPSNN_CFG_MT_BAT:
		case MIPSNN_CFG_MT_FIXED:
		default:
			panic("MIPS32/64 MMU type %d not supported",
			    MIPSNN_GET(CFG_MT, cfg));
		}
	}
#endif /* (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0 */

	if (opts->mips_cpu_arch < 1)
		panic("Unknown CPU ISA for CPU type 0x%x", cpu_id);
	if (opts->mips_num_tlb_entries < 1)
		panic("Unknown number of TLBs for CPU type 0x%x", cpu_id);

	/*
	 * Check CPU-specific flags.
	 */
	opts->mips_cpu_flags = opts->mips_cpu->cpu_flags;
	opts->mips_has_r4k_mmu = (opts->mips_cpu_flags & CPU_MIPS_R4K_MMU) != 0;
	opts->mips_has_llsc = (opts->mips_cpu_flags & CPU_MIPS_NO_LLSC) == 0;
#if defined(MIPS3_4100)
	/* the VR4100 family uses its own page shift */
	if (MIPS_PRID_IMPL(cpu_id) == MIPS_R4100)
		opts->mips3_pg_shift = MIPS3_4100_PG_SHIFT;
	else
#endif
		opts->mips3_pg_shift = MIPS3_DEFAULT_PG_SHIFT;

	/*
	 * Pick cache-coherency algorithms: device memory is uncached
	 * by default; CPUs with a special cached CCA encode it in
	 * their cpu_flags.
	 */
	opts->mips3_cca_devmem = CCA_UNCACHED;
	if (opts->mips_cpu_flags & CPU_MIPS_HAVE_SPECIAL_CCA) {
		uint32_t cca;

		cca = (opts->mips_cpu_flags & CPU_MIPS_CACHED_CCA_MASK) >>
		    CPU_MIPS_CACHED_CCA_SHIFT;
		opts->mips3_pg_cached = MIPS3_CCA_TO_PG(cca);
#ifndef __mips_o32
		opts->mips3_xkphys_cached = MIPS_PHYS_TO_XKPHYS(cca, 0);
#endif
	} else {
		opts->mips3_pg_cached = MIPS3_DEFAULT_PG_CACHED;
#ifndef __mips_o32
		opts->mips3_xkphys_cached = MIPS3_DEFAULT_XKPHYS_CACHED;
#endif
	}

#ifdef __HAVE_MIPS_MACHDEP_CACHE_CONFIG
	mips_machdep_cache_config();
#endif

	/*
	 * if 'splsw' is NULL, use standard SPL with COP0 status/cause
	 * otherwise use chip-specific splsw
	 */
	if (splsw == NULL) {
		mips_splsw = std_splsw;
#ifdef PARANOIA
		std_splsw_test();	/* only works with std_splsw */
#endif
	} else {
		mips_splsw = *splsw;
	}

	/*
	 * Determine cache configuration and initialize our cache
	 * frobbing routine function pointers.
	 */
	mips_config_cache();

	/*
	 * We default to RAS atomic ops since they are the lowest overhead.
	 */
#ifdef MULTIPROCESSOR
	if (multicpu_p) {
		/*
		 * If we could have multiple CPUs active,
		 * use the ll/sc variants.
		 */
		mips_locore_atomicvec = mips_llsc_locore_atomicvec;
	}
#endif
	/*
	 * Now initialize our ISA-dependent function vector: flush the
	 * TLB, program page size and wired entries, install the
	 * exception vectors, and select the matching locoresw.
	 */
	switch (opts->mips_cpu_arch) {
#if defined(MIPS1)
	case CPU_ARCH_MIPS1:
		(*mips1_locore_vec.ljv_tlb_invalidate_all)();
		mips1_vector_init(splsw);
		mips_locoresw = mips1_locoresw;
		break;
#endif
#if defined(MIPS3)
	case CPU_ARCH_MIPS3:
	case CPU_ARCH_MIPS4:
		mips3_tlb_probe();
#if defined(MIPS3_4100)
		if (MIPS_PRID_IMPL(cpu_id) == MIPS_R4100)
			mips3_cp0_pg_mask_write(MIPS4100_PG_SIZE_TO_MASK(PAGE_SIZE));
		else
#endif
		mips3_cp0_pg_mask_write(MIPS3_PG_SIZE_TO_MASK(PAGE_SIZE));
		mips3_cp0_wired_write(0);
#if defined(MIPS3_LOONGSON2)
		if (opts->mips_cpu_flags & CPU_MIPS_LOONGSON2) {
			(*loongson2_locore_vec.ljv_tlb_invalidate_all)();
			mips3_cp0_wired_write(pmap_tlb0_info.ti_wired);
			loongson2_vector_init(splsw);
			mips_locoresw = loongson2_locoresw;
			opts->mips3_cca_devmem = CCA_ACCEL;
			break;
		}
#endif /* MIPS3_LOONGSON2 */
		(*mips3_locore_vec.ljv_tlb_invalidate_all)();
		mips3_cp0_wired_write(pmap_tlb0_info.ti_wired);
		mips3_vector_init(splsw);
		mips_locoresw = mips3_locoresw;
		break;

#endif /* MIPS3 */
#if defined(MIPS32)
	case CPU_ARCH_MIPS32:
		mips3_tlb_probe();
		mips3_cp0_pg_mask_write(MIPS3_PG_SIZE_TO_MASK(PAGE_SIZE));
		mips3_cp0_wired_write(0);
		(*mips32_locore_vec.ljv_tlb_invalidate_all)();
		mips3_cp0_wired_write(pmap_tlb0_info.ti_wired);
		mips32_vector_init(splsw);
		mips_locoresw = mips32_locoresw;
		break;
#endif
#if defined(MIPS32R2)
	case CPU_ARCH_MIPS32R2:
		mips3_tlb_probe();
		mips3_cp0_pg_mask_write(MIPS3_PG_SIZE_TO_MASK(PAGE_SIZE));
		mips3_cp0_wired_write(0);
		(*mips32r2_locore_vec.ljv_tlb_invalidate_all)();
		mips3_cp0_wired_write(pmap_tlb0_info.ti_wired);
		mips32r2_vector_init(splsw);
		mips_locoresw = mips32r2_locoresw;
		break;
#endif
#if defined(MIPS64)
	case CPU_ARCH_MIPS64: {
		mips3_tlb_probe();
		mips3_cp0_pg_mask_write(MIPS3_PG_SIZE_TO_MASK(PAGE_SIZE));
		mips3_cp0_wired_write(0);
		(*mips64_locore_vec.ljv_tlb_invalidate_all)();
		mips3_cp0_wired_write(pmap_tlb0_info.ti_wired);
		mips64_vector_init(splsw);
		mips_locoresw = mips64_locoresw;
		break;
	}
#endif
#if defined(MIPS64R2)
	case CPU_ARCH_MIPS64R2: {
		mips3_tlb_probe();
		mips3_cp0_pg_mask_write(MIPS3_PG_SIZE_TO_MASK(PAGE_SIZE));
		mips3_cp0_wired_write(0);
		(*mips64r2_locore_vec.ljv_tlb_invalidate_all)();
		mips3_cp0_wired_write(pmap_tlb0_info.ti_wired);
		mips64r2_vector_init(splsw);
		mips_locoresw = mips64r2_locoresw;
		break;
	}
#endif
	default:
		printf("cpu_arch 0x%x: not supported\n", opts->mips_cpu_arch);
		cpu_reboot(RB_HALT, NULL);
	}

	/*
	 * Now that the splsw and locoresw have been filled in, fixup the
	 * jumps to any stubs to actually jump to the real routines.
	 */
	extern uint32_t _ftext[];
	extern uint32_t _etext[];
	mips_fixup_stubs(_ftext, _etext);

#if (MIPS3 + MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
	/*
	 * Install power-saving idle routines.
	 */
	if ((opts->mips_cpu_flags & CPU_MIPS_USE_WAIT) &&
	    !(opts->mips_cpu_flags & CPU_MIPS_NO_WAIT))
		mips_locoresw.lsw_cpu_idle = mips_wait_idle;
#endif /* (MIPS3 + MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0 */
}
1392
1393 void
mips_set_wbflush(void (* flush_fn)(void))1394 mips_set_wbflush(void (*flush_fn)(void))
1395 {
1396 mips_locoresw.lsw_wbflush = flush_fn;
1397 (*flush_fn)();
1398 }
1399
1400 #if defined(MIPS3_PLUS)
static void
mips3_tlb_probe(void)
{
	struct mips_options * const opts = &mips_options;
	/* Probe which PageMask bits are writable (largest page size). */
	opts->mips3_tlb_pg_mask = mips3_cp0_tlb_page_mask_probe();
	if (CPUIS64BITS) {
		/* Probe implemented VA bits via the EntryHi register. */
		opts->mips3_tlb_vpn_mask = mips3_cp0_tlb_entry_hi_probe();
		opts->mips3_tlb_vpn_mask |= PAGE_MASK;
		/* Shift the top two (region-select) bits out and back
		 * in, clearing them from the mask. */
		opts->mips3_tlb_vpn_mask <<= 2;
		opts->mips3_tlb_vpn_mask >>= 2;
		/* Probe implemented PA bits via the EntryLo register. */
		opts->mips3_tlb_pfn_mask = mips3_cp0_tlb_entry_lo_probe();
#if defined(_LP64) && defined(ENABLE_MIPS_16KB_PAGE)
		/*
		 * 16KB pages could cause our page table being able to address
		 * a larger address space than the actual chip supports.  So
		 * we need to limit the address space to what it can really
		 * address.
		 */
		if (mips_vm_maxuser_address > opts->mips3_tlb_vpn_mask + 1)
			mips_vm_maxuser_address = opts->mips3_tlb_vpn_mask + 1;
#endif
	}
}
1424 #endif
1425
1426 /*
1427 * Identify product revision IDs of CPU and FPU.
1428 */
void
cpu_identify(device_t dev)
{
	const struct mips_options * const opts = &mips_options;
	const struct mips_cache_info * const mci = &mips_cache_info;
	const mips_prid_t cpu_id = opts->mips_cpu_id;
	const mips_prid_t fpu_id = opts->mips_fpu_id;
	/* Printable cache-associativity names, indexed by way count. */
	static const char * const waynames[] = {
		[0] = "fully set-associative",
		[1] = "direct-mapped",
		[2] = "2-way set-associative",
		[3] = NULL,
		[4] = "4-way set-associative",
		[5] = "5-way set-associative",
		[6] = "6-way set-associative",
		[7] = "7-way set-associative",
		[8] = "8-way set-associative",
#ifdef MIPS64_OCTEON
		[64] = "64-way set-associative",
#endif
	};
#define	nwaynames (sizeof(waynames) / sizeof(waynames[0]))
	/* Indexed by the cache's write-through flag (0 or 1). */
	static const char * const wtnames[] = {
		"write-back",
		"write-through",
	};
	const char *cpuname, *fpuname;
	int i;

	cpuname = opts->mips_cpu->cpu_name;

	/* Look the FPU up by its PRId in fputab. */
	fpuname = NULL;
	for (i = 0; i < sizeof(fputab)/sizeof(fputab[0]); i++) {
		if (MIPS_PRID_CID(fpu_id) == fputab[i].cpu_cid &&
		    MIPS_PRID_IMPL(fpu_id) == fputab[i].cpu_pid) {
			fpuname = fputab[i].cpu_name;
			break;
		}
	}
	/* Fall back to "built-in FPU" for known matching cases. */
	if (fpuname == NULL && MIPS_PRID_IMPL(fpu_id) == MIPS_PRID_IMPL(cpu_id))
		fpuname = "built-in FPU";
	if (MIPS_PRID_IMPL(cpu_id) == MIPS_R4700)	/* FPU PRid is 0x20 */
		fpuname = "built-in FPU";
	if (MIPS_PRID_IMPL(cpu_id) == MIPS_RC64470)	/* FPU PRid is 0x21 */
		fpuname = "built-in FPU";

	/* Print the vendor name when the PRId carries a company ID. */
	if (opts->mips_cpu->cpu_cid != 0) {
		if (opts->mips_cpu->cpu_cid <= ncidnames)
			aprint_normal("%s ", cidnames[opts->mips_cpu->cpu_cid]);
		else if (opts->mips_cpu->cpu_cid == MIPS_PRID_CID_INGENIC) {
			aprint_normal("Ingenic ");
		} else {
			aprint_normal("Unknown Company ID - 0x%x", opts->mips_cpu->cpu_cid);
			aprint_normal_dev(dev, "");
		}
	}
	if (cpuname != NULL)
		aprint_normal("%s (0x%x)", cpuname, cpu_id);
	else
		aprint_normal("unknown CPU type (0x%x)", cpu_id);
	/* Pre-company-ID parts encode major.minor revision. */
	if (MIPS_PRID_CID(cpu_id) == MIPS_PRID_CID_PREHISTORIC)
		aprint_normal(" Rev. %d.%d", MIPS_PRID_REV_MAJ(cpu_id),
		    MIPS_PRID_REV_MIN(cpu_id));
	else
		aprint_normal(" Rev. %d", MIPS_PRID_REV(cpu_id));

	if (fpuname != NULL)
		aprint_normal(" with %s", fpuname);
	else
		aprint_normal(" with unknown FPC type (0x%x)", fpu_id);
	if (opts->mips_fpu_id != 0) {
		if (MIPS_PRID_CID(cpu_id) == MIPS_PRID_CID_PREHISTORIC)
			aprint_normal(" Rev. %d.%d", MIPS_PRID_REV_MAJ(fpu_id),
			    MIPS_PRID_REV_MIN(fpu_id));
		else
			aprint_normal(" Rev. %d", MIPS_PRID_REV(fpu_id));
	}
	if (opts->mips_cpu_flags & MIPS_HAS_DSP) {
		aprint_normal(" and DSPv2");
	}
	aprint_normal("\n");

	if (MIPS_PRID_CID(cpu_id) == MIPS_PRID_CID_PREHISTORIC &&
	    MIPS_PRID_RSVD(cpu_id) != 0) {
		aprint_normal_dev(dev, "NOTE: top 8 bits of prehistoric PRID not 0!\n");
		aprint_normal_dev(dev, "Please mail port-mips@NetBSD.org with %s "
		    "dmesg lines.\n", device_xname(dev));
	}

	/* Way counts must index into waynames[] safely. */
	KASSERT(mci->mci_picache_ways < nwaynames);
	KASSERT(mci->mci_pdcache_ways < nwaynames);
	KASSERT(mci->mci_sicache_ways < nwaynames);
	KASSERT(mci->mci_sdcache_ways < nwaynames);

	switch (opts->mips_cpu_arch) {
#if defined(MIPS1)
	case CPU_ARCH_MIPS1:
		if (mci->mci_picache_size)
			aprint_normal_dev(dev, "%dKB/%dB %s Instruction cache, "
			    "%d TLB entries\n", mci->mci_picache_size / 1024,
			    mci->mci_picache_line_size, waynames[mci->mci_picache_ways],
			    opts->mips_num_tlb_entries);
		else
			aprint_normal_dev(dev, "%d TLB entries\n",
			    opts->mips_num_tlb_entries);
		if (mci->mci_pdcache_size)
			aprint_normal_dev(dev, "%dKB/%dB %s %s Data cache\n",
			    mci->mci_pdcache_size / 1024, mci->mci_pdcache_line_size,
			    waynames[mci->mci_pdcache_ways],
			    wtnames[mci->mci_pdcache_write_through]);
		break;
#endif /* MIPS1 */
#if (MIPS3 + MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
	case CPU_ARCH_MIPS3:
	case CPU_ARCH_MIPS4:
	case CPU_ARCH_MIPS32:
	case CPU_ARCH_MIPS32R2:
	case CPU_ARCH_MIPS64:
	case CPU_ARCH_MIPS64R2: {
		const char *sufx = "KMGTPE";
		uint32_t pg_mask;
		aprint_normal_dev(dev, "%d TLB entries", opts->mips_num_tlb_entries);
#if !defined(__mips_o32)
		if (CPUIS64BITS) {
			int64_t pfn_mask;
			/* Derive VA width from the probed VPN mask. */
			i = ffs(~(opts->mips3_tlb_vpn_mask >> 31)) + 30;
			aprint_normal(", %d%cB (%d-bit) VAs",
			    1 << (i % 10), sufx[(i / 10) - 1], i);
			/* Derive PA width from the probed PFN mask. */
			for (i = 64, pfn_mask = opts->mips3_tlb_pfn_mask << 6;
			     pfn_mask > 0; i--, pfn_mask <<= 1)
				;
			aprint_normal(", %d%cB (%d-bit) PAs",
			    1 << (i % 10), sufx[(i / 10) - 1], i);
		}
#endif
		/*
		 * Walk the probed page-mask two bits at a time to find
		 * the largest supported page size (each valid step
		 * quadruples the size).
		 */
		for (i = 4, pg_mask = opts->mips3_tlb_pg_mask >> 13;
		     pg_mask != 0; ) {
			if ((pg_mask & 3) != 3)
				break;
			pg_mask >>= 2;
			i *= 4;
			if (i == 1024) {
				i = 1;
				sufx++;
			}
		}
		aprint_normal(", %d%cB max page size\n", i, sufx[0]);
		if (mci->mci_picache_size)
			aprint_normal_dev(dev,
			    "%dKB/%dB %s L1 instruction cache\n",
			    mci->mci_picache_size / 1024,
			    mci->mci_picache_line_size, waynames[mci->mci_picache_ways]);
		if (mci->mci_pdcache_size)
			aprint_normal_dev(dev,
			    "%dKB/%dB %s %s %sL1 data cache\n",
			    mci->mci_pdcache_size / 1024, mci->mci_pdcache_line_size,
			    waynames[mci->mci_pdcache_ways],
			    wtnames[mci->mci_pdcache_write_through],
			    ((opts->mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT)
				? "coherent " : ""));
		if (mci->mci_sdcache_line_size)
			aprint_normal_dev(dev,
			    "%dKB/%dB %s %s L2 %s cache\n",
			    mci->mci_sdcache_size / 1024, mci->mci_sdcache_line_size,
			    waynames[mci->mci_sdcache_ways],
			    wtnames[mci->mci_sdcache_write_through],
			    mci->mci_scache_unified ? "unified" : "data");
		break;
	}
#endif /* (MIPS3 + MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0 */
	default:
		panic("cpu_identify: impossible");
	}
}
1603
1604 /*
1605 * Set registers on exec.
1606 * Clear all registers except sp, pc, and t9.
1607 * $sp is set to the stack pointer passed in. $pc is set to the entry
1608 * point given by the exec_package passed in, as is $t9 (used for PIC
1609 * code by the MIPS elf abi).
1610 */
1611 void
setregs(struct lwp * l,struct exec_package * pack,vaddr_t stack)1612 setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
1613 {
1614 struct trapframe * const tf = l->l_md.md_utf;
1615 struct proc * const p = l->l_proc;
1616
1617 memset(tf, 0, sizeof(struct trapframe));
1618 tf->tf_regs[_R_SP] = (intptr_t)stack;
1619 tf->tf_regs[_R_PC] = (intptr_t)pack->ep_entry & ~3;
1620 tf->tf_regs[_R_T9] = (intptr_t)pack->ep_entry & ~3; /* abicall requirement */
1621 tf->tf_regs[_R_SR] = PSL_USERSET;
1622 #if !defined(__mips_o32)
1623 /*
1624 * allow 64bit ops in userland for non-O32 ABIs
1625 */
1626 if (p->p_md.md_abi == _MIPS_BSD_API_N32
1627 && (CPUISMIPS64 || CPUISMIPS64R2)) {
1628 tf->tf_regs[_R_SR] |= MIPS_SR_PX;
1629 } else if (p->p_md.md_abi != _MIPS_BSD_API_O32) {
1630 tf->tf_regs[_R_SR] |= MIPS_SR_UX;
1631 }
1632 if (_MIPS_SIM_NEWABI_P(p->p_md.md_abi))
1633 tf->tf_regs[_R_SR] |= MIPS3_SR_FR;
1634 #endif
1635 #ifdef _LP64
1636 /*
1637 * If we are using a 32-bit ABI on a 64-bit kernel, mark the process
1638 * that way. If we aren't, clear it.
1639 */
1640 if (p->p_md.md_abi == _MIPS_BSD_API_N32
1641 || p->p_md.md_abi == _MIPS_BSD_API_O32) {
1642 p->p_flag |= PK_32;
1643 } else {
1644 p->p_flag &= ~PK_32;
1645 }
1646 #endif
1647 /*
1648 * Set up arguments for _start():
1649 * _start(stack, obj, cleanup, ps_strings);
1650 *
1651 * Notes:
1652 * - obj and cleanup are the auxiliary and termination
1653 * vectors. They are fixed up by ld.elf_so.
1654 * - ps_strings is a NetBSD extension.
1655 */
1656 tf->tf_regs[_R_A0] = (intptr_t)stack;
1657 tf->tf_regs[_R_A1] = 0;
1658 tf->tf_regs[_R_A2] = 0;
1659 tf->tf_regs[_R_A3] = p->p_psstrp;
1660
1661 l->l_md.md_ss_addr = 0;
1662 }
1663
1664 #ifdef __HAVE_BOOTINFO_H
1665 /*
1666 * Machine dependent system variables.
1667 */
1668 static int
sysctl_machdep_booted_kernel(SYSCTLFN_ARGS)1669 sysctl_machdep_booted_kernel(SYSCTLFN_ARGS)
1670 {
1671 struct btinfo_bootpath *bibp;
1672 struct sysctlnode node;
1673
1674 bibp = lookup_bootinfo(BTINFO_BOOTPATH);
1675 if(!bibp)
1676 return(ENOENT); /* ??? */
1677
1678 node = *rnode;
1679 node.sysctl_data = bibp->bootpath;
1680 node.sysctl_size = sizeof(bibp->bootpath);
1681 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
1682 }
1683 #endif
1684
SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
{

	/* Root of the machdep subtree. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "machdep", NULL,
		       NULL, 0, NULL, 0,
		       CTL_MACHDEP, CTL_EOL);

	/* machdep.console_device */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRUCT, "console_device", NULL,
		       sysctl_consdev, 0, NULL, sizeof(dev_t),
		       CTL_MACHDEP, CPU_CONSDEV, CTL_EOL);
#ifdef __HAVE_BOOTINFO_H
	/* machdep.booted_kernel — path from bootinfo, if available. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "booted_kernel", NULL,
		       sysctl_machdep_booted_kernel, 0, NULL, 0,
		       CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
#endif
	/* machdep.root_device */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "root_device", NULL,
		       sysctl_root_device, 0, NULL, 0,
		       CTL_MACHDEP, CPU_ROOT_DEVICE, CTL_EOL);
	/* machdep.llsc — whether the CPU supports ll/sc atomics. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_INT, "llsc", NULL,
		       NULL, MIPS_HAS_LLSC, NULL, 0,
		       CTL_MACHDEP, CPU_LLSC, CTL_EOL);
#ifdef MIPS3_LOONGSON2
	/* machdep.loongson-mmi — Loongson multimedia extensions. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_INT, "loongson-mmi", NULL,
		       NULL, MIPS_HAS_LMMI, NULL, 0,
		       CTL_MACHDEP, CPU_LMMI, CTL_EOL);
#endif
	/* machdep.fpu_present — compile-time FPU availability. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_INT, "fpu_present", NULL,
		       NULL,
#ifdef NOFPU
		       0,
#else
		       1,
#endif
		       NULL, 0, CTL_MACHDEP, CTL_CREATE, CTL_EOL);
}
1734
1735 /*
1736 * These are imported from platform-specific code.
1737 * XXX Should be declared in a header file.
1738 */
extern phys_ram_seg_t mem_clusters[];	/* physical RAM segments */
extern int mem_cluster_cnt;		/* number of entries in mem_clusters[] */

/*
 * These variables are needed by /sbin/savecore.
 */
u_int32_t dumpmag = 0x8fca0101;	/* magic number */
int	dumpsize = 0;		/* pages */
long	dumplo = 0;		/* blocks */

#if 0
struct pcb dumppcb;
#endif
1752
1753 /*
1754 * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers.
1755 */
1756 int
cpu_dumpsize(void)1757 cpu_dumpsize(void)
1758 {
1759 int size;
1760
1761 size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)) +
1762 ALIGN(mem_cluster_cnt * sizeof(phys_ram_seg_t));
1763 if (roundup(size, dbtob(1)) != dbtob(1))
1764 return (-1);
1765
1766 return (1);
1767 }
1768
1769 /*
1770 * cpu_dump_mempagecnt: calculate size of RAM (in pages) to be dumped.
1771 */
1772 u_long
cpu_dump_mempagecnt(void)1773 cpu_dump_mempagecnt(void)
1774 {
1775 u_long i, n;
1776
1777 n = 0;
1778 for (i = 0; i < mem_cluster_cnt; i++)
1779 n += atop(mem_clusters[i].size);
1780 return (n);
1781 }
1782
1783 /*
1784 * cpu_dump: dump machine-dependent kernel core dump headers.
1785 */
int
cpu_dump(void)
{
	int (*dump)(dev_t, daddr_t, void *, size_t);
	char buf[dbtob(1)];		/* headers fit in one disk block */
	kcore_seg_t *segp;
	cpu_kcore_hdr_t *cpuhdrp;
	phys_ram_seg_t *memsegp;
	const struct bdevsw *bdev;
	int i;

	bdev = bdevsw_lookup(dumpdev);
	if (bdev == NULL)
		return (ENXIO);

	dump = bdev->d_dump;

	/*
	 * Lay out the headers inside one zeroed block:
	 * [kcore_seg_t][cpu_kcore_hdr_t][phys_ram_seg_t array],
	 * each ALIGN()ed.  This layout must match cpu_dumpsize().
	 */
	memset(buf, 0, sizeof buf);
	segp = (kcore_seg_t *)buf;
	cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp))];
	memsegp = (phys_ram_seg_t *)&buf[ ALIGN(sizeof(*segp)) +
	    ALIGN(sizeof(*cpuhdrp))];

	/*
	 * Generate a segment header.
	 */
	CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	segp->c_size = dbtob(1) - ALIGN(sizeof(*segp));

	/*
	 * Add the machine-dependent header info: page-table constants
	 * differ between the r4000-style and r3000-style MMUs.
	 */
	if (MIPS_HAS_R4K_MMU) {
		cpuhdrp->archlevel = 3;
		cpuhdrp->pg_shift = MIPS3_PG_SHIFT;
		cpuhdrp->pg_frame = MIPS3_PG_FRAME;
		cpuhdrp->pg_v = MIPS3_PG_V;
	} else {
		cpuhdrp->archlevel = 1;
		cpuhdrp->pg_shift = MIPS1_PG_SHIFT;
		cpuhdrp->pg_frame = MIPS1_PG_FRAME;
		cpuhdrp->pg_v = MIPS1_PG_V;
	}
	cpuhdrp->sysmappa = MIPS_KSEG0_TO_PHYS(curcpu()->ci_pmap_kern_segtab);
	cpuhdrp->nmemsegs = mem_cluster_cnt;

	/*
	 * Fill in the memory segment descriptors.
	 */
	for (i = 0; i < mem_cluster_cnt; i++) {
		memsegp[i].start = mem_clusters[i].start;
		memsegp[i].size = mem_clusters[i].size;
	}

	/* Write the single header block to the dump device. */
	return (dump(dumpdev, dumplo, (void *)buf, dbtob(1)));
}
1842
1843 /*
1844 * This is called by main to set dumplo and dumpsize.
1845 * Dumps always skip the first CLBYTES of disk space
1846 * in case there might be a disk label stored there.
1847 * If there is extra space, put dump at the end to
1848 * reduce the chance that swapping trashes it.
1849 */
1850 void
cpu_dumpconf(void)1851 cpu_dumpconf(void)
1852 {
1853 int nblks, dumpblks; /* size of dump area */
1854
1855 if (dumpdev == NODEV)
1856 goto bad;
1857 nblks = bdev_size(dumpdev);
1858 if (nblks <= ctod(1))
1859 goto bad;
1860
1861 dumpblks = cpu_dumpsize();
1862 if (dumpblks < 0)
1863 goto bad;
1864 dumpblks += ctod(cpu_dump_mempagecnt());
1865
1866 /* If dump won't fit (incl. room for possible label), punt. */
1867 if (dumpblks > (nblks - ctod(1)))
1868 goto bad;
1869
1870 /* Put dump at end of partition */
1871 dumplo = nblks - dumpblks;
1872
1873 /* dumpsize is in page units, and doesn't include headers. */
1874 dumpsize = cpu_dump_mempagecnt();
1875 return;
1876
1877 bad:
1878 dumpsize = 0;
1879 }
1880
/*
 * Dump the kernel's image to the swap partition.
 */
#define BYTES_PER_DUMP PAGE_SIZE	/* transfer size per d_dump call */

/*
 * dumpsys: write the MD dump headers followed by every physical memory
 * cluster to the configured dump device, printing progress in MB.
 * Returns silently if no usable dump device/geometry is configured.
 */
void
dumpsys(void)
{
	u_long totalbytesleft, bytes, i, n, memcl;
	u_long maddr;
	int psize;
	daddr_t blkno;
	const struct bdevsw *bdev;
	int (*dump)(dev_t, daddr_t, void *, size_t);
	int error;

#if 0
	/* Save registers. */
	savectx(&dumppcb);
#endif

	if (dumpdev == NODEV)
		return;
	bdev = bdevsw_lookup(dumpdev);
	if (bdev == NULL || bdev->d_psize == NULL)
		return;

	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already configured...
	 */
	if (dumpsize == 0)
		cpu_dumpconf();
	if (dumplo <= 0) {
		printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
		    minor(dumpdev));
		return;
	}
	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
	    minor(dumpdev), dumplo);

	psize = bdev_size(dumpdev);
	printf("dump ");
	if (psize == -1) {
		printf("area unavailable\n");
		return;
	}

	/* XXX should purge all outstanding keystrokes. */

	/* Write the MD/kcore headers first. */
	if ((error = cpu_dump()) != 0)
		goto err;

	totalbytesleft = ptoa(cpu_dump_mempagecnt());
	blkno = dumplo + cpu_dumpsize();	/* data starts after headers */
	dump = bdev->d_dump;
	error = 0;

	/* Dump each memory cluster in BYTES_PER_DUMP-sized transfers. */
	for (memcl = 0; memcl < mem_cluster_cnt; memcl++) {
		maddr = mem_clusters[memcl].start;
		bytes = mem_clusters[memcl].size;

		for (i = 0; i < bytes; i += n, totalbytesleft -= n) {
			void *maddr_va;

			/* Print out how many MBs we have left to go. */
			if ((totalbytesleft % (1024*1024)) == 0)
				printf_nolog("%ld ",
				    totalbytesleft / (1024 * 1024));

			/* Limit size for next transfer. */
			n = bytes - i;
			if (n > BYTES_PER_DUMP)
				n = BYTES_PER_DUMP;

			/* Access the physical page through a direct map. */
#ifdef _LP64
			maddr_va = (void *)MIPS_PHYS_TO_XKPHYS_CACHED(maddr);
#else
			maddr_va = (void *)MIPS_PHYS_TO_KSEG0(maddr);
#endif
			error = (*dump)(dumpdev, blkno, maddr_va, n);
			if (error)
				goto err;
			maddr += n;
			blkno += btodb(n);	/* XXX? */

			/* XXX should look for keystrokes, to cancel. */
		}
	}

 err:
	/* Report the outcome of the dump. */
	switch (error) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	case EINTR:
		printf("aborted from console\n");
		break;

	case 0:
		printf("succeeded\n");
		break;

	default:
		printf("error %d\n", error);
		break;
	}
	printf("\n\n");
	delay(5000000);		/* 5 seconds */
}
2005
/*
 * Steal pages for the kernel message buffer from the top of the last
 * suitable physical segment, before uvm takes ownership of the pages.
 */
void
mips_init_msgbuf(void)
{
	vsize_t sz = (vsize_t)round_page(MSGBUFSIZE);
	vsize_t reqsz = sz;	/* requested size, for the warning below */
	u_int bank = vm_nphysseg - 1;
	struct vm_physseg *vps = VM_PHYSMEM_PTR(bank);
#ifndef _LP64
	/*
	 * Find the first (searching downward) physical segment whose
	 * pages can be mapped through KSEG0.
	 * (Original comment read "Fist the physical segment".)
	 */
	for (; vps >= vm_physmem; vps--, bank--) {
		if (vps->avail_start + atop(sz) <= atop(MIPS_PHYS_MASK))
			break;
	}
#endif

	/* shrink so that it'll fit in the last segment */
	if ((vps->avail_end - vps->avail_start) < atop(sz))
		sz = ptoa(vps->avail_end - vps->avail_start);

	/* Carve the pages off the top of the chosen segment. */
	vps->end -= atop(sz);
	vps->avail_end -= atop(sz);
#ifdef _LP64
	msgbufaddr = (void *) MIPS_PHYS_TO_XKPHYS_CACHED(ptoa(vps->end));
#else
	msgbufaddr = (void *) MIPS_PHYS_TO_KSEG0(ptoa(vps->end));
#endif
	initmsgbuf(msgbufaddr, sz);

	/* Remove the [last] segment if it now has no pages. */
	if (vps->start == vps->end) {
		for (vm_nphysseg--; bank < vm_nphysseg - 1; bank++) {
			VM_PHYSMEM_PTR_SWAP(bank, bank + 1);
		}
	}

	/* warn if the message buffer had to be shrunk */
	if (sz != reqsz)
		printf("WARNING: %"PRIdVSIZE" bytes not available for msgbuf "
		    "in last cluster (%"PRIdVSIZE" used)\n", reqsz, sz);
}
2048
/*
 * Allocate (if the port has not already done so) and initialize lwp0's
 * u-area: place the trapframe at the top of the u-area and build an
 * initial PCB context so the first switch away from lwp0 works.
 */
void
mips_init_lwp0_uarea(void)
{
	struct lwp * const l = &lwp0;
	vaddr_t v;

	/* Reuse a port-provided u-area if one was set up earlier. */
	if (l->l_addr == NULL) {
		v = uvm_pageboot_alloc(USPACE);
		uvm_lwp_setuarea(&lwp0, v);
	} else {
		v = (vaddr_t)l->l_addr;
	}

	/* The trapframe occupies the top of the u-area. */
	l->l_md.md_utf = (struct trapframe *)(v + USPACE) - 1;
	struct pcb * const pcb = lwp_getpcb(l);
	/*
	 * Now zero out the only two areas of the uarea that we care about.
	 */
	memset(l->l_md.md_utf, 0, sizeof(*l->l_md.md_utf));
	memset(pcb, 0, sizeof(*pcb));

	/* Interrupts enabled, with IPL_SCHED's sources masked off. */
	pcb->pcb_context.val[_L_SR] = MIPS_SR_INT_IE
	    | (ipl_sr_map.sr_bits[IPL_SCHED] ^ MIPS_INT_MASK);
#ifdef _mips_n32
	/* N32 kernels need 64-bit kernel/user addressing (KX/UX). */
	pcb->pcb_context.val[_L_SR] |= MIPS_SR_KX | MIPS_SR_UX;
	l->l_md.md_utf->tf_regs[_R_SR] = MIPS_SR_KX | MIPS_SR_UX;
#endif
#ifdef _LP64
	/* LP64 kernels likewise run with KX/UX set. */
	pcb->pcb_context.val[_L_SR] |= MIPS_SR_KX | MIPS_SR_UX;
	l->l_md.md_utf->tf_regs[_R_SR] = MIPS_SR_KX | MIPS_SR_UX;
#endif
}
2081
/*
 * Freelist that pool pages are allocated from; mips_page_physload()
 * below switches this to a low-memory freelist when RAM exists above
 * the direct-map boundaries.
 */
int mips_poolpage_vmfreelist = VM_FREELIST_DEFAULT;

#define HALFGIG ((paddr_t)512 * 1024 * 1024)		/* 512MB boundary */
#define FOURGIG ((paddr_t)4 * 1024 * 1024 * 1024)	/* 4GB boundary */
2086
2087 void
mips_page_physload(vaddr_t vkernstart,vaddr_t vkernend,const phys_ram_seg_t * segs,size_t nseg,const struct mips_vmfreelist * flp,size_t nfl)2088 mips_page_physload(vaddr_t vkernstart, vaddr_t vkernend,
2089 const phys_ram_seg_t *segs, size_t nseg,
2090 const struct mips_vmfreelist *flp, size_t nfl)
2091 {
2092 const paddr_t kernstart = MIPS_KSEG0_TO_PHYS(trunc_page(vkernstart));
2093 const paddr_t kernend = MIPS_KSEG0_TO_PHYS(round_page(vkernend));
2094 #if defined(VM_FREELIST_FIRST4G) || defined(VM_FREELIST_FIRST512M)
2095 #ifdef VM_FREELIST_FIRST512M
2096 bool need512m = false;
2097 #endif
2098 #ifdef VM_FREELIST_FIRST4G
2099 bool need4g = false;
2100 #endif
2101
2102 /*
2103 * Do a first pass and see what ranges memory we have to deal with.
2104 */
2105 for (size_t i = 0; i < nseg; i++) {
2106 #ifdef VM_FREELIST_FIRST4G
2107 if (round_page(segs[i].start + segs[i].size) > FOURGIG) {
2108 need4g = true;
2109 mips_poolpage_vmfreelist = VM_FREELIST_FIRST4G;
2110 }
2111 #endif
2112 #ifdef VM_FREELIST_FIRST512M
2113 if (round_page(segs[i].start + segs[i].size) > HALFGIG) {
2114 need512m = true;
2115 mips_poolpage_vmfreelist = VM_FREELIST_FIRST512M;
2116 }
2117 #endif
2118 }
2119 #endif /* VM_FREELIST_FIRST512M || VM_FREELIST_FIRST4G */
2120
2121 for (; nseg-- > 0; segs++) {
2122 /*
2123 * Make sure everything is in page units.
2124 */
2125 paddr_t segstart = round_page(segs->start);
2126 const paddr_t segfinish = trunc_page(segs->start + segs->size);
2127
2128 printf("phys segment: %#"PRIxPADDR" @ %#"PRIxPADDR"\n",
2129 segfinish - segstart, segstart);
2130
2131 /*
2132 * Page 0 is reserved for exception vectors.
2133 */
2134 if (segstart == 0) {
2135 segstart = PAGE_SIZE;
2136 }
2137 while (segstart < segfinish) {
2138 int freelist = -1; /* unknown freelist */
2139 paddr_t segend = segfinish;
2140 for (size_t i = 0; i < nfl; i++) {
2141 /*
2142 * If this segment doesn't overlap the freelist
2143 * at all, skip it.
2144 */
2145 if (segstart >= flp[i].fl_end
2146 || segend <= flp[i].fl_start)
2147 continue;
2148 /*
2149 * If the start of this segment starts before
2150 * the start of the freelist, then limit the
2151 * segment to loaded to the part that doesn't
2152 * match this freelist and fall back to normal
2153 * freelist matching.
2154 */
2155 if (segstart < flp[i].fl_start) {
2156 segstart = flp[i].fl_start;
2157 break;
2158 }
2159
2160 /*
2161 * We've matched this freelist so remember it.
2162 */
2163 freelist = flp->fl_freelist;
2164
2165 /*
2166 * If this segment extends past the end of this
2167 * freelist, bound to segment to the freelist.
2168 */
2169 if (segend > flp[i].fl_end)
2170 segend = flp[i].fl_end;
2171 break;
2172 }
2173 /*
2174 * If we didn't match one of the port dependent
2175 * freelists, let's try the common ones.
2176 */
2177 if (freelist == -1) {
2178 #ifdef VM_FREELIST_FIRST512M
2179 if (need512m && segstart < HALFGIG) {
2180 freelist = VM_FREELIST_FIRST512M;
2181 if (segend > HALFGIG)
2182 segend = HALFGIG;
2183 } else
2184 #endif
2185 #ifdef VM_FREELIST_FIRST4G
2186 if (need4g && segstart < FOURGIG) {
2187 freelist = VM_FREELIST_FIRST4G;
2188 if (segend > FOURGIG)
2189 segend = FOURGIG;
2190 } else
2191 #endif
2192 freelist = VM_FREELIST_DEFAULT;
2193 }
2194
2195 /*
2196 * Make sure the memory we provide to uvm doesn't
2197 * include the kernel.
2198 */
2199 if (segstart < kernend && segend > kernstart) {
2200 if (segstart < kernstart) {
2201 /*
2202 * Only add the memory before the
2203 * kernel.
2204 */
2205 segend = kernstart;
2206 } else if (segend > kernend) {
2207 /*
2208 * Only add the memory after the
2209 * kernel.
2210 */
2211 segstart = kernend;
2212 } else {
2213 /*
2214 * Just skip the segment entirely since
2215 * it's completely inside the kernel.
2216 */
2217 printf("skipping %#"PRIxPADDR" @ %#"PRIxPADDR" (kernel)\n",
2218 segend - segstart, segstart);
2219 break;
2220 }
2221 }
2222
2223 /*
2224 * Now we give this segment to uvm.
2225 */
2226 printf("adding %#"PRIxPADDR" @ %#"PRIxPADDR" to freelist %d\n",
2227
2228 segend - segstart, segstart, freelist);
2229 paddr_t first = atop(segstart);
2230 paddr_t last = atop(segend);
2231 uvm_page_physload(first, last, first, last, freelist);
2232
2233 /*
2234 * Start where we finished.
2235 */
2236 segstart = segend;
2237 }
2238 }
2239 }
2240
2241 /*
2242 * Start a new LWP
2243 */
2244 void
startlwp(void * arg)2245 startlwp(void *arg)
2246 {
2247 ucontext_t * const uc = arg;
2248 lwp_t * const l = curlwp;
2249 int error __diagused;
2250
2251 error = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
2252 KASSERT(error == 0);
2253
2254 kmem_free(uc, sizeof(ucontext_t));
2255 userret(l);
2256 }
2257
2258 #ifdef COMPAT_NETBSD32
2259 /*
2260 * Start a new LWP
2261 */
2262 void
startlwp32(void * arg)2263 startlwp32(void *arg)
2264 {
2265 ucontext32_t * const uc = arg;
2266 lwp_t * const l = curlwp;
2267 int error __diagused;
2268
2269 error = cpu_setmcontext32(l, &uc->uc_mcontext, uc->uc_flags);
2270 KASSERT(error == 0);
2271
2272 /* Note: we are freeing ucontext_t, not ucontext32_t. */
2273 kmem_free(uc, sizeof(ucontext_t));
2274 userret(l);
2275 }
2276 #endif /* COMPAT_NETBSD32 */
2277
2278 #ifdef PARANOIA
/*
 * Self-test for the standard spl implementation: exercise every spl*()
 * entry point and splraise()/splx() pair, asserting after each
 * transition that the CP0 status interrupt mask matches ipl_sr_map and
 * that ci_cpl tracks the requested IPL.  Entered with interrupts
 * disabled (asserted below).
 */
void
std_splsw_test(void)
{
	struct cpu_info * const ci = curcpu();
	const uint32_t * const sr_map = ipl_sr_map.sr_bits;
	uint32_t status = mips_cp0_status_read();
	uint32_t sr_bits;
	int s;

	/* Interrupts must be disabled on entry. */
	KASSERT((status & MIPS_SR_INT_IE) == 0);

	sr_bits = sr_map[IPL_NONE];

	/* At IPL_NONE every interrupt source should be unmasked. */
	splx(IPL_NONE);
	status = mips_cp0_status_read() & MIPS_INT_MASK;
	KASSERT(status == MIPS_INT_MASK);
	KASSERT(ci->ci_cpl == IPL_NONE);

	/* Step up through each IPL, checking mask, cpl, and prior IPL. */
	s = splsoftclock();
	status = mips_cp0_status_read() & MIPS_INT_MASK;
	KASSERT((status ^ sr_map[IPL_SOFTCLOCK]) == MIPS_INT_MASK);
	KASSERT(ci->ci_cpl == IPL_SOFTCLOCK);
	KASSERT(s == IPL_NONE);

	s = splsoftbio();
	status = mips_cp0_status_read() & MIPS_INT_MASK;
	KASSERT((status ^ sr_map[IPL_SOFTBIO]) == MIPS_INT_MASK);
	KASSERT(ci->ci_cpl == IPL_SOFTBIO);
	KASSERT(s == IPL_SOFTCLOCK);

	s = splsoftnet();
	status = mips_cp0_status_read() & MIPS_INT_MASK;
	KASSERT((status ^ sr_map[IPL_SOFTNET]) == MIPS_INT_MASK);
	KASSERT(ci->ci_cpl == IPL_SOFTNET);
	KASSERT(s == IPL_SOFTBIO);

	s = splsoftserial();
	status = mips_cp0_status_read() & MIPS_INT_MASK;
	KASSERT((status ^ sr_map[IPL_SOFTSERIAL]) == MIPS_INT_MASK);
	KASSERT(ci->ci_cpl == IPL_SOFTSERIAL);
	KASSERT(s == IPL_SOFTNET);

	s = splvm();
	status = mips_cp0_status_read() & MIPS_INT_MASK;
	KASSERT((status ^ sr_map[IPL_VM]) == MIPS_INT_MASK);
	KASSERT(ci->ci_cpl == IPL_VM);
	KASSERT(s == IPL_SOFTSERIAL);

	s = splsched();
	status = mips_cp0_status_read() & MIPS_INT_MASK;
	KASSERT((status ^ sr_map[IPL_SCHED]) == MIPS_INT_MASK);
	KASSERT(ci->ci_cpl == IPL_SCHED);
	KASSERT(s == IPL_VM);

	s = splhigh();
	status = mips_cp0_status_read() & MIPS_INT_MASK;
	KASSERT((status ^ sr_map[IPL_HIGH]) == MIPS_INT_MASK);
	KASSERT(ci->ci_cpl == IPL_HIGH);
	KASSERT(s == IPL_SCHED);

	/* Drop back to IPL_NONE; everything unmasked again. */
	splx(IPL_NONE);
	status = mips_cp0_status_read() & MIPS_INT_MASK;
	KASSERT(status == MIPS_INT_MASK);
	KASSERT(ci->ci_cpl == IPL_NONE);

	for (int r = IPL_SOFTCLOCK; r <= IPL_HIGH; r++) {
		/*
		 * As IPL increases, more intrs may be masked but no intrs
		 * may become unmasked.
		 */
		KASSERT((sr_map[r] & sr_bits) == sr_bits);
		sr_bits |= sr_map[r];
		s = splraise(r);
		KASSERT(s == IPL_NONE);

		/* From base IPL r, raise to every t >= r and drop back. */
		for (int t = r; t <= IPL_HIGH; t++) {
			int o = splraise(t);
			status = mips_cp0_status_read() & MIPS_INT_MASK;
			KASSERT((status ^ sr_map[t]) == MIPS_INT_MASK);
			KASSERT(ci->ci_cpl == t);
			KASSERT(o == r);

			splx(o);
			status = mips_cp0_status_read() & MIPS_INT_MASK;
			KASSERT((status ^ sr_map[r]) == MIPS_INT_MASK);
			KASSERT(ci->ci_cpl == r);
		}

		splx(s);
		status = mips_cp0_status_read() & MIPS_INT_MASK;
		KASSERT((status ^ sr_map[s]) == MIPS_INT_MASK);
		KASSERT(ci->ci_cpl == s);
	}

	status = mips_cp0_status_read() & MIPS_INT_MASK;
	KASSERT(status == MIPS_INT_MASK);
	KASSERT(ci->ci_cpl == IPL_NONE);
}
2377
2378 #endif /* PARANOIA */
2379
/*
 * If physical address "pa" is reachable through a direct map, store the
 * corresponding virtual address in *vap and return true; otherwise
 * return false.
 */
bool
mm_md_direct_mapped_phys(paddr_t pa, vaddr_t *vap)
{
#ifdef _LP64
	/*
	 * NOTE(review): MIPS_XKSEG_P is applied to a physical address
	 * here — presumably it bounds-checks pa against the XKPHYS
	 * window; confirm against the macro definition.
	 */
	if (MIPS_XKSEG_P(pa)) {
		*vap = MIPS_PHYS_TO_XKPHYS_CACHED(pa);
		return true;
	}
#endif
	if (MIPS_KSEG0_P(pa)) {
		*vap = MIPS_PHYS_TO_KSEG0(pa);
		return true;
	}
	return false;
}
2395
2396 bool
mm_md_page_color(paddr_t pa,int * colorp)2397 mm_md_page_color(paddr_t pa, int *colorp)
2398 {
2399 if (MIPS_CACHE_VIRTUAL_ALIAS) {
2400 struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
2401 KASSERT(pg != NULL);
2402 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
2403 *colorp = atop(mdpg->mdpg_first.pv_va);
2404 return !mips_cache_badalias(pa, mdpg->mdpg_first.pv_va);
2405 }
2406 *colorp = 0;
2407 return true;
2408 }
2409
2410 int
mm_md_physacc(paddr_t pa,vm_prot_t prot)2411 mm_md_physacc(paddr_t pa, vm_prot_t prot)
2412 {
2413
2414 return (pa < ctob(physmem)) ? 0 : EFAULT;
2415 }
2416
/*
 * Decide whether kernel address "ptr" may be accessed via /dev/kmem.
 * Returns EFAULT for addresses outside the permitted windows; sets
 * *handled = true when the access is fully resolved here, false when
 * the caller must check further (e.g. mapped KSEG2/XKSEG addresses).
 */
int
mm_md_kernacc(void *ptr, vm_prot_t prot, bool *handled)
{
	const vaddr_t v = (vaddr_t)ptr;

#ifdef _LP64
	/* User and unmapped-low addresses are not accessible. */
	if (v < MIPS_XKPHYS_START) {
		return EFAULT;
	}
	/* XKPHYS access beyond the end of managed RAM (+msgbuf) fails. */
	if (MIPS_XKPHYS_P(v) && v > MIPS_PHYS_TO_XKPHYS_CACHED(pmap_limits.avail_end +
	    mips_round_page(MSGBUFSIZE))) {
		return EFAULT;
	}
	/* XKSEG below KSEG0: directly accessible. */
	if (MIPS_XKSEG_P(v) && v < MIPS_KSEG0_START) {
		*handled = true;
		return 0;
	}
	/* Uncached/mapped windows are off limits. */
	if (MIPS_KSEG1_P(v) || MIPS_KSEG2_P(v)) {
		return EFAULT;
	}
#else
	/* Anything below KSEG0 (i.e. user space) is not accessible. */
	if (v < MIPS_KSEG0_START) {
		return EFAULT;
	}
	/* KSEG0 within managed RAM (+msgbuf): directly accessible. */
	if (v < MIPS_PHYS_TO_KSEG0(pmap_limits.avail_end +
	    mips_round_page(MSGBUFSIZE))) {
		*handled = true;
		return 0;
	}
	/* The rest of KSEG0/KSEG1 is off limits. */
	if (v < MIPS_KSEG2_START) {
		return EFAULT;
	}
#endif
	/* Mapped kernel address: let the caller validate it. */
	*handled = false;
	return 0;
}
2453
2454 #if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
2455 static void
mips_watchpoint_init(void)2456 mips_watchpoint_init(void)
2457 {
2458 /*
2459 * determine number of CPU watchpoints
2460 */
2461 curcpu()->ci_cpuwatch_count = cpuwatch_discover();
2462 }
2463 #endif
2464
2465
/*
 * Process the tail end of a posix_spawn() for the child.
 */
void
cpu_spawn_return(struct lwp *l)
{
	/* Nothing machine-dependent to do beyond returning to user mode. */
	userret(l);
}
2474
2475