1 /* $NetBSD: atari_init.c,v 1.107 2023/01/06 10:28:27 tsutsui Exp $ */
2
3 /*
4 * Copyright (c) 1995 Leo Weppelman
5 * Copyright (c) 1994 Michael L. Hitch
6 * Copyright (c) 1993 Markus Wild
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by Markus Wild.
20 * 4. The name of the author may not be used to endorse or promote products
21 * derived from this software without specific prior written permission
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: atari_init.c,v 1.107 2023/01/06 10:28:27 tsutsui Exp $");
37
38 #include "opt_ddb.h"
39 #include "opt_mbtype.h"
40 #include "opt_m060sp.h"
41 #include "opt_m68k_arch.h"
42 #include "opt_st_pool_size.h"
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/ioctl.h>
47 #include <sys/select.h>
48 #include <sys/tty.h>
49 #include <sys/buf.h>
50 #include <sys/msgbuf.h>
51 #include <sys/mbuf.h>
52 #include <sys/extent.h>
53 #include <sys/protosw.h>
54 #include <sys/domain.h>
55 #include <sys/dkbad.h>
56 #include <sys/reboot.h>
57 #include <sys/exec.h>
58 #include <sys/exec_aout.h>
59 #include <sys/core.h>
60 #include <sys/kcore.h>
61 #include <sys/bus.h>
62
63 #include <uvm/uvm_extern.h>
64
65 #include <machine/vmparam.h>
66 #include <machine/pte.h>
67 #include <machine/cpu.h>
68 #include <machine/iomap.h>
69 #include <machine/mfp.h>
70 #include <machine/scu.h>
71 #include <machine/acia.h>
72 #include <machine/kcore.h>
73 #include <machine/intr.h>
74
75 #include <m68k/cpu.h>
76 #include <m68k/cacheops.h>
77
78 #include <atari/atari/stalloc.h>
79 #include <atari/dev/clockvar.h>
80 #include <atari/dev/ym2149reg.h>
81
82 #include "pci.h"
83
84 void start_c(int, u_int, u_int, u_int, char *);
85 static void atari_hwinit(void);
86 static void cpu_init_kcorehdr(paddr_t, paddr_t);
87 static void initcpu(void);
88 static void mmu030_setup(paddr_t, u_int, paddr_t, psize_t, paddr_t, paddr_t);
89 static void map_io_areas(paddr_t, psize_t, u_int);
90 static void set_machtype(void);
91
92 #if defined(M68040) || defined(M68060)
93 static void mmu040_setup(paddr_t, u_int, paddr_t, psize_t, paddr_t, paddr_t);
94 #endif
95
96 #if defined(_MILANHW_)
97 static u_int milan_probe_bank_1(paddr_t paddr);
98 static u_int milan_probe_bank(paddr_t paddr);
99
100 #define NBANK 2
101 #define NSLOT 4
102
103 #define MB(n) ((n) * 1024 * 1024)
104 #define MB_END(n) (MB(n) - 1)
105 #define MAGIC_4M (4 - 1)
106 #define MAGIC_4M_INV ((uint8_t)~MAGIC_4M)
107 #define MAGIC_8M (8 - 1)
108 #define MAGIC_16M (16 - 1)
109 #define MAGIC_32M (32 - 1)
110 #define MAGIC_64M (64 - 1)
111 #endif
112
113 /*
114 * All info needed to generate a panic dump. All fields are setup by
115 * start_c().
 * XXX: Should check usage of phys_segs. There is some unwanted overlap
117 * here.... Also, the name is badly chosen. Phys_segs contains the
118 * segment descriptions _after_ reservations are made.
119 * XXX: 'lowram' is obsoleted by the new panicdump format
120 */
121 static cpu_kcore_hdr_t cpu_kcore_hdr;
122
123 extern u_int lowram;
124 int machineid, mmutype, cputype, astpending;
125
126 extern char *esym;
127 extern struct pcb *curpcb;
128
129 /*
130 * This is the virtual address of physical page 0. Used by 'do_boot()'.
131 */
132 vaddr_t page_zero;
133
134 /*
135 * Simple support for allocation in ST-ram.
136 * Currently 16 bit ST-ram is required to allocate DMA buffers for SCSI and
137 * FDC transfers, and video memory for the XFree68 based Xservers.
138 * The physical address is also returned because the video init needs it to
139 * setup the controller at the time the vm-system is not yet operational so
140 * 'kvtop()' cannot be used.
141 */
142 #define ST_POOL_SIZE_MIN 24 /* for DMA bounce buffers */
143 #ifndef ST_POOL_SIZE
144 #define ST_POOL_SIZE 56 /* Xserver requires 320KB (40 pages) */
145 #endif
146
147 psize_t st_pool_size = ST_POOL_SIZE * PAGE_SIZE; /* Patchable */
148 vaddr_t st_pool_virt;
149 paddr_t st_pool_phys;
150
151 /*
152 * Thresholds to restrict size of reserved ST memory to make sure
 * the kernel can at least boot even on lower-memory machines.
154 * Nowadays we could assume most users have 4MB ST-RAM and 16MB TT-RAM.
155 */
156 #define STRAM_MINTHRESH (2 * 1024 * 1024)
157 #define TTRAM_MINTHRESH (4 * 1024 * 1024)
158
159 /* I/O address space variables */
160 vaddr_t stio_addr; /* Where the st io-area is mapped */
161 vaddr_t pci_conf_addr; /* KVA base of PCI config space */
162 vaddr_t pci_io_addr; /* KVA base of PCI io-space */
163 vaddr_t pci_mem_addr; /* KVA base of PCI mem-space */
164 vaddr_t pci_mem_uncached; /* KVA base of an uncached PCI mem-page */
165
166 /*
167 * Are we relocating the kernel to TT-Ram if possible? It is faster, but
168 * it is also reported not to work on all TT's. So the default is NO.
169 */
170 #ifndef RELOC_KERNEL
171 #define RELOC_KERNEL 0
172 #endif
173 int reloc_kernel = RELOC_KERNEL; /* Patchable */
174
175 #define RELOC_PA(base, pa) ((base) + (pa)) /* used to set up PTE etc. */
176
177 /*
178 * this is the C-level entry function, it's called from locore.s.
179 * Preconditions:
180 * Interrupts are disabled
181 * PA == VA, we don't have to relocate addresses before enabling
182 * the MMU
183 * Exec is no longer available (because we're loaded all over
184 * low memory, no ExecBase is available anymore)
185 *
 * Its purpose is:
187 * Do the things that are done in locore.s in the hp300 version,
188 * this includes allocation of kernel maps and enabling the MMU.
189 *
190 * Some of the code in here is `stolen' from Amiga MACH, and was
191 * written by Bryan Ford and Niklas Hallqvist.
192 *
193 * Very crude 68040 support by Michael L. Hitch.
194 */
int kernel_copyback = 1;

void
start_c(int id, u_int ttphystart, u_int ttphysize, u_int stphysize,
    char *esym_addr)
	/* id:			 Machine id */
	/* ttphystart, ttphysize: Start address and size of TT-ram */
	/* stphysize:		 Size of ST-ram */
	/* esym_addr:		 Address of kernel '_esym' symbol */
{
	extern char end[];
	extern void etext(void);
	extern u_long protorp[2];
	paddr_t pstart;		/* Next available physical address */
	vaddr_t vstart;		/* Next available virtual address */
	vsize_t avail;
	paddr_t ptpa;
	psize_t ptsize;
	u_int ptextra;
	vaddr_t kva;
	u_int tc, i;
	pt_entry_t *pg, *epg;
	pt_entry_t pg_proto;
	vaddr_t end_loaded;
	paddr_t kbase;
	u_int kstsize;
	paddr_t Sysptmap_pa;
#if defined(_MILANHW_)
	/*
	 * The Milan Lies about the presence of TT-RAM. If you insert
	 * 16MB it is split in 14MB ST starting at address 0 and 2MB TT RAM,
	 * starting at address 16MB as the BIOS remapping memory using MMU.
	 *
	 * Milan actually has four SIMM slots and each slot has two banks,
	 * so it could have up to 8 memory segment regions.
	 */
	const paddr_t simm_base[NBANK][NSLOT] = {
		/* slot 0-3, bank 0 */
		{ 0x00000000, 0x04000000, 0x08000000, 0x0c000000 },
		/* slot 0-3, bank 1 */
		{ 0x10000000, 0x14000000, 0x18000000, 0x1c000000 }
	};
	int slot, bank, seg;
	u_int mb;

	/* On Milan, all RAMs are fast 32 bit so no need to reloc kernel */
	reloc_kernel = 0;

	/* probe memory region in all SIMM slots and banks */
	seg = 0;
	ttphysize = 0;
	for (bank = 0; bank < 2; bank++) {
		for (slot = 0; slot < 4; slot++) {
			if (bank == 0 && slot == 0) {
				/*
				 * The first bank has at least 16MB because
				 * the Milan's ROM bootloader requires it
				 * to allocate ST RAM.
				 */
				mb = milan_probe_bank_1(simm_base[bank][slot]);
				boot_segs[0].start = 0;
				boot_segs[0].end = MB(mb);
				stphysize = MB(mb);
				seg++;
			} else {
				/*
				 * The rest banks could be empty or
				 * have 4, 8, 16, 32, or 64MB.
				 */
				mb = milan_probe_bank(simm_base[bank][slot]);
				if (mb > 0) {
					boot_segs[seg].start =
					    simm_base[bank][slot];
					boot_segs[seg].end =
					    simm_base[bank][slot] + MB(mb);
					ttphysize += MB(mb);
					seg++;
				}
			}
		}
	}
#else /* _MILANHW_ */
	boot_segs[0].start = 0;
	boot_segs[0].end = stphysize;
	boot_segs[1].start = ttphystart;
	boot_segs[1].end = ttphystart + ttphysize;
	boot_segs[2].start = boot_segs[2].end = 0; /* End of segments! */
#endif

	/*
	 * We do not know how much ST memory we really need until after
	 * configuration has finished, but typical users of ST memory
	 * are bounce buffers DMA against TT-RAM for SCSI and FDC,
	 * and video memory for the Xserver.
	 * If we have enough RAMs reserve ST memory including for the Xserver.
	 * Otherwise just allocate minimum one for SCSI and FDC.
	 *
	 * The round_page() call is meant to correct errors made by
	 * binpatching!
	 */
	if (st_pool_size > ST_POOL_SIZE_MIN * PAGE_SIZE &&
	    (stphysize <= STRAM_MINTHRESH || ttphysize <= TTRAM_MINTHRESH)) {
		st_pool_size = ST_POOL_SIZE_MIN * PAGE_SIZE;
	}
	st_pool_size = m68k_round_page(st_pool_size);
	st_pool_phys = stphysize - st_pool_size;
	/* Carve the pool off the top of ST-RAM and hide it from the VM */
	stphysize = st_pool_phys;

	/* Provisional value; recomputed below once all reservations are made */
	physmem = btoc(stphysize) + btoc(ttphysize);
	machineid = id;
	esym = esym_addr;

	/*
	 * the kernel ends at end() or esym.
	 */
	if (esym == NULL)
		end_loaded = (vaddr_t)&end;
	else
		end_loaded = (vaddr_t)esym;

	/*
	 * If we have enough fast-memory to put the kernel in and the
	 * RELOC_KERNEL option is set, do it!
	 */
	if ((reloc_kernel != 0) && (ttphysize >= end_loaded))
		kbase = ttphystart;
	else
		kbase = 0;

	/*
	 * Determine the type of machine we are running on. This needs
	 * to be done early (and before initcpu())!
	 */
	set_machtype();

	/*
	 * Initialize CPU specific stuff
	 */
	initcpu();

	/*
	 * We run the kernel from ST memory at the moment.
	 * The kernel segment table is put just behind the loaded image.
	 * pstart: start of usable ST memory
	 * avail : size of ST memory available.
	 */
	vstart = (vaddr_t)end_loaded;
	vstart = m68k_round_page(vstart);
	pstart = (paddr_t)vstart;	/* pre-reloc PA == kernel VA here */
	avail = stphysize - pstart;

	/*
	 * Save KVA of lwp0 uarea and allocate it.
	 */
	lwp0uarea = vstart;
	pstart += USPACE;
	vstart += USPACE;
	avail -= USPACE;

	/*
	 * Calculate the number of pages needed for Sysseg.
	 * For the 68030, we need 256 descriptors (segment-table-entries).
	 * This easily fits into one page.
	 * For the 68040, both the level-1 and level-2 descriptors are
	 * stored into Sysseg. We currently handle a maximum sum of MAXKL2SIZE
	 * level-1 & level-2 tables.
	 */
#if defined(M68040) || defined(M68060)
	if (mmutype == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
#endif
		kstsize = 1;
	/*
	 * allocate the kernel segment table
	 */
	Sysseg_pa = pstart;		/* pre-reloc PA to init STEs */
	Sysseg = (st_entry_t *)vstart;
	pstart += kstsize * PAGE_SIZE;
	vstart += kstsize * PAGE_SIZE;
	avail -= kstsize * PAGE_SIZE;

	/*
	 * allocate kernel page table map
	 */
	Sysptmap_pa = pstart;		/* pre-reloc PA to init PTEs */
	Sysptmap = (pt_entry_t *)vstart;
	pstart += PAGE_SIZE;
	vstart += PAGE_SIZE;
	avail -= PAGE_SIZE;

	/*
	 * Determine the number of pte's we need for extra's like
	 * ST I/O map's.
	 */
	ptextra = btoc(STIO_SIZE);

	/*
	 * If present, add pci areas
	 */
	if (machineid & ATARI_HADES)
		ptextra += btoc(PCI_CONFIG_SIZE + PCI_IO_SIZE + PCI_MEM_SIZE);
	if (machineid & ATARI_MILAN)
		ptextra += btoc(PCI_IO_SIZE + PCI_MEM_SIZE);
	ptextra += btoc(BOOTM_VA_POOL);
	/*
	 * now need to account for the kmem area, which is allocated
	 * before pmap_init() is called. It is roughly the size of physical
	 * memory.
	 */
	ptextra += physmem;

	/*
	 * The 'pt' (the initial kernel pagetable) has to map the kernel and
	 * the I/O areas. The various I/O areas are mapped (virtually) at
	 * the top of the address space mapped by 'pt' (ie. just below Sysmap).
	 */
	ptpa = pstart;			/* pre-reloc PA to init PTEs */
	ptsize = (Sysptsize + howmany(ptextra, NPTEPG)) << PGSHIFT;
	pstart += ptsize;
	vstart += ptsize;
	avail -= ptsize;

	/*
	 * Sysmap is now placed at the end of Supervisor virtual address space.
	 */
	Sysmap = (pt_entry_t *)SYSMAP_VA;

	/*
	 * Initialize segment tables
	 */
#if defined(M68040) || defined(M68060)
	if (mmutype == MMU_68040)
		mmu040_setup(Sysseg_pa, kstsize, ptpa, ptsize, Sysptmap_pa,
		    kbase);
	else
#endif /* defined(M68040) || defined(M68060) */
		mmu030_setup(Sysseg_pa, kstsize, ptpa, ptsize, Sysptmap_pa,
		    kbase);

	/*
	 * initialize kernel page table page(s).
	 * Assume load at VA 0.
	 * - Text pages are RO
	 * - Page zero is invalid
	 */
	pg_proto = RELOC_PA(kbase, 0) | PG_RO | PG_V;
	pg = (pt_entry_t *)ptpa;
	*pg++ = PG_NV;			/* VA 0 invalid (NULL deref trap) */

	pg_proto += PAGE_SIZE;
	for (kva = PAGE_SIZE; kva < (vaddr_t)etext; kva += PAGE_SIZE) {
		*pg++ = pg_proto;
		pg_proto += PAGE_SIZE;
	}

	/*
	 * data, bss and dynamic tables are read/write
	 */
	pg_proto = (pg_proto & PG_FRAME) | PG_RW | PG_V;

#if defined(M68040) || defined(M68060)
	/*
	 * Map the kernel segment table cache invalidated for 68040/68060.
	 * (for the 68040 not strictly necessary, but recommended by Motorola;
	 * for the 68060 mandatory)
	 */
	if (mmutype == MMU_68040) {

		if (kernel_copyback)
			pg_proto |= PG_CCB;

		/* data/bss up to Sysseg: normal (possibly copyback) pages */
		for (; kva < (vaddr_t)Sysseg; kva += PAGE_SIZE) {
			*pg++ = pg_proto;
			pg_proto += PAGE_SIZE;
		}

		/* Sysseg..Sysptmap: cache-inhibited (MMU tables) */
		pg_proto = (pg_proto & ~PG_CCB) | PG_CI;
		for (; kva < (vaddr_t)Sysptmap; kva += PAGE_SIZE) {
			*pg++ = pg_proto;
			pg_proto += PAGE_SIZE;
		}

		pg_proto = (pg_proto & ~PG_CI);
		if (kernel_copyback)
			pg_proto |= PG_CCB;
	}
#endif /* defined(M68040) || defined(M68060) */

	/*
	 * go till end of data allocated so far
	 * plus lwp0 u-area (to be allocated)
	 */
	for (; kva < vstart; kva += PAGE_SIZE) {
		*pg++ = pg_proto;
		pg_proto += PAGE_SIZE;
	}

	/*
	 * invalidate remainder of kernel PT
	 */
	epg = (pt_entry_t *)ptpa;
	epg = &epg[ptsize / sizeof(pt_entry_t)];
	while (pg < epg)
		*pg++ = PG_NV;

	/*
	 * Map various I/O areas
	 */
	map_io_areas(ptpa, ptsize, ptextra);

	/*
	 * Map the allocated space in ST-ram now. In the contig-case, there
	 * is no need to make a distinction between virtual and physical
	 * addresses. But I make it anyway to be prepared.
	 * Physical space is already reserved!
	 */
	st_pool_virt = vstart;
	pg = (pt_entry_t *)ptpa;
	pg = &pg[vstart / PAGE_SIZE];
	pg_proto = st_pool_phys | PG_RW | PG_CI | PG_V;
	vstart += st_pool_size;
	while (pg_proto < (st_pool_phys + st_pool_size)) {
		*pg++ = pg_proto;
		pg_proto += PAGE_SIZE;
	}

	/*
	 * Map physical page_zero and page-zero+1 (First ST-ram page). We need
	 * to reference it in the reboot code. Two pages are mapped, because
	 * we must make sure 'doboot()' is contained in it (see the tricky
	 * copying there....).
	 */
	page_zero = vstart;
	pg = (pt_entry_t *)ptpa;
	pg = &pg[vstart / PAGE_SIZE];
	*pg++ = PG_RW | PG_CI | PG_V;		/* maps physical page 0 */
	vstart += PAGE_SIZE;
	*pg = PG_RW | PG_CI | PG_V | PAGE_SIZE;	/* maps physical page 1 */
	vstart += PAGE_SIZE;

	/*
	 * All necessary STEs and PTEs have been initialized.
	 * Update Sysseg_pa and Sysptmap_pa to point relocated PA.
	 */
	if (kbase) {
		Sysseg_pa += kbase;
		Sysptmap_pa += kbase;
	}

	lowram = 0 >> PGSHIFT;	/* XXX */

	/*
	 * Fill in usable segments. The page indexes will be initialized
	 * later when all reservations are made.
	 */
	usable_segs[0].start = 0;
	usable_segs[0].end = stphysize;
	usable_segs[0].free_list = VM_FREELIST_STRAM;
#if defined(_MILANHW_)
	for (i = 1; i < seg; i++) {
		usable_segs[i].start = boot_segs[i].start;
		usable_segs[i].end = boot_segs[i].end;
		usable_segs[i].free_list = VM_FREELIST_TTRAM;
	}
	for (; i < NMEM_SEGS; i++) {
		usable_segs[i].start = usable_segs[i].end = 0;
	}
#else
	usable_segs[1].start = ttphystart;
	usable_segs[1].end = ttphystart + ttphysize;
	usable_segs[1].free_list = VM_FREELIST_TTRAM;
	usable_segs[2].start = usable_segs[2].end = 0; /* End of segments! */
#endif

	if (kbase) {
		/*
		 * First page of ST-ram is unusable, reserve the space
		 * for the kernel in the TT-ram segment.
		 * Note: Because physical page-zero is partially mapped to ROM
		 *       by hardware, it is unusable.
		 */
		usable_segs[0].start = PAGE_SIZE;
		usable_segs[1].start += pstart;
	} else
		usable_segs[0].start += pstart;

	/*
	 * As all segment sizes are now valid, calculate page indexes and
	 * available physical memory.
	 */
	usable_segs[0].first_page = 0;
	for (i = 1; i < NMEM_SEGS && usable_segs[i].start; i++) {
		usable_segs[i].first_page = usable_segs[i-1].first_page;
		usable_segs[i].first_page +=
		    (usable_segs[i-1].end - usable_segs[i-1].start) / PAGE_SIZE;
	}
	for (i = 0, physmem = 0; usable_segs[i].start; i++)
		physmem += usable_segs[i].end - usable_segs[i].start;
	physmem >>= PGSHIFT;

	/*
	 * get the pmap module in sync with reality.
	 */
	pmap_bootstrap(vstart);

	/*
	 * Prepare to enable the MMU.
	 * Setup and load SRP nolimit, share global, 4 byte PTE's
	 */
	protorp[0] = 0x80000202;
	protorp[1] = Sysseg_pa;			/* + segtable address */

	cpu_init_kcorehdr(kbase, Sysseg_pa);

	/*
	 * copy over the kernel (and all now initialized variables)
	 * to fastram. DONT use bcopy(), this beast is much larger
	 * than 128k !
	 */
	if (kbase) {
		register paddr_t *lp, *le, *fp;

		lp = (paddr_t *)0;
		le = (paddr_t *)pstart;
		fp = (paddr_t *)kbase;
		while (lp < le)
			*fp++ = *lp++;
	}
#if defined(M68040) || defined(M68060)
	if (mmutype == MMU_68040) {
		/*
		 * movel Sysseg_pa,a0;
		 * movec a0,SRP;
		 * pflusha;
		 * movel #$0xc000,d0;
		 * movec d0,TC
		 */
		if (cputype == CPU_68060) {
			/* XXX: Need the branch cache be cleared? */
			__asm volatile (".word 0x4e7a,0x0002;"
				"orl #0x400000,%%d0;"
				".word 0x4e7b,0x0002" : : : "d0");
		}
		__asm volatile ("movel %0,%%a0;"
			".word 0x4e7b,0x8807" : : "a" (Sysseg_pa) : "a0");
		__asm volatile (".word 0xf518" : : );
		__asm volatile ("movel #0xc000,%%d0;"
			".word 0x4e7b,0x0003" : : : "d0" );
	} else
#endif
	{
		/* 68030 path: load SRP and TC via pmove */
		__asm volatile ("pmove %0@,%%srp" : : "a" (&protorp[0]));
		/*
		 * setup and load TC register.
		 * enable_cpr, enable_srp, pagesize=8k,
		 * A = 8 bits, B = 11 bits
		 */
		tc = 0x82d08b00;
		__asm volatile ("pflusha" : : );
		__asm volatile ("pmove %0@,%%tc" : : "a" (&tc));
	}

	/*
	 * Initialize the "u-area" pages etc.
	 */
	pmap_bootstrap_finalize();

	/*
	 * Get the hardware into a defined state
	 */
	atari_hwinit();

	/*
	 * Initialize stmem allocator
	 */
	init_stmem();

	/*
	 * Initialize the iomem extent for bus_space(9) to manage address
	 * spaces and allocate the physical RAM from the extent map.
	 */
	atari_bus_space_extent_init(0x0, 0xffffffff);
	for (i = 0; i < NMEM_SEGS && boot_segs[i].end != 0; i++) {
		if (atari_bus_space_alloc_physmem(boot_segs[i].start,
		    boot_segs[i].end)) {
			/* XXX: Ahum, should not happen ;-) */
			printf("Warning: Cannot allocate boot memory from"
			    " extent map!?\n");
		}
	}

	/*
	 * Initialize interrupt mapping.
	 */
	intr_init();
}
692
693 #if defined(_MILANHW_)
694 /*
695 * Probe and return available memory size in MB at specified address.
696 * The first slot SIMM have at least 16MB, so check if it has 32 or 64 MB.
697 *
698 * Note it seems Milan does not generate bus errors on accesses against
699 * address regions where memory doesn't exist, but it returns memory images
700 * of lower address of the bank.
701 */
static u_int
milan_probe_bank_1(paddr_t start_paddr)
{
	volatile uint8_t *base;
	u_int mb;
	uint8_t save_16, save_32, save_64;

	/* Assume that this bank has at least 16MB */
	mb = 16;

	base = (uint8_t *)start_paddr;

	/* save and write a MAGIC at the end of 16MB region */
	save_16 = base[MB_END(16)];
	base[MB_END(16)] = MAGIC_16M;

	/* check bus error at the end of 32MB region */
	if (badbaddr(__UNVOLATILE(base + MB_END(32)), sizeof(uint8_t))) {
		/* bus error; assume no memory there */
		goto out16;
	}

	/* check if the 32MB region is not image of the prior 16MB region */
	save_32 = base[MB_END(32)];
	base[MB_END(32)] = MAGIC_32M;
	if (base[MB_END(32)] != MAGIC_32M || base[MB_END(16)] != MAGIC_16M) {
		/*
		 * no memory or image at the 32MB region.
		 * Skipping the out32 restore is intentional: if the write
		 * above landed on an alias of the 16MB bank (see the note
		 * about memory images in the function header), the restore
		 * of MB_END(16) at out16 undoes it; if there is no memory
		 * there, there is nothing to restore.
		 */
		goto out16;
	}
	/* we have at least 32MB */
	mb = 32;

	/* check bus error at the end of 64MB region */
	if (badbaddr(__UNVOLATILE(base + MB_END(64)), sizeof(uint8_t))) {
		/* bus error; assume no memory there */
		goto out32;
	}

	/* check if the 64MB region is not image of the prior 32MB region */
	save_64 = base[MB_END(64)];
	base[MB_END(64)] = MAGIC_64M;
	if (base[MB_END(64)] != MAGIC_64M || base[MB_END(32)] != MAGIC_32M) {
		/* no memory or image at the 64MB region (same rationale) */
		goto out32;
	}
	/* we have 64MB */
	mb = 64;
	base[MB_END(64)] = save_64;
 out32:
	base[MB_END(32)] = save_32;
 out16:
	base[MB_END(16)] = save_16;

	return mb;
}
757
758 /*
759 * Probe and return available memory size in MB at specified address.
760 * The rest slot could be empty so check all possible size.
761 */
static u_int
milan_probe_bank(paddr_t start_paddr)
{
	volatile uint8_t *base;
	u_int mb;
	uint8_t save_4, save_8, save_16;

	/* The rest banks might have no memory */
	mb = 0;

	base = (uint8_t *)start_paddr;

	/* check bus error at the end of 4MB region */
	if (badbaddr(__UNVOLATILE(base + MB_END(4)), sizeof(uint8_t))) {
		/* bus error; assume no memory there */
		goto out;
	}

	/*
	 * check if the 4MB region has memory.
	 * Two complementary patterns are written to rule out a floating
	 * bus that happens to read back one of them.
	 */
	save_4 = base[MB_END(4)];
	base[MB_END(4)] = MAGIC_4M_INV;
	if (base[MB_END(4)] != MAGIC_4M_INV) {
		/* no memory */
		goto out;
	}
	base[MB_END(4)] = MAGIC_4M;
	if (base[MB_END(4)] != MAGIC_4M) {
		/* no memory */
		goto out;
	}
	/* we have at least 4MB */
	mb = 4;

	/* check bus error at the end of 8MB region */
	if (badbaddr(__UNVOLATILE(base + MB_END(8)), sizeof(uint8_t))) {
		/* bus error; assume no memory there */
		goto out4;
	}

	/* check if the 8MB region is not image of the prior 4MB region */
	save_8 = base[MB_END(8)];
	base[MB_END(8)] = MAGIC_8M;
	if (base[MB_END(8)] != MAGIC_8M || base[MB_END(4)] != MAGIC_4M) {
		/* no memory or image at the 8MB region */
		goto out4;
	}
	/* we have at least 8MB */
	mb = 8;

	/* check bus error at the end of 16MB region */
	if (badbaddr(__UNVOLATILE(base + MB_END(16)), sizeof(uint8_t))) {
		/* bus error; assume no memory there */
		goto out8;
	}

	/* check if the 16MB region is not image of the prior 8MB region */
	save_16 = base[MB_END(16)];
	base[MB_END(16)] = MAGIC_16M;
	if (base[MB_END(16)] != MAGIC_16M || base[MB_END(8)] != MAGIC_8M) {
		/* no memory or image at the 16MB region */
		goto out8;
	}
	/* we have at least 16MB, so check more region as the first bank */
	mb = milan_probe_bank_1(start_paddr);

	base[MB_END(16)] = save_16;
 out8:
	base[MB_END(8)] = save_8;
 out4:
	base[MB_END(4)] = save_4;
 out:

	return mb;
}
836 #endif /* _MILANHW_ */
837
838 /*
839 * Try to figure out on what type of machine we are running
840 * Note: This module runs *before* the io-mapping is setup!
841 */
static void
set_machtype(void)
{

#ifdef _MILANHW_
	/* Milan kernels are machine-specific builds; no probing needed */
	machineid |= ATARI_MILAN;

#else
	stio_addr = 0xff8000;	/* XXX: For TT & Falcon only */
	if (badbaddr((void *)__UNVOLATILE(&MFP2->mf_gpip), sizeof(char))) {
		/*
		 * No second MFP: either a Falcon or a Hades with < 16Mb
		 * RAM here... A present first MFP identifies the Falcon.
		 */
		if (!badbaddr((void *)__UNVOLATILE(&MFP->mf_gpip),
		    sizeof(char))) {
			machineid |= ATARI_FALCON;
			return;
		}
	}
	/* MFP2 present (or no MFP at all): PCI config area decides TT/Hades */
	if (!badbaddr((void *)(PCI_CONFB_PHYS + PCI_CONFM_PHYS), sizeof(char)))
		machineid |= ATARI_HADES;
	else
		machineid |= ATARI_TT;
#endif /* _MILANHW_ */
}
868
/*
 * Get the hardware into a defined state (called from start_c()).
 */
static void
atari_hwinit(void)
{

#if defined(_ATARIHW_)
	/*
	 * Initialize the sound chip
	 */
	ym2149_init();

	/*
	 * Make sure that the midi acia will not generate an interrupt
	 * unless something attaches to it. We cannot do this for the
	 * keyboard acia because this breaks the '-d' option of the
	 * booter...
	 */
	MDI->ac_cs = 0;
#endif /* defined(_ATARIHW_) */

	/*
	 * Initialize both MFP chips (if both present!) to generate
	 * auto-vectored interrupts with EOI. The active-edge registers are
	 * set up. The interrupt enable registers are set to disable all
	 * interrupts.
	 */
	MFP->mf_iera = MFP->mf_ierb = 0;
	MFP->mf_imra = MFP->mf_imrb = 0;
	MFP->mf_aer = MFP->mf_ddr = 0;
	MFP->mf_vr = 0x40;	/* vector register; see block comment above */

#if defined(_ATARIHW_)
	if (machineid & (ATARI_TT|ATARI_HADES)) {
		MFP2->mf_iera = MFP2->mf_ierb = 0;
		MFP2->mf_imra = MFP2->mf_imrb = 0;
		MFP2->mf_aer = 0x80;
		MFP2->mf_vr = 0x50;
	}

	if (machineid & ATARI_TT) {
		/*
		 * Initialize the SCU, to enable interrupts on the SCC (ipl5),
		 * MFP (ipl6) and softints (ipl1).
		 */
		SCU->sys_mask = SCU_SYS_SOFT;
		SCU->vme_mask = SCU_MFP | SCU_SCC;
#ifdef DDB
		/*
		 * This allows people with the correct hardware modification
		 * to drop into the debugger from an NMI.
		 */
		SCU->sys_mask |= SCU_IRQ7;
#endif
	}
#endif /* defined(_ATARIHW_) */

	/*
	 * Initialize a timer for delay(9).
	 */
	init_delay();

#if NPCI > 0
	if (machineid & (ATARI_HADES|ATARI_MILAN)) {
		/*
		 * Configure PCI-bus
		 */
		init_pci_bus();
	}
#endif

}
939
940 /*
941 * Do the dull work of mapping the various I/O areas. They MUST be Cache
942 * inhibited!
943 * All I/O areas are virtually mapped at the end of the pt-table.
944 */
static void
map_io_areas(paddr_t ptpa, psize_t ptsize, u_int ptextra)
	/* ptsize:	Size of 'pt' in bytes */
	/* ptextra:	#of additional I/O pte's */
{
	vaddr_t ioaddr;
	pt_entry_t *pt, *pg, *epg;
	pt_entry_t pg_proto;
	u_long mask;

	pt = (pt_entry_t *)ptpa;
	/* I/O space starts 'ptextra' pages below the top of the pt mapping */
	ioaddr = ((ptsize / sizeof(pt_entry_t)) - ptextra) * PAGE_SIZE;

	/*
	 * Map ST-IO area
	 */
	stio_addr = ioaddr;
	ioaddr += STIO_SIZE;
	pg = &pt[stio_addr / PAGE_SIZE];
	epg = &pg[btoc(STIO_SIZE)];
#ifdef _MILANHW_
	/*
	 * Turn on byte swaps in the ST I/O area. On the Milan, the
	 * U0 signal of the MMU controls the BigEndian signal
	 * of the PLX9080. We use this setting so we can read/write the
	 * PLX registers (and PCI-config space) in big-endian mode.
	 */
	pg_proto = STIO_PHYS | PG_RW | PG_CI | PG_V | 0x100;
#else
	pg_proto = STIO_PHYS | PG_RW | PG_CI | PG_V;
#endif
	while (pg < epg) {
		*pg++ = pg_proto;
		pg_proto += PAGE_SIZE;
	}

	/*
	 * Map PCI areas
	 */
	if (machineid & ATARI_HADES) {
		/*
		 * Only Hades maps the PCI-config space!
		 * NOTE(review): each successive page's PA gets the next
		 * shifted mask bit (PCI_CONFM_PHYS << n) or'ed in —
		 * presumably this selects one device's config space per
		 * page; verify against the Hades PCI bus code.
		 */
		pci_conf_addr = ioaddr;
		ioaddr += PCI_CONFIG_SIZE;
		pg = &pt[pci_conf_addr / PAGE_SIZE];
		epg = &pg[btoc(PCI_CONFIG_SIZE)];
		mask = PCI_CONFM_PHYS;
		pg_proto = PCI_CONFB_PHYS | PG_RW | PG_CI | PG_V;
		for (; pg < epg; mask <<= 1)
			*pg++ = pg_proto | mask;
	} else
		pci_conf_addr = 0;	/* XXX: should crash */

	if (machineid & (ATARI_HADES|ATARI_MILAN)) {
		pci_io_addr = ioaddr;
		ioaddr += PCI_IO_SIZE;
		pg = &pt[pci_io_addr / PAGE_SIZE];
		epg = &pg[btoc(PCI_IO_SIZE)];
		pg_proto = PCI_IO_PHYS | PG_RW | PG_CI | PG_V;
		while (pg < epg) {
			*pg++ = pg_proto;
			pg_proto += PAGE_SIZE;
		}

		pci_mem_addr = ioaddr;
		/* Provide an uncached PCI address for the MILAN */
		pci_mem_uncached = ioaddr;
		ioaddr += PCI_MEM_SIZE;
		/* 'pg' already points just past the PCI I/O pages */
		epg = &pg[btoc(PCI_MEM_SIZE)];
		pg_proto = PCI_VGA_PHYS | PG_RW | PG_CI | PG_V;
		while (pg < epg) {
			*pg++ = pg_proto;
			pg_proto += PAGE_SIZE;
		}
	}

	/* Remaining I/O ptes go to the bootm(9) VA pool */
	bootm_init(ioaddr, pg, BOOTM_VA_POOL);
	/*
	 * ioaddr += BOOTM_VA_POOL;
	 * pg = &pg[btoc(BOOTM_VA_POOL)];
	 */
}
1028
1029 /*
1030 * Used by dumpconf() to get the size of the machine-dependent panic-dump
1031 * header in disk blocks.
1032 */
1033
1034 #define CHDRSIZE (ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)))
1035 #define MDHDRSIZE roundup(CHDRSIZE, dbtob(1))
1036
1037 int
cpu_dumpsize(void)1038 cpu_dumpsize(void)
1039 {
1040
1041 return btodb(MDHDRSIZE);
1042 }
1043
1044 /*
1045 * Called by dumpsys() to dump the machine-dependent header.
1046 * XXX: Assumes that it will all fit in one diskblock.
1047 */
1048 int
cpu_dump(int (* dump)(dev_t,daddr_t,void *,size_t),daddr_t * p_blkno)1049 cpu_dump(int (*dump)(dev_t, daddr_t, void *, size_t), daddr_t *p_blkno)
1050 {
1051 int buf[MDHDRSIZE/sizeof(int)];
1052 int error;
1053 kcore_seg_t *kseg_p;
1054 cpu_kcore_hdr_t *chdr_p;
1055
1056 kseg_p = (kcore_seg_t *)buf;
1057 chdr_p = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*kseg_p)) / sizeof(int)];
1058
1059 /*
1060 * Generate a segment header
1061 */
1062 CORE_SETMAGIC(*kseg_p, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
1063 kseg_p->c_size = MDHDRSIZE - ALIGN(sizeof(*kseg_p));
1064
1065 /*
1066 * Add the md header
1067 */
1068 *chdr_p = cpu_kcore_hdr;
1069 error = dump(dumpdev, *p_blkno, (void *)buf, sizeof(buf));
1070 *p_blkno += btodb(sizeof(buf));
1071 return (error);
1072 }
1073
1074 #if (M68K_NPHYS_RAM_SEGS < NMEM_SEGS)
1075 #error "Configuration error: M68K_NPHYS_RAM_SEGS < NMEM_SEGS"
1076 #endif
1077 /*
1078 * Initialize the cpu_kcore_header.
1079 */
static void
cpu_init_kcorehdr(paddr_t kbase, paddr_t sysseg_pa)
	/* kbase:	relocation base PA of the kernel (0 if not relocated) */
	/* sysseg_pa:	post-relocation PA of the kernel segment table */
{
	cpu_kcore_hdr_t *h = &cpu_kcore_hdr;
	struct m68k_kcore_hdr *m = &h->un._m68k;
	extern char end[];
	int i;

	memset(&cpu_kcore_hdr, 0, sizeof(cpu_kcore_hdr));

	/*
	 * Initialize the `dispatcher' portion of the header.
	 */
	strcpy(h->name, machine);
	h->page_size = PAGE_SIZE;
	h->kernbase = KERNBASE;

	/*
	 * Fill in information about our MMU configuration.
	 */
	m->mmutype	= mmutype;
	m->sg_v		= SG_V;
	m->sg_frame	= SG_FRAME;
	m->sg_ishift	= SG_ISHIFT;
	m->sg_pmask	= SG_PMASK;
	m->sg40_shift1	= SG4_SHIFT1;
	m->sg40_mask2	= SG4_MASK2;
	m->sg40_shift2	= SG4_SHIFT2;
	m->sg40_mask3	= SG4_MASK3;
	m->sg40_shift3	= SG4_SHIFT3;
	m->sg40_addr1	= SG4_ADDR1;
	m->sg40_addr2	= SG4_ADDR2;
	m->pg_v		= PG_V;
	m->pg_frame	= PG_FRAME;

	/*
	 * Initialize pointer to kernel segment table.
	 */
	m->sysseg_pa = sysseg_pa;		/* PA after relocation */

	/*
	 * Initialize relocation value such that:
	 *
	 *	pa = (va - KERNBASE) + reloc
	 */
	m->reloc = kbase;

	/*
	 * Define the end of the relocatable range.
	 */
	m->relocend = (vaddr_t)end;

	/* Copy the boot-time RAM segment list for libkvm consumers */
	for (i = 0; i < NMEM_SEGS; i++) {
		m->ram_segs[i].start = boot_segs[i].start;
		m->ram_segs[i].size  = boot_segs[i].end -
		    boot_segs[i].start;
	}
}
1138
/*
 * Build the bootstrap MMU tables for the 68030 (two-level format):
 * enter the kernel page-table pages into both the hardware segment
 * table and the software Sysptmap, invalidate the remaining entries
 * of both, and finally map Sysptmap itself at SYSMAP_VA.
 *
 * All table addresses passed in are physical.  RELOC_PA() converts
 * a pre-relocation kernel PA into its final physical address using
 * 'kbase'.
 */
void
mmu030_setup(paddr_t sysseg_pa, u_int kstsize, paddr_t ptpa, psize_t ptsize,
    paddr_t sysptmap_pa, paddr_t kbase)
	/* sysseg_pa:	 System segment table */
	/* kstsize:	 size of 'sysseg' in pages */
	/* ptpa:	 Kernel page table */
	/* ptsize:	 size of 'pt' in bytes */
	/* sysptmap_pa:	 System page table */
	/* kbase:	 relocation base for RELOC_PA() */
{
	st_entry_t sg_proto, *sg, *esg;
	pt_entry_t pg_proto, *pg, *epg;

	/*
	 * Map the page table pages in both the HW segment table
	 * and the software Sysptmap.  One descriptor/PTE per
	 * page-table page; Sysptmap entries are cache-inhibited.
	 */
	sg = (st_entry_t *)sysseg_pa;
	pg = (pt_entry_t *)sysptmap_pa;
	epg = &pg[ptsize >> PGSHIFT];
	sg_proto = RELOC_PA(kbase, ptpa) | SG_RW | SG_V;
	pg_proto = RELOC_PA(kbase, ptpa) | PG_RW | PG_CI | PG_V;
	while (pg < epg) {
		*sg++ = sg_proto;
		*pg++ = pg_proto;
		sg_proto += PAGE_SIZE;
		pg_proto += PAGE_SIZE;
	}

	/*
	 * Invalidate the remainder of both tables.
	 */
	esg = (st_entry_t *)sysseg_pa;
	esg = &esg[TIA_SIZE];
	while (sg < esg)
		*sg++ = SG_NV;
	epg = (pt_entry_t *)sysptmap_pa;
	epg = &epg[TIB_SIZE];
	while (pg < epg)
		*pg++ = PG_NV;

	/*
	 * Initialize the entries for SYSMAP_VA to point to Sysptmap
	 * itself, so the PTEs mapping kernel VA space are reachable
	 * through SYSMAP_VA after the MMU is enabled.
	 */
	sg = (st_entry_t *)sysseg_pa;
	sg = &sg[SYSMAP_VA >> SEGSHIFT];
	pg = (pt_entry_t *)sysptmap_pa;
	pg = &pg[SYSMAP_VA >> SEGSHIFT];
	*sg = RELOC_PA(kbase, sysptmap_pa) | SG_RW | SG_V;
	*pg = RELOC_PA(kbase, sysptmap_pa) | PG_RW | PG_CI | PG_V;
}
1189
1190 #if defined(M68040) || defined(M68060)
/*
 * Build the bootstrap MMU tables for the 68040/68060 (three-level
 * format): a level 1 table, level 2 tables immediately following
 * it, level 3 PTE pages (the kernel page table at 'ptpa'), plus
 * the software Sysptmap.
 *
 * All table addresses passed in are physical.  RELOC_PA() converts
 * a pre-relocation kernel PA into its final physical address using
 * 'kbase'.
 */
void
mmu040_setup(paddr_t sysseg_pa, u_int kstsize, paddr_t ptpa, psize_t ptsize,
    paddr_t sysptmap_pa, paddr_t kbase)
	/* sysseg_pa:	 System segment table */
	/* kstsize:	 size of 'sysseg' in pages */
	/* ptpa:	 Kernel page table */
	/* ptsize:	 size of 'pt' in bytes */
	/* sysptmap_pa:	 System page table */
	/* kbase:	 relocation base for RELOC_PA() */
{
	int nl1desc, nl2desc, i;
	st_entry_t sg_proto, *sg, *esg;
	pt_entry_t pg_proto, *pg, *epg;

	/*
	 * First invalidate the entire "segment table" pages
	 * (levels 1 and 2 have the same "invalid" values).
	 */
	sg = (st_entry_t *)sysseg_pa;
	esg = &sg[kstsize * NPTEPG];
	while (sg < esg)
		*sg++ = SG_NV;

	/*
	 * Initialize level 2 descriptors (which immediately
	 * follow the level 1 table).
	 * We need:
	 *	NPTEPG / SG4_LEV3SIZE
	 * level 2 descriptors to map each of the nptpages
	 * pages of PTEs.  Note that we set the "used" bit
	 * now to save the HW the expense of doing it.
	 */
	nl2desc = (ptsize >> PGSHIFT) * (NPTEPG / SG4_LEV3SIZE);
	sg = (st_entry_t *)sysseg_pa;
	sg = &sg[SG4_LEV1SIZE];
	esg = &sg[nl2desc];
	sg_proto = RELOC_PA(kbase, ptpa) | SG_U | SG_RW | SG_V;
	while (sg < esg) {
		*sg++ = sg_proto;
		/* each level 2 descriptor covers one level 3 chunk */
		sg_proto += (SG4_LEV3SIZE * sizeof(st_entry_t));
	}

	/*
	 * Initialize level 1 descriptors.  We need:
	 *	howmany(nl2desc, SG4_LEV2SIZE)
	 * level 1 descriptors to map the 'nl2desc' level 2's.
	 */
	nl1desc = howmany(nl2desc, SG4_LEV2SIZE);
	sg = (st_entry_t *)sysseg_pa;
	esg = &sg[nl1desc];
	sg_proto = RELOC_PA(kbase, (paddr_t)&sg[SG4_LEV1SIZE])
	    | SG_U | SG_RW | SG_V;
	while (sg < esg) {
		*sg++ = sg_proto;
		sg_proto += (SG4_LEV2SIZE * sizeof(st_entry_t));
	}

	/*
	 * Sysmap is last entry in level 1.
	 * (sg_proto already points at the next level 2 table here.)
	 */
	sg = (st_entry_t *)sysseg_pa;
	sg = &sg[SG4_LEV1SIZE - 1];
	*sg = sg_proto;

	/*
	 * Kernel segment table at end of next level 2 table.
	 */
	i = SG4_LEV1SIZE + (nl1desc * SG4_LEV2SIZE);
	sg = (st_entry_t *)sysseg_pa;
	sg = &sg[i + SG4_LEV2SIZE - (NPTEPG / SG4_LEV3SIZE)];
	esg = &sg[NPTEPG / SG4_LEV3SIZE];
	sg_proto = RELOC_PA(kbase, sysptmap_pa) | SG_U | SG_RW | SG_V;
	while (sg < esg) {
		*sg++ = sg_proto;
		sg_proto += (SG4_LEV3SIZE * sizeof(st_entry_t));
	}

	/* Include additional level 2 table for Sysmap in protostfree */
	protostfree = (~0 << (1 + nl1desc + 1)) /* & ~(~0 << MAXKL2SIZE) */;

	/*
	 * Initialize Sysptmap: one cache-inhibited PTE per
	 * kernel page-table page.
	 */
	pg = (pt_entry_t *)sysptmap_pa;
	epg = &pg[ptsize >> PGSHIFT];
	pg_proto = RELOC_PA(kbase, ptpa) | PG_RW | PG_CI | PG_V;
	while (pg < epg) {
		*pg++ = pg_proto;
		pg_proto += PAGE_SIZE;
	}

	/*
	 * Invalidate rest of Sysptmap page.
	 */
	epg = (pt_entry_t *)sysptmap_pa;
	epg = &epg[TIB_SIZE];
	while (pg < epg)
		*pg++ = PG_NV;

	/*
	 * Initialize the PTE for the last one to point Sysptmap,
	 * making Sysptmap itself reachable at SYSMAP_VA.
	 */
	pg = (pt_entry_t *)sysptmap_pa;
	pg = &pg[SYSMAP_VA >> SEGSHIFT];
	*pg = RELOC_PA(kbase, sysptmap_pa) | PG_RW | PG_CI | PG_V;
}
#endif /* defined(M68040) || defined(M68060) */
1295
#if defined(M68060)
/*
 * Initial value loaded into the 68060 Processor Configuration
 * Register by initcpu().  NOTE(review): 0x21 presumably enables
 * superscalar dispatch plus one other feature bit -- confirm
 * against the MC68060 UM PCR bit definitions before changing.
 */
int m68060_pcr_init = 0x21;	/* make this patchable */
#endif
1299
/*
 * CPU-type specific initialization: install the proper bus-error
 * and address-error handlers in the exception vector table and, on
 * the 68060, set up the PCR and (with M060SP) hook in the software
 * support package for unimplemented integer/FP instructions.
 * Finishes by pushing the changes out of the data cache (DCIS).
 */
static void
initcpu(void)
{
	typedef void trapfun(void);

	switch (cputype) {

#if defined(M68060)
	case CPU_68060:
		{
			extern trapfun *vectab[256];
			extern trapfun buserr60, addrerr4060, fpfault;
#if defined(M060SP)
			extern u_int8_t FP_CALL_TOP[], I_CALL_TOP[];
#else
			extern trapfun illinst;
#endif

			/*
			 * Load m68060_pcr_init into the PCR;
			 * 0x4e7b,0x0808 is `movec %d0,%pcr'.
			 */
			__asm volatile ("movl %0,%%d0; .word 0x4e7b,0x0808" : :
			    "d"(m68060_pcr_init):"d0" );

			/* bus/addrerr vectors */
			vectab[2] = buserr60;
			vectab[3] = addrerr4060;

#if defined(M060SP)
			/* integer support (unimplemented-instruction vector) */
			vectab[61] = (trapfun *)&I_CALL_TOP[128 + 0x00];

			/* floating point support */
			/*
			 * XXX maybe we really should run-time check for the
			 * stack frame format here:
			 */
			vectab[11] = (trapfun *)&FP_CALL_TOP[128 + 0x30];

			vectab[55] = (trapfun *)&FP_CALL_TOP[128 + 0x38];
			vectab[60] = (trapfun *)&FP_CALL_TOP[128 + 0x40];

			/* FP exception vectors, offsets per the M060SP ABI */
			vectab[54] = (trapfun *)&FP_CALL_TOP[128 + 0x00];
			vectab[52] = (trapfun *)&FP_CALL_TOP[128 + 0x08];
			vectab[53] = (trapfun *)&FP_CALL_TOP[128 + 0x10];
			vectab[51] = (trapfun *)&FP_CALL_TOP[128 + 0x18];
			vectab[50] = (trapfun *)&FP_CALL_TOP[128 + 0x20];
			vectab[49] = (trapfun *)&FP_CALL_TOP[128 + 0x28];
#else
			/* no support package: treat as illegal instruction */
			vectab[61] = illinst;
#endif
			vectab[48] = fpfault;
		}
		break;
#endif /* defined(M68060) */
#if defined(M68040)
	case CPU_68040:
		{
			extern trapfun *vectab[256];
			extern trapfun buserr40, addrerr4060;

			/* bus/addrerr vectors */
			vectab[2] = buserr40;
			vectab[3] = addrerr4060;
		}
		break;
#endif /* defined(M68040) */
#if defined(M68030) || defined(M68020)
	case CPU_68030:
	case CPU_68020:
		{
			extern trapfun *vectab[256];
			extern trapfun buserr2030, addrerr2030;

			/* bus/addrerr vectors */
			vectab[2] = buserr2030;
			vectab[3] = addrerr2030;
		}
		break;
#endif /* defined(M68030) || defined(M68020) */
	}

	/* Make sure the vector table updates hit memory. */
	DCIS();
}
1381
1382 #ifdef DEBUG
1383 void dump_segtable(u_int *);
1384 void dump_pagetable(u_int *, u_int, u_int);
1385 u_int vmtophys(u_int *, u_int);
1386
1387 void
dump_segtable(u_int * stp)1388 dump_segtable(u_int *stp)
1389 {
1390 u_int *s, *es;
1391 int shift, i;
1392
1393 s = stp;
1394 {
1395 es = s + (M68K_STSIZE >> 2);
1396 shift = SG_ISHIFT;
1397 }
1398
1399 /*
1400 * XXX need changes for 68040
1401 */
1402 for (i = 0; s < es; s++, i++)
1403 if (*s & SG_V)
1404 printf("$%08x: $%08x\t", i << shift, *s & SG_FRAME);
1405 printf("\n");
1406 }
1407
1408 void
dump_pagetable(u_int * ptp,u_int i,u_int n)1409 dump_pagetable(u_int *ptp, u_int i, u_int n)
1410 {
1411 u_int *p, *ep;
1412
1413 p = ptp + i;
1414 ep = p + n;
1415 for (; p < ep; p++, i++)
1416 if (*p & PG_V)
1417 printf("$%08x -> $%08x\t", i, *p & PG_FRAME);
1418 printf("\n");
1419 }
1420
1421 u_int
vmtophys(u_int * ste,u_int vm)1422 vmtophys(u_int *ste, u_int vm)
1423 {
1424
1425 ste = (u_int *)(*(ste + (vm >> SEGSHIFT)) & SG_FRAME);
1426 ste += (vm & SG_PMASK) >> PGSHIFT;
1427 return (*ste & -PAGE_SIZE) | (vm & (PAGE_SIZE - 1));
1428 }
1429
1430 #endif
1431