1 /*	$NetBSD: sa11x0_hpc_machdep.c,v 1.2 2010/06/26 00:25:02 tsutsui Exp $	*/
2 
3 /*
4  * Copyright (c) 1994-1998 Mark Brinicombe.
5  * Copyright (c) 1994 Brini.
6  * All rights reserved.
7  *
8  * This code is derived from software written for Brini by Mark Brinicombe
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *      This product includes software developed by Brini.
21  * 4. The name of the company nor the name of the author may be used to
22  *    endorse or promote products derived from this software without specific
23  *    prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
26  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
28  * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
29  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
30  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
31  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 
38 /*
39  * Machine dependent functions for kernel setup.
40  */
41 
42 #include <sys/cdefs.h>
43 __KERNEL_RCSID(0, "$NetBSD: sa11x0_hpc_machdep.c,v 1.2 2010/06/26 00:25:02 tsutsui Exp $");
44 
45 #include "opt_ddb.h"
46 #include "opt_dram_pages.h"
47 #include "opt_modular.h"
48 #include "opt_pmap_debug.h"
49 #include "ksyms.h"
50 
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/kernel.h>
54 #include <sys/reboot.h>
55 #include <sys/proc.h>
56 #include <sys/msgbuf.h>
57 #include <sys/exec.h>
58 #include <sys/ksyms.h>
59 #include <sys/boot_flag.h>
60 #include <sys/conf.h>	/* XXX for consinit related hacks */
61 #include <sys/device.h>
62 #include <sys/termios.h>
63 
64 #if NKSYMS || defined(DDB) || defined(MODULAR)
65 #include <machine/db_machdep.h>
66 #include <ddb/db_sym.h>
67 #include <ddb/db_extern.h>
68 #ifndef DB_ELFSIZE
69 #error Must define DB_ELFSIZE!
70 #endif
71 #define ELFSIZE	DB_ELFSIZE
72 #include <sys/exec_elf.h>
73 #endif
74 
75 #include <uvm/uvm.h>
76 
77 #include <arm/sa11x0/sa11x0_reg.h>
78 #include <arm/cpuconf.h>
79 #include <arm/undefined.h>
80 
81 #include <machine/bootconfig.h>
82 #include <machine/bootinfo.h>
83 #include <machine/bus.h>
84 #include <machine/cpu.h>
85 #include <machine/frame.h>
86 #include <machine/intr.h>
87 #include <machine/io.h>
88 #include <machine/platid.h>
89 #include <machine/platid_mask.h>
90 #include <machine/rtc.h>
91 #include <machine/signal.h>
92 
93 #include <dev/cons.h>
94 #include <dev/hpc/apm/apmvar.h>
95 #include <dev/hpc/bicons.h>
96 
97 #include <sys/mount.h>
98 #include <nfs/rpcv2.h>
99 #include <nfs/nfsproto.h>
100 #include <nfs/nfs.h>
101 #include <nfs/nfsmount.h>
102 
103 /* Kernel text starts 256K in from the bottom of the kernel address space. */
104 #define	KERNEL_TEXT_BASE	(KERNEL_BASE + 0x00040000)
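/* Kernel VM starts 12MB above KERNEL_BASE; KERNEL_VM_SIZE is 80MB. */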
105 #define	KERNEL_VM_BASE		(KERNEL_BASE + 0x00C00000)
106 #define	KERNEL_VM_SIZE		0x05000000
107 
108 /*
109  * Address to call from cpu_reset() to reset the machine.
 * This is machine dependent, as it varies with where the ROM appears
 * when the MMU is turned off.
112  */
113 u_int cpu_reset_address = 0;
114 
115 /* Define various stack sizes in pages */
116 #define IRQ_STACK_SIZE	1
117 #define ABT_STACK_SIZE	1
118 #define UND_STACK_SIZE	1
119 
120 extern BootConfig bootconfig;		/* Boot config storage */
121 extern struct bootinfo *bootinfo, bootinfo_storage;
122 extern char booted_kernel_storage[80];
123 extern char *booted_kernel;
124 
125 extern paddr_t physical_start;
126 extern paddr_t physical_freestart;
127 extern paddr_t physical_freeend;
128 extern paddr_t physical_end;
129 extern int physmem;
130 
131 /* Physical and virtual addresses for some global pages */
132 extern pv_addr_t irqstack;
133 extern pv_addr_t undstack;
134 extern pv_addr_t abtstack;
135 extern pv_addr_t kernelstack;
136 
137 extern char *boot_args;
138 extern char boot_file[16];
139 
140 extern vaddr_t msgbufphys;
141 
142 extern u_int data_abort_handler_address;
143 extern u_int prefetch_abort_handler_address;
144 extern u_int undefined_handler_address;
145 extern int end;
146 
147 #ifdef PMAP_DEBUG
148 extern int pmap_debug_level;
149 #endif /* PMAP_DEBUG */
150 
151 #define	KERNEL_PT_VMEM		0	/* Page table for mapping video memory */
152 #define	KERNEL_PT_SYS		1	/* Page table for mapping proc0 zero page */
153 #define	KERNEL_PT_IO		2	/* Page table for mapping IO */
154 #define	KERNEL_PT_KERNEL	3	/* Page table for mapping kernel */
155 #define	KERNEL_PT_KERNEL_NUM	4
156 #define	KERNEL_PT_VMDATA	(KERNEL_PT_KERNEL + KERNEL_PT_KERNEL_NUM)
157 					/* Page tables for mapping kernel VM */
158 #define	KERNEL_PT_VMDATA_NUM	4	/* start with 16MB of KVM */
159 #define	NUM_KERNEL_PTS		(KERNEL_PT_VMDATA + KERNEL_PT_VMDATA_NUM)
160 
161 pv_addr_t kernel_pt_table[NUM_KERNEL_PTS];
162 
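/*
 * Virtual space reserved for the SA-1 D-cache clean code;
 * sa1_cache_clean_size is set to half of this below.
 */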
163 #define CPU_SA110_CACHE_CLEAN_SIZE (0x4000 * 2)
164 extern unsigned int sa1_cache_clean_addr;
165 extern unsigned int sa1_cache_clean_size;
166 static vaddr_t sa1_cc_base;
167 
168 /* Non-buffered non-cacheable memory needed to enter idle mode */
169 extern vaddr_t sa11x0_idle_mem;
170 
171 /* Prototypes */
172 void data_abort_handler(trapframe_t *);
173 void prefetch_abort_handler(trapframe_t *);
174 void undefinedinstruction_bounce(trapframe_t *);
175 u_int cpu_get_control(void);
176 
177 u_int initarm(int, char **, struct bootinfo *);
178 
179 #ifdef BOOT_DUMP
180 void    dumppages(char *, int);
181 #endif
182 
183 /* Mode dependent sleep function holder */
184 extern void (*__sleep_func)(void *);
185 extern void *__sleep_ctx;
186 
187 /* Number of DRAM pages which are installed */
188 /* Units are 4K pages, so 8192 is 32 MB of memory */
189 #ifndef DRAM_PAGES
190 #define DRAM_PAGES	8192
191 #endif
192 
193 /*
194  * Static device mappings. These peripheral registers are mapped at
195  * fixed virtual addresses very early in initarm() so that we can use
 * them while booting the kernel, and they stay at the same addresses
 * throughout the kernel's lifetime.
198  */
199 static const struct pmap_devmap sa11x0_devmap[] = {
	/* Virtual/physical addresses for UART #3. */
201 	{
202 		SACOM3_VBASE,
203 		SACOM3_BASE,
204 		0x24,
205 		VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE
206 	},
207 	{ 0, 0, 0, 0, 0 }
208 };
209 
210 /*
211  * Initial entry point on startup. This gets called before main() is
212  * entered.
 * It is responsible for setting up everything that must be in place
 * before main() is called.
215  * This includes:
216  *   Taking a copy of the boot configuration structure.
217  *   Initializing the physical console so characters can be printed.
218  *   Setting up page tables for the kernel.
219  */
220 u_int
221 initarm(int argc, char **argv, struct bootinfo *bi)
222 {
223 	u_int kerneldatasize, symbolsize;
224 	u_int l1pagetable;
225 	vaddr_t freemempos;
226 	vsize_t pt_size;
227 	int loop, i;
228 #if NKSYMS || defined(DDB) || defined(MODULAR)
229 	Elf_Shdr *sh;
230 #endif
231 
232 	__sleep_func = NULL;
233 	__sleep_ctx = NULL;
234 
235 	/* parse kernel args */
236 	boothowto = 0;
237 	boot_file[0] = '\0';
238 	strncpy(booted_kernel_storage, argv[0], sizeof(booted_kernel_storage));
239 	for (i = 1; i < argc; i++) {
240 		char *cp = argv[i];
241 
242 		switch (*cp) {
243 		case 'b':
244 			/* boot device: -b=sd0 etc. */
245 			cp = cp + 2;
246 			if (strcmp(cp, MOUNT_NFS) == 0)
247 				rootfstype = MOUNT_NFS;
248 			else
249 				strncpy(boot_file, cp, sizeof(boot_file));
250 			break;
251 		default:
252 			BOOT_FLAG(*cp, boothowto);
253 			break;
254 		}
255 	}
256 
257 	/* copy bootinfo into known kernel space */
258 	bootinfo_storage = *bi;
259 	bootinfo = &bootinfo_storage;
260 
261 #ifdef BOOTINFO_FB_WIDTH
262 	bootinfo->fb_line_bytes = BOOTINFO_FB_LINE_BYTES;
263 	bootinfo->fb_width = BOOTINFO_FB_WIDTH;
264 	bootinfo->fb_height = BOOTINFO_FB_HEIGHT;
265 	bootinfo->fb_type = BOOTINFO_FB_TYPE;
266 #endif
267 
268 	if (bootinfo->magic == BOOTINFO_MAGIC) {
269 		platid.dw.dw0 = bootinfo->platid_cpu;
270 		platid.dw.dw1 = bootinfo->platid_machine;
271 	}
272 
273 #ifndef RTC_OFFSET
274 	/*
	 * Initialize rtc_offset from bootinfo.timezone, which was set by
	 * hpcboot.exe.
276 	 */
277 	if (rtc_offset == 0 &&
278 	    (bootinfo->timezone > (-12 * 60) &&
279 	     bootinfo->timezone <= (12 * 60)))
280 		rtc_offset = bootinfo->timezone;
281 #endif
282 
283 	/*
	 * Heads up ... Set up the CPU / MMU / TLB functions.
285 	 */
286 	set_cpufuncs();
287 	IRQdisable;
288 
289 #ifdef DEBUG_BEFOREMMU
290 	/*
	 * At this point, we cannot call the real consinit().
	 * Just call a faked-up version of consinit(), which works with the
	 * MMU disabled.
294 	 */
295 	fakecninit();
296 #endif
297 
298 	/*
	 * XXX for now, overwrite bootconfig with hardcoded values.
	 * XXX kill bootconfig and directly call uvm_page_physload
301 	 */
302 	bootconfig.dram[0].address = 0xc0000000;
303 	bootconfig.dram[0].pages = DRAM_PAGES;
304 	bootconfig.dramblocks = 1;
305 
306 	kerneldatasize = (uint32_t)&end - (uint32_t)KERNEL_TEXT_BASE;
307 	symbolsize = 0;
308 #if NKSYMS || defined(DDB) || defined(MODULAR)
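	/*
	 * An ELF image (the symbol table) may follow the kernel at `end';
	 * size it from its section headers so it can be preserved and
	 * passed to ksyms_addsyms_elf() below.
	 */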
309 	if (!memcmp(&end, "\177ELF", 4)) {
310 		sh = (Elf_Shdr *)((char *)&end + ((Elf_Ehdr *)&end)->e_shoff);
311 		loop = ((Elf_Ehdr *)&end)->e_shnum;
312 		for (; loop; loop--, sh++)
313 			if (sh->sh_offset > 0 &&
314 			    (sh->sh_offset + sh->sh_size) > symbolsize)
315 				symbolsize = sh->sh_offset + sh->sh_size;
316 	}
317 #endif
318 
319 	printf("kernsize=0x%x\n", kerneldatasize);
320 	kerneldatasize += symbolsize;
321 	kerneldatasize = ((kerneldatasize - 1) & ~(PAGE_SIZE * 4 - 1)) +
322 	    PAGE_SIZE * 8;
323 
324 	/*
	 * hpcboot has loaded the kernel with the MMU disabled,
	 * so create the kernel page tables and enable the MMU.
327 	 */
328 
329 	/*
	 * Set up the variables that define the availability of physical
331 	 * memory.
332 	 */
333 	physical_start = bootconfig.dram[0].address;
334 	physical_freestart = physical_start
335 	    + (KERNEL_TEXT_BASE - KERNEL_BASE) + kerneldatasize;
336 	physical_end = bootconfig.dram[bootconfig.dramblocks - 1].address
337 	    + bootconfig.dram[bootconfig.dramblocks - 1].pages * PAGE_SIZE;
338 	physical_freeend = physical_end;
339 
340 	for (loop = 0; loop < bootconfig.dramblocks; ++loop)
341 		physmem += bootconfig.dram[loop].pages;
342 
343 	/* XXX handle UMA framebuffer memory */
344 
345 	/* Use the first 256kB to allocate things */
346 	freemempos = KERNEL_BASE;
347 	memset((void *)KERNEL_BASE, 0, KERNEL_TEXT_BASE - KERNEL_BASE);
348 
349 	/*
350 	 * Right. We have the bottom meg of memory mapped to 0x00000000
	 * so we can get at it. The kernel will occupy the start of it.
352 	 * After the kernel/args we allocate some of the fixed page tables
353 	 * we need to get the system going.
354 	 * We allocate one page directory and NUM_KERNEL_PTS page tables
355 	 * and store the physical addresses in the kernel_pt_table array.
	 * Remember that neither the L1 nor the L2 page tables are the same
	 * size as a page!
358 	 *
	 * Ok, the next bit of physical allocation may look complex, but it
	 * is really quite simple. I have done it like this so that no memory
	 * gets wasted during the allocation of the various pages and tables,
	 * which are all different sizes.
363 	 * The start address will be page aligned.
364 	 * We allocate the kernel page directory on the first free 16KB
365 	 * boundary we find.
366 	 * We allocate the kernel page tables on the first 1KB boundary we
	 * find.  We allocate at least 9 PT's (11 currently).  This means
368 	 * that in the process we KNOW that we will encounter at least one
369 	 * 16KB boundary.
370 	 *
371 	 * Eventually if the top end of the memory gets used for process L1
372 	 * page tables the kernel L1 page table may be moved up there.
373 	 */
374 
375 #ifdef VERBOSE_INIT_ARM
376 	printf("Allocating page tables\n");
377 #endif
378 
379 	/* Define a macro to simplify memory allocation */
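	/*
	 * alloc_pages() hands out physically contiguous pages by advancing
	 * freemempos; valloc_pages() additionally records the matching
	 * kernel virtual address.
	 */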
380 #define	valloc_pages(var, np)			\
381 	alloc_pages((var).pv_pa, (np));		\
382 	(var).pv_va = KERNEL_BASE + (var).pv_pa - physical_start;
383 #define	alloc_pages(var, np)			\
384 	(var) = freemempos;			\
385 	freemempos += (np) * PAGE_SIZE;
386 
387 	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
388 	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
389 		alloc_pages(kernel_pt_table[loop].pv_pa,
390 		    L2_TABLE_SIZE / PAGE_SIZE);
391 		kernel_pt_table[loop].pv_va = kernel_pt_table[loop].pv_pa;
392 	}
393 
	/* This should never happen, but better to confirm it. */
395 	if (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE-1)) != 0)
396 		panic("initarm: Failed to align the kernel page directory");
397 
398 	/*
	 * Allocate a page for the system page mapped to V0x00000000.
400 	 * This page will just contain the system vectors and can be
401 	 * shared by all processes.
402 	 */
403 	valloc_pages(systempage, 1);
404 
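	/*
	 * pt_size covers everything allocated up to this point (the L1
	 * table, the L2 tables and the vector page); that region is
	 * mapped as page-table memory further below.
	 */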
405 	pt_size = round_page(freemempos) - physical_start;
406 
407 	/* Allocate stacks for all modes */
408 	valloc_pages(irqstack, IRQ_STACK_SIZE);
409 	valloc_pages(abtstack, ABT_STACK_SIZE);
410 	valloc_pages(undstack, UND_STACK_SIZE);
411 	valloc_pages(kernelstack, UPAGES);
412 
413 #ifdef VERBOSE_INIT_ARM
414 	printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa,
415 	    irqstack.pv_va);
416 	printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa,
417 	    abtstack.pv_va);
418 	printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa,
419 	    undstack.pv_va);
420 	printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa,
421 	    kernelstack.pv_va);
422 #endif
423 
424 	alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);
425 
426 	/*
427 	 * XXX Actually, we only need virtual space and don't need
	 * XXX physical memory for sa1_cc_base and sa11x0_idle_mem.
429 	 */
430 	/*
	 * XXX totally stuffed hack to work around problems introduced
432 	 * in recent versions of the pmap code. Due to the calls used there
433 	 * we cannot allocate virtual memory during bootstrap.
434 	 */
435 	for (;;) {
436 		alloc_pages(sa1_cc_base, 1);
437 		if (!(sa1_cc_base & (CPU_SA110_CACHE_CLEAN_SIZE - 1)))
438 			break;
439 	}
440 	{
441 		vaddr_t dummy;
442 		alloc_pages(dummy, CPU_SA110_CACHE_CLEAN_SIZE / PAGE_SIZE - 1);
443 	}
444 	sa1_cache_clean_addr = sa1_cc_base;
445 	sa1_cache_clean_size = CPU_SA110_CACHE_CLEAN_SIZE / 2;
446 
447 	alloc_pages(sa11x0_idle_mem, 1);
448 
449 	/*
450 	 * Ok, we have allocated physical pages for the primary kernel
451 	 * page tables.
452 	 */
453 
454 #ifdef VERBOSE_INIT_ARM
455 	printf("Creating L1 page table\n");
456 #endif
457 
458 	/*
459 	 * Now we start construction of the L1 page table.
460 	 * We start by mapping the L2 page tables into the L1.
461 	 * This means that we can replace L1 mappings later on if necessary.
462 	 */
463 	l1pagetable = kernel_l1pt.pv_pa;
464 
	/* Map the L2 page tables into the L1 page table */
466 	pmap_link_l2pt(l1pagetable, 0x00000000,
467 	    &kernel_pt_table[KERNEL_PT_SYS]);
468 #define SAIPIO_BASE		0xd0000000		/* XXX XXX */
469 	pmap_link_l2pt(l1pagetable, SAIPIO_BASE,
470 	    &kernel_pt_table[KERNEL_PT_IO]);
471 	for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; ++loop)
472 		pmap_link_l2pt(l1pagetable, KERNEL_BASE + loop * 0x00400000,
473 		    &kernel_pt_table[KERNEL_PT_KERNEL + loop]);
474 	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
475 		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
476 		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);
477 
478 	/* update the top of the kernel VM */
479 	pmap_curmaxkvaddr =
480 	    KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);
481 
482 #ifdef VERBOSE_INIT_ARM
483 	printf("Mapping kernel\n");
484 #endif
485 
	/* Now we fill in the L2 page tables for the kernel code/data */
487 
488 	/*
489 	 * XXX there is no ELF header to find RO region.
490 	 * XXX What should we do?
491 	 */
492 #if 0
493 	if (N_GETMAGIC(kernexec[0]) == ZMAGIC) {
494 		logical = pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE,
495 		    physical_start, kernexec->a_text,
496 		    VM_PROT_READ, PTE_CACHE);
497 		logical += pmap_map_chunk(l1pagetable,
498 		    KERNEL_TEXT_BASE + logical, physical_start + logical,
499 		    kerneldatasize - kernexec->a_text,
500 		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
501 	} else
502 #endif
503 		pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE,
504 		    KERNEL_TEXT_BASE - KERNEL_BASE + physical_start,
505 		    kerneldatasize, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
506 
507 #ifdef VERBOSE_INIT_ARM
508 	printf("Constructing L2 page tables\n");
509 #endif
510 
511 	/* Map the stack pages */
512 	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
513 	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
514 	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
515 	    ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
516 	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
517 	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
518 	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
519 	    UPAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
520 
521 	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
522 	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
523 
524 	/* Map page tables */
525 	pmap_map_chunk(l1pagetable, KERNEL_BASE, physical_start, pt_size,
526 	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
527 
528 	/* Map a page for entering idle mode */
529 	pmap_map_entry(l1pagetable, sa11x0_idle_mem, sa11x0_idle_mem,
530 	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
531 
532 	/* Map the vector page. */
533 	pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
534 	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
535 
536 	/* Map the statically mapped devices. */
537 	pmap_devmap_bootstrap(l1pagetable, sa11x0_devmap);
538 
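	/*
	 * Map the cache-clean window reserved earlier; the SA-1 cache
	 * clean routines read through this range to clean the D-cache.
	 */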
539 	pmap_map_chunk(l1pagetable, sa1_cache_clean_addr, 0xe0000000,
540 	    CPU_SA110_CACHE_CLEAN_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
541 
542 	/*
543 	 * Now we have the real page tables in place so we can switch to them.
544 	 * Once this is done we will be running with the REAL kernel page
545 	 * tables.
546 	 */
547 
548 #ifdef VERBOSE_INIT_ARM
549 	printf("done.\n");
550 #endif
551 
552 	/*
553 	 * Pages were allocated during the secondary bootstrap for the
554 	 * stacks for different CPU modes.
555 	 * We must now set the r13 registers in the different CPU modes to
556 	 * point to these stacks.
557 	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
558 	 * of the stack memory.
559 	 */
560 #ifdef VERBOSE_INIT_ARM
561 	printf("init subsystems: stacks ");
562 #endif
563 
564 	set_stackptr(PSR_IRQ32_MODE,
565 	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
566 	set_stackptr(PSR_ABT32_MODE,
567 	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
568 	set_stackptr(PSR_UND32_MODE,
569 	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);
570 #ifdef PMAP_DEBUG
571 	if (pmap_debug_level >= 0)
572 		printf("kstack V%08lx P%08lx\n", kernelstack.pv_va,
573 		    kernelstack.pv_pa);
574 #endif /* PMAP_DEBUG */
575 
576 	/*
	 * We should set a data abort handler.
	 * Once things get going this will change, as we will need a proper
	 * handler. Until then we will use a handler that just panics but
	 * tells us why.
	 * The initial vectors just panic on a data abort; this fills in a
	 * slightly better handler.
583 	 */
584 #ifdef VERBOSE_INIT_ARM
585 	printf("vectors ");
586 #endif
587 	data_abort_handler_address = (u_int)data_abort_handler;
588 	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
589 	undefined_handler_address = (u_int)undefinedinstruction_bounce;
590 #ifdef DEBUG
591 	printf("%08x %08x %08x\n", data_abort_handler_address,
592 	    prefetch_abort_handler_address, undefined_handler_address);
593 #endif
594 
595 	/* Initialize the undefined instruction handlers */
596 #ifdef VERBOSE_INIT_ARM
597 	printf("undefined\n");
598 #endif
599 	undefined_init();
600 
601 	/* Set the page table address. */
602 #ifdef VERBOSE_INIT_ARM
603 	printf("switching to new L1 page table  @%#lx...\n", kernel_l1pt.pv_pa);
604 #endif
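	/*
	 * Set up domain access, load the new translation table base and
	 * flush the now-stale TLB entries.
	 */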
605 	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
606 	cpu_setttb(kernel_l1pt.pv_pa);
607 	cpu_tlb_flushID();
608 	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));
609 
610 	/*
611 	 * Moved from cpu_startup() as data_abort_handler() references
612 	 * this during uvm init.
613 	 */
614 	uvm_lwp_setuarea(&lwp0, kernelstack.pv_va);
615 
616 #ifdef BOOT_DUMP
617 	dumppages((char *)0xc0000000, 16 * PAGE_SIZE);
618 	dumppages((char *)0xb0100000, 64); /* XXX */
619 #endif
620 	/* Enable MMU, I-cache, D-cache, write buffer. */
621 	cpufunc_control(0x337f, 0x107d);
622 
623 	arm32_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL);
624 
625 	consinit();
626 
627 #ifdef VERBOSE_INIT_ARM
628 	printf("bootstrap done.\n");
629 #endif
630 
631 #ifdef VERBOSE_INIT_ARM
632 	printf("freemempos=%08lx\n", freemempos);
633 	printf("MMU enabled. control=%08x\n", cpu_get_control());
634 #endif
635 
636 	/* Load memory into UVM. */
637 	uvm_setpagesize();	/* initialize PAGE_SIZE-dependent variables */
638 	for (loop = 0; loop < bootconfig.dramblocks; loop++) {
639 		paddr_t dblk_start = (paddr_t)bootconfig.dram[loop].address;
640 		paddr_t dblk_end = dblk_start
641 			+ (bootconfig.dram[loop].pages * PAGE_SIZE);
642 
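		/*
		 * Clamp the block to the free region so that pages already
		 * used by the kernel and the bootstrap allocations are not
		 * handed to UVM.
		 */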
643 		if (dblk_start < physical_freestart)
644 			dblk_start = physical_freestart;
645 		if (dblk_end > physical_freeend)
646 			dblk_end = physical_freeend;
647 
648 		uvm_page_physload(atop(dblk_start), atop(dblk_end),
649 		    atop(dblk_start), atop(dblk_end), VM_FREELIST_DEFAULT);
650 	}
651 
	/* Bootstrap pmap, telling it where the kernel page table is. */
653 	pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE);
654 
655 #ifdef BOOT_DUMP
656 	dumppages((char *)kernel_l1pt.pv_va, 16);
657 #endif
658 
659 #ifdef DDB
660 	db_machine_init();
661 #endif
662 #if NKSYMS || defined(DDB) || defined(MODULAR)
663 	ksyms_addsyms_elf(symbolsize, ((int *)&end), ((char *)&end) + symbolsize);
664 #endif
665 
666 	printf("kernsize=0x%x", kerneldatasize);
667 	printf(" (including 0x%x symbols)\n", symbolsize);
668 
669 #ifdef DDB
670 	if (boothowto & RB_KDB)
671 		Debugger();
672 #endif /* DDB */
673 
674 	/* We return the new stack pointer address */
675 	return (kernelstack.pv_va + USPACE_SVC_STACK_TOP);
676 }
677 
678 void
679 consinit(void)
680 {
681 	static int consinit_called = 0;
682 
683 	if (consinit_called != 0)
684 		return;
685 
686 	consinit_called = 1;
687 	if (bootinfo->bi_cnuse == BI_CNUSE_SERIAL) {
688 		cninit();
689 	}
690 }
691 
692 #ifdef DEBUG_BEFOREMMU
693 cons_decl(sacom);
694 
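/*
 * Minimal console initialization that works with the MMU still
 * disabled; used from initarm() when DEBUG_BEFOREMMU is defined.
 */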
695 static void
696 fakecninit(void)
697 {
698 	static struct consdev fakecntab = cons_init(sacom);
699 	cn_tab = &fakecntab;
700 
701 	(*cn_tab->cn_init)(0);
702 	cn_tab->cn_pri = CN_REMOTE;
703 }
704 #endif
705