1 /*	$NetBSD: pxa2x0_hpc_machdep.c,v 1.8 2010/06/26 00:25:02 tsutsui Exp $	*/
2 
3 /*
4  * Copyright (c) 1994-1998 Mark Brinicombe.
5  * Copyright (c) 1994 Brini.
6  * All rights reserved.
7  *
8  * This code is derived from software written for Brini by Mark Brinicombe
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *      This product includes software developed by Brini.
21  * 4. The name of the company nor the name of the author may be used to
22  *    endorse or promote products derived from this software without specific
23  *    prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
26  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
28  * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
29  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
30  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
31  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 
38 /*
39  * Machine dependent functions for kernel setup.
40  */
41 
42 #include <sys/cdefs.h>
43 __KERNEL_RCSID(0, "$NetBSD: pxa2x0_hpc_machdep.c,v 1.8 2010/06/26 00:25:02 tsutsui Exp $");
44 
45 #include "opt_ddb.h"
46 #include "opt_dram_pages.h"
47 #include "opt_modular.h"
48 #include "opt_pmap_debug.h"
49 #include "ksyms.h"
50 
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/kernel.h>
54 #include <sys/reboot.h>
55 #include <sys/proc.h>
56 #include <sys/msgbuf.h>
57 #include <sys/exec.h>
58 #include <sys/ksyms.h>
59 #include <sys/boot_flag.h>
60 #include <sys/conf.h>	/* XXX for consinit related hacks */
61 #include <sys/device.h>
62 #include <sys/termios.h>
63 
64 #if NKSYMS || defined(DDB) || defined(MODULAR)
65 #include <machine/db_machdep.h>
66 #include <ddb/db_sym.h>
67 #include <ddb/db_extern.h>
68 #ifndef DB_ELFSIZE
69 #error Must define DB_ELFSIZE!
70 #endif
71 #define ELFSIZE	DB_ELFSIZE
72 #include <sys/exec_elf.h>
73 #endif
74 
75 #include <uvm/uvm.h>
76 
77 #include <arm/xscale/pxa2x0cpu.h>
78 #include <arm/xscale/pxa2x0reg.h>
79 #include <arm/xscale/pxa2x0var.h>
80 #include <arm/xscale/pxa2x0_gpio.h>
81 #include <arm/cpuconf.h>
82 #include <arm/undefined.h>
83 
84 #include <machine/bootconfig.h>
85 #include <machine/bootinfo.h>
86 #include <machine/bus.h>
87 #include <machine/cpu.h>
88 #include <machine/frame.h>
89 #include <machine/intr.h>
90 #include <machine/io.h>
91 #include <machine/platid.h>
92 #include <machine/platid_mask.h>
93 #include <machine/rtc.h>
94 #include <machine/signal.h>
95 
96 #include <dev/cons.h>
97 #include <dev/hpc/apm/apmvar.h>
98 #include <dev/hpc/bicons.h>
99 
100 #include "com.h"
101 #if (NCOM > 0)
102 #include "opt_com.h"
103 #include <dev/ic/comvar.h>
104 #endif	/* NCOM > 0 */
105 #include "lcd.h"
106 #include "wzero3lcd.h"
107 
108 #include <sys/mount.h>
109 #include <nfs/rpcv2.h>
110 #include <nfs/nfsproto.h>
111 #include <nfs/nfs.h>
112 #include <nfs/nfsmount.h>
113 
114 /* Kernel text starts 2MB in from the bottom of the kernel address space. */
115 #define	KERNEL_TEXT_BASE	(KERNEL_BASE + 0x00200000)
116 #define	KERNEL_VM_BASE		(KERNEL_BASE + 0x00C00000)
117 #define	KERNEL_VM_SIZE		0x05000000
118 
119 /*
120  * Address to call from cpu_reset() to reset the machine.
121  * This is machine architecture dependant as it varies depending
122  * on where the ROM appears when you turn the MMU off.
123  */
124 u_int cpu_reset_address = 0;
125 
126 /* Define various stack sizes in pages */
127 #define IRQ_STACK_SIZE	1
128 #define ABT_STACK_SIZE	1
129 #define UND_STACK_SIZE	1
130 
131 extern BootConfig bootconfig;		/* Boot config storage */
132 extern struct bootinfo *bootinfo, bootinfo_storage;
133 extern char booted_kernel_storage[80];
134 extern char *booted_kernel;
135 
136 extern paddr_t physical_start;
137 extern paddr_t physical_freestart;
138 extern paddr_t physical_freeend;
139 extern paddr_t physical_end;
140 extern int physmem;
141 
142 /* Physical and virtual addresses for some global pages */
143 extern pv_addr_t irqstack;
144 extern pv_addr_t undstack;
145 extern pv_addr_t abtstack;
146 extern pv_addr_t kernelstack;
147 
148 extern char *boot_args;
149 extern char boot_file[16];
150 
151 extern vaddr_t msgbufphys;
152 
153 extern u_int data_abort_handler_address;
154 extern u_int prefetch_abort_handler_address;
155 extern u_int undefined_handler_address;
156 extern int end;
157 
158 #ifdef PMAP_DEBUG
159 extern int pmap_debug_level;
160 #endif /* PMAP_DEBUG */
161 
162 #define	KERNEL_PT_VMEM		0	/* Page table for mapping video memory */
163 #define	KERNEL_PT_SYS		1	/* Page table for mapping proc0 zero page */
164 #define	KERNEL_PT_IO		2	/* Page table for mapping IO */
165 #define	KERNEL_PT_KERNEL	3	/* Page table for mapping kernel */
166 #define	KERNEL_PT_KERNEL_NUM	4
167 #define	KERNEL_PT_VMDATA	(KERNEL_PT_KERNEL + KERNEL_PT_KERNEL_NUM)
168 					/* Page tables for mapping kernel VM */
169 #define	KERNEL_PT_VMDATA_NUM	4	/* start with 16MB of KVM */
170 #define	NUM_KERNEL_PTS		(KERNEL_PT_VMDATA + KERNEL_PT_VMDATA_NUM)
171 
172 pv_addr_t kernel_pt_table[NUM_KERNEL_PTS];
173 
174 pv_addr_t minidataclean;
175 
176 /* Prototypes */
177 void data_abort_handler(trapframe_t *);
178 void prefetch_abort_handler(trapframe_t *);
179 void undefinedinstruction_bounce(trapframe_t *);
180 u_int cpu_get_control(void);
181 
182 u_int initarm(int, char **, struct bootinfo *);
183 
184 /* Mode dependent sleep function holder */
185 extern void (*__sleep_func)(void *);
186 extern void *__sleep_ctx;
187 
188 extern void (*__cpu_reset)(void);
189 
190 #ifdef DEBUG_BEFOREMMU
191 static void	fakecninit(void);
192 #endif
193 
194 /* Number of DRAM pages which are installed */
195 /* Units are 4K pages, so 8192 is 32 MB of memory */
196 #ifndef DRAM_PAGES
197 #define DRAM_PAGES	8192
198 #endif
199 
200 /*
201  * Static device mappings. These peripheral registers are mapped at
202  * fixed virtual addresses very early in initarm() so that we can use
203  * them while booting the kernel and stay at the same address
204  * throughout whole kernel's life time.
205  */
206 #define	PXA2X0_GPIO_VBASE	0xfd000000
207 #define	PXA2X0_CLKMAN_VBASE	0xfd100000
208 #define	PXA2X0_INTCTL_VBASE	0xfd200000
209 #define	PXA2X0_MEMCTL_VBASE	0xfd300000
210 #define	PXA2X0_FFUART_VBASE	0xfd400000
211 #define	PXA2X0_BTUART_VBASE	0xfd500000
212 #define	PXA2X0_STUART_VBASE	0xfd600000
213 
214 #define	_A(a)	((a) & L1_S_FRAME)
215 #define	_S(s)	(((s) + L1_S_SIZE - 1) & L1_S_FRAME)
const struct pmap_devmap pxa2x0_devmap[] = {
    /* GPIO controller */
    {
	    PXA2X0_GPIO_VBASE,
	    _A(PXA2X0_GPIO_BASE),
	    _S(PXA2X0_GPIO_SIZE),
	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE,
    },
    /* Clock manager */
    {
	    PXA2X0_CLKMAN_VBASE,
	    _A(PXA2X0_CLKMAN_BASE),
	    _S(PXA2X0_CLKMAN_SIZE),
	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE,
    },
    /* Interrupt controller */
    {
	    PXA2X0_INTCTL_VBASE,
	    _A(PXA2X0_INTCTL_BASE),
	    _S(PXA2X0_INTCTL_SIZE),
	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE,
    },
    /* Memory controller */
    {
	    PXA2X0_MEMCTL_VBASE,
	    _A(PXA2X0_MEMCTL_BASE),
	    _S(PXA2X0_MEMCTL_SIZE),
	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE,
    },
    /* FFUART registers (4-byte register spacing, COM_NPORTS registers) */
    {
	    PXA2X0_FFUART_VBASE,
	    _A(PXA2X0_FFUART_BASE),
	    _S(4 * COM_NPORTS),
	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE,
    },
    /* BTUART registers */
    {
	    PXA2X0_BTUART_VBASE,
	    _A(PXA2X0_BTUART_BASE),
	    _S(4 * COM_NPORTS),
	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE,
    },
    /* STUART registers */
    {
	    PXA2X0_STUART_VBASE,
	    _A(PXA2X0_STUART_BASE),
	    _S(4 * COM_NPORTS),
	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE,
    },

    { 0, 0, 0, 0, 0, }	/* sentinel: terminates the table */
};
262 #undef	_A
263 #undef	_S
264 
/*
 * Machine reset hook for the WS003SH/WS004SH (installed as __cpu_reset
 * from initarm()).  Never returns.
 */
static void
ws003sh_cpu_reset(void)
{
	uint32_t rv;

	/*
	 * Rewrite the MSC0 static-memory-controller timing field
	 * (0x7ff0 -> 0x7ee0 in the upper halfword) before resetting;
	 * presumably needed so the boot ROM is accessible after reset —
	 * TODO confirm against the board documentation.
	 */
	rv = pxa2x0_memctl_read(MEMCTL_MSC0);
	if ((rv & 0xffff0000) == 0x7ff00000) {
		pxa2x0_memctl_write(MEMCTL_MSC0, (rv & 0xffff) | 0x7ee00000);
	}

	/*
	 * Drive GPIO 89 high; on this board that pin appears to be wired
	 * to the reset circuitry — TODO confirm.
	 */
	pxa2x0_gpio_set_function(89, GPIO_OUT | GPIO_SET);
	/* Spin until the hardware reset takes effect. */
	for (;;)
		continue;
}
279 
/* Board-specific GPIO pin functions for the WS003SH/WS004SH. */
static struct pxa2x0_gpioconf ws003sh_boarddep_gpioconf[] = {
	/* FFUART */
	{  98, GPIO_ALT_FN_3_OUT },	/* FFRTS */
	{  99, GPIO_ALT_FN_3_OUT },	/* FFTXD */
	/* SSP3 */
	{  34, GPIO_ALT_FN_3_OUT },	/* SSPSCLK3 */
	{  38, GPIO_ALT_FN_1_OUT },	/* SSPTXD3 */
	{  82, GPIO_ALT_FN_1_IN },	/* SSPRXD3 */

	{ -1 }	/* end of list */
};
291 
/* Board-specific GPIO pin functions for the WS007SH. */
static struct pxa2x0_gpioconf ws007sh_boarddep_gpioconf[] = {
	/* FFUART */
	{  98, GPIO_ALT_FN_3_OUT },	/* FFRTS */
	{  99, GPIO_ALT_FN_3_OUT },	/* FFTXD */
	/* SSP2 */
	{  19, GPIO_ALT_FN_1_OUT },	/* SSPSCLK2 */
	{  86, GPIO_ALT_FN_1_IN },	/* SSPRXD2 */
	{  87, GPIO_ALT_FN_1_OUT },	/* SSPTXD2 */
	/* SSP3 */
	{  38, GPIO_ALT_FN_1_OUT },	/* SSPTXD3 */
	{  52, GPIO_ALT_FN_2_OUT },	/* SSPSCLK3 */
	{  89, GPIO_ALT_FN_1_IN },	/* SSPRXD3 */

	{ -1 }	/* end of list */
};
307 
/* Board-specific GPIO pin functions for the WS011SH. */
static struct pxa2x0_gpioconf ws011sh_boarddep_gpioconf[] = {
	/* FFUART */
	{  98, GPIO_ALT_FN_3_OUT },	/* FFRTS */
	{  99, GPIO_ALT_FN_3_OUT },	/* FFTXD */
	/* SSP2 */
	{  19, GPIO_ALT_FN_1_OUT },	/* SSPSCLK2 */
	{  86, GPIO_ALT_FN_1_IN },	/* SSPRXD2 */
	{  87, GPIO_ALT_FN_1_OUT },	/* SSPTXD2 */

	{ -1 }	/* end of list */
};
319 
/*
 * Complete GPIO configuration for WS003SH/WS004SH: generic PXA27x
 * FFUART/MMC/OHCI tables plus the board-specific table, NULL-terminated.
 */
static struct pxa2x0_gpioconf *ws003sh_gpioconf[] = {
	pxa27x_com_ffuart_gpioconf,
	pxa27x_pxamci_gpioconf,
	pxa27x_ohci_gpioconf,
	ws003sh_boarddep_gpioconf,
	NULL
};
327 
/* Complete GPIO configuration for the WS007SH (NULL-terminated). */
static struct pxa2x0_gpioconf *ws007sh_gpioconf[] = {
	pxa27x_com_ffuart_gpioconf,
	pxa27x_pxamci_gpioconf,
	pxa27x_ohci_gpioconf,
	ws007sh_boarddep_gpioconf,
	NULL
};
335 
/* Complete GPIO configuration for the WS011SH (NULL-terminated). */
static struct pxa2x0_gpioconf *ws011sh_gpioconf[] = {
	pxa27x_com_ffuart_gpioconf,
	pxa27x_pxamci_gpioconf,
	pxa27x_ohci_gpioconf,
	ws011sh_boarddep_gpioconf,
	NULL
};
343 
/*
 * Read the current translation table base from CP15 register 2 (TTBR)
 * and return it as a pointer to the L1 page directory, masking off the
 * low 14 bits (the L1 table is 16KB aligned).
 */
static inline pd_entry_t *
read_ttb(void)
{
	u_long ttb;

	__asm volatile("mrc p15, 0, %0, c2, c0, 0" : "=r" (ttb));

	return (pd_entry_t *)(ttb & ~((1 << 14) - 1));
}
353 
354 /*
355  * Initial entry point on startup. This gets called before main() is
356  * entered.
357  * It should be responsible for setting up everything that must be
358  * in place when main is called.
359  * This includes:
360  *   Taking a copy of the boot configuration structure.
361  *   Initializing the physical console so characters can be printed.
362  *   Setting up page tables for the kernel.
363  */
u_int
initarm(int argc, char **argv, struct bootinfo *bi)
{
#ifdef DIAGNOSTIC
	extern vsize_t xscale_minidata_clean_size; /* used in KASSERT */
#endif
	extern vaddr_t xscale_cache_clean_addr;
	u_int kerneldatasize, symbolsize;
	u_int l1pagetable;
	vaddr_t freemempos;
	vsize_t pt_size;
	int loop, i;
#if NKSYMS || defined(DDB) || defined(MODULAR)
	Elf_Shdr *sh;
#endif

	__sleep_func = NULL;
	__sleep_ctx = NULL;

	/* parse kernel args */
	boothowto = 0;
	boot_file[0] = '\0';
	strncpy(booted_kernel_storage, argv[0], sizeof(booted_kernel_storage));
	for (i = 1; i < argc; i++) {
		char *cp = argv[i];

		switch (*cp) {
		case 'b':
			/* boot device: -b=sd0 etc. */
			cp = cp + 2;
			if (strcmp(cp, MOUNT_NFS) == 0)
				rootfstype = MOUNT_NFS;
			else
				strncpy(boot_file, cp, sizeof(boot_file));
			break;
		default:
			BOOT_FLAG(*cp, boothowto);
			break;
		}
	}

	/* copy bootinfo into known kernel space */
	bootinfo_storage = *bi;
	bootinfo = &bootinfo_storage;

#ifdef BOOTINFO_FB_WIDTH
	/* Override framebuffer geometry with compile-time values. */
	bootinfo->fb_line_bytes = BOOTINFO_FB_LINE_BYTES;
	bootinfo->fb_width = BOOTINFO_FB_WIDTH;
	bootinfo->fb_height = BOOTINFO_FB_HEIGHT;
	bootinfo->fb_type = BOOTINFO_FB_TYPE;
#endif

	if (bootinfo->magic == BOOTINFO_MAGIC) {
		platid.dw.dw0 = bootinfo->platid_cpu;
		platid.dw.dw1 = bootinfo->platid_machine;
	}

#ifndef RTC_OFFSET
	/*
	 * rtc_offset from bootinfo.timezone set by hpcboot.exe
	 */
	if (rtc_offset == 0 &&
	    (bootinfo->timezone > (-12 * 60) &&
	     bootinfo->timezone <= (12 * 60)))
		rtc_offset = bootinfo->timezone;
#endif

	/*
	 * Heads up ... Setup the CPU / MMU / TLB functions.
	 */
	set_cpufuncs();
	IRQdisable;

	/*
	 * Bootstrap the register windows first at their physical
	 * addresses, enter the static device mappings, then switch the
	 * bootstrap handles over to the fixed virtual addresses.
	 */
	pxa2x0_memctl_bootstrap(PXA2X0_MEMCTL_BASE);
	pxa2x0_intr_bootstrap(PXA2X0_INTCTL_BASE);
	pmap_devmap_bootstrap((vaddr_t)read_ttb(), pxa2x0_devmap);
	pxa2x0_memctl_bootstrap(PXA2X0_MEMCTL_VBASE);
	pxa2x0_intr_bootstrap(PXA2X0_INTCTL_VBASE);
	pxa2x0_clkman_bootstrap(PXA2X0_CLKMAN_VBASE);
	pxa2x0_gpio_bootstrap(PXA2X0_GPIO_VBASE);

	/*
	 * Sharp W-ZERO3 models: configure board GPIOs, install the
	 * board reset hook where needed, and gate the unused clocks.
	 */
	if (platid_match(&platid, &platid_mask_MACH_SHARP_WZERO3_WS003SH)
	 || platid_match(&platid, &platid_mask_MACH_SHARP_WZERO3_WS004SH)
	 || platid_match(&platid, &platid_mask_MACH_SHARP_WZERO3_WS007SH)
	 || platid_match(&platid, &platid_mask_MACH_SHARP_WZERO3_WS011SH)
	 || platid_match(&platid, &platid_mask_MACH_SHARP_WZERO3_WS020SH)) {
		if (platid_match(&platid, &platid_mask_MACH_SHARP_WZERO3_WS003SH)
		 || platid_match(&platid, &platid_mask_MACH_SHARP_WZERO3_WS004SH)) {
			pxa2x0_gpio_config(ws003sh_gpioconf);
			__cpu_reset = ws003sh_cpu_reset;
		} else if (platid_match(&platid, &platid_mask_MACH_SHARP_WZERO3_WS007SH)) {
			pxa2x0_gpio_config(ws007sh_gpioconf);
		} else if (platid_match(&platid, &platid_mask_MACH_SHARP_WZERO3_WS011SH)) {
			pxa2x0_gpio_config(ws011sh_gpioconf);
		}
		pxa2x0_clkman_config(CKEN_FFUART, 1);
		pxa2x0_clkman_config(CKEN_OST, 1);
		pxa2x0_clkman_config(CKEN_USBHC, 0);
		pxa2x0_clkman_config(CKEN_USBDC, 0);
		pxa2x0_clkman_config(CKEN_AC97, 0);
		pxa2x0_clkman_config(CKEN_SSP, 0);
		pxa2x0_clkman_config(CKEN_HWUART, 0);
		pxa2x0_clkman_config(CKEN_STUART, 0);
		pxa2x0_clkman_config(CKEN_BTUART, 0);
		pxa2x0_clkman_config(CKEN_I2S, 0);
		pxa2x0_clkman_config(CKEN_MMC, 0);
		pxa2x0_clkman_config(CKEN_FICP, 0);
		pxa2x0_clkman_config(CKEN_I2C, 0);
		pxa2x0_clkman_config(CKEN_PWM1, 0);
		if (!platid_match(&platid, &platid_mask_MACH_SHARP_WZERO3_WS011SH)) {
			pxa2x0_clkman_config(CKEN_PWM0, 0); /* WS011SH: DON'T DISABLE */
		}
	}

#ifdef DEBUG_BEFOREMMU
	/*
	 * At this point, we cannot call real consinit().
	 * Just call a faked up version of consinit(), which does the thing
	 * with MMU disabled.
	 */
	fakecninit();
#endif

	/*
	 * XXX for now, overwrite bootconfig to hardcoded values.
	 * XXX kill bootconfig and directly call uvm_physload
	 */
	bootconfig.dram[0].address = 0xa0000000;
	bootconfig.dram[0].pages = DRAM_PAGES;
	bootconfig.dramblocks = 1;

	/* Per-model DRAM sizes override the DRAM_PAGES default. */
	if (platid_match(&platid, &platid_mask_MACH_SHARP_WZERO3_WS003SH)
	 || platid_match(&platid, &platid_mask_MACH_SHARP_WZERO3_WS004SH)
	 || platid_match(&platid, &platid_mask_MACH_SHARP_WZERO3_WS007SH)) {
		bootconfig.dram[0].pages = 16384; /* 64MiB */
	} else
	if (platid_match(&platid, &platid_mask_MACH_SHARP_WZERO3_WS011SH)
	 || platid_match(&platid, &platid_mask_MACH_SHARP_WZERO3_WS020SH)) {
		bootconfig.dram[0].pages = 32768; /* 128MiB */
	}

	kerneldatasize = (uint32_t)&end - (uint32_t)KERNEL_TEXT_BASE;
	symbolsize = 0;
#if NKSYMS || defined(DDB) || defined(MODULAR)
	/* Find the extent of the ELF symbol data appended after "end". */
	if (!memcmp(&end, "\177ELF", 4)) {
		sh = (Elf_Shdr *)((char *)&end + ((Elf_Ehdr *)&end)->e_shoff);
		loop = ((Elf_Ehdr *)&end)->e_shnum;
		for (; loop; loop--, sh++)
			if (sh->sh_offset > 0 &&
			    (sh->sh_offset + sh->sh_size) > symbolsize)
				symbolsize = sh->sh_offset + sh->sh_size;
	}
#endif

	printf("kernsize=0x%x\n", kerneldatasize);
	kerneldatasize += symbolsize;
	/* Round up to a 16KB boundary plus 32KB of slack. */
	kerneldatasize = ((kerneldatasize - 1) & ~(PAGE_SIZE * 4 - 1)) +
	    PAGE_SIZE * 8;

	/*
	 * hpcboot has loaded me with MMU disabled.
	 * So create kernel page tables and enable MMU.
	 */

	/*
	 * Set up the variables that define the availability of physcial
	 * memory.
	 */
	physical_start = bootconfig.dram[0].address;
	physical_freestart = physical_start
	    + (KERNEL_TEXT_BASE - KERNEL_BASE) + kerneldatasize;
	physical_end = bootconfig.dram[bootconfig.dramblocks - 1].address
	    + bootconfig.dram[bootconfig.dramblocks - 1].pages * PAGE_SIZE;
	physical_freeend = physical_end;

	for (loop = 0; loop < bootconfig.dramblocks; ++loop)
		physmem += bootconfig.dram[loop].pages;

	/* XXX handle UMA framebuffer memory */

	/*
	 * Bootstrap allocations start at 0xa0009000 and the region from
	 * there up to the kernel text is zeroed; presumably the first
	 * 0x9000 bytes are still in use by the bootloader — TODO confirm.
	 */
	freemempos = 0xa0009000UL;
	memset((void *)freemempos, 0, KERNEL_TEXT_BASE - KERNEL_BASE - 0x9000);

	/*
	 * Right. We have the bottom meg of memory mapped to 0x00000000
	 * so was can get at it. The kernel will occupy the start of it.
	 * After the kernel/args we allocate some of the fixed page tables
	 * we need to get the system going.
	 * We allocate one page directory and NUM_KERNEL_PTS page tables
	 * and store the physical addresses in the kernel_pt_table array.
	 * Must remember that neither the page L1 or L2 page tables are the
	 * same size as a page !
	 *
	 * Ok, the next bit of physical allocate may look complex but it is
	 * simple really. I have done it like this so that no memory gets
	 * wasted during the allocate of various pages and tables that are
	 * all different sizes.
	 * The start address will be page aligned.
	 * We allocate the kernel page directory on the first free 16KB
	 * boundary we find.
	 * We allocate the kernel page tables on the first 1KB boundary we
	 * find.  We allocate at least 9 PT's (12 currently).  This means
	 * that in the process we KNOW that we will encounter at least one
	 * 16KB boundary.
	 *
	 * Eventually if the top end of the memory gets used for process L1
	 * page tables the kernel L1 page table may be moved up there.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Allocating page tables\n");
#endif

	/* Define a macro to simplify memory allocation */
#define	valloc_pages(var, np)			\
	alloc_pages((var).pv_pa, (np));		\
	(var).pv_va = KERNEL_BASE + (var).pv_pa - physical_start;
#define	alloc_pages(var, np)			\
	(var) = freemempos;			\
	freemempos += (np) * PAGE_SIZE;

	{
		int loop1 = 0;
		kernel_l1pt.pv_pa = 0;
		kernel_l1pt.pv_va = 0;
		for (loop = 0; loop <= NUM_KERNEL_PTS; ++loop) {
			/* Are we 16KB aligned for an L1 ? */
			if (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) == 0
			    && kernel_l1pt.pv_pa == 0) {
				valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
			} else {
				valloc_pages(kernel_pt_table[loop1],
				    L2_TABLE_SIZE / PAGE_SIZE);
				++loop1;
			}
		}
	}

	/* This should never be able to happen but better confirm that. */
	if (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE-1)) != 0)
		panic("initarm: Failed to align the kernel page directory");

	/*
	 * Allocate a page for the system page mapped to V0x00000000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Memory consumed so far by the page tables and vector page. */
	pt_size = round_page(freemempos) - physical_start;

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, UPAGES);

#ifdef VERBOSE_INIT_ARM
	printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa,
	    irqstack.pv_va);
	printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa,
	    abtstack.pv_va);
	printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa,
	    undstack.pv_va);
	printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa,
	    kernelstack.pv_va);
#endif

	alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);

	/* Allocate enough pages for cleaning the Mini-Data cache. */
	KASSERT(xscale_minidata_clean_size <= PAGE_SIZE);
	valloc_pages(minidataclean, 1);
#ifdef VERBOSE_INIT_ARM
	printf("minidataclean: p0x%08lx v0x%08lx, size = %ld\n",
	    minidataclean.pv_pa, minidataclean.pv_va,
	    xscale_minidata_clean_size);
#endif

	/*
	 * Ok, we have allocated physical pages for the primary kernel
	 * page tables.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Creating L1 page table\n");
#endif

	/*
	 * Now we start construction of the L1 page table.
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary.
	 */
	l1pagetable = kernel_l1pt.pv_pa;

	/* Map the L2 pages tables in the L1 page table */
	pmap_link_l2pt(l1pagetable, 0x00000000,
	    &kernel_pt_table[KERNEL_PT_SYS]);
	for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; ++loop)
		pmap_link_l2pt(l1pagetable, KERNEL_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_KERNEL + loop]);
	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);

#ifdef VERBOSE_INIT_ARM
	printf("Mapping kernel\n");
#endif

	/* Now we fill in the L2 pagetable for the kernel code/data */

	/*
	 * XXX there is no ELF header to find RO region.
	 * XXX What should we do?
	 */
#if 0
	if (N_GETMAGIC(kernexec[0]) == ZMAGIC) {
		logical = pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE,
		    physical_start, kernexec->a_text,
		    VM_PROT_READ, PTE_CACHE);
		logical += pmap_map_chunk(l1pagetable,
		    KERNEL_TEXT_BASE + logical, physical_start + logical,
		    kerneldatasize - kernexec->a_text,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	} else
#endif
		pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE,
		    KERNEL_TEXT_BASE - KERNEL_BASE + physical_start,
		    kerneldatasize, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

#ifdef VERBOSE_INIT_ARM
	printf("Constructing L2 page tables\n");
#endif

	/* Map the stack pages */
	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
	    UPAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	/* Map page tables */
	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
		    kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	}

	/* Map the Mini-Data cache clean area. */
	xscale_setup_minidata(l1pagetable, minidataclean.pv_va,
	    minidataclean.pv_pa);

	/* Map the vector page. */
	pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/*
	 * map integrated peripherals at same address in l1pagetable
	 * so that we can continue to use console.
	 */
	pmap_devmap_bootstrap(l1pagetable, pxa2x0_devmap);

	/*
	 * Give the XScale global cache clean code an appropriately
	 * sized chunk of unmapped VA space starting at 0xff000000
	 * (our device mappings end before this address).
	 */
	xscale_cache_clean_addr = 0xff000000U;

	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("done.\n");
#endif

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("init subsystems: stacks ");
#endif

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);
#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("kstack V%08lx P%08lx\n", kernelstack.pv_va,
		    kernelstack.pv_pa);
#endif /* PMAP_DEBUG */

	/*
	 * Well we should set a data abort handler.
	 * Once things get going this will change as we will need a proper
	 * handler. Until then we will use a handler that just panics but
	 * tells us why.
	 * Initialization of the vectors will just panic on a data abort.
	 * This just fills in a slightly better one.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("vectors ");
#endif
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;
#ifdef DEBUG
	printf("%08x %08x %08x\n", data_abort_handler_address,
	    prefetch_abort_handler_address, undefined_handler_address);
#endif

	/* Initialize the undefined instruction handlers */
#ifdef VERBOSE_INIT_ARM
	printf("undefined\n");
#endif
	undefined_init();

	/* Set the page table address. */
#ifdef VERBOSE_INIT_ARM
	printf("switching to new L1 page table  @%#lx...\n", kernel_l1pt.pv_pa);
#endif
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	cpu_setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

	/*
	 * Moved from cpu_startup() as data_abort_handler() references
	 * this during uvm init.
	 */
	uvm_lwp_setuarea(&lwp0, kernelstack.pv_va);

	arm32_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL);

	consinit();

#ifdef VERBOSE_INIT_ARM
	printf("bootstrap done.\n");
#endif

#ifdef VERBOSE_INIT_ARM
	printf("freemempos=%08lx\n", freemempos);
	printf("MMU enabled. control=%08x\n", cpu_get_control());
#endif

	/* Load memory into UVM. */
	uvm_setpagesize();	/* initialize PAGE_SIZE-dependent variables */
	for (loop = 0; loop < bootconfig.dramblocks; loop++) {
		paddr_t dblk_start = (paddr_t)bootconfig.dram[loop].address;
		paddr_t dblk_end = dblk_start
			+ (bootconfig.dram[loop].pages * PAGE_SIZE);

		/* Clip each block to the free region computed above. */
		if (dblk_start < physical_freestart)
			dblk_start = physical_freestart;
		if (dblk_end > physical_freeend)
			dblk_end = physical_freeend;

		uvm_page_physload(atop(dblk_start), atop(dblk_end),
		    atop(dblk_start), atop(dblk_end), VM_FREELIST_DEFAULT);
	}

	/* Boot strap pmap telling it where the kernel page table is */
	pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE);

#ifdef DDB
	db_machine_init();
#endif
#if NKSYMS || defined(DDB) || defined(MODULAR)
	ksyms_addsyms_elf(symbolsize, ((int *)&end), ((char *)&end) + symbolsize);
#endif

	printf("kernsize=0x%x", kerneldatasize);
	printf(" (including 0x%x symbols)\n", symbolsize);

#ifdef DDB
	if (boothowto & RB_KDB)
		Debugger();
#endif /* DDB */

	/* We return the new stack pointer address */
	return (kernelstack.pv_va + USPACE_SVC_STACK_TOP);
}
867 
#if (NCOM > 0) && defined(COM_PXA2X0)
#ifndef	CONSPEED
#define	CONSPEED 9600
#endif
#ifndef	CONMODE
#define	CONMODE ((TTYDEF_CFLAG & ~(CSIZE | CSTOPB | PARENB)) | CS8) /* 8N1 */
#endif

/* Serial console speed/mode; overridable with the CONSPEED/CONMODE options. */
int comcnspeed = CONSPEED;
int comcnmode = CONMODE;

/*
 * Select the console UART.  The original code spelled the alternative
 * branches "#elsif", which is not a preprocessor directive; inside a
 * skipped group unknown directives are ignored, so BTUARTCONSOLE and
 * STUARTCONSOLE silently fell through to the FFUART default.  Use the
 * correct #elif so those options actually work.
 */
#if defined(HWUARTCONSOLE)
#define	CONADDR	PXA2X0_HWUART_BASE
#elif defined(BTUARTCONSOLE)
#define	CONADDR	PXA2X0_BTUART_BASE
#elif defined(STUARTCONSOLE)
#define	CONADDR	PXA2X0_STUART_BASE
#else
#define	CONADDR	PXA2X0_FFUART_BASE
#endif

bus_addr_t comcnaddr = CONADDR;
#endif	/* NCOM > 0 && COM_PXA2X0 */
891 
892 void
893 consinit(void)
894 {
895 	static int consinit_called = 0;
896 
897 	if (consinit_called != 0)
898 		return;
899 
900 	consinit_called = 1;
901 	if (bootinfo->bi_cnuse == BI_CNUSE_SERIAL) {
902 #if (NCOM > 0) && defined(COM_PXA2X0)
903 		comcnattach(&pxa2x0_a4x_bs_tag, comcnaddr, comcnspeed,
904 		    PXA2X0_COM_FREQ, COM_TYPE_PXA2x0, comcnmode);
905 #endif
906 	} else {
907 #if (NLCD > 0)
908 #if NWZERO3LCD > 0
909 		if (platid_match(&platid,&platid_mask_MACH_SHARP_WZERO3_WS003SH)
910 		 || platid_match(&platid,&platid_mask_MACH_SHARP_WZERO3_WS004SH)
911 		 || platid_match(&platid,&platid_mask_MACH_SHARP_WZERO3_WS007SH)
912 		 || platid_match(&platid,&platid_mask_MACH_SHARP_WZERO3_WS011SH)
913 		 || platid_match(&platid,&platid_mask_MACH_SHARP_WZERO3_WS020SH)) {
914 			extern void wzero3lcd_cnattach(void);
915 			wzero3lcd_cnattach();
916 		}
917 #endif
918 #endif
919 	}
920 }
921 
#ifdef DEBUG_BEFOREMMU
/*
 * Minimal console attach used before the MMU is turned on, so that
 * early initarm() progress can be printed.  Uses the same serial
 * parameters as the real consinit().
 */
static void
fakecninit(void)
{
#if (NCOM > 0) && defined(COM_PXA2X0)
	comcnattach(&pxa2x0_a4x_bs_tag, comcnaddr, comcnspeed,
	    PXA2X0_COM_FREQ, COM_TYPE_PXA2x0, comcnmode);
#endif
}
#endif
932