/*	$NetBSD: nslu2_start.S,v 1.3 2011/01/31 06:28:05 matt Exp $	*/

/*
 * Copyright (c) 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ichiro FUKUHARA, Jason R. Thorpe, and Steve C. Woodford.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <machine/asm.h>
#include <arm/armreg.h>
#include "assym.h"

#include <arm/xscale/ixp425reg.h>

RCSID("$NetBSD: nslu2_start.S,v 1.3 2011/01/31 06:28:05 matt Exp $")

	.section .start,"ax",%progbits

	.global	_C_LABEL(nslu2_start)
_C_LABEL(nslu2_start):
	/*
	 * We will go ahead and disable the MMU here so that we don't
	 * have to worry about flushing caches, etc.
	 *
	 * Note that we may not currently be running VA==PA, which means
	 * we'll need to leap to the next insn after disabling the MMU.
	 */
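	/*
	 * Convert Lunmapped's link-time address to its physical
	 * address: keep the low 24 bits and OR in the SDRAM physical
	 * base (0x10000000 on this board).
	 */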
	adr	r8, Lunmapped
	bic	r8, r8, #0xff000000	/* clear upper 8 bits */
	orr	r8, r8, #0x10000000	/* OR in physical base address */

	mrc	p15, 0, r2, c1, c0, 0
	bic	r2, r2, #CPU_CONTROL_MMU_ENABLE
	orr	r2, r2, #CPU_CONTROL_BEND_ENABLE
	mcr	p15, 0, r2, c1, c0, 0

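	/*
	 * A few nops so that insns already fetched behind the mcr
	 * complete before the control register change takes effect.
	 */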
	nop
	nop
	nop

	mov	pc, r8			/* Heave-ho! */

Lunmapped:
	/*
	 * We want to construct a memory map that maps us
	 * VA==PA (SDRAM at 0x10000000). We create these
	 * mappings uncached and unbuffered to be safe.
	 */

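	/*
	 * Each L1 section descriptor built below carries the 1MB
	 * section base in bits [31:20], AP in [11:10] (kernel r/w),
	 * domain 0 in [8:5], C/B clear in [3:2] (uncached/unbuffered)
	 * and type 0b10 in [1:0].
	 */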
	/*
	 * Step 1: Map the entire address space VA==PA.
	 */
	adr	r0, Ltable
	ldr	r0, [r0]			/* r0 = &l1table */

	mov	r1, #(L1_TABLE_SIZE / 4)	/* 4096 entries */
	mov	r2, #(L1_S_SIZE)		/* 1MB per section */
	mov	r3, #(L1_S_AP_KRW)		/* kernel read/write */
	orr	r3, r3, #(L1_TYPE_S)		/* L1 entry is section */
1:
	str	r3, [r0], #0x04
	add	r3, r3, r2
	subs	r1, r1, #1
	bgt	1b

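	/*
	 * Note: r2 still holds L1_S_SIZE here; the Step 2 loop below
	 * reuses it to step r3 one section per iteration.
	 */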
	/*
	 * Step 2: Map VA 0xc0000000->0xc3ffffff to PA 0x10000000->0x13ffffff.
	 */
	adr	r0, Ltable			/* r0 = &l1table */
	ldr	r0, [r0]

	mov	r3, #(L1_S_AP_KRW)
	orr	r3, r3, #(L1_TYPE_S)
	orr	r3, r3, #0x10000000
	add	r0, r0, #(0xc00 * 4)		/* offset to 0xc00xxxxx */
	mov	r1, #0x40			/* 64MB */
1:
	str	r3, [r0], #0x04
	add	r3, r3, r2
	subs	r1, r1, #1
	bgt	1b
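	/*
	 * 0xc0000000 is KERNEL_BASE, the address the kernel is linked
	 * at, so this is the mapping the final jump below relies on.
	 */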

	/*
	 * Step 3: Map VA 0xf0000000->0xf0100000 to PA 0xc8000000->0xc8100000.
	 */
	adr	r0, Ltable			/* r0 = &l1table */
	ldr	r0, [r0]

	add	r0, r0, #(0xf00 * 4)		/* offset to 0xf0000000 */
	mov	r3, #0xc8000000
	orr	r3, r3, #(L1_S_AP_KRW)
	orr	r3, r3, #(L1_TYPE_S)
	str	r3, [r0]
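	/*
	 * 0xc8000000 is the IXP425 on-chip peripheral space (UARTs,
	 * GPIO, timers), mapped so the console is reachable once the
	 * MMU is on.
	 */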

	/*
	 * Step 4: Map VA 0xf0200000->0xf0300000 to PA 0xcc000000->0xcc100000.
	 */
	adr	r0, Ltable			/* r0 = &l1table */
	ldr	r0, [r0]

	add	r0, r0, #(0xf00 * 4)		/* offset to 0xf0200000 */
	add	r0, r0, #(0x002 * 4)
	mov	r3, #0xcc000000
	orr	r3, r3, #(L1_S_AP_KRW)
	orr	r3, r3, #(L1_TYPE_S)
	str	r3, [r0]
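	/* 0xcc000000 is the IXP425 SDRAM controller's register space. */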

	/* OK!  Page table is set up.  Give it to the CPU. */
	adr	r0, Ltable
	ldr	r0, [r0]
	mcr	p15, 0, r0, c2, c0, 0

	/* Flush the old TLBs, just in case. */
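	/* (c8, c7, 0 invalidates both the I and D TLBs; r0 is ignored.) */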
	mcr	p15, 0, r0, c8, c7, 0

	/* Set the Domain Access register.  Very important! */
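	/* (1 = client access for domain 0, i.e. the AP bits are checked.) */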
	mov	r0, #1
	mcr	p15, 0, r0, c3, c0, 0

	/* Get ready to jump to the "real" kernel entry point... */
	ldr	r1, Lstart
	mov	r1, r1			/* Make sure the load completes! */
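	/*
	 * (XScale stalls on the use of a load result, so the mov above
	 * guarantees r1 is valid before the MMU comes on.)
	 */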

	/* OK, let's enable the MMU. */
	mrc	p15, 0, r2, c1, c0, 0
	orr	r2, r2, #CPU_CONTROL_MMU_ENABLE
	orr	r2, r2, #CPU_CONTROL_BEND_ENABLE
	mcr	p15, 0, r2, c1, c0, 0

	nop
	nop
	nop

	/* CPWAIT sequence to make sure the MMU is on... */
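	/*
	 * (Canonical XScale CPWAIT: read any CP15 register, use the
	 * result, then branch; the mov pc, r1 below is the branch.)
	 */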
	mrc	p15, 0, r2, c2, c0, 0	/* arbitrary read of CP15 */
	mov	r2, r2			/* force it to complete */
	mov	pc, r1			/* leap to kernel entry point! */

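	/*
	 * The L1 table is 16KB and must be 16KB aligned; this places
	 * it immediately below PA 0x10200000, presumably just under
	 * the kernel's load address.
	 */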
Ltable:
	.word	0x10200000 - 0x4000

Lstart:
	.word	start
169