xref: /freebsd/sys/arm/arm/locore.S (revision f05cddf9)
1/*	$NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $	*/
2
3/*-
4 * Copyright 2011 Semihalf
5 * Copyright (C) 1994-1997 Mark Brinicombe
6 * Copyright (C) 1994 Brini
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *	This product includes software developed by Brini.
20 * 4. The name of Brini may not be used to endorse or promote products
21 *    derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
29 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
31 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
32 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
35
36#include "assym.s"
37#include <sys/syscall.h>
38#include <machine/asm.h>
39#include <machine/armreg.h>
40#include <machine/pte.h>
41
42__FBSDID("$FreeBSD$");
43
44/* What size should this really be ? It is only used by initarm() */
45#define INIT_ARM_STACK_SIZE	(2048 * 4)
46
/*
 * CPWAIT_BRANCH: branch to the next instruction.  On ARM, pc reads as
 * "current insn + 8", so subtracting 4 lands exactly one instruction
 * ahead; the branch drains the prefetch pipeline after a CP15 write.
 */
47#define	CPWAIT_BRANCH							 \
48	sub	pc, pc, #4
49
/*
 * CPWAIT(tmp): canonical "wait for a CP15 operation to complete"
 * sequence (read CP15, consume the result, flush the pipeline).
 * Clobbers 'tmp'; used only for its serializing side effect.
 */
50#define	CPWAIT(tmp)							 \
51	mrc	p15, 0, tmp, c2, c0, 0	/* arbitrary read of CP15 */	;\
52	mov	tmp, tmp		/* wait for it to complete */	;\
53	CPWAIT_BRANCH			/* branch to next insn */
54
55/*
56 * This is for kvm_mkdb, and should be the address of the beginning
57 * of the kernel text segment (not necessarily the same as kernbase).
58 */
59	.text
60	.align	0
/*
 * Export the kernel virtual base and physical load address as absolute
 * symbols so debuggers and libkvm can find them in the symbol table.
 */
61.globl kernbase
62.set kernbase,KERNBASE
63.globl physaddr
64.set physaddr,PHYSADDR
65
66/*
67 * On entry for FreeBSD boot ABI:
68 *	r0 - metadata pointer or 0 (boothowto on AT91's boot2)
69 *	r1 - if (r0 == 0) then metadata pointer
70 * On entry for Linux boot ABI:
71 *	r0 - 0
72 *	r1 - machine type (passed as arg2 to initarm)
73 *	r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm)
74 *
75 * For both types of boot we gather up the args, put them in a struct arm_boot_params
76 * structure and pass that to initarm.
77 */
78ENTRY_NP(btext)
79ASENTRY_NP(_start)
80	STOP_UNWINDING		/* Can't unwind into the bootloader! */
81
	/*
	 * Stash the four bootloader arguments in registers that survive
	 * the cache/MMU manipulation below; they are spilled into the
	 * on-stack arm_boot_params structure at virt_done.
	 */
82	mov	r9, r0		/* 0 or boot mode from boot2 */
83	mov	r8, r1		/* Save Machine type */
84	mov	ip, r2		/* Save meta data */
85	mov	fp, r3		/* Future expantion */
86
87	/* Make sure interrupts are disabled. */
88	mrs	r7, cpsr
89	orr	r7, r7, #(I32_bit|F32_bit)
90	msr	cpsr_c, r7
91
92#if defined (FLASHADDR) && defined(LOADERRAMADDR)
93	/* Check if we're running from flash. */
94	ldr	r7, =FLASHADDR
95	/*
96	 * If we're running with MMU disabled, test against the
97	 * physical address instead.
98	 */
99	mrc     p15, 0, r2, c1, c0, 0
100	ands	r2, r2, #CPU_CONTROL_MMU_ENABLE
101	ldreq	r6, =PHYSADDR
102	ldrne	r6, =LOADERRAMADDR
103	cmp	r7, r6
104	bls 	flash_lower
105	cmp	r7, pc
106	bhi	from_ram
107	b	do_copy
108
109flash_lower:
110	cmp	r6, pc
111	bls	from_ram
	/*
	 * Executing from flash: copy the image (through _edata) into RAM
	 * at its link-time offset from KERNBASE, then jump into the copy.
	 */
112do_copy:
113	ldr	r7, =KERNBASE
114	adr	r1, _start		/* src = where we execute now */
115	ldr	r0, Lreal_start
116	ldr	r2, Lend
117	sub	r2, r2, r0		/* len = _edata - _start (link addrs) */
118	sub	r0, r0, r7		/* dst = RAM base + (_start - KERNBASE) */
119	add	r0, r0, r6
120	mov	r4, r0			/* r4 preserved across memcpy */
121	bl	memcpy
122	ldr	r0, Lram_offset
123	add	pc, r4, r0		/* jump to from_ram inside the RAM copy */
124Lram_offset:	.word from_ram-_C_LABEL(_start)
125from_ram:
126	nop
127#endif
	/*
	 * Compute the physical address of Lunmapped: strip the high
	 * nibble of the current (possibly virtual) address and substitute
	 * PHYSADDR, so execution can continue once the MMU is off.
	 */
128	adr	r7, Lunmapped
129	bic     r7, r7, #0xf0000000
130	orr     r7, r7, #PHYSADDR
131
132
133disable_mmu:
134	/* Disable MMU for a while */
135	mrc     p15, 0, r2, c1, c0, 0
136	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
137	    CPU_CONTROL_WBUF_ENABLE)
138	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
139	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
140	mcr     p15, 0, r2, c1, c0, 0
141
142	nop
143	nop
144	nop
145	mov	pc, r7			/* continue at Lunmapped (phys addr) */
146Lunmapped:
147#ifdef STARTUP_PAGETABLE_ADDR
148	/* build page table from scratch */
149	ldr	r0, Lstartup_pagetable
150	adr	r4, mmu_init_table
151	b	3f
152
	/*
	 * Inner loop: store r1 consecutive 1MB section descriptors at
	 * table offset r2, descriptor r3, advancing the PA by L1_S_SIZE.
	 */
1532:
154	str	r3, [r0, r2]
155	add	r2, r2, #4
156	add	r3, r3, #(L1_S_SIZE)
157	adds	r1, r1, #-1
158	bhi	2b
	/*
	 * Outer loop: fetch the next {count, table offset, PA|attr}
	 * triple from mmu_init_table; count == 0 terminates.  The branch
	 * back to 2b must go via its physical address (MMU is off).
	 */
1593:
160	ldmia	r4!, {r1,r2,r3}   /* # of sections, VA, PA|attr */
161	cmp	r1, #0
162	adrne	r5, 2b
163	bicne	r5, r5, #0xf0000000
164	orrne	r5, r5, #PHYSADDR
165	movne	pc, r5
166
167#if defined(SMP)
168	orr 	r0, r0, #2		/* Set TTB shared memory flag */
169#endif
170	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
171	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */
172
173#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B)
174	mov	r0, #0
175	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */
176#endif
177
178	/* Set the Domain Access register.  Very important! */
179	mov     r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
180	mcr	p15, 0, r0, c3, c0, 0
181	/*
182	 * Enable MMU.
183	 * On armv6 enable extended page tables, and set alignment checking
184	 * to modulo-4 (CPU_CONTROL_UNAL_ENABLE) for the ldrd/strd
185	 * instructions emitted by clang.
186	 */
187	mrc	p15, 0, r0, c1, c0, 0
188#ifdef _ARM_ARCH_6
189	orr	r0, r0, #(CPU_CONTROL_V6_EXTPAGE | CPU_CONTROL_UNAL_ENABLE)
	/*
	 * NOTE(review): the next line sets AFLT in r2, but r2 is never
	 * written back to CP15 -- only r0 is (below).  This looks like
	 * dead code or a typo for r0.  Left untouched: silently enabling
	 * strict alignment faults would change boot behavior; confirm
	 * intent before fixing.
	 */
190	orr	r2, r2, #(CPU_CONTROL_AFLT_ENABLE)
191	orr	r0, r0, #(CPU_CONTROL_AF_ENABLE)
192#endif
193	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE)
194	mcr	p15, 0, r0, c1, c0, 0
195	nop
196	nop
197	nop
198	CPWAIT(r0)
199
200#endif
201mmu_done:
202	nop
	/*
	 * Zero the bss (_edata.._end) and load the initial SVC stack
	 * pointer from the three words at .Lstart: {_edata, _end, stack top}.
	 */
203	adr	r1, .Lstart
204	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
205	sub	r2, r2, r1		/* get zero init data */
206	mov	r3, #0
207.L1:
208	str	r3, [r1], #0x0004	/* get zero init data */
209	subs	r2, r2, #4
210	bgt	.L1
211	ldr	pc, .Lvirt_done		/* absolute jump into the KVA mapping */
212
213virt_done:
	/*
	 * Build a struct arm_boot_params on the stack from the registers
	 * saved at entry and hand it to initarm(), then mi_startup().
	 */
214	mov	r1, #20			/* loader info size is 20 bytes also second arg */
215	subs	sp, sp, r1		/* allocate arm_boot_params struct on stack */
216	bic	sp, sp, #7		/* align stack to 8 bytes */
217	mov	r0, sp			/* loader info pointer is first arg */
218	str	r1, [r0]		/* Store length of loader info */
219	str	r9, [r0, #4]		/* Store r0 from boot loader */
220	str	r8, [r0, #8]		/* Store r1 from boot loader */
221	str	ip, [r0, #12]		/* store r2 from boot loader */
222	str	fp, [r0, #16]		/* store r3 from boot loader */
223	mov	fp, #0			/* trace back starts here */
224	bl	_C_LABEL(initarm)	/* Off we go */
225
226	/* init arm will return the new stack pointer. */
227	mov	sp, r0
228
229	bl	_C_LABEL(mi_startup)		/* call mi_startup()! */
230
231	adr	r0, .Lmainreturned
232	b	_C_LABEL(panic)
233	/* NOTREACHED */
234#ifdef STARTUP_PAGETABLE_ADDR
	/*
	 * MMU_INIT emits one {#sections, L1 table byte offset, PA|attrs}
	 * triple for the table-build loop at Lunmapped above.
	 */
235#define MMU_INIT(va,pa,n_sec,attr) \
236	.word	n_sec					    ; \
237	.word	4*((va)>>L1_S_SHIFT)			    ; \
238	.word	(pa)|(attr)				    ;
239
/* Literal pool for the pre-MMU code above. */
240Lvirtaddr:
241	.word	KERNVIRTADDR
242Lphysaddr:
243	.word	KERNPHYSADDR
244Lreal_start:
245	.word	_start
246Lend:
247	.word	_edata
248Lstartup_pagetable:
249	.word	STARTUP_PAGETABLE_ADDR
250#ifdef SMP
251Lstartup_pagetable_secondary:
252	.word	temp_pagetable
253#endif
254END(btext)
255END(_start)
256
/*
 * Bootstrap section-mapping table consumed by the loop at Lunmapped:
 * each MMU_INIT() entry maps n_sec 1MB sections; a count of 0 ends it.
 */
257mmu_init_table:
258	/* fill all table VA==PA */
259	/* map SDRAM VA==PA, WT cacheable */
260#if !defined(SMP)
261	MMU_INIT(PHYSADDR, PHYSADDR , 64, L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
262	/* map VA 0xc0000000..0xc3ffffff to PA */
263	MMU_INIT(KERNBASE, PHYSADDR, 64, L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
264#else
	/* SMP: same mappings, but with the L1_SHARED attribute set. */
265	MMU_INIT(PHYSADDR, PHYSADDR , 64, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW))
266	/* map VA 0xc0000000..0xc3ffffff to PA */
267	MMU_INIT(KERNBASE, PHYSADDR, 64, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW))
268	MMU_INIT(0x48000000, 0x48000000, 1, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW))
269#if defined(CPU_MV_PJ4B)
270	/* map VA 0xf1000000..0xf1100000 to PA 0xd0000000 */
271	MMU_INIT(0xf1000000, 0xd0000000, 1, L1_TYPE_S|L1_SHARED|L1_S_B|L1_S_AP(AP_KRW))
272#endif /* CPU_MV_PJ4B */
273#endif /* SMP */
274	.word 0	/* end of table */
275#endif
/*
 * Literal pool read by mmu_done (and mpentry): bss start/end and the
 * top of the initial SVC-mode boot stack.
 */
276.Lstart:
277	.word	_edata
278	.word	_end
279	.word	svcstk + INIT_ARM_STACK_SIZE
280
/* Virtual (KVA) continuation addresses loaded into pc after MMU enable. */
281.Lvirt_done:
282	.word	virt_done
283#if defined(SMP)
284.Lmpvirt_done:
285	.word	mpvirt_done
286#endif
287
/* Panic message used if mi_startup() ever returns. */
288.Lmainreturned:
289	.asciz	"main() returned"
290	.align	0
291
292	.bss
/* Boot-time SVC-mode stack, used until initarm() returns the real one. */
293svcstk:
294	.space	INIT_ARM_STACK_SIZE
295
296	.text
297	.align	0
298
/* Address of the per-CPU-type function table; used by cpu_halt below. */
299.Lcpufuncs:
300	.word	_C_LABEL(cpufuncs)
301
302#if defined(SMP)
303Lsramaddr:
304	.word	0xffff0080
305
/*
 * AP_DEBUG(tmp): when enabled (#if 1), stores the marker 'tmp' at
 * 0xffff0080 + (CP15 c0,c0,5 value << 2) -- a per-CPU progress word,
 * presumably in on-chip SRAM (TODO confirm).  Clobbers r0/r1.
 * Compiled out by default.
 */
306#if 0
307#define	AP_DEBUG(tmp)			\
308	mrc	p15, 0, r1, c0, c0, 5;	\
309	ldr	r0, Lsramaddr;		\
310	add	r0, r1, lsl #2;		\
311	mov	r1, tmp;		\
312	str	r1, [r0], #0x0000;
313#else
314#define AP_DEBUG(tmp)
315#endif
316
317
/*
 * Secondary-CPU trampoline: invalidate caches, force SVC32 mode, then
 * fetch this CPU's boot address from the register block at Lpmureg
 * (0xd0022124 + cpuid * 0x100) and jump to it.  Never returns here.
 */
318ASENTRY_NP(mptramp)
319	mov	r0, #0
320	mcr	p15, 0, r0, c7, c7, 0	/* invalidate I+D caches */
321
322	AP_DEBUG(#1)
323
	/* Switch to SVC32 mode regardless of what the firmware left us in. */
324	mrs	r3, cpsr_all
325	bic	r3, r3, #(PSR_MODE)
326	orr	r3, r3, #(PSR_SVC32_MODE)
327        msr	cpsr_all, r3
328
329	mrc	p15, 0, r0, c0, c0, 5
330	and	r0, #0x0f		/* Get CPU ID */
331
332	/* Read boot address for CPU */
333	mov	r1, #0x100
334	mul	r2, r0, r1
335	ldr	r1, Lpmureg
336	add	r0, r2, r1
337	ldr	r1, [r0], #0x00
338
339	mov pc, r1
340
/* Base of the per-CPU boot-address registers (board-specific PMU). */
341Lpmureg:
342        .word   0xd0022124
343END(mptramp)
344
/*
 * Secondary-CPU kernel entry: mirrors the boot CPU's MMU bring-up using
 * the page table published in temp_pagetable, carves a private stack
 * out of svcstk, then calls init_secondary().  Never returns.
 */
345ASENTRY_NP(mpentry)
346
347	AP_DEBUG(#2)
348
349	/* Make sure interrupts are disabled. */
350	mrs	r7, cpsr
351	orr	r7, r7, #(I32_bit|F32_bit)
352	msr	cpsr_c, r7
353
354
	/*
	 * NOTE(review): r7 is set to the physical address of Ltag here but
	 * never jumped to -- execution simply falls through the MMU-disable
	 * sequence into Ltag.  Presumably leftover from the boot-CPU path
	 * (which does "mov pc, r7"); confirm before relying on it.
	 */
355	adr     r7, Ltag
356	bic     r7, r7, #0xf0000000
357	orr     r7, r7, #PHYSADDR
358
359	/* Disable MMU for a while */
360	mrc	p15, 0, r2, c1, c0, 0
361	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
362	    CPU_CONTROL_WBUF_ENABLE)
363	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
364	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
365	mcr	p15, 0, r2, c1, c0, 0
366
367	nop
368	nop
369	nop
370
371	AP_DEBUG(#3)
372
373Ltag:
	/* Load the page-table physical address stored in temp_pagetable. */
374	ldr	r0, Lstartup_pagetable_secondary
375	bic	r0, r0, #0xf0000000
376	orr	r0, r0, #PHYSADDR
377	ldr	r0, [r0]
378#if defined(SMP)
	/*
	 * NOTE(review): or-ing #0 is a no-op; the boot CPU ors in #2 to set
	 * the TTB shared bit (see Lunmapped).  Looks like a bug, but
	 * changing the TTB attributes here would alter behavior -- confirm.
	 */
379	orr 	r0, r0, #0		/* Set TTB shared memory flag */
380#endif
381	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
382	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */
383
384#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA)
385	mov	r0, #0
386	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */
387#endif
388
389	AP_DEBUG(#4)
390
391	/* Set the Domain Access register.  Very important! */
392	mov	r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
393	mcr	p15, 0, r0, c3, c0, 0
394	/* Enable MMU */
395	mrc	p15, 0, r0, c1, c0, 0
396#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA)
397	orr	r0, r0, #CPU_CONTROL_V6_EXTPAGE
398#endif
399	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE)
400	mcr	p15, 0, r0, c1, c0, 0
401	nop
402	nop
403	nop
404	CPWAIT(r0)
405
	/*
	 * Carve a private 2KB stack for this CPU out of svcstk:
	 * sp = (svcstk top from .Lstart) - cpuid * 2048.
	 */
406	adr	r1, .Lstart
407	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
408	mrc	p15, 0, r0, c0, c0, 5
409	and	r0, r0, #15		/* CPU id */
410	mov	r1, #2048
411	mul	r2, r1, r0
412	sub	sp, sp, r2
413	str	r1, [sp]
414	ldr	pc, .Lmpvirt_done	/* absolute jump into the KVA mapping */
415
416mpvirt_done:
417
418	mov	fp, #0			/* trace back starts here */
419	bl	_C_LABEL(init_secondary)	/* Off we go */
420
421	adr	r0, .Lmpreturned
422	b	_C_LABEL(panic)
423	/* NOTREACHED */
424
/*
 * NOTE(review): message text says "main()" though this is the AP path
 * (init_secondary); text left as-is since it is a runtime string.
 */
425.Lmpreturned:
426	.asciz	"main() returned"
427	.align	0
428END(mpentry)
429#endif
430
/*
 * void cpu_halt(void)
 * Reset the machine: force SVC32 mode with interrupts masked, write
 * back and invalidate the caches through the cpufuncs table, then jump
 * to the board-supplied *cpu_reset_address with the CP15 control
 * register reprogrammed (MMU/caches off).  Never returns.
 */
431ENTRY_NP(cpu_halt)
432	mrs     r2, cpsr
433	bic	r2, r2, #(PSR_MODE)
434	orr     r2, r2, #(PSR_SVC32_MODE)
435	orr	r2, r2, #(I32_bit | F32_bit)
436	msr     cpsr_all, r2
437
438	ldr	r4, .Lcpu_reset_address
439	ldr	r4, [r4]		/* r4 = *cpu_reset_address */
440
	/* Manual indirect calls: "mov lr, pc" provides the return address. */
441	ldr	r0, .Lcpufuncs
442	mov	lr, pc
443	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]
444	mov	lr, pc
445	ldr	pc, [r0, #CF_L2CACHE_WBINV_ALL]
446
447	/*
448	 * Load the cpu_reset_needs_v4_MMU_disable flag to determine if it's
449	 * necessary.
450	 */
451
452	ldr	r1, .Lcpu_reset_needs_v4_MMU_disable
453	ldr	r1, [r1]
454	cmp	r1, #0			/* flags consumed by mcrne below */
455	mov	r2, #0
456
457	/*
458	 * MMU & IDC off, 32 bit program & data space
459	 * Hurl ourselves into the ROM
460	 */
461	mov	r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE)
462	mcr     15, 0, r0, c1, c0, 0
463	mcrne   15, 0, r2, c8, c7, 0 	/* nail I+D TLB on ARMv4 and greater */
464	mov     pc, r4
465
466	/*
467	 * _cpu_reset_address contains the address to branch to, to complete
468	 * the cpu reset after turning the MMU off
469	 * This variable is provided by the hardware specific code
470	 */
471.Lcpu_reset_address:
472	.word	_C_LABEL(cpu_reset_address)
473
474	/*
475	 * cpu_reset_needs_v4_MMU_disable contains a flag that signals if the
476	 * v4 MMU disable instruction needs executing... it is an illegal instruction
477	 * on f.e. ARM6/7 that locks up the computer in an endless illegal
478	 * instruction / data-abort / reset loop.
479	 */
480.Lcpu_reset_needs_v4_MMU_disable:
481	.word	_C_LABEL(cpu_reset_needs_v4_MMU_disable)
482END(cpu_halt)
483
484
485/*
486 * setjmp() / longjmp() support
487 */
/*
 * int setjmp(jmp_buf env)
 * Save r4-r14 (callee-saved registers plus sp and lr) into env and
 * return 0.  The matching longjmp restores them and returns 1.
 */
488ENTRY(setjmp)
489	stmia	r0, {r4-r14}
490	mov	r0, #0x00000000
491	RET
492END(setjmp)
493
/*
 * int longjmp(jmp_buf env)
 * Restore r4-r14 as saved by setjmp and return 1; control resumes at
 * the setjmp call site via the restored lr.
 */
494ENTRY(longjmp)
495	ldmia	r0, {r4-r14}
496	mov	r0, #0x00000001
497	RET
498END(longjmp)
499
500	.data
	/* esym: end-of-symbol-table pointer, initialized to the kernel 'end'. */
501	.global _C_LABEL(esym)
502_C_LABEL(esym):	.word	_C_LABEL(end)
503
/* abort(): branch to our own entry point -- spin forever, never returns. */
504ENTRY_NP(abort)
505	b	_C_LABEL(abort)
506END(abort)
507
/*
 * Signal trampoline (copied out for user-mode signal return).  On entry
 * sp is passed as the argument to SYS_sigreturn -- presumably pointing
 * at the sigframe set up by the kernel (confirm against sendsig).
 * The pc-relative offsets (#12, #8, ". - 16") are hard-wired to the two
 * .word constants below: do NOT insert or remove instructions here, and
 * esigcode/szsigcode must continue to bracket exactly this code.
 */
508ENTRY_NP(sigcode)
509	mov	r0, sp
510
511	/*
512	 * Call the sigreturn system call.
513	 *
514	 * We have to load r7 manually rather than using
515	 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
516	 * correct. Using the alternative places esigcode at the address
517	 * of the data rather than the address one past the data.
518	 */
519
520	ldr	r7, [pc, #12]	/* Load SYS_sigreturn */
521	swi	SYS_sigreturn
522
523	/* Well if that failed we better exit quick ! */
524
525	ldr	r7, [pc, #8]	/* Load SYS_exit */
526	swi	SYS_exit
527
528	/* Branch back to retry SYS_sigreturn */
529	b	. - 16
530
531	.word	SYS_sigreturn
532	.word	SYS_exit
533
534	.align	0
535	.global _C_LABEL(esigcode)
536		_C_LABEL(esigcode):
537
538	.data
	/* szsigcode: byte length of the trampoline, exported to C code. */
539	.global szsigcode
540szsigcode:
541	.long esigcode-sigcode
542END(sigcode)
543/* End of locore.S */
544