/*-
 * Copyright (C) 2007-2008 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "assym.s"

#include <machine/asm.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/psl.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>
#include <machine/bootinfo.h>

#define TMPSTACKSZ	16384

	.text
	.globl	btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.globl	kernel_text
kernel_text:

/*
 * Startup entry.  Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:

/*
 * Assumptions on the boot loader:
 *  - system memory starts from physical address 0
 *  - it's mapped by a single TLB1 entry
 *  - TLB1 mapping is 1:1 pa to va
 *  - kernel is loaded at a 16MB boundary
 *  - all PID registers are set to the same value
 *  - CPU is running in AS=0
 *
 * Register contents provided by loader(8):
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 *  - find the TLB1 entry we started in
 *  - make sure it's protected, invalidate other entries
 *  - create a temp entry in the second AS (make sure it's not TLB1[1])
 *  - switch to the temp mapping
 *  - map 16MB of RAM in TLB1[1]
 *  - use AS=1, set EPN to KERNBASE and RPN to the kernel load address
 *  - switch to the TLB1[1] mapping
 *  - invalidate the temp mapping
 *
 * locore register use:
 *	r1	: stack pointer
 *	r2	: trace pointer (AP only, for early diagnostics)
 *	r3-r27	: scratch registers
 *	r28	: kernload
 *	r29	: temp TLB1 entry
 *	r30	: initial TLB1 entry we started in
 *	r31	: metadata pointer
 */
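
/*
 * For reference, the e500 MMU assist (MAS) registers used throughout
 * this file:
 *	MAS0	: TLBSEL (TLB0/TLB1) and ESEL (entry index)
 *	MAS1	: VALID, IPROT, TID, TS and TSIZE fields
 *	MAS2	: EPN (effective page number) and WIMGE attributes
 *	MAS3	: RPN (real page number) and permission bits
 * tlbre/tlbwe read/write the entry selected by MAS0, while tlbsx looks
 * an entry up by effective address and fills MAS0-MAS3 on a hit.
 */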

/*
 * Keep metadata ptr in r31 for later use.
 */
	mr	%r31, %r3

/*
 * Initial cleanup
 */
	li	%r3, PSL_DE	/* Keep debug exceptions for CodeWarrior. */
	mtmsr	%r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Locate the TLB1 entry that maps this code
 */
	bl	1f
1:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is returned in r30 */

	bl	tlb1_inval_all_but_current
/*
 * Create temporary mapping in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
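	/*
	 * The bl/mflr pair below obtains our current address; +20 skips
	 * the five 4-byte instructions from label 2 up to and including
	 * the rfi, so execution resumes right after the rfi, now fetched
	 * through the temporary AS=1 mapping.
	 */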
	bl	2f
2:	mflr	%r4
	addi	%r4, %r4, 20
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
	mr	%r3, %r30
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 16 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 1			/* Entry 1 */
	rlwimi	%r3, %r4, 16, 12, 15	/* Insert entry number into MAS0[ESEL] */
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_16M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	lis	%r3, KERNBASE@h
	ori	%r3, %r3, KERNBASE@l	/* EPN = KERNBASE */
	mtspr	SPR_MAS2, %r3
	isync

	/* Discover phys load address */
	bl	3f
3:	mflr	%r4			/* Use current address */
	rlwinm	%r4, %r4, 0, 0, 7	/* 16MB alignment mask */
	mr	%r28, %r4		/* Keep kernel load address */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4		/* Set RPN and protection */
	isync
	tlbwe
	isync
	msync
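
	/*
	 * TLB1[1] now maps the 16 MB at KERNBASE (EPN) to the physical
	 * load address kept in r28 (RPN), supervisor R/W/X, TS=0.
	 */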

	/* Switch to the above TLB1[1] mapping */
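	/*
	 * Compute our current offset within the 16 MB image and rebase
	 * it onto KERNBASE; +36 skips the nine 4-byte instructions from
	 * label 4 up to and including the rfi, so we resume at the
	 * instruction after the rfi, now running at the kernel virtual
	 * address in AS=0 through TLB1[1].
	 */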
	bl	4f
4:	mflr	%r4
	rlwinm	%r4, %r4, 0, 8, 31	/* Current offset from kernel load address */
	rlwinm	%r3, %r3, 0, 0, 19
	add	%r4, %r4, %r3		/* Convert to kernel virtual address */
	addi	%r4, %r4, 36
	li	%r3, PSL_DE		/* Note AS=0 */
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Save kernel load address for later use.
 */
	lis	%r3, kernload@ha
	addi	%r3, %r3, kernload@l
	stw	%r28, 0(%r3)

/*
 * Setup a temporary stack
 */
	lis	%r1, tmpstack@ha
	addi	%r1, %r1, tmpstack@l
	addi	%r1, %r1, (TMPSTACKSZ - 8)
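	/* The stack grows down; r1 now points 8 bytes below the top of tmpstack. */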

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

/*
 * Set up arguments and jump to system initialization code
 */
	lis	%r3, kernel_text@ha
	addi	%r3, %r3, kernel_text@l
	lis	%r4, _end@ha
	addi	%r4, %r4, _end@l
	mr	%r5, %r31		/* metadata ptr */

	/* Prepare e500 core */
	bl	e500_init
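
	/*
	 * e500_init() hands back thread0's kernel stack pointer in r3;
	 * the zero stored below terminates the stack frame back-chain.
	 */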

	/* Switch to thread0.td_kstack now */
	mr	%r1, %r3
	li	%r3, 0
	stw	%r3, 0(%r1)

	/* Machine independent part, does not return */
	bl	mi_startup
	/* NOT REACHED */
5:	b	5b

/*
 * Invalidate all entries in the given TLB.
 *
 * r3	TLBSEL
 */
tlb_inval_all:
	rlwinm	%r3, %r3, 3, 0x18	/* TLBSEL */
	ori	%r3, %r3, 0x4		/* INVALL */
	tlbivax	0, %r3
	isync
	msync

	tlbsync
	msync
	blr

/*
 * Expects the address to look up in r3; returns the entry number in r30.
 *
 * FIXME: the hidden assumption is we are now running in AS=0, but we should
 * retrieve the actual AS from MSR[IS|DS] and put it in MAS6[SAS]
 */
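/*
 * The tlbsx below searches for the effective address in r3 using the
 * PID programmed into MAS6[SPID0]; on a hit it fills MAS0-MAS3 with
 * the matching entry.
 */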
tlb1_find_current:
	mfspr	%r17, SPR_PID0
	slwi	%r17, %r17, MAS6_SPID0_SHIFT
	mtspr	SPR_MAS6, %r17
	isync
	tlbsx	0, %r3
	mfspr	%r17, SPR_MAS0
	rlwinm	%r30, %r17, 16, 20, 31		/* MAS0[ESEL] -> r30 */

	/* Make sure we have IPROT set on the entry */
	mfspr	%r17, SPR_MAS1
	oris	%r17, %r17, MAS1_IPROT@h
	mtspr	SPR_MAS1, %r17
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Invalidates a single entry in TLB1.
 *
 * r3		ESEL
 * r4-r5	scratched
 */
tlb1_inval_entry:
	lis	%r4, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r4, %r3, 16, 12, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r4
	isync
	tlbre
	li	%r5, 0			/* MAS1[V] = 0 */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Create a temporary mapping in AS=1 that duplicates the translation
 * we are currently running from.
 *
 * r30		current entry number
 * r29		returned temp entry
 * r3-r5	scratched
 */
tlb1_temp_mapping_as1:
	/* Read our current translation */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r30, 16, 12, 15	/* Select our current entry */
	mtspr	SPR_MAS0, %r3
	isync
	tlbre

	/*
	 * Prepare and write temp entry
	 *
	 * FIXME: this is not robust against overflow, i.e. when the current
	 * entry is the last in TLB1
	 */
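	/*
	 * Use the slot after the current one, but skip TLB1[1], which is
	 * reserved for the final kernel mapping.
	 */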
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	addi	%r29, %r30, 1		/* Use next entry. */
	li	%r4, 1
	cmpw	%r4, %r29
	bne	1f
	addi	%r29, %r29, 1
1:	rlwimi	%r3, %r29, 16, 12, 15	/* Select temp entry */
	mtspr	SPR_MAS0, %r3
	isync
	mfspr	%r5, SPR_MAS1
	li	%r4, 1			/* AS=1 */
	rlwimi	%r5, %r4, 12, 19, 19	/* Set MAS1[TS] */
	li	%r4, 0			/* Global mapping, TID=0 */
	rlwimi	%r5, %r4, 16, 8, 15	/* Clear MAS1[TID] */
	oris	%r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Loop over TLB1, invalidating all entries except the one that
 * currently maps this code.
 *
 * r30		current entry
 * r3-r6	scratched
 */
tlb1_inval_all_but_current:
	mr	%r6, %r3
	mfspr	%r3, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r3, %r3, TLBCFG_NENTRY_MASK@l
	li	%r4, 0			/* Start from Entry 0 */
1:	lis	%r5, MAS0_TLBSEL1@h
	rlwimi	%r5, %r4, 16, 12, 15
	mtspr	SPR_MAS0, %r5
	isync
	tlbre
	mfspr	%r5, SPR_MAS1
	cmpw	%r4, %r30		/* our current entry? */
	beq	2f
	rlwinm	%r5, %r5, 0, 2, 31	/* clear VALID and IPROT bits */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
2:	addi	%r4, %r4, 1
	cmpw	%r4, %r3		/* Check if this is the last entry */
	bne	1b
	blr

/************************************************************************/
/* locore subroutines */
/************************************************************************/

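/*
 * Program the common base address (IVPR) and per-exception offsets
 * (IVORn) of the interrupt handler routines provided by trap_subr.S.
 *
 * r3	scratched
 */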
ivor_setup:
	/* Set base address of interrupt handler routines */
	lis	%r3, interrupt_vector_base@h
	mtspr	SPR_IVPR, %r3

	/* Assign interrupt handler routine offsets */
	li	%r3, int_critical_input@l
	mtspr	SPR_IVOR0, %r3
	li	%r3, int_machine_check@l
	mtspr	SPR_IVOR1, %r3
	li	%r3, int_data_storage@l
	mtspr	SPR_IVOR2, %r3
	li	%r3, int_instr_storage@l
	mtspr	SPR_IVOR3, %r3
	li	%r3, int_external_input@l
	mtspr	SPR_IVOR4, %r3
	li	%r3, int_alignment@l
	mtspr	SPR_IVOR5, %r3
	li	%r3, int_program@l
	mtspr	SPR_IVOR6, %r3
	li	%r3, int_syscall@l
	mtspr	SPR_IVOR8, %r3
	li	%r3, int_decrementer@l
	mtspr	SPR_IVOR10, %r3
	li	%r3, int_fixed_interval_timer@l
	mtspr	SPR_IVOR11, %r3
	li	%r3, int_watchdog@l
	mtspr	SPR_IVOR12, %r3
	li	%r3, int_data_tlb_error@l
	mtspr	SPR_IVOR13, %r3
	li	%r3, int_inst_tlb_error@l
	mtspr	SPR_IVOR14, %r3
	li	%r3, int_debug@l
	mtspr	SPR_IVOR15, %r3
	blr

/*
 * void tid_flush(tlbtid_t tid);
 *
 * Invalidate all TLB0 entries which match the given TID.  Note: this is
 * intended for cases where the invalidation should NOT be propagated to
 * other CPUs.
 *
 * Global vars tlb0_ways, tlb0_entries_per_way are assumed to have been
 * set up correctly (by tlb0_get_tlbconf()).
 */
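/*
 * TLB0 is set-associative: MAS2[EPN] bits select the set and MAS0[ESEL]
 * selects the way, hence the way x entry walk below.
 */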
ENTRY(tid_flush)
	cmpwi	%r3, TID_KERNEL
	beq	tid_flush_end	/* don't evict kernel translations */

	/* Number of TLB0 ways */
	lis	%r4, tlb0_ways@h
	ori	%r4, %r4, tlb0_ways@l
	lwz	%r4, 0(%r4)

	/* Number of entries / way */
	lis	%r5, tlb0_entries_per_way@h
	ori	%r5, %r5, tlb0_entries_per_way@l
	lwz	%r5, 0(%r5)

	/* Disable interrupts */
	mfmsr	%r10
	wrteei	0

	li	%r6, 0		/* ways counter */
loop_ways:
	li	%r7, 0		/* entries [per way] counter */
loop_entries:
	/* Select TLB0 and ESEL (way) */
	lis	%r8, MAS0_TLBSEL0@h
	rlwimi	%r8, %r6, 16, 14, 15
	mtspr	SPR_MAS0, %r8
	isync

	/* Select EPN (entry within the way) */
	rlwinm	%r8, %r7, 12, 13, 19
	mtspr	SPR_MAS2, %r8
	isync
	tlbre

	/* Check if valid entry */
	mfspr	%r8, SPR_MAS1
	andis.	%r9, %r8, MAS1_VALID@h
	beq	next_entry	/* invalid entry */

	/* Check if this is our TID */
	rlwinm	%r9, %r8, 16, 24, 31	/* Extract MAS1[TID] into r9 */

	cmplw	%r9, %r3
	bne	next_entry	/* not our TID */

	/* Clear VALID bit */
	rlwinm	%r8, %r8, 0, 1, 31
	mtspr	SPR_MAS1, %r8
	isync
	tlbwe
	isync
	msync

next_entry:
	addi	%r7, %r7, 1
	cmpw	%r7, %r5
	bne	loop_entries

	/* Next way */
	addi	%r6, %r6, 1
	cmpw	%r6, %r4
	bne	loop_ways

	/* Restore MSR (possibly re-enable interrupts) */
	mtmsr	%r10
	isync

tid_flush_end:
	blr

/*
 * Cache disable/enable/inval sequences according to section 2.16 of
 * the E500CORE RM.
 */
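/*
 * Note: the flash-invalidate bits (L1CSR0[DCFI], L1CSR1[ICFI]) clear
 * themselves when the invalidation completes, which is what the busy
 * loops below wait for.
 */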
ENTRY(dcache_inval)
	/* Invalidate d-cache */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR0
	andi.	%r3, %r3, L1CSR0_DCFI
	bne	1b
	blr

ENTRY(dcache_disable)
	/* Disable d-cache */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(dcache_enable)
	/* Enable d-cache */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(icache_inval)
	/* Invalidate i-cache */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR1
	andi.	%r3, %r3, L1CSR1_ICFI
	bne	1b
	blr

ENTRY(icache_disable)
	/* Disable i-cache */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

ENTRY(icache_enable)
	/* Enable i-cache */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

/*
 * int setfault()
 *
 * Similar to setjmp: set up for handling faults on accesses to user
 * memory.  Any routine using this may only call bcopy, either the form
 * below or the (currently used) optimized C version, which is written
 * so that it uses no non-volatile registers.
 */
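/*
 * Returns 0 when called directly.  If a fault is later taken while
 * pcb_onfault is set, the trap code unwinds through the context saved
 * here and the caller sees a non-zero return value instead.
 */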
	.globl	setfault
setfault:
	mflr	%r0
	mfsprg0	%r4
	lwz	%r4, PC_CURTHREAD(%r4)
	lwz	%r4, TD_PCB(%r4)
	stw	%r3, PCB_ONFAULT(%r4)
	mfcr	%r10
	mfctr	%r11
	mfxer	%r12
	stw	%r0, 0(%r3)
	stw	%r1, 4(%r3)
	stw	%r2, 8(%r3)
	stmw	%r10, 12(%r3)		/* store CR, CTR, XER, [r13 .. r31] */
	li	%r3, 0			/* return FALSE */
	blr

/************************************************************************/
/* Data section								*/
/************************************************************************/
	.data
	.align	4
tmpstack:
	.space	TMPSTACKSZ

/*
 * Compiled KERNBASE locations
 */
	.globl	kernbase
	.set	kernbase, KERNBASE

/*
 * Globals
 */
#define	INTRCNT_COUNT	256		/* max(HROWPIC_IRQMAX,OPENPIC_IRQMAX) */

GLOBAL(kernload)
	.long	0
GLOBAL(intrnames)
	.space	INTRCNT_COUNT * (MAXCOMLEN + 1) * 2
GLOBAL(eintrnames)
	.align 4
GLOBAL(intrcnt)
	.space	INTRCNT_COUNT * 4 * 2
GLOBAL(eintrcnt)

#include <powerpc/booke/trap_subr.S>