/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "assym.s"

#include "opt_hwpmc_hooks.h"

#include <machine/asm.h>
#include <machine/hid.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>

#define TMPSTACKSZ	16384
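/*
 * Size of the temporary bootstrap stack; the backing space is reserved
 * at tmpstack in the data section at the bottom of this file.
 */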

#ifdef __powerpc64__
#define GET_TOCBASE(r)  \
	mfspr	r, SPR_SPRG8
#define	TOC_RESTORE	nop
#define	CMPI	cmpdi
#define	CMPL	cmpld
#define	LOAD	ld
#define	LOADX	ldarx
#define	STORE	std
#define	STOREX	stdcx.
#define	STU	stdu
#define	CALLSIZE	48
#define	REDZONE		288
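/*
 * Note: CALLSIZE and REDZONE above reflect the 64-bit ELFv1 ABI, where a
 * minimal stack frame takes 48 bytes and a 288-byte red zone lies below
 * the stack pointer.
 */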
#define	THREAD_REG	%r13
#define	ADDR(x)	\
	.llong	x
#define	WORD_SIZE	8
#else
#define	GET_TOCBASE(r)
#define	TOC_RESTORE
#define	CMPI	cmpwi
#define	CMPL	cmplw
#define	LOAD	lwz
#define	LOADX	lwarx
#define	STOREX	stwcx.
#define	STORE	stw
#define	STU	stwu
#define	CALLSIZE	8
#define	REDZONE		0
#define	THREAD_REG	%r2
#define	ADDR(x)	\
	.long	x
#define	WORD_SIZE	4
#endif

	.text
	.globl	btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.globl	kernel_text
kernel_text:

/*
 * Startup entry.  Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:

/*
 * Assumptions on the boot loader:
 *  - System memory starts from physical address 0
 *  - It's mapped by a single TLB1 entry
 *  - TLB1 mapping is 1:1 pa to va
 *  - Kernel is loaded at a 64MB boundary
 *  - All PID registers are set to the same value
 *  - CPU is running in AS=0
 *
 * Register contents provided by the loader(8):
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 *  - Find the TLB1 entry we started in
 *  - Make sure it's protected, invalidate other entries
 *  - Create a temp entry in AS=1 (make sure it's not TLB1[1])
 *  - Switch to the temp mapping
 *  - Map 64MB of RAM in TLB1[1]
 *  - Use AS=1, set EPN to KERNBASE and RPN to the kernel load address
 *  - Switch to the TLB1[1] mapping
 *  - Invalidate the temp mapping
 *
 * locore register use:
 *	r1	: stack pointer
 *	r2	: trace pointer (AP only, for early diagnostics)
 *	r3-r27	: scratch registers
 *	r28	: temp TLB1 entry
 *	r29	: initial TLB1 entry we started in
 *	r30-r31	: arguments (metadata pointer)
 */

/*
 * Keep arguments in r30 & r31 for later use.
 */
	mr	%r30, %r3
	mr	%r31, %r4

/*
 * Initial cleanup
 */
	li	%r3, PSL_DE	/* Keep debug exceptions for CodeWarrior. */
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h
#endif
	mtmsr	%r3
	isync

/*
 * Initial HIDs configuration
 */
1:
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31
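	/* r3 now holds the PVR version field, used to tell the core types apart */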

	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	/* Check for e500mc and e5500 */
	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f

	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f

	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l

3:
	mtspr	SPR_HID0, %r4
	isync
/*
 * The e500mc, e5500 and e6500 do not have the HID1 register, so skip
 * HID1 setup on those cores.
 */
	cmpli	0, 0, %r3, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r3, FSL_E5500
	beq	1f
	cmpli	0, 0, %r3, FSL_E6500
	beq	1f

	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync
1:
	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

	cmpwi	%r30, 0
	beq	done_mapping

/*
 * Locate the TLB1 entry that maps this code
 */
	bl	1f
1:	mflr	%r3
	bl	tlb1_find_current	/* the entry found is returned in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary mapping in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

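	/*
	 * Note that the rfi below performs the actual switch: SRR0/SRR1 are
	 * loaded into PC and MSR, so execution resumes at 3: with
	 * MSR[IS|DS] set, i.e. in address space 1 under the temp entry.
	 */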
	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	2f
2:	mflr	%r4
	addi	%r4, %r4, (3f - 2b)
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
3:
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 10, 15	/* Shift entry number into MAS0[ESEL] */
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	LOAD_ADDR(%r3, KERNBASE)
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync

	/* Discover phys load address */
	bl	3f
3:	mflr	%r4			/* Use current address */
	rlwinm	%r4, %r4, 0, 0, 5	/* 64MB alignment mask */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4		/* Set RPN and protection */
	isync
	bl	zero_mas7
	bl	zero_mas8
	tlbwe
	isync
	msync

	/* Switch to the above TLB1[1] mapping */
	bl	4f
4:	mflr	%r4
#ifdef __powerpc64__
	clrldi	%r4, %r4, 38
	clrrdi	%r3, %r3, 12
#else
	rlwinm	%r4, %r4, 0, 6, 31	/* Current offset from kernel load address */
	rlwinm	%r3, %r3, 0, 0, 19
#endif
	add	%r4, %r4, %r3		/* Convert to kernel virtual address */
	addi	%r4, %r4, (5f - 4b)
	li	%r3, PSL_DE		/* Note AS=0 */
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h
#endif
	mtspr   SPR_SRR0, %r4
	mtspr   SPR_SRR1, %r3
	rfi

/*
 * Invalidate temp mapping
 */
5:
	mr	%r3, %r28
	bl	tlb1_inval_entry

done_mapping:

#ifdef __powerpc64__
	/* Set up the TOC pointer */
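	/*
	 * The .llong below holds the link-time offset from its own address
	 * to __tocbase + 0x8000; adding it to that word's runtime address
	 * (recovered with bl/mflr) produces the relocated TOC pointer.
	 * SPRG8 then caches the TOC for use by the trap code (GET_TOCBASE).
	 */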
	b	0f
	.align 3
0:	nop
	bl	1f
	.llong	__tocbase + 0x8000 - .
1:	mflr	%r2
	ld	%r1,0(%r2)
	add	%r2,%r1,%r2
	mtspr	SPR_SPRG8, %r2

	/* Get load offset */
	ld	%r31,-0x8000(%r2) /* First TOC entry is the link-time TOC base */
	subf    %r31,%r31,%r2	/* Subtract it from the runtime TOC to get the offset */

	/* Set up the stack pointer */
	ld	%r1,TOC_REF(tmpstack)(%r2)
	addi	%r1,%r1,TMPSTACKSZ-96
	add	%r1,%r1,%r31
	bl	1f
	.llong _DYNAMIC-.
1:	mflr	%r3
	ld	%r4,0(%r3)
	add	%r3,%r4,%r3
	mr	%r4,%r31
#else
/*
 * Setup a temporary stack
 */
	bl	1f
	.long tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	addi	%r1, %r1, (TMPSTACKSZ - 16)

/*
 * Relocate kernel
 */
	bl      1f
	.long   _DYNAMIC-.
	.long   _GLOBAL_OFFSET_TABLE_-.
1:	mflr    %r5
	lwz	%r3,0(%r5)	/* _DYNAMIC in %r3 */
	add	%r3,%r3,%r5
	lwz	%r4,4(%r5)	/* GOT pointer */
	add	%r4,%r4,%r5
	lwz	%r4,4(%r4)	/* got[0] is _DYNAMIC link addr */
	subf	%r4,%r4,%r3	/* subtract to calculate relocbase */
#endif
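	/* Self-relocate the kernel: %r3 = _DYNAMIC, %r4 = relocation base */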
	bl	CNAME(elf_reloc_self)
	TOC_RESTORE

/*
 * Initialise exception vector offsets
 */
	bl	CNAME(ivor_setup)
	TOC_RESTORE

/*
 * Set up arguments and jump to system initialization code
 */
	mr	%r3, %r30
	mr	%r4, %r31

	/* Prepare core */
	bl	CNAME(booke_init)
	TOC_RESTORE

	/* Switch to thread0.td_kstack now */
	mr	%r1, %r3
	li	%r3, 0
	STORE	%r3, 0(%r1)
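	/* A NULL backchain word terminates stack unwinding at this frame */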

	/* Machine independent part, does not return */
	bl	CNAME(mi_startup)
	TOC_RESTORE
	/* NOT REACHED */
5:	b	5b


#ifdef SMP
/************************************************************************/
/* AP Boot page */
/************************************************************************/
	.text
	.globl	__boot_page
	.align	12
__boot_page:
	bl	1f

	.globl	bp_trace
bp_trace:
	.long	0

	.globl	bp_kernload
bp_kernload:
	.long	0
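	/*
	 * bp_trace and bp_kernload are patched at run time: the BSP stores
	 * the kernel's physical load address in bp_kernload before releasing
	 * an AP from holdoff, and the AP reads it below when building its
	 * final TLB1 entry.
	 */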

/*
 * Initial configuration
 */
1:
	mflr    %r31		/* r31 holds the address of bp_trace */

	/* Set HIDs */
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31

	/* HID0 for E500 is default */
	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f
	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f
	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l
3:
	mtspr	SPR_HID0, %r4
	isync

	/* Enable branch prediction */
	li	%r3, BUCSR_BPEN
	mtspr	SPR_BUCSR, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Find the TLB1 entry which is translating us now
 */
	bl	2f
2:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary translation in AS=1 and switch to it
 */

	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h
#endif
	bl	3f
3:	mflr	%r4
	addi	%r4, %r4, (4f - 3b)
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
4:
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 4, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	LOAD_ADDR(%r3, KERNBASE)
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync

	/* Retrieve the kernel load [physical] address from bp_kernload */
#ifdef __powerpc64__
	b	0f
	.align	3
0:
	nop
#endif
	bl 5f
	ADDR(bp_kernload)
	ADDR(__boot_page)
5:	mflr	%r3
#ifdef __powerpc64__
	ld	%r4, 0(%r3)
	ld	%r5, 8(%r3)
	clrrdi	%r3, %r3, 12
#else
	lwz	%r4, 0(%r3)
	lwz	%r5, 4(%r3)
	rlwinm	%r3, %r3, 0, 0, 19
#endif
	sub	%r4, %r4, %r5	/* offset of bp_kernload within __boot_page */
	lwzx	%r3, %r4, %r3

	/* Set RPN and protection */
	ori	%r3, %r3, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r3
	isync
	bl	zero_mas7
	bl	zero_mas8
	tlbwe
	isync
	msync

	/* Switch to the final mapping */
	bl	6f
6:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0xfff	/* Offset from boot page start */
	add	%r3, %r3, %r5		/* Make this a virtual address */
	addi	%r3, %r3, (7f - 6b)
#ifdef __powerpc64__
	lis	%r4, PSL_CM@h		/* Note AS=0 */
#else
	li	%r4, 0			/* Note AS=0 */
#endif
	mtspr	SPR_SRR0, %r3
	mtspr	SPR_SRR1, %r4
	rfi
7:

/*
 * At this point we're running at virtual addresses KERNBASE and beyond, so
 * we can directly access all locations the kernel was linked against.
 */

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry

#ifdef __powerpc64__
	/* Set up the TOC pointer, as done for the BSP above */
	b	0f
	.align 3
0:	nop
	bl	1f
	.llong	__tocbase + 0x8000 - .
1:	mflr	%r2
	ld	%r1,0(%r2)
	add	%r2,%r1,%r2
	mtspr	SPR_SPRG8, %r2

	/* Get load offset */
	ld	%r31,-0x8000(%r2) /* First TOC entry is the link-time TOC base */
	subf    %r31,%r31,%r2	/* Subtract it from the runtime TOC to get the offset */

	/* Set up the stack pointer */
	ld	%r1,TOC_REF(tmpstack)(%r2)
	addi	%r1,%r1,TMPSTACKSZ-96
	add	%r1,%r1,%r31
#else
/*
 * Setup a temporary stack
 */
	bl	1f
	.long tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	stw	%r1, 0(%r1)
	addi	%r1, %r1, (TMPSTACKSZ - 16)
#endif

/*
 * Initialise exception vector offsets
 */
	bl	CNAME(ivor_setup)
	TOC_RESTORE

	/*
	 * Assign our pcpu instance
	 */
	bl	1f
	.long ap_pcpu-.
1:	mflr	%r4
	lwz	%r3, 0(%r4)
	add	%r3, %r3, %r4
	LOAD	%r3, 0(%r3)
	mtsprg0	%r3
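	/* SPRG0 holds the per-CPU (pcpu) pointer on FreeBSD/powerpc */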

	bl	CNAME(pmap_bootstrap_ap)
	TOC_RESTORE

	bl	CNAME(cpudep_ap_bootstrap)
	TOC_RESTORE
	/* Switch to the idle thread's kstack */
	mr	%r1, %r3

	bl	CNAME(machdep_ap_bootstrap)
	TOC_RESTORE

	/* NOT REACHED */
6:	b	6b
#endif /* SMP */

#if defined (BOOKE_E500)
/*
 * Invalidate all entries in the given TLB.
 *
 * r3	TLBSEL
 */
tlb_inval_all:
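	/*
	 * tlbivax takes its arguments in the effective address: (1 << 3)
	 * carries TLBSEL and (1 << 2) requests INVALL.  The invalidation is
	 * broadcast, so the tlbsync/msync below fence its completion.
	 */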
	rlwinm	%r3, %r3, 3, (1 << 3)	/* TLBSEL */
	ori	%r3, %r3, (1 << 2)	/* INVALL */
	tlbivax	0, %r3
	isync
	msync

	tlbsync
	msync
	blr

/*
 * Expects the address to look up in r3; returns the entry number in r29.
 *
 * FIXME: the hidden assumption is that we are now running in AS=0, but we
 * should retrieve the actual AS from MSR[IS|DS] and put it in MAS6[SAS].
 */
tlb1_find_current:
	mfspr	%r17, SPR_PID0
	slwi	%r17, %r17, MAS6_SPID0_SHIFT
	mtspr	SPR_MAS6, %r17
	isync
	tlbsx	0, %r3
	mfspr	%r17, SPR_MAS0
	rlwinm	%r29, %r17, 16, 26, 31		/* MAS0[ESEL] -> r29 */

	/* Make sure we have IPROT set on the entry */
	mfspr	%r17, SPR_MAS1
	oris	%r17, %r17, MAS1_IPROT@h
	mtspr	SPR_MAS1, %r17
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Invalidates a single entry in TLB1.
 *
 * r3		ESEL
 * r4-r5	scratched
 */
tlb1_inval_entry:
	lis	%r4, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r4, %r3, 16, 10, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r4
	isync
	tlbre
	li	%r5, 0			/* MAS1[V] = 0 */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * r29		current entry number
 * r28		returned temp entry
 * r3-r5	scratched
 */
tlb1_temp_mapping_as1:
	/* Read our current translation */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r29, 16, 10, 15	/* Select our current entry */
	mtspr	SPR_MAS0, %r3
	isync
	tlbre

	/*
	 * Prepare and write temp entry
	 *
	 * FIXME: this is not robust against overflow, i.e. when the current
	 * entry is the last one in TLB1
	 */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	addi	%r28, %r29, 1		/* Use next entry. */
	rlwimi	%r3, %r28, 16, 10, 15	/* Select temp entry */
	mtspr	SPR_MAS0, %r3
	isync
	mfspr	%r5, SPR_MAS1
	li	%r4, 1			/* AS=1 */
	rlwimi	%r5, %r4, 12, 19, 19	/* Set MAS1[TS] */
	li	%r4, 0			/* Global mapping, TID=0 */
	rlwimi	%r5, %r4, 16, 8, 15	/* Set MAS1[TID] */
	oris	%r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r5
	isync
	mflr	%r3
	bl	zero_mas7
	bl	zero_mas8
	mtlr	%r3
	tlbwe
	isync
	msync
	blr

/*
 * Loops over TLB1, invalidating all entries, skipping the one that
 * currently maps this code.
 *
 * r29		current entry
 * r3-r5	scratched
 */
tlb1_inval_all_but_current:
	mfspr	%r3, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r3, %r3, TLBCFG_NENTRY_MASK@l
	li	%r4, 0			/* Start from Entry 0 */
1:	lis	%r5, MAS0_TLBSEL1@h
	rlwimi	%r5, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r5
	isync
	tlbre
	mfspr	%r5, SPR_MAS1
	cmpw	%r4, %r29		/* our current entry? */
	beq	2f
	rlwinm	%r5, %r5, 0, 2, 31	/* clear VALID and IPROT bits */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
2:	addi	%r4, %r4, 1
	cmpw	%r4, %r3		/* Check if this is the last entry */
	bne	1b
	blr

/*
 * MAS7 and MAS8 conditional zeroing.
 */
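/*
 * Not every core has these registers: e500v1 lacks MAS7, and MAS8 exists
 * only on e500mc/e5500-class cores, so each helper checks the PVR before
 * touching the register.  r20 is used as scratch.
 */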
.globl zero_mas7
zero_mas7:
	mfpvr	%r20
	rlwinm	%r20, %r20, 16, 16, 31
	cmpli	0, 0, %r20, FSL_E500v1
	beq	1f

	li	%r20, 0
	mtspr	SPR_MAS7, %r20
	isync
1:
	blr

.globl zero_mas8
zero_mas8:
	mfpvr	%r20
	rlwinm	%r20, %r20, 16, 16, 31
	cmpli	0, 0, %r20, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r20, FSL_E5500
	beq	1f

	blr
1:
	li	%r20, 0
	mtspr	SPR_MAS8, %r20
	isync
	blr
#endif

#ifdef SMP
.globl __boot_tlb1
	/*
	 * The __boot_tlb1 table is used to hold BSP TLB1 entries
	 * marked with the _TLB_ENTRY_SHARED flag during AP bootstrap.
	 * The BSP fills in the table in the tlb_ap_prep() function.  Next,
	 * the AP loads its contents into the TLB1 hardware in
	 * pmap_bootstrap_ap().
	 */
__boot_tlb1:
	.space TLB1_MAX_ENTRIES * TLB_ENTRY_SIZE

__boot_page_padding:
	/*
	 * The boot page needs to be exactly 4K, with the last word of the
	 * page acting as the reset vector, so we need to pad out the
	 * remainder.  Upon release from holdoff, the CPU fetches the last
	 * word of the boot page.
	 */
	.space	4092 - (__boot_page_padding - __boot_page)
	b	__boot_page
#endif /* SMP */

/************************************************************************/
/* locore subroutines */
/************************************************************************/

/*
 * Cache disable/enable/inval sequences according
 * to section 2.16 of the E500CORE RM.
 */
ENTRY(dcache_inval)
	/* Invalidate d-cache */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR0
	andi.	%r3, %r3, L1CSR0_DCFI
	bne	1b
	blr

ENTRY(dcache_disable)
	/* Disable d-cache */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(dcache_enable)
	/* Enable d-cache */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(icache_inval)
	/* Invalidate i-cache */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR1
	andi.	%r3, %r3, L1CSR1_ICFI
	bne	1b
	blr

ENTRY(icache_disable)
	/* Disable i-cache */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

ENTRY(icache_enable)
	/* Enable i-cache */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

/*
 * L2 cache disable/enable/inval sequences for E500mc.
 */

ENTRY(l2cache_inval)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@h
	ori	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@l
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
1:	mfspr   %r3, SPR_L2CSR0
	andis.	%r3, %r3, L2CSR0_L2FI@h
	bne	1b
	blr

ENTRY(l2cache_enable)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2E | L2CSR0_L2PE)@h
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
	blr

/*
 * Branch predictor setup.
 */
ENTRY(bpred_enable)
	mfspr	%r3, SPR_BUCSR
	ori	%r3, %r3, BUCSR_BBFI	/* Flash-invalidate the branch buffer */
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	ori	%r3, %r3, BUCSR_BPEN	/* Then enable branch prediction */
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	blr

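/*
 * Erratum workaround: perform a device write (%r4 stored to 0(%r3)) from
 * code locked into the I-cache, with timebase delays on either side of
 * the store, so that no instruction fetches go out to memory while the
 * write settles.
 */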
ENTRY(dataloss_erratum_access)
	/* Lock two cache lines into I-Cache */
	sync
	mfspr	%r11, SPR_L1CSR1
	rlwinm	%r11, %r11, 0, ~L1CSR1_ICUL
	sync
	isync
	mtspr	SPR_L1CSR1, %r11
	isync

	lis	%r8, 2f@h
	ori	%r8, %r8, 2f@l
	icbtls	0, 0, %r8
	addi	%r9, %r8, 64

	sync
	mfspr	%r11, SPR_L1CSR1
3:	andi.	%r11, %r11, L1CSR1_ICUL
	bne	3b

	icbtls	0, 0, %r9

	sync
	mfspr	%r11, SPR_L1CSR1
3:	andi.	%r11, %r11, L1CSR1_ICUL
	bne	3b

	b	2f
	.align	6
	/* Inside a locked cacheline, wait a while, write, then wait a while */
2:	sync

	mfspr	%r5, TBR_TBL
4:	addis	%r11, %r5, 0x100000@h	/* wait around one million timebase ticks */
	mfspr	%r5, TBR_TBL
	subf.	%r5, %r5, %r11
	bgt	4b

	stw	%r4, 0(%r3)

	mfspr	%r5, TBR_TBL
4:	addis	%r11, %r5, 0x100000@h	/* wait around one million timebase ticks */
	mfspr	%r5, TBR_TBL
	subf.	%r5, %r5, %r11
	bgt	4b

	sync

	/*
	 * Fill out the rest of this cache line and the next with nops,
	 * to ensure that nothing outside the locked area will be
	 * fetched due to a branch.
	 */
	.rept 19
	nop
	.endr

	icblc	0, 0, %r8
	icblc	0, 0, %r9

	blr

/*
 * XXX: This should be moved to a shared AIM/booke asm file, if one ever is
 * created.
 */
ENTRY(get_spr)
	mfspr	%r3, 0
	blr

/************************************************************************/
/* Data section								*/
/************************************************************************/
	.data
	.align 3
GLOBAL(__startkernel)
	ADDR(begin)
GLOBAL(__endkernel)
	ADDR(end)
	.align	4
tmpstack:
	.space	TMPSTACKSZ
tmpstackbound:
	.space 10240	/* XXX: this really should not be necessary */
#ifdef __powerpc64__
TOC_ENTRY(tmpstack)
TOC_ENTRY(bp_kernload)
#endif

/*
 * Compiled KERNBASE location.
 */
	.globl	kernbase
	.set	kernbase, KERNBASE

#include <powerpc/booke/trap_subr.S>