1/* SPDX-License-Identifier: GPL-2.0+ */
2/*
3 * Copyright 2004, 2007-2012 Freescale Semiconductor, Inc.
4 * Copyright (C) 2003  Motorola,Inc.
5 */
6
7/* U-Boot Startup Code for Motorola 85xx PowerPC based Embedded Boards
8 *
 * The processor starts at 0xfffffffc and the code is first executed in the
 * last 4K page (0xfffff000-0xffffffff) in flash/ROM.
11 *
12 */
13
14#include <asm-offsets.h>
15#include <config.h>
16#include <mpc85xx.h>
17#include <version.h>
18
19#include <ppc_asm.tmpl>
20#include <ppc_defs.h>
21
22#include <asm/cache.h>
23#include <asm/mmu.h>
24
25#undef	MSR_KERNEL
26#define MSR_KERNEL ( MSR_ME )	/* Machine Check */
27
28#define LAW_EN		0x80000000
29
30#if defined(CONFIG_NAND_SPL) || \
31	(defined(CONFIG_SPL_BUILD) && defined(CONFIG_SPL_INIT_MINIMAL))
32#define MINIMAL_SPL
33#endif
34
35#if !defined(CONFIG_SPL) && !defined(CONFIG_SYS_RAMBOOT) && \
36	!defined(CONFIG_NXP_ESBC) && !defined(CONFIG_SRIO_PCIE_BOOT_SLAVE)
37#define NOR_BOOT
38#endif
39
40/*
41 * Set up GOT: Global Offset Table
42 *
43 * Use r12 to access the GOT
44 */
45	START_GOT
46	GOT_ENTRY(_GOT2_TABLE_)
47	GOT_ENTRY(_FIXUP_TABLE_)
48
49#ifndef MINIMAL_SPL
50	GOT_ENTRY(_start)
51	GOT_ENTRY(_start_of_vectors)
52	GOT_ENTRY(_end_of_vectors)
53	GOT_ENTRY(transfer_to_handler)
54#endif
55
56	GOT_ENTRY(__init_end)
57	GOT_ENTRY(__bss_end)
58	GOT_ENTRY(__bss_start)
59	END_GOT
60
61/*
62 * e500 Startup -- after reset only the last 4KB of the effective
63 * address space is mapped in the MMU L2 TLB1 Entry0. The .bootpg
 * section is located in this last page and basically does three
 * things: clear some registers, set up exception tables and
 * add more TLB entries for 'larger spaces' (e.g. the boot ROM) to
 * continue the boot procedure.
 *
 * Once the boot ROM is mapped by TLB entries we can proceed
70 * with normal startup.
71 *
72 */
73
74	.section .bootpg,"ax"
75	.globl _start_e500
76
77_start_e500:
78/* Enable debug exception */
79	li	r1,MSR_DE
80	mtmsr 	r1
81
82	/*
83	 * If we got an ePAPR device tree pointer passed in as r3, we need that
84	 * later in cpu_init_early_f(). Save it to a safe register before we
85	 * clobber it so that we can fetch it from there later.
86	 */
87	mr	r24, r3
88
89#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
90	mfspr	r3,SPRN_SVR
91	rlwinm	r3,r3,0,0xff
92	li	r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV
93	cmpw	r3,r4
94	beq	1f
95
96#ifdef CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
97	li	r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
98	cmpw	r3,r4
99	beq	1f
100#endif
101
102	/* Not a supported revision affected by erratum */
103	li	r27,0
104	b	2f
105
1061:	li	r27,1	/* Remember for later that we have the erratum */
107	/* Erratum says set bits 55:60 to 001001 */
108	msync
109	isync
110	mfspr	r3,SPRN_HDBCR0
111	li	r4,0x48
112	rlwimi	r3,r4,0,0x1f8
113	mtspr	SPRN_HDBCR0,r3
114	isync
1152:
116#endif
117#ifdef CONFIG_SYS_FSL_ERRATUM_A005125
118	msync
119	isync
120	mfspr	r3, SPRN_HDBCR0
121	oris	r3, r3, 0x0080
122	mtspr	SPRN_HDBCR0, r3
123#endif
124
125
126#if defined(CONFIG_NXP_ESBC) && defined(CONFIG_E500MC) && \
127	!defined(CONFIG_E6500)
	/* ISBC uses L2 as stack.
	 * Disable the L2 cache here so that U-Boot can enable it later
	 * as part of its normal flow.
	 */
132
133	/* Check if L2 is enabled */
134	mfspr	r3, SPRN_L2CSR0
135	lis	r2, L2CSR0_L2E@h
136	ori	r2, r2, L2CSR0_L2E@l
137	and.	r4, r3, r2
138	beq	l2_disabled
139
140	mfspr r3, SPRN_L2CSR0
141	/* Flush L2 cache */
142	lis     r2,(L2CSR0_L2FL)@h
143	ori     r2, r2, (L2CSR0_L2FL)@l
144	or      r3, r2, r3
145	sync
146	isync
147	mtspr   SPRN_L2CSR0,r3
148	isync
1491:
150	mfspr r3, SPRN_L2CSR0
151	and. r1, r3, r2
152	bne 1b
153
154	mfspr r3, SPRN_L2CSR0
155	lis r2, L2CSR0_L2E@h
156	ori r2, r2, L2CSR0_L2E@l
157	andc r4, r3, r2
158	sync
159	isync
160	mtspr SPRN_L2CSR0,r4
161	isync
162
163l2_disabled:
164#endif
165
166/* clear registers/arrays not reset by hardware */
167
168	/* L1 */
169	li	r0,2
170	mtspr	L1CSR0,r0	/* invalidate d-cache */
171	mtspr	L1CSR1,r0	/* invalidate i-cache */
172
173	mfspr	r1,DBSR
174	mtspr	DBSR,r1		/* Clear all valid bits */
175
176
177	.macro	create_tlb1_entry esel ts tsize epn wimg rpn perm phy_high scratch
178	lis	\scratch, FSL_BOOKE_MAS0(1, \esel, 0)@h
179	ori	\scratch, \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@l
180	mtspr	MAS0, \scratch
181	lis	\scratch, FSL_BOOKE_MAS1(1, 1, 0, \ts, \tsize)@h
182	ori	\scratch, \scratch, FSL_BOOKE_MAS1(1, 1, 0, \ts, \tsize)@l
183	mtspr	MAS1, \scratch
184	lis	\scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
185	ori	\scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
186	mtspr	MAS2, \scratch
187	lis	\scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@h
188	ori	\scratch, \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@l
189	mtspr	MAS3, \scratch
190	lis	\scratch, \phy_high@h
191	ori	\scratch, \scratch, \phy_high@l
192	mtspr	MAS7, \scratch
193	isync
194	msync
195	tlbwe
196	isync
197	.endm
198
199	.macro	create_tlb0_entry esel ts tsize epn wimg rpn perm phy_high scratch
200	lis	\scratch, FSL_BOOKE_MAS0(0, \esel, 0)@h
201	ori	\scratch, \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@l
202	mtspr	MAS0, \scratch
203	lis	\scratch, FSL_BOOKE_MAS1(1, 0, 0, \ts, \tsize)@h
204	ori	\scratch, \scratch, FSL_BOOKE_MAS1(1, 0, 0, \ts, \tsize)@l
205	mtspr	MAS1, \scratch
206	lis	\scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
207	ori	\scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
208	mtspr	MAS2, \scratch
209	lis	\scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@h
210	ori	\scratch, \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@l
211	mtspr	MAS3, \scratch
212	lis	\scratch, \phy_high@h
213	ori	\scratch, \scratch, \phy_high@l
214	mtspr	MAS7, \scratch
215	isync
216	msync
217	tlbwe
218	isync
219	.endm
220
221	.macro	delete_tlb1_entry esel scratch
222	lis	\scratch, FSL_BOOKE_MAS0(1, \esel, 0)@h
223	ori	\scratch, \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@l
224	mtspr	MAS0, \scratch
225	li	\scratch, 0
226	mtspr	MAS1, \scratch
227	isync
228	msync
229	tlbwe
230	isync
231	.endm
232
233	.macro	delete_tlb0_entry esel epn wimg scratch
234	lis	\scratch, FSL_BOOKE_MAS0(0, \esel, 0)@h
235	ori	\scratch, \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@l
236	mtspr	MAS0, \scratch
237	li	\scratch, 0
238	mtspr	MAS1, \scratch
239	lis	\scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
240	ori	\scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
241	mtspr	MAS2, \scratch
242	isync
243	msync
244	tlbwe
245	isync
246	.endm
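
/*
 * Illustrative note (nothing here is assembled): a hypothetical invocation
 * such as
 *
 *	create_tlb1_entry 15, 1, BOOKE_PAGESZ_1M, \
 *		0xfff00000, MAS2_I|MAS2_G, \
 *		0xfff00000, MAS3_SX|MAS3_SW|MAS3_SR, \
 *		0, r6
 *
 * simply loads MAS0 (TLBSEL=1, ESEL=15), MAS1 (V=1, IPROT=1, TS=1,
 * TSIZE=1M), MAS2 (EPN plus WIMG bits), MAS3 (RPN plus permissions) and
 * MAS7 (upper physical address bits) through the given scratch register
 * and then issues tlbwe, exactly as the macro bodies above spell out.
 */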
247
248/* Interrupt vectors do not fit in minimal SPL. */
249#if !defined(MINIMAL_SPL)
250	/* Setup interrupt vectors */
251	lis	r1,CONFIG_SYS_MONITOR_BASE@h
252	mtspr	IVPR,r1
253
254	li	r4,CriticalInput@l
255	mtspr	IVOR0,r4	/* 0: Critical input */
256	li	r4,MachineCheck@l
257	mtspr	IVOR1,r4	/* 1: Machine check */
258	li	r4,DataStorage@l
259	mtspr	IVOR2,r4	/* 2: Data storage */
260	li	r4,InstStorage@l
261	mtspr	IVOR3,r4	/* 3: Instruction storage */
262	li	r4,ExtInterrupt@l
263	mtspr	IVOR4,r4	/* 4: External interrupt */
264	li	r4,Alignment@l
265	mtspr	IVOR5,r4	/* 5: Alignment */
266	li	r4,ProgramCheck@l
267	mtspr	IVOR6,r4	/* 6: Program check */
268	li	r4,FPUnavailable@l
269	mtspr	IVOR7,r4	/* 7: floating point unavailable */
270	li	r4,SystemCall@l
271	mtspr	IVOR8,r4	/* 8: System call */
272	/* 9: Auxiliary processor unavailable(unsupported) */
273	li	r4,Decrementer@l
274	mtspr	IVOR10,r4	/* 10: Decrementer */
275	li	r4,IntervalTimer@l
276	mtspr	IVOR11,r4	/* 11: Interval timer */
277	li	r4,WatchdogTimer@l
278	mtspr	IVOR12,r4	/* 12: Watchdog timer */
279	li	r4,DataTLBError@l
280	mtspr	IVOR13,r4	/* 13: Data TLB error */
281	li	r4,InstructionTLBError@l
282	mtspr	IVOR14,r4	/* 14: Instruction TLB error */
283	li	r4,DebugBreakpoint@l
284	mtspr	IVOR15,r4	/* 15: Debug */
285#endif
286
287	/* Clear and set up some registers. */
288	li      r0,0x0000
289	lis	r1,0xffff
290	mtspr	DEC,r0			/* prevent dec exceptions */
291	mttbl	r0			/* prevent fit & wdt exceptions */
292	mttbu	r0
293	mtspr	TSR,r1			/* clear all timer exception status */
294	mtspr	TCR,r0			/* disable all */
295	mtspr	ESR,r0			/* clear exception syndrome register */
296	mtspr	MCSR,r0			/* machine check syndrome register */
297	mtxer	r0			/* clear integer exception register */
298
299#ifdef CONFIG_SYS_BOOK3E_HV
300	mtspr	MAS8,r0			/* make sure MAS8 is clear */
301#endif
302
303	/* Enable Time Base and Select Time Base Clock */
304	lis	r0,HID0_EMCP@h		/* Enable machine check */
305#if defined(CONFIG_ENABLE_36BIT_PHYS)
306	ori	r0,r0,HID0_ENMAS7@l	/* Enable MAS7 */
307#endif
308#ifndef CONFIG_E500MC
309	ori	r0,r0,HID0_TBEN@l	/* Enable Timebase */
310#endif
311	mtspr	HID0,r0
312
313#if !defined(CONFIG_E500MC) && !defined(CONFIG_ARCH_QEMU_E500)
314	li	r0,(HID1_ASTME|HID1_ABE)@l	/* Addr streaming & broadcast */
315	mfspr	r3,PVR
316	andi.	r3,r3, 0xff
317	cmpwi	r3,0x50@l	/* if we are rev 5.0 or greater set MBDD */
318	blt 1f
319	/* Set MBDD bit also */
320	ori r0, r0, HID1_MBDD@l
3211:
322	mtspr	HID1,r0
323#endif
324
325#ifdef CONFIG_SYS_FSL_ERRATUM_CPU_A003999
326	mfspr	r3,SPRN_HDBCR1
327	oris	r3,r3,0x0100
328	mtspr	SPRN_HDBCR1,r3
329#endif
330
331	/* Enable Branch Prediction */
332#if defined(CONFIG_BTB)
333	lis	r0,BUCSR_ENABLE@h
334	ori	r0,r0,BUCSR_ENABLE@l
335	mtspr	SPRN_BUCSR,r0
336#endif
337
338#if defined(CONFIG_SYS_INIT_DBCR)
339	lis	r1,0xffff
340	ori	r1,r1,0xffff
341	mtspr	DBSR,r1			/* Clear all status bits */
342	lis	r0,CONFIG_SYS_INIT_DBCR@h	/* DBCR0[IDM] must be set */
343	ori	r0,r0,CONFIG_SYS_INIT_DBCR@l
344	mtspr	DBCR0,r0
345#endif
346
347/*
348 * Search for the TLB that covers the code we're executing, and shrink it
349 * so that it covers only this 4K page.  That will ensure that any other
350 * TLB we create won't interfere with it.  We assume that the TLB exists,
351 * which is why we don't check the Valid bit of MAS1.  We also assume
352 * it is in TLB1.
353 *
354 * This is necessary, for example, when booting from the on-chip ROM,
355 * which (oddly) creates a single 4GB TLB that covers CCSR and DDR.
356 */
357	bl	nexti		/* Find our address */
358nexti:	mflr	r1		/* R1 = our PC */
359	li	r2, 0
360	mtspr	MAS6, r2	/* Assume the current PID and AS are 0 */
361	isync
362	msync
363	tlbsx	0, r1		/* This must succeed */
364
365	mfspr	r14, MAS0	/* Save ESEL for later */
366	rlwinm	r14, r14, 16, 0xfff
367
368	/* Set the size of the TLB to 4KB */
369	mfspr	r3, MAS1
370	li	r2, 0xF80
371	andc	r3, r3, r2	/* Clear the TSIZE bits */
372	ori	r3, r3, MAS1_TSIZE(BOOKE_PAGESZ_4K)@l
373	oris	r3, r3, MAS1_IPROT@h
374	mtspr	MAS1, r3
375
376	/*
377	 * Set the base address of the TLB to our PC.  We assume that
378	 * virtual == physical.  We also assume that MAS2_EPN == MAS3_RPN.
379	 */
380	lis	r3, MAS2_EPN@h
381	ori	r3, r3, MAS2_EPN@l	/* R3 = MAS2_EPN */
382
383	and	r1, r1, r3	/* Our PC, rounded down to the nearest page */
384
385	mfspr	r2, MAS2
386	andc	r2, r2, r3
387	or	r2, r2, r1
388#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
389	cmpwi	r27,0
390	beq	1f
391	andi.	r15, r2, MAS2_I|MAS2_G /* save the old I/G for later */
392	rlwinm	r2, r2, 0, ~MAS2_I
393	ori	r2, r2, MAS2_G
3941:
395#endif
396	mtspr	MAS2, r2	/* Set the EPN to our PC base address */
397
398	mfspr	r2, MAS3
399	andc	r2, r2, r3
400	or	r2, r2, r1
401	mtspr	MAS3, r2	/* Set the RPN to our PC base address */
402
403	isync
404	msync
405	tlbwe
406
407/*
408 * Clear out any other TLB entries that may exist, to avoid conflicts.
409 * Our TLB entry is in r14.
410 */
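
/*
 * Equivalent pseudo-C sketch of the invalidation that follows (illustrative
 * only, not part of the image):
 *
 *	tlbivax(TLBIVAX_ALL | TLBIVAX_TLB0);		// flush all of TLB0
 *	for (esel = 0; esel < TLB1CFG.NENTRY; esel++)
 *		if (esel != r14) {			// keep the entry we run from
 *			MAS0 = MAS0_TLBSEL(1) | (esel << 16);
 *			MAS1 = 0;			// mark invalid
 *			tlbwe();
 *		}
 */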
411	li	r0, TLBIVAX_ALL | TLBIVAX_TLB0
412	tlbivax 0, r0
413	tlbsync
414
415	mfspr	r4, SPRN_TLB1CFG
416	rlwinm	r4, r4, 0, TLBnCFG_NENTRY_MASK
417
418	li	r3, 0
419	mtspr	MAS1, r3
4201:	cmpw	r3, r14
421	rlwinm	r5, r3, 16, MAS0_ESEL_MSK
422	addi	r3, r3, 1
423	beq	2f		/* skip the entry we're executing from */
424
425	oris	r5, r5, MAS0_TLBSEL(1)@h
426	mtspr	MAS0, r5
427
428	isync
429	tlbwe
430	isync
431	msync
432
4332:	cmpw	r3, r4
434	blt	1b
435
436#if defined(CONFIG_SYS_PPC_E500_DEBUG_TLB) && !defined(MINIMAL_SPL) && \
437	!defined(CONFIG_NXP_ESBC)
/*
 * TLB entry for debugging in AS1
 * Create a temporary TLB entry in AS0 to handle debug exceptions.
 * On a debug exception the MSR is cleared, i.e. the address space is
 * changed to 0, so a TLB entry (in AS0) is required to handle a debug
 * exception generated in AS1.
 */
445
446#ifdef NOR_BOOT
/*
 * A TLB entry is created for IVPR + IVOR15 to map to a valid opcode address,
 * because the flash's virtual address space maps to 0xff800000 - 0xffffffff
 * and this window is outside the 4K boot window.
 */
452	create_tlb1_entry CONFIG_SYS_PPC_E500_DEBUG_TLB, \
453		0, BOOKE_PAGESZ_4M, \
454		CONFIG_SYS_MONITOR_BASE & 0xffc00000,  MAS2_I|MAS2_G, \
455		0xffc00000, MAS3_SX|MAS3_SW|MAS3_SR, \
456		0, r6
457
458#else
/*
 * A TLB entry is created for IVPR + IVOR15 to map to a valid opcode address,
 * because "nexti" shrank the boot TLB entry to 4K.
 */
463	create_tlb1_entry CONFIG_SYS_PPC_E500_DEBUG_TLB, \
464		0, BOOKE_PAGESZ_256K, \
465		CONFIG_SYS_MONITOR_BASE & 0xfffc0000, MAS2_I, \
466		CONFIG_SYS_MONITOR_BASE & 0xfffc0000, MAS3_SX|MAS3_SW|MAS3_SR, \
467		0, r6
468#endif
469#endif
470
471/*
472 * Relocate CCSR, if necessary.  We relocate CCSR if (obviously) the default
473 * location is not where we want it.  This typically happens on a 36-bit
474 * system, where we want to move CCSR to near the top of 36-bit address space.
475 *
476 * To move CCSR, we create two temporary TLBs, one for the old location, and
477 * another for the new location.  On CoreNet systems, we also need to create
478 * a special, temporary LAW.
479 *
480 * As a general rule, TLB0 is used for short-term TLBs, and TLB1 is used for
481 * long-term TLBs, so we use TLB0 here.
482 */
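
/*
 * Pseudo-C outline of the relocation sequence implemented below (for
 * readability only; the label names match the code):
 *
 *	create_ccsr_new_tlb:  map CONFIG_SYS_CCSRBAR -> CONFIG_SYS_CCSRBAR_PHYS
 *	create_ccsr_old_tlb:  map CONFIG_SYS_CCSRBAR+0x1000 -> CCSRBAR_DEFAULT
 *	verify_old_ccsr:      check CCSR really is at the default location
 *	create_temp_law:      (CoreNet only) LAW target 0x1e for the new CCSR
 *	write_new_ccsrbar:    program CCSRBARH/CCSRBARL (or CCSRBAR) through
 *	                      the old window and commit
 *	delete_temp_law / delete_temp_tlbs: tear the scaffolding down again
 */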
483#if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS)
484
485#if !defined(CONFIG_SYS_CCSRBAR_PHYS_HIGH) || !defined(CONFIG_SYS_CCSRBAR_PHYS_LOW)
#error "CONFIG_SYS_CCSRBAR_PHYS_HIGH and CONFIG_SYS_CCSRBAR_PHYS_LOW must be defined."
487#endif
488
489create_ccsr_new_tlb:
490	/*
491	 * Create a TLB for the new location of CCSR.  Register R8 is reserved
492	 * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR).
493	 */
494	lis	r8, CONFIG_SYS_CCSRBAR@h
495	ori	r8, r8, CONFIG_SYS_CCSRBAR@l
496	lis	r9, (CONFIG_SYS_CCSRBAR + 0x1000)@h
497	ori	r9, r9, (CONFIG_SYS_CCSRBAR + 0x1000)@l
498	create_tlb0_entry 0, \
499		0, BOOKE_PAGESZ_4K, \
500		CONFIG_SYS_CCSRBAR, MAS2_I|MAS2_G, \
501		CONFIG_SYS_CCSRBAR_PHYS_LOW, MAS3_SW|MAS3_SR, \
502		CONFIG_SYS_CCSRBAR_PHYS_HIGH, r3
503	/*
504	 * Create a TLB for the current location of CCSR.  Register R9 is reserved
505	 * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR + 0x1000).
506	 */
507create_ccsr_old_tlb:
508	create_tlb0_entry 1, \
509		0, BOOKE_PAGESZ_4K, \
510		CONFIG_SYS_CCSRBAR + 0x1000, MAS2_I|MAS2_G, \
511		CONFIG_SYS_CCSRBAR_DEFAULT, MAS3_SW|MAS3_SR, \
512		0, r3 /* The default CCSR address is always a 32-bit number */
513
514
515	/*
516	 * We have a TLB for what we think is the current (old) CCSR.  Let's
517	 * verify that, otherwise we won't be able to move it.
518	 * CONFIG_SYS_CCSRBAR_DEFAULT is always a 32-bit number, so we only
519	 * need to compare the lower 32 bits of CCSRBAR on CoreNet systems.
520	 */
521verify_old_ccsr:
522	lis     r0, CONFIG_SYS_CCSRBAR_DEFAULT@h
523	ori     r0, r0, CONFIG_SYS_CCSRBAR_DEFAULT@l
524#ifdef CONFIG_FSL_CORENET
525	lwz	r1, 4(r9)		/* CCSRBARL */
526#else
527	lwz	r1, 0(r9)		/* CCSRBAR, shifted right by 12 */
528	slwi	r1, r1, 12
529#endif
530
531	cmpl	0, r0, r1
532
533	/*
534	 * If the value we read from CCSRBARL is not what we expect, then
535	 * enter an infinite loop.  This will at least allow a debugger to
536	 * halt execution and examine TLBs, etc.  There's no point in going
537	 * on.
538	 */
539infinite_debug_loop:
540	bne	infinite_debug_loop
541
542#ifdef CONFIG_FSL_CORENET
543
544#define CCSR_LAWBARH0	(CONFIG_SYS_CCSRBAR + 0x1000)
545#define LAW_SIZE_4K	0xb
546#define CCSRBAR_LAWAR	(LAW_EN | (0x1e << 20) | LAW_SIZE_4K)
547#define CCSRAR_C	0x80000000	/* Commit */
548
549create_temp_law:
550	/*
551	 * On CoreNet systems, we create the temporary LAW using a special LAW
552	 * target ID of 0x1e.  LAWBARH is at offset 0xc00 in CCSR.
553	 */
554	lis     r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
555	ori     r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
556	lis     r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h
557	ori     r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l
558	lis     r2, CCSRBAR_LAWAR@h
559	ori     r2, r2, CCSRBAR_LAWAR@l
560
561	stw     r0, 0xc00(r9)	/* LAWBARH0 */
562	stw     r1, 0xc04(r9)	/* LAWBARL0 */
563	sync
564	stw     r2, 0xc08(r9)	/* LAWAR0 */
565
566	/*
567	 * Read back from LAWAR to ensure the update is complete.  e500mc
568	 * cores also require an isync.
569	 */
570	lwz	r0, 0xc08(r9)	/* LAWAR0 */
571	isync
572
573	/*
574	 * Read the current CCSRBARH and CCSRBARL using load word instructions.
575	 * Follow this with an isync instruction. This forces any outstanding
576	 * accesses to configuration space to completion.
577	 */
578read_old_ccsrbar:
579	lwz	r0, 0(r9)	/* CCSRBARH */
580	lwz	r0, 4(r9)	/* CCSRBARL */
581	isync
582
583	/*
584	 * Write the new values for CCSRBARH and CCSRBARL to their old
585	 * locations.  The CCSRBARH has a shadow register. When the CCSRBARH
586	 * has a new value written it loads a CCSRBARH shadow register. When
587	 * the CCSRBARL is written, the CCSRBARH shadow register contents
588	 * along with the CCSRBARL value are loaded into the CCSRBARH and
589	 * CCSRBARL registers, respectively.  Follow this with a sync
590	 * instruction.
591	 */
592write_new_ccsrbar:
593	lis	r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
594	ori	r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
595	lis	r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h
596	ori	r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l
597	lis	r2, CCSRAR_C@h
598	ori	r2, r2, CCSRAR_C@l
599
600	stw	r0, 0(r9)	/* Write to CCSRBARH */
601	sync			/* Make sure we write to CCSRBARH first */
602	stw	r1, 4(r9)	/* Write to CCSRBARL */
603	sync
604
605	/*
606	 * Write a 1 to the commit bit (C) of CCSRAR at the old location.
607	 * Follow this with a sync instruction.
608	 */
609	stw	r2, 8(r9)
610	sync
611
612	/* Delete the temporary LAW */
613delete_temp_law:
614	li	r1, 0
615	stw	r1, 0xc08(r8)
616	sync
617	stw	r1, 0xc00(r8)
618	stw	r1, 0xc04(r8)
619	sync
620
621#else /* #ifdef CONFIG_FSL_CORENET */
622
623write_new_ccsrbar:
624	/*
625	 * Read the current value of CCSRBAR using a load word instruction
626	 * followed by an isync. This forces all accesses to configuration
627	 * space to complete.
628	 */
629	sync
630	lwz	r0, 0(r9)
631	isync
632
633/* CONFIG_SYS_CCSRBAR_PHYS right shifted by 12 */
634#define CCSRBAR_PHYS_RS12 ((CONFIG_SYS_CCSRBAR_PHYS_HIGH << 20) | \
635			   (CONFIG_SYS_CCSRBAR_PHYS_LOW >> 12))
636
637	/* Write the new value to CCSRBAR. */
638	lis	r0, CCSRBAR_PHYS_RS12@h
639	ori	r0, r0, CCSRBAR_PHYS_RS12@l
640	stw	r0, 0(r9)
641	sync
642
643	/*
644	 * The manual says to perform a load of an address that does not
645	 * access configuration space or the on-chip SRAM using an existing TLB,
646	 * but that doesn't appear to be necessary.  We will do the isync,
647	 * though.
648	 */
649	isync
650
651	/*
652	 * Read the contents of CCSRBAR from its new location, followed by
653	 * another isync.
654	 */
655	lwz	r0, 0(r8)
656	isync
657
658#endif  /* #ifdef CONFIG_FSL_CORENET */
659
660	/* Delete the temporary TLBs */
661delete_temp_tlbs:
662	delete_tlb0_entry 0, CONFIG_SYS_CCSRBAR, MAS2_I|MAS2_G, r3
663	delete_tlb0_entry 1, CONFIG_SYS_CCSRBAR + 0x1000, MAS2_I|MAS2_G, r3
664
665#endif /* #if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS) */
666
667#if defined(CONFIG_SYS_FSL_QORIQ_CHASSIS2) && defined(CONFIG_E6500)
668create_ccsr_l2_tlb:
669	/*
670	 * Create a TLB for the MMR location of CCSR
671	 * to access L2CSR0 register
672	 */
673	create_tlb0_entry 0, \
674		0, BOOKE_PAGESZ_4K, \
675		CONFIG_SYS_CCSRBAR + 0xC20000, MAS2_I|MAS2_G, \
676		CONFIG_SYS_CCSRBAR_PHYS_LOW + 0xC20000, MAS3_SW|MAS3_SR, \
677		CONFIG_SYS_CCSRBAR_PHYS_HIGH, r3
678
679enable_l2_cluster_l2:
680	/* enable L2 cache */
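	/*
	 * Outline of the enable sequence below (pseudo-C, illustrative only):
	 *
	 *	L2CSR1 = 33;				// stash ID
	 *	L2CSR0 = L2CSR0_L2FI | L2CSR0_L2LFC;	// invalidate, poll until clear
	 *	L2CSR0 = L2CSR0_L2PE;			// parity/ECC, poll until set
	 *	L2CSR0 = L2CSR0_L2E | L2CSR0_L2PE | L2CSR0_L2REP_MODE;	// enable
	 */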
681	lis	r3, (CONFIG_SYS_CCSRBAR + 0xC20000)@h
682	ori	r3, r3, (CONFIG_SYS_CCSRBAR + 0xC20000)@l
683	li	r4, 33	/* stash id */
684	stw	r4, 4(r3)
685	lis	r4, (L2CSR0_L2FI|L2CSR0_L2LFC)@h
686	ori	r4, r4, (L2CSR0_L2FI|L2CSR0_L2LFC)@l
687	sync
688	stw	r4, 0(r3)	/* invalidate L2 */
689	/* Poll till the bits are cleared */
6901:	sync
691	lwz	r0, 0(r3)
692	twi	0, r0, 0
693	isync
694	and.	r1, r0, r4
695	bne	1b
696
697	/* L2PE must be set before L2 cache is enabled */
698	lis	r4, (L2CSR0_L2PE)@h
699	ori	r4, r4, (L2CSR0_L2PE)@l
700	sync
701	stw	r4, 0(r3)	/* enable L2 parity/ECC error checking */
702	/* Poll till the bit is set */
7031:	sync
704	lwz	r0, 0(r3)
705	twi	0, r0, 0
706	isync
707	and.	r1, r0, r4
708	beq	1b
709
710	lis	r4, (L2CSR0_L2E|L2CSR0_L2PE)@h
711	ori	r4, r4, (L2CSR0_L2REP_MODE)@l
712	sync
713	stw	r4, 0(r3)	/* enable L2 */
714	/* Poll till the bit is set */
7151:	sync
716	lwz	r0, 0(r3)
717	twi	0, r0, 0
718	isync
719	and.	r1, r0, r4
720	beq	1b
721
722delete_ccsr_l2_tlb:
723	delete_tlb0_entry 0, CONFIG_SYS_CCSRBAR + 0xC20000, MAS2_I|MAS2_G, r3
724#endif
725
726	/*
727	 * Enable the L1. On e6500, this has to be done
728	 * after the L2 is up.
729	 */
730
731#ifdef CONFIG_SYS_CACHE_STASHING
732	/* set stash id to (coreID) * 2 + 32 + L1 CT (0) */
733	li	r2,(32 + 0)
734	mtspr	L1CSR2,r2
735#endif
736
737	/* Enable/invalidate the I-Cache */
738	lis	r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
739	ori	r2,r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
740	mtspr	SPRN_L1CSR1,r2
7411:
742	mfspr	r3,SPRN_L1CSR1
743	and.	r1,r3,r2
744	bne	1b
745
746	lis	r3,(L1CSR1_CPE|L1CSR1_ICE)@h
747	ori	r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
748	mtspr	SPRN_L1CSR1,r3
749	isync
7502:
751	mfspr	r3,SPRN_L1CSR1
752	andi.	r1,r3,L1CSR1_ICE@l
753	beq	2b
754
755	/* Enable/invalidate the D-Cache */
756	lis	r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@h
757	ori	r2,r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@l
758	mtspr	SPRN_L1CSR0,r2
7591:
760	mfspr	r3,SPRN_L1CSR0
761	and.	r1,r3,r2
762	bne	1b
763
764	lis	r3,(L1CSR0_CPE|L1CSR0_DCE)@h
765	ori	r3,r3,(L1CSR0_CPE|L1CSR0_DCE)@l
766	mtspr	SPRN_L1CSR0,r3
767	isync
7682:
769	mfspr	r3,SPRN_L1CSR0
770	andi.	r1,r3,L1CSR0_DCE@l
771	beq	2b
772#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
773#define DCSR_LAWBARH0	(CONFIG_SYS_CCSRBAR + 0x1000)
774#define LAW_SIZE_1M	0x13
775#define DCSRBAR_LAWAR	(LAW_EN | (0x1d << 20) | LAW_SIZE_1M)
776
777	cmpwi	r27,0
778	beq	9f
779
780	/*
781	 * Create a TLB entry for CCSR
782	 *
	 * We're executing out of the TLB1 entry whose ESEL is in r14, and
	 * that's the only TLB entry that exists.  To allocate some TLB
	 * entries for our own use, flip a bit high enough that we won't
	 * flip it again via incrementing.
787	 */
788
789	xori	r8, r14, 32
790	lis	r0, MAS0_TLBSEL(1)@h
791	rlwimi	r0, r8, 16, MAS0_ESEL_MSK
792	lis	r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_16M)@h
793	ori	r1, r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_16M)@l
794	lis	r7, CONFIG_SYS_CCSRBAR@h
795	ori	r7, r7, CONFIG_SYS_CCSRBAR@l
796	ori	r2, r7, MAS2_I|MAS2_G
797	lis	r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@h
798	ori	r3, r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@l
799	lis	r4, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
800	ori	r4, r4, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
801	mtspr	MAS0, r0
802	mtspr	MAS1, r1
803	mtspr	MAS2, r2
804	mtspr	MAS3, r3
805	mtspr	MAS7, r4
806	isync
807	tlbwe
808	isync
809	msync
810
811	/* Map DCSR temporarily to physical address zero */
812	li	r0, 0
813	lis	r3, DCSRBAR_LAWAR@h
814	ori	r3, r3, DCSRBAR_LAWAR@l
815
816	stw	r0, 0xc00(r7)	/* LAWBARH0 */
817	stw	r0, 0xc04(r7)	/* LAWBARL0 */
818	sync
819	stw	r3, 0xc08(r7)	/* LAWAR0 */
820
821	/* Read back from LAWAR to ensure the update is complete. */
822	lwz	r3, 0xc08(r7)	/* LAWAR0 */
823	isync
824
825	/* Create a TLB entry for DCSR at zero */
826
827	addi	r9, r8, 1
828	lis	r0, MAS0_TLBSEL(1)@h
829	rlwimi	r0, r9, 16, MAS0_ESEL_MSK
830	lis	r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@h
831	ori	r1, r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@l
832	li	r6, 0	/* DCSR effective address */
833	ori	r2, r6, MAS2_I|MAS2_G
834	li	r3, MAS3_SW|MAS3_SR
835	li	r4, 0
836	mtspr	MAS0, r0
837	mtspr	MAS1, r1
838	mtspr	MAS2, r2
839	mtspr	MAS3, r3
840	mtspr	MAS7, r4
841	isync
842	tlbwe
843	isync
844	msync
845
846	/* enable the timebase */
847#define CTBENR	0xe2084
848	li	r3, 1
849	addis	r4, r7, CTBENR@ha
850	stw	r3, CTBENR@l(r4)
851	lwz	r3, CTBENR@l(r4)
852	twi	0,r3,0
853	isync
854
855	.macro	erratum_set_ccsr offset value
856	addis	r3, r7, \offset@ha
857	lis	r4, \value@h
858	addi	r3, r3, \offset@l
859	ori	r4, r4, \value@l
860	bl	erratum_set_value
861	.endm
862
863	.macro	erratum_set_dcsr offset value
864	addis	r3, r6, \offset@ha
865	lis	r4, \value@h
866	addi	r3, r3, \offset@l
867	ori	r4, r4, \value@l
868	bl	erratum_set_value
869	.endm
870
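	/*
	 * Each erratum_set_* macro above just loads the target address
	 * (CCSR-relative via r7, or DCSR-relative via r6) into r3 and the
	 * value into r4, then calls erratum_set_value, which performs the
	 * store from inside locked I-cache lines with timebase-based delays
	 * around it (see below).  In effect, roughly:
	 *
	 *	erratum_set_ccsr off, val   ==>   *(ccsr_base + off) = val;
	 *	erratum_set_dcsr off, val   ==>   *(dcsr_base + off) = val;
	 */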
871	erratum_set_dcsr 0xb0e08 0xe0201800
872	erratum_set_dcsr 0xb0e18 0xe0201800
873	erratum_set_dcsr 0xb0e38 0xe0400000
874	erratum_set_dcsr 0xb0008 0x00900000
875	erratum_set_dcsr 0xb0e40 0xe00a0000
876	erratum_set_ccsr 0x18600 CONFIG_SYS_FSL_CORENET_SNOOPVEC_COREONLY
877#ifdef  CONFIG_RAMBOOT_PBL
878	erratum_set_ccsr 0x10f00 0x495e5000
879#else
880	erratum_set_ccsr 0x10f00 0x415e5000
881#endif
882	erratum_set_ccsr 0x11f00 0x415e5000
883
884	/* Make temp mapping uncacheable again, if it was initially */
885	bl	2f
8862:	mflr	r3
887	tlbsx	0, r3
888	mfspr	r4, MAS2
889	rlwimi	r4, r15, 0, MAS2_I
890	rlwimi	r4, r15, 0, MAS2_G
891	mtspr	MAS2, r4
892	isync
893	tlbwe
894	isync
895	msync
896
897	/* Clear the cache */
898	lis	r3,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
899	ori	r3,r3,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
900	sync
901	isync
902	mtspr	SPRN_L1CSR1,r3
903	isync
9042:	sync
905	mfspr	r4,SPRN_L1CSR1
906	and.	r4,r4,r3
907	bne	2b
908
909	lis	r3,(L1CSR1_CPE|L1CSR1_ICE)@h
910	ori	r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
911	sync
912	isync
913	mtspr	SPRN_L1CSR1,r3
914	isync
9152:	sync
916	mfspr	r4,SPRN_L1CSR1
917	and.	r4,r4,r3
918	beq	2b
919
920	/* Remove temporary mappings */
921	lis	r0, MAS0_TLBSEL(1)@h
922	rlwimi	r0, r9, 16, MAS0_ESEL_MSK
923	li	r3, 0
924	mtspr	MAS0, r0
925	mtspr	MAS1, r3
926	isync
927	tlbwe
928	isync
929	msync
930
931	li	r3, 0
932	stw	r3, 0xc08(r7)	/* LAWAR0 */
933	lwz	r3, 0xc08(r7)
934	isync
935
936	lis	r0, MAS0_TLBSEL(1)@h
937	rlwimi	r0, r8, 16, MAS0_ESEL_MSK
938	li	r3, 0
939	mtspr	MAS0, r0
940	mtspr	MAS1, r3
941	isync
942	tlbwe
943	isync
944	msync
945
946	b	9f
947
948	/* r3 = addr, r4 = value, clobbers r5, r11, r12 */
949erratum_set_value:
950	/* Lock two cache lines into I-Cache */
951	sync
952	mfspr	r11, SPRN_L1CSR1
953	rlwinm	r11, r11, 0, ~L1CSR1_ICUL
954	sync
955	isync
956	mtspr	SPRN_L1CSR1, r11
957	isync
958
959	mflr	r12
960	bl	5f
9615:	mflr	r5
962	addi	r5, r5, 2f - 5b
963	icbtls	0, 0, r5
964	addi	r5, r5, 64
965
966	sync
967	mfspr	r11, SPRN_L1CSR1
9683:	andi.	r11, r11, L1CSR1_ICUL
969	bne	3b
970
971	icbtls	0, 0, r5
972	addi	r5, r5, 64
973
974	sync
975	mfspr	r11, SPRN_L1CSR1
9763:	andi.	r11, r11, L1CSR1_ICUL
977	bne	3b
978
979	b	2f
980	.align	6
981	/* Inside a locked cacheline, wait a while, write, then wait a while */
9822:	sync
983
984	mfspr	r5, SPRN_TBRL
985	addis	r11, r5, 0x10000@h /* wait 65536 timebase ticks */
9864:	mfspr	r5, SPRN_TBRL
987	subf.	r5, r5, r11
988	bgt	4b
989
990	stw	r4, 0(r3)
991
992	mfspr	r5, SPRN_TBRL
993	addis	r11, r5, 0x10000@h /* wait 65536 timebase ticks */
9944:	mfspr	r5, SPRN_TBRL
995	subf.	r5, r5, r11
996	bgt	4b
997
998	sync
999
1000	/*
1001	 * Fill out the rest of this cache line and the next with nops,
1002	 * to ensure that nothing outside the locked area will be
1003	 * fetched due to a branch.
1004	 */
1005	.rept 19
1006	nop
1007	.endr
1008
1009	sync
1010	mfspr	r11, SPRN_L1CSR1
1011	rlwinm	r11, r11, 0, ~L1CSR1_ICUL
1012	sync
1013	isync
1014	mtspr	SPRN_L1CSR1, r11
1015	isync
1016
1017	mtlr	r12
1018	blr
1019
10209:
1021#endif
1022
1023create_init_ram_area:
1024	lis     r6,FSL_BOOKE_MAS0(1, 15, 0)@h
1025	ori     r6,r6,FSL_BOOKE_MAS0(1, 15, 0)@l
1026
1027#ifdef NOR_BOOT
1028	/* create a temp mapping in AS=1 to the 4M boot window */
1029	create_tlb1_entry 15, \
1030		1, BOOKE_PAGESZ_4M, \
1031		CONFIG_SYS_MONITOR_BASE & 0xffc00000, MAS2_I|MAS2_G, \
1032		0xffc00000, MAS3_SX|MAS3_SW|MAS3_SR, \
1033		0, r6
1034
1035#elif !defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_NXP_ESBC)
	/* Create a temp mapping in AS=1 for the flash mapping
	 * created by the PBL for the ISBC code.
	 */
1039	create_tlb1_entry 15, \
1040		1, BOOKE_PAGESZ_1M, \
1041		CONFIG_SYS_MONITOR_BASE & 0xfff00000, MAS2_I|MAS2_G, \
1042		CONFIG_SYS_PBI_FLASH_WINDOW & 0xfff00000, MAS3_SX|MAS3_SW|MAS3_SR, \
1043		0, r6
1044
/*
 * For targets without CONFIG_SPL (e.g. P3, P5), and for targets with
 * CONFIG_SPL (e.g. T1, T2, T4) only when building u-boot-spl,
 * i.e. CONFIG_SPL_BUILD.
 */
1050#elif defined(CONFIG_RAMBOOT_PBL) && defined(CONFIG_NXP_ESBC) && \
1051	(!defined(CONFIG_SPL) || defined(CONFIG_SPL_BUILD))
	/* Create a temp mapping in AS=1 from CONFIG_SYS_MONITOR_BASE
	 * to the L3 address configured by the PBL for the ISBC code.
	 */
1055	create_tlb1_entry 15, \
1056		1, BOOKE_PAGESZ_1M, \
1057		CONFIG_SYS_MONITOR_BASE & 0xfff00000, MAS2_I|MAS2_G, \
1058		CONFIG_SYS_INIT_L3_ADDR & 0xfff00000, MAS3_SX|MAS3_SW|MAS3_SR, \
1059		0, r6
1060
1061#else
	/*
	 * Create a temp mapping in AS=1 to the 1M CONFIG_SYS_MONITOR_BASE
	 * space; the main image has been relocated to CONFIG_SYS_MONITOR_BASE
	 * in the second stage.
	 */
1066	create_tlb1_entry 15, \
1067		1, BOOKE_PAGESZ_1M, \
1068		CONFIG_SYS_MONITOR_BASE & 0xfff00000, MAS2_I|MAS2_G, \
1069		CONFIG_SYS_MONITOR_BASE & 0xfff00000, MAS3_SX|MAS3_SW|MAS3_SR, \
1070		0, r6
1071#endif
1072
1073	/* create a temp mapping in AS=1 to the stack */
1074#if defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW) && \
1075    defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH)
1076	create_tlb1_entry 14, \
1077		1, BOOKE_PAGESZ_16K, \
1078		CONFIG_SYS_INIT_RAM_ADDR, 0, \
1079		CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW, MAS3_SX|MAS3_SW|MAS3_SR, \
1080		CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH, r6
1081
1082#else
1083	create_tlb1_entry 14, \
1084		1, BOOKE_PAGESZ_16K, \
1085		CONFIG_SYS_INIT_RAM_ADDR, 0, \
1086		CONFIG_SYS_INIT_RAM_ADDR, MAS3_SX|MAS3_SW|MAS3_SR, \
1087		0, r6
1088#endif
1089
1090	lis	r6,MSR_IS|MSR_DS|MSR_DE@h
1091	ori	r6,r6,MSR_IS|MSR_DS|MSR_DE@l
1092	lis	r7,switch_as@h
1093	ori	r7,r7,switch_as@l
1094
1095	mtspr	SPRN_SRR0,r7
1096	mtspr	SPRN_SRR1,r6
1097	rfi
1098
1099switch_as:
1100/* L1 DCache is used for initial RAM */
1101
1102	/* Allocate Initial RAM in data cache.
1103	 */
1104	lis	r3,CONFIG_SYS_INIT_RAM_ADDR@h
1105	ori	r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l
1106	mfspr	r2, L1CFG0
1107	andi.	r2, r2, 0x1ff
1108	/* cache size * 1024 / (2 * L1 line size) */
1109	slwi	r2, r2, (10 - 1 - L1_CACHE_SHIFT)
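	/*
	 * Worked example (numbers are illustrative, not asserted for any
	 * particular SoC): with a 32KB L1 and 64-byte lines
	 * (L1_CACHE_SHIFT = 6), the slwi above gives
	 * r2 = 32 << (10 - 1 - 6) = 256, matching
	 * 32 * 1024 / (2 * 64) = 256 loop iterations below.
	 */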
1110	mtctr	r2
1111	li	r0,0
11121:
1113	dcbz	r0,r3
#ifdef CONFIG_E6500	/* Lock/unlock L2 cache along with L1 */
1115	dcbtls	2, r0, r3
1116	dcbtls	0, r0, r3
1117#else
1118	dcbtls	0, r0, r3
1119#endif
1120	addi	r3,r3,CONFIG_SYS_CACHELINE_SIZE
1121	bdnz	1b
1122
	/* Jump out of the last 4K page and continue to 'normal' start */
1124#if defined(CONFIG_SYS_RAMBOOT) || defined(CONFIG_SPL)
1125	/* We assume that we're already running at the address we're linked at */
1126	b	_start_cont
1127#else
1128	/* Calculate absolute address in FLASH and jump there		*/
1129	/*--------------------------------------------------------------*/
1130	lis	r3,CONFIG_SYS_MONITOR_BASE@h
1131	ori	r3,r3,CONFIG_SYS_MONITOR_BASE@l
1132	addi	r3,r3,_start_cont - _start
1133	mtlr	r3
1134	blr
1135#endif
1136
1137	.text
1138	.globl	_start
1139_start:
1140	.long	0x27051956		/* U-BOOT Magic Number */
1141	.globl	version_string
1142version_string:
1143	.ascii U_BOOT_VERSION_STRING, "\0"
1144
1145	.align	4
1146	.globl	_start_cont
1147_start_cont:
	/* Set up the stack in initial RAM; could be L2-as-SRAM or L1 dcache */
1149	lis	r3,(CONFIG_SYS_INIT_RAM_ADDR)@h
1150	ori	r3,r3,((CONFIG_SYS_INIT_SP_OFFSET-16)&~0xf)@l /* Align to 16 */
1151
1152#if CONFIG_VAL(SYS_MALLOC_F_LEN)
1153#if CONFIG_VAL(SYS_MALLOC_F_LEN) + GENERATED_GBL_DATA_SIZE > CONFIG_SYS_INIT_RAM_SIZE
1154#error "SYS_MALLOC_F_LEN too large to fit into initial RAM."
1155#endif
1156
	/* Leave 16+ bytes for back chain termination and NULL return address */
1158	subi	r3,r3,((CONFIG_VAL(SYS_MALLOC_F_LEN)+16+15)&~0xf)
1159#endif
1160
1161	/* End of RAM */
1162	lis	r4,(CONFIG_SYS_INIT_RAM_ADDR)@h
1163	ori	r4,r4,(CONFIG_SYS_INIT_RAM_SIZE)@l
1164
1165	li	r0,0
1166
11671: 	subi 	r4,r4,4
1168	stw 	r0,0(r4)
1169	cmplw 	r4,r3
1170	bne	1b
1171
1172#if CONFIG_VAL(SYS_MALLOC_F_LEN)
1173	lis	r4,(CONFIG_SYS_INIT_RAM_ADDR)@h
1174	ori	r4,r4,(CONFIG_SYS_GBL_DATA_OFFSET)@l
1175
1176	addi	r3,r3,16	/* Pre-relocation malloc area */
1177	stw	r3,GD_MALLOC_BASE(r4)
1178	subi	r3,r3,16
1179#endif
1180	li	r0,0
1181	stw	r0,0(r3)	/* Terminate Back Chain */
1182	stw	r0,+4(r3)	/* NULL return address. */
1183	mr	r1,r3		/* Transfer to SP(r1) */
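
	/*
	 * Sketch of the resulting stack top (illustrative; exact sizes depend
	 * on the board configuration):
	 *
	 *	higher addresses: pre-relocation malloc arena
	 *	                  (SYS_MALLOC_F_LEN, if enabled), its base
	 *	                  stored at GD_MALLOC_BASE(gd)
	 *	at r1:            16-byte frame with back chain = 0 and a NULL
	 *	                  return address
	 *	lower addresses:  free space the stack grows into
	 */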
1184
1185	GET_GOT
1186	/* Needed for -msingle-pic-base */
1187	bl	_GLOBAL_OFFSET_TABLE_@local-4
1188	mflr	r30
1189
1190	/* Pass our potential ePAPR device tree pointer to cpu_init_early_f */
1191	mr	r3, r24
1192
1193	bl	cpu_init_early_f
1194
1195	/* switch back to AS = 0 */
1196	lis	r3,(MSR_CE|MSR_ME|MSR_DE)@h
1197	ori	r3,r3,(MSR_CE|MSR_ME|MSR_DE)@l
1198	mtmsr	r3
1199	isync
1200
1201	bl	cpu_init_f	/* return boot_flag for calling board_init_f */
1202	bl	board_init_f
1203	isync
1204
1205	/* NOTREACHED - board_init_f() does not return */
1206
1207#ifndef MINIMAL_SPL
1208	.globl	_start_of_vectors
1209_start_of_vectors:
1210
1211/* Critical input. */
1212	CRIT_EXCEPTION(0x0100, CriticalInput, CritcalInputException)
1213
1214/* Machine check */
1215	MCK_EXCEPTION(0x200, MachineCheck, MachineCheckException)
1216
1217/* Data Storage exception. */
1218	STD_EXCEPTION(0x0300, DataStorage, UnknownException)
1219
1220/* Instruction Storage exception. */
1221	STD_EXCEPTION(0x0400, InstStorage, UnknownException)
1222
1223/* External Interrupt exception. */
1224	STD_EXCEPTION(0x0500, ExtInterrupt, ExtIntException)
1225
1226/* Alignment exception. */
1227Alignment:
1228	EXCEPTION_PROLOG(SRR0, SRR1)
1229	mfspr	r4,DAR
1230	stw	r4,_DAR(r21)
1231	mfspr	r5,DSISR
1232	stw	r5,_DSISR(r21)
1233	addi	r3,r1,STACK_FRAME_OVERHEAD
1234	EXC_XFER_TEMPLATE(0x600, Alignment, AlignmentException,
1235		MSR_KERNEL, COPY_EE)
1236
1237/* Program check exception */
1238ProgramCheck:
1239	EXCEPTION_PROLOG(SRR0, SRR1)
1240	addi	r3,r1,STACK_FRAME_OVERHEAD
1241	EXC_XFER_TEMPLATE(0x700, ProgramCheck, ProgramCheckException,
1242		MSR_KERNEL, COPY_EE)
1243
1244	/* No FPU on MPC85xx.  This exception is not supposed to happen.
1245	*/
1246	STD_EXCEPTION(0x0800, FPUnavailable, UnknownException)
1247	STD_EXCEPTION(0x0900, SystemCall, UnknownException)
1248	STD_EXCEPTION(0x0a00, Decrementer, timer_interrupt)
1249	STD_EXCEPTION(0x0b00, IntervalTimer, UnknownException)
1250	STD_EXCEPTION(0x0c00, WatchdogTimer, UnknownException)
1251
1252	STD_EXCEPTION(0x0d00, DataTLBError, UnknownException)
1253	STD_EXCEPTION(0x0e00, InstructionTLBError, UnknownException)
1254
1255	CRIT_EXCEPTION(0x0f00, DebugBreakpoint, DebugException )
1256
1257	.globl	_end_of_vectors
1258_end_of_vectors:
1259
1260
1261	. = . + (0x100 - ( . & 0xff ))	/* align for debug */
1262
1263/*
1264 * This code finishes saving the registers to the exception frame
1265 * and jumps to the appropriate handler for the exception.
1266 * Register r21 is pointer into trap frame, r1 has new stack pointer.
1267 * r23 is the address of the handler.
1268 */
1269	.globl	transfer_to_handler
1270transfer_to_handler:
1271	SAVE_GPR(7, r21)
1272	SAVE_4GPRS(8, r21)
1273	SAVE_8GPRS(12, r21)
1274	SAVE_8GPRS(24, r21)
1275
1276	li	r22,0
1277	stw	r22,RESULT(r21)
1278	mtspr	SPRG2,r22		/* r1 is now kernel sp */
1279
1280	mtctr	r23			/* virtual address of handler */
1281	mtmsr	r20
1282	bctrl
1283
1284int_return:
1285	mfmsr	r28		/* Disable interrupts */
1286	li	r4,0
1287	ori	r4,r4,MSR_EE
1288	andc	r28,r28,r4
1289	SYNC			/* Some chip revs need this... */
1290	mtmsr	r28
1291	SYNC
1292	lwz	r2,_CTR(r1)
1293	lwz	r0,_LINK(r1)
1294	mtctr	r2
1295	mtlr	r0
1296	lwz	r2,_XER(r1)
1297	lwz	r0,_CCR(r1)
1298	mtspr	XER,r2
1299	mtcrf	0xFF,r0
1300	REST_10GPRS(3, r1)
1301	REST_10GPRS(13, r1)
1302	REST_8GPRS(23, r1)
1303	REST_GPR(31, r1)
1304	lwz	r2,_NIP(r1)	/* Restore environment */
1305	lwz	r0,_MSR(r1)
1306	mtspr	SRR0,r2
1307	mtspr	SRR1,r0
1308	lwz	r0,GPR0(r1)
1309	lwz	r2,GPR2(r1)
1310	lwz	r1,GPR1(r1)
1311	SYNC
1312	rfi
1313
1314/* Cache functions.
1315*/
1316.globl flush_icache
1317flush_icache:
1318.globl invalidate_icache
1319invalidate_icache:
1320	mfspr	r0,L1CSR1
1321	ori	r0,r0,L1CSR1_ICFI
1322	msync
1323	isync
1324	mtspr	L1CSR1,r0
1325	isync
1326	blr				/* entire I cache */
1327
1328.globl invalidate_dcache
1329invalidate_dcache:
1330	mfspr	r0,L1CSR0
1331	ori	r0,r0,L1CSR0_DCFI
1332	msync
1333	isync
1334	mtspr	L1CSR0,r0
1335	isync
1336	blr
1337
1338	.globl	icache_enable
1339icache_enable:
1340	mflr	r8
1341	bl	invalidate_icache
1342	mtlr	r8
1343	isync
1344	mfspr	r4,L1CSR1
1345	ori	r4,r4,(L1CSR1_CPE | L1CSR1_ICE)@l
1346	oris	r4,r4,(L1CSR1_CPE | L1CSR1_ICE)@h
1347	mtspr	L1CSR1,r4
1348	isync
1349	blr
1350
1351	.globl	icache_disable
1352icache_disable:
1353	mfspr	r0,L1CSR1
1354	lis	r3,0
1355	ori	r3,r3,L1CSR1_ICE
1356	andc	r0,r0,r3
1357	mtspr	L1CSR1,r0
1358	isync
1359	blr
1360
1361	.globl	icache_status
1362icache_status:
1363	mfspr	r3,L1CSR1
1364	andi.	r3,r3,L1CSR1_ICE
1365	blr
1366
1367	.globl	dcache_enable
1368dcache_enable:
1369	mflr	r8
1370	bl	invalidate_dcache
1371	mtlr	r8
1372	isync
1373	mfspr	r0,L1CSR0
1374	ori	r0,r0,(L1CSR0_CPE |  L1CSR0_DCE)@l
1375	oris	r0,r0,(L1CSR0_CPE |  L1CSR0_DCE)@h
1376	msync
1377	isync
1378	mtspr	L1CSR0,r0
1379	isync
1380	blr
1381
1382	.globl	dcache_disable
1383dcache_disable:
1384	mfspr	r3,L1CSR0
1385	lis	r4,0
1386	ori	r4,r4,L1CSR0_DCE
1387	andc	r3,r3,r4
1388	mtspr	L1CSR0,r3
1389	isync
1390	blr
1391
1392	.globl	dcache_status
1393dcache_status:
1394	mfspr	r3,L1CSR0
1395	andi.	r3,r3,L1CSR0_DCE
1396	blr
1397
1398/*------------------------------------------------------------------------------- */
1399/* Function:	 in8 */
1400/* Description:	 Input 8 bits */
1401/*------------------------------------------------------------------------------- */
1402	.globl	in8
1403in8:
1404	lbz	r3,0x0000(r3)
1405	blr
1406
1407/*------------------------------------------------------------------------------- */
1408/* Function:	 out8 */
1409/* Description:	 Output 8 bits */
1410/*------------------------------------------------------------------------------- */
1411	.globl	out8
1412out8:
1413	stb	r4,0x0000(r3)
1414	sync
1415	blr
1416
1417/*------------------------------------------------------------------------------- */
1418/* Function:	 out16 */
1419/* Description:	 Output 16 bits */
1420/*------------------------------------------------------------------------------- */
1421	.globl	out16
1422out16:
1423	sth	r4,0x0000(r3)
1424	sync
1425	blr
1426
1427/*------------------------------------------------------------------------------- */
1428/* Function:	 out16r */
1429/* Description:	 Byte reverse and output 16 bits */
1430/*------------------------------------------------------------------------------- */
1431	.globl	out16r
1432out16r:
1433	sthbrx	r4,r0,r3
1434	sync
1435	blr
1436
1437/*------------------------------------------------------------------------------- */
1438/* Function:	 out32 */
1439/* Description:	 Output 32 bits */
1440/*------------------------------------------------------------------------------- */
1441	.globl	out32
1442out32:
1443	stw	r4,0x0000(r3)
1444	sync
1445	blr
1446
1447/*------------------------------------------------------------------------------- */
1448/* Function:	 out32r */
1449/* Description:	 Byte reverse and output 32 bits */
1450/*------------------------------------------------------------------------------- */
1451	.globl	out32r
1452out32r:
1453	stwbrx	r4,r0,r3
1454	sync
1455	blr
1456
1457/*------------------------------------------------------------------------------- */
1458/* Function:	 in16 */
1459/* Description:	 Input 16 bits */
1460/*------------------------------------------------------------------------------- */
1461	.globl	in16
1462in16:
1463	lhz	r3,0x0000(r3)
1464	blr
1465
1466/*------------------------------------------------------------------------------- */
1467/* Function:	 in16r */
1468/* Description:	 Input 16 bits and byte reverse */
1469/*------------------------------------------------------------------------------- */
1470	.globl	in16r
1471in16r:
1472	lhbrx	r3,r0,r3
1473	blr
1474
1475/*------------------------------------------------------------------------------- */
1476/* Function:	 in32 */
1477/* Description:	 Input 32 bits */
1478/*------------------------------------------------------------------------------- */
1479	.globl	in32
1480in32:
	lwz	r3,0x0000(r3)
1482	blr
1483
1484/*------------------------------------------------------------------------------- */
1485/* Function:	 in32r */
1486/* Description:	 Input 32 bits and byte reverse */
1487/*------------------------------------------------------------------------------- */
1488	.globl	in32r
1489in32r:
1490	lwbrx	r3,r0,r3
1491	blr
1492#endif  /* !MINIMAL_SPL */
1493
1494/*------------------------------------------------------------------------------*/
1495
1496/*
1497 * void write_tlb(mas0, mas1, mas2, mas3, mas7)
1498 */
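
/*
 * For reference, a hypothetical C caller might look like this (the MAS
 * encodings come from the FSL_BOOKE_MAS* macros used throughout this file;
 * the address and size values are made up):
 *
 *	write_tlb(FSL_BOOKE_MAS0(1, 2, 0),
 *		  FSL_BOOKE_MAS1(1, 0, 0, 0, BOOKE_PAGESZ_1M),
 *		  FSL_BOOKE_MAS2(0xc0000000, MAS2_I | MAS2_G),
 *		  FSL_BOOKE_MAS3(0xc0000000, 0, MAS3_SW | MAS3_SR),
 *		  0);
 */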
1499	.globl	write_tlb
1500write_tlb:
1501	mtspr	MAS0,r3
1502	mtspr	MAS1,r4
1503	mtspr	MAS2,r5
1504	mtspr	MAS3,r6
1505#ifdef CONFIG_ENABLE_36BIT_PHYS
1506	mtspr	MAS7,r7
1507#endif
1508	li	r3,0
1509#ifdef CONFIG_SYS_BOOK3E_HV
1510	mtspr	MAS8,r3
1511#endif
1512	isync
1513	tlbwe
1514	msync
1515	isync
1516	blr
1517
/*
 * void relocate_code(addr_sp, gd, addr_moni)
 *
 * This "function" does not return, instead it continues in RAM
 * after relocating the monitor code.
 *
 * Registers used by the copy/flush loops below:
 * r3 = dest
 * r4 = src
 * r5 = length in bytes
 * r6 = cache line size
 */
1529	.globl	relocate_code
1530relocate_code:
1531	mr	r1,r3		/* Set new stack pointer		*/
1532	mr	r9,r4		/* Save copy of Init Data pointer	*/
1533	mr	r10,r5		/* Save copy of Destination Address	*/
1534
1535	GET_GOT
1536#ifndef CONFIG_SPL_SKIP_RELOCATE
1537	mr	r3,r5				/* Destination Address	*/
1538	lis	r4,CONFIG_SYS_MONITOR_BASE@h		/* Source      Address	*/
1539	ori	r4,r4,CONFIG_SYS_MONITOR_BASE@l
1540	lwz	r5,GOT(__init_end)
1541	sub	r5,r5,r4
1542	li	r6,CONFIG_SYS_CACHELINE_SIZE		/* Cache Line Size	*/
1543
1544	/*
1545	 * Fix GOT pointer:
1546	 *
1547	 * New GOT-PTR = (old GOT-PTR - CONFIG_SYS_MONITOR_BASE) + Destination Address
1548	 *
1549	 * Offset:
1550	 */
1551	sub	r15,r10,r4
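
	/*
	 * Numeric illustration with made-up addresses: if
	 * CONFIG_SYS_MONITOR_BASE is 0xeff40000 and the destination (r10) is
	 * 0x0ff40000, then r15 = 0x0ff40000 - 0xeff40000 = 0x20000000
	 * (mod 2^32), and both r12 and r30 are advanced by that amount.
	 */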
1552
1553	/* First our own GOT */
1554	add	r12,r12,r15
	/* then the one used by the C code */
1556	add	r30,r30,r15
1557
1558	/*
1559	 * Now relocate code
1560	 */
1561
1562	cmplw	cr1,r3,r4
1563	addi	r0,r5,3
1564	srwi.	r0,r0,2
1565	beq	cr1,4f		/* In place copy is not necessary	*/
1566	beq	7f		/* Protect against 0 count		*/
1567	mtctr	r0
1568	bge	cr1,2f
1569
1570	la	r8,-4(r4)
1571	la	r7,-4(r3)
15721:	lwzu	r0,4(r8)
1573	stwu	r0,4(r7)
1574	bdnz	1b
1575	b	4f
1576
15772:	slwi	r0,r0,2
1578	add	r8,r4,r0
1579	add	r7,r3,r0
15803:	lwzu	r0,-4(r8)
1581	stwu	r0,-4(r7)
1582	bdnz	3b
1583
1584/*
1585 * Now flush the cache: note that we must start from a cache aligned
1586 * address. Otherwise we might miss one cache line.
1587 */
15884:	cmpwi	r6,0
1589	add	r5,r3,r5
1590	beq	7f		/* Always flush prefetch queue in any case */
1591	subi	r0,r6,1
1592	andc	r3,r3,r0
1593	mr	r4,r3
15945:	dcbst	0,r4
1595	add	r4,r4,r6
1596	cmplw	r4,r5
1597	blt	5b
1598	sync			/* Wait for all dcbst to complete on bus */
1599	mr	r4,r3
16006:	icbi	0,r4
1601	add	r4,r4,r6
1602	cmplw	r4,r5
1603	blt	6b
16047:	sync			/* Wait for all icbi to complete on bus */
1605	isync
1606
1607/*
1608 * We are done. Do not return, instead branch to second part of board
1609 * initialization, now running from RAM.
1610 */
1611
1612	addi	r0,r10,in_ram - _start
1613
	/*
	 * As IVPR is going to point to a RAM address, make sure
	 * IVOR15 holds a valid opcode to support the debugger.
	 */
1618	mtspr	IVOR15,r0
1619
1620	/*
1621	 * Re-point the IVPR at RAM
1622	 */
1623	mtspr	IVPR,r10
1624
1625	mtlr	r0
1626	blr				/* NEVER RETURNS! */
1627#endif
1628	.globl	in_ram
1629in_ram:
1630
	/*
	 * Relocation function: r12 points to got2+0x8000.
	 *
	 * Adjust got2 pointers; no need to check for 0, this code
	 * already puts a few entries in the table.
	 */
1637	li	r0,__got2_entries@sectoff@l
1638	la	r3,GOT(_GOT2_TABLE_)
1639	lwz	r11,GOT(_GOT2_TABLE_)
1640	mtctr	r0
1641	sub	r11,r3,r11
1642	addi	r3,r3,-4
16431:	lwzu	r0,4(r3)
1644	cmpwi	r0,0
1645	beq-	2f
1646	add	r0,r0,r11
1647	stw	r0,0(r3)
16482:	bdnz	1b
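
	/*
	 * Equivalent pseudo-C for the loop above (illustrative only):
	 *
	 *	delta = &_GOT2_TABLE_ (run time) - _GOT2_TABLE_ (link time);
	 *	for (p = got2; p < got2 + __got2_entries; p++)
	 *		if (*p != 0)
	 *			*p += delta;
	 */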
1649
1650	/*
1651	 * Now adjust the fixups and the pointers to the fixups
1652	 * in case we need to move ourselves again.
1653	 */
1654	li	r0,__fixup_entries@sectoff@l
1655	lwz	r3,GOT(_FIXUP_TABLE_)
1656	cmpwi	r0,0
1657	mtctr	r0
1658	addi	r3,r3,-4
1659	beq	4f
16603:	lwzu	r4,4(r3)
1661	lwzux	r0,r4,r11
1662	cmpwi	r0,0
1663	add	r0,r0,r11
1664	stw	r4,0(r3)
1665	beq-	5f
1666	stw	r0,0(r4)
16675:	bdnz	3b
16684:
1669clear_bss:
1670	/*
1671	 * Now clear BSS segment
1672	 */
1673	lwz	r3,GOT(__bss_start)
1674	lwz	r4,GOT(__bss_end)
1675
1676	cmplw	0,r3,r4
1677	beq	6f
1678
1679	li	r0,0
16805:
1681	stw	r0,0(r3)
1682	addi	r3,r3,4
1683	cmplw	0,r3,r4
1684	blt	5b
16856:
1686
1687	mr	r3,r9		/* Init Data pointer		*/
1688	mr	r4,r10		/* Destination Address		*/
1689	bl	board_init_r
1690
1691#ifndef MINIMAL_SPL
	/*
	 * Point the exception vectors at their relocated addresses
	 *
	 * r3: new vector base address (written to IVPR); the individual
	 * IVORs are reloaded from the GOT below.
	 */
1698	.globl	trap_init
1699trap_init:
1700	mflr	r11
1701	bl	_GLOBAL_OFFSET_TABLE_-4
1702	mflr	r12
1703
1704	/* Update IVORs as per relocation */
1705	mtspr	IVPR,r3
1706
1707	lwz	r4,CriticalInput@got(r12)
1708	mtspr	IVOR0,r4	/* 0: Critical input */
1709	lwz	r4,MachineCheck@got(r12)
1710	mtspr	IVOR1,r4	/* 1: Machine check */
1711	lwz	r4,DataStorage@got(r12)
1712	mtspr	IVOR2,r4	/* 2: Data storage */
1713	lwz	r4,InstStorage@got(r12)
1714	mtspr	IVOR3,r4	/* 3: Instruction storage */
1715	lwz	r4,ExtInterrupt@got(r12)
1716	mtspr	IVOR4,r4	/* 4: External interrupt */
1717	lwz	r4,Alignment@got(r12)
1718	mtspr	IVOR5,r4	/* 5: Alignment */
1719	lwz	r4,ProgramCheck@got(r12)
1720	mtspr	IVOR6,r4	/* 6: Program check */
1721	lwz	r4,FPUnavailable@got(r12)
1722	mtspr	IVOR7,r4	/* 7: floating point unavailable */
1723	lwz	r4,SystemCall@got(r12)
1724	mtspr	IVOR8,r4	/* 8: System call */
1725	/* 9: Auxiliary processor unavailable(unsupported) */
1726	lwz	r4,Decrementer@got(r12)
1727	mtspr	IVOR10,r4	/* 10: Decrementer */
1728	lwz	r4,IntervalTimer@got(r12)
1729	mtspr	IVOR11,r4	/* 11: Interval timer */
1730	lwz	r4,WatchdogTimer@got(r12)
1731	mtspr	IVOR12,r4	/* 12: Watchdog timer */
1732	lwz	r4,DataTLBError@got(r12)
1733	mtspr	IVOR13,r4	/* 13: Data TLB error */
1734	lwz	r4,InstructionTLBError@got(r12)
1735	mtspr	IVOR14,r4	/* 14: Instruction TLB error */
1736	lwz	r4,DebugBreakpoint@got(r12)
1737	mtspr	IVOR15,r4	/* 15: Debug */
1738
1739	mtlr	r11
1740	blr
1741
1742.globl unlock_ram_in_cache
1743unlock_ram_in_cache:
1744	/* invalidate the INIT_RAM section */
1745	lis	r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@h
1746	ori	r3,r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@l
1747	mfspr	r4,L1CFG0
1748	andi.	r4,r4,0x1ff
1749	slwi	r4,r4,(10 - 1 - L1_CACHE_SHIFT)
1750	mtctr	r4
17511:	dcbi	r0,r3
#ifdef CONFIG_E6500	/* lock/unlock L2 cache along with L1 */
1753	dcblc	2, r0, r3
1754	dcblc	0, r0, r3
1755#else
1756	dcblc	r0,r3
1757#endif
1758	addi	r3,r3,CONFIG_SYS_CACHELINE_SIZE
1759	bdnz	1b
1760	sync
1761
1762	/* Invalidate the TLB entries for the cache */
1763	lis	r3,CONFIG_SYS_INIT_RAM_ADDR@h
1764	ori	r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l
1765	tlbivax	0,r3
1766	addi	r3,r3,0x1000
1767	tlbivax	0,r3
1768	addi	r3,r3,0x1000
1769	tlbivax	0,r3
1770	addi	r3,r3,0x1000
1771	tlbivax	0,r3
1772	isync
1773	blr
1774
1775.globl flush_dcache
1776flush_dcache:
1777	mfspr	r3,SPRN_L1CFG0
1778
1779	rlwinm	r5,r3,9,3	/* Extract cache block size */
1780	twlgti	r5,1		/* Only 32 and 64 byte cache blocks
1781				 * are currently defined.
1782				 */
1783	li	r4,32
1784	subfic	r6,r5,2		/* r6 = log2(1KiB / cache block size) -
1785				 *      log2(number of ways)
1786				 */
1787	slw	r5,r4,r5	/* r5 = cache block size */
1788
1789	rlwinm	r7,r3,0,0xff	/* Extract number of KiB in the cache */
1790	mulli	r7,r7,13	/* An 8-way cache will require 13
1791				 * loads per set.
1792				 */
1793	slw	r7,r7,r6
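
	/*
	 * Worked example (illustrative only): for a 32KB, 8-way cache with
	 * 64-byte blocks, the block-size field is 1, so r5 = 32 << 1 = 64,
	 * r6 = 2 - 1 = 1, and r7 = (32 * 13) << 1 = 832 loads, i.e. 13 loads
	 * for each of the 64 sets.
	 */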
1794
1795	/* save off HID0 and set DCFA */
1796	mfspr	r8,SPRN_HID0
1797	ori	r9,r8,HID0_DCFA@l
1798	mtspr	SPRN_HID0,r9
1799	isync
1800
1801	lis	r4,0
1802	mtctr	r7
1803
18041:	lwz	r3,0(r4)	/* Load... */
1805	add	r4,r4,r5
1806	bdnz	1b
1807
1808	msync
1809	lis	r4,0
1810	mtctr	r7
1811
18121:	dcbf	0,r4		/* ...and flush. */
1813	add	r4,r4,r5
1814	bdnz	1b
1815
1816	/* restore HID0 */
1817	mtspr	SPRN_HID0,r8
1818	isync
1819
1820	blr
1821#endif /* !MINIMAL_SPL */
1822