/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the low-level support and setup for the
 *  PowerPC platform, including trap and interrupt dispatch.
 *  (The PPC 8xx embedded CPUs use head_8xx.S instead.)
 */

#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/bug.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>
#include <asm/interrupt.h>

#include "head_32.h"

#define LOAD_BAT(n, reg, RA, RB)	\
	/* see the comment for clear_bats() -- Cort */ \
	li	RA,0;			\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	lwz	RA,(n*16)+0(reg);	\
	lwz	RB,(n*16)+4(reg);	\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_IBAT##n##L,RB;	\
	lwz	RA,(n*16)+8(reg);	\
	lwz	RB,(n*16)+12(reg);	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##L,RB
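/*
 * Each entry in the BATS array read above is four words:
 * IBATnU at offset n*16+0, IBATnL at +4, DBATnU at +8, DBATnL at +12.
 * Both upper BATs are zapped first so that no BAT pair is ever valid
 * while only half of it has been updated.
 */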

	__HEAD
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"head_book3s_32.S",N_SO,0,0,0f
0:
_ENTRY(_stext);

/*
 * _start is defined this way because the XCOFF loader in the OpenFirmware
 * on the powermac expects the entry point to be a procedure descriptor.
 */
_ENTRY(_start);
	/*
	 * These are here for legacy reasons, the kernel used to
	 * need to look like a coff function entry for the pmac
	 * but we're always started by some kind of bootloader now.
	 *  -- Cort
	 */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop

/* PMAC
 * Enter here with the kernel text, data and bss loaded starting at
 * 0, running with virtual == physical mapping.
 * r5 points to the prom entry point (the client interface handler
 * address).  Address translation is turned on, with the prom
 * managing the hash table.  Interrupts are disabled.  The stack
 * pointer (r1) points to just below the end of the half-meg region
 * from 0x380000 - 0x400000, which is mapped in already.
 *
 * If we are booted from MacOS via BootX, we enter with the kernel
 * image loaded somewhere, and the following values in registers:
 *  r3: 'BooX' (0x426f6f58)
 *  r4: virtual address of boot_infos_t
 *  r5: 0
 *
 * PREP
 * This is jumped to on prep systems right after the kernel is relocated
 * to its proper place in memory by the boot loader.  The expected layout
 * of the regs is:
 *   r3: ptr to residual data
 *   r4: initrd_start or if no initrd then 0
 *   r5: initrd_end - unused if r4 is 0
 *   r6: Start of command line string
 *   r7: End of command line string
 *
 * This just gets a minimal mmu environment setup so we can call
 * start_here() to do the real work.
 * -- Cort
 */

	.globl	__start
__start:
/*
 * We have to do any OF calls before we map ourselves to KERNELBASE,
 * because OF may have I/O devices mapped into that area
 * (particularly on CHRP).
 */
	cmpwi	0,r5,0
	beq	1f

#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
	/* find out where we are now */
	bcl	20,31,$+4
0:	mflr	r8			/* r8 = runtime addr here */
	addis	r8,r8,(_stext - 0b)@ha
	addi	r8,r8,(_stext - 0b)@l	/* current runtime base addr */
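	/*
	 * The "bcl 20,31,$+4" above is the usual position-independent
	 * idiom: a branch-and-link to the next instruction, so LR holds
	 * our runtime address at label 0, from which the runtime address
	 * of _stext is reconstructed wherever we were loaded.  (This
	 * form of bcl is special-cased by the core so that it does not
	 * disturb the link stack predictor.)
	 */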
	bl	prom_init
#endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */

	/* We never return. We also hit that trap if trying to boot
	 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
	trap

/*
 * Check for BootX signature when supporting PowerMac and branch to
 * appropriate trampoline if it's present
 */
#ifdef CONFIG_PPC_PMAC
1:	lis	r31,0x426f
	ori	r31,r31,0x6f58
	cmpw	0,r3,r31
	bne	1f
	bl	bootx_init
	trap
#endif /* CONFIG_PPC_PMAC */

1:	mr	r31,r3			/* save device tree ptr */
	li	r24,0			/* cpu # */

/*
 * early_init() does the early machine identification and does
 * the necessary low-level setup and clears the BSS
 *  -- Cort <cort@fsmlabs.com>
 */
	bl	early_init

/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
 * the physical address we are running at, returned by early_init()
 */
	bl	mmu_off
__after_mmu_off:
	bl	clear_bats
	bl	flush_tlbs

	bl	initial_bats
	bl	load_segment_registers
	bl	reloc_offset
	bl	early_hash_table
#if defined(CONFIG_BOOTX_TEXT)
	bl	setup_disp_bat
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
	bl	setup_cpm_bat
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
	bl	setup_usbgecko_bat
#endif

/*
 * Call setup_cpu for CPU 0 and initialize 6xx Idle
 */
	bl	reloc_offset
	li	r24,0			/* cpu# */
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
	bl	reloc_offset
	bl	init_idle_6xx


/*
 * We need to run with _start at physical address 0.
 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
 * the exception vectors at 0 (and therefore this copy
 * overwrites OF's exception vectors with our own).
 * The MMU is off at this point.
 */
	bl	reloc_offset
	mr	r26,r3
	addis	r4,r3,KERNELBASE@h	/* current address of _start */
	lis	r5,PHYSICAL_START@h
	cmplw	0,r4,r5			/* already running at PHYSICAL_START? */
	bne	relocate_kernel
/*
 * we now have the 1st 16M of ram mapped with the bats.
 * prep needs the mmu to be turned on here, but pmac already has it on.
 * this shouldn't bother the pmac since it just gets turned on again
 * as we jump to our code at KERNELBASE. -- Cort
 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
 * off, and in other cases, we now turn it off before changing BATs above.
 */
turn_on_mmu:
	mfmsr	r0
	ori	r0,r0,MSR_DR|MSR_IR|MSR_RI
	mtspr	SPRN_SRR1,r0
	lis	r0,start_here@h
	ori	r0,r0,start_here@l
	mtspr	SPRN_SRR0,r0
	rfi				/* enables MMU */
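/*
 * The SRR0/SRR1 + rfi sequence above is the standard way to flip
 * translation state on these CPUs: rfi reloads MSR from SRR1 and
 * jumps to SRR0 in a single operation, so the MMU comes on atomically
 * with the transfer to start_here at its virtual (KERNELBASE) address.
 */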

/*
 * We need __secondary_hold as a place to hold the other cpus on
 * an SMP machine, even when we are running a UP kernel.
 */
	. = 0xc0			/* for prep bootloader */
	li	r3,1			/* MTX only has 1 cpu */
	.globl	__secondary_hold
__secondary_hold:
	/* tell the master we're here */
	stw	r3,__secondary_hold_acknowledge@l(0)
#ifdef CONFIG_SMP
100:	lwz	r4,0(0)
	/* wait until we're told to start */
	cmpw	0,r4,r3
	bne	100b
	/* our cpu # was at addr 0 - go */
	mr	r24,r3			/* cpu # */
	b	__secondary_start
#else
	b	.
#endif /* CONFIG_SMP */

	.globl	__secondary_hold_spinloop
__secondary_hold_spinloop:
	.long	0
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.long	-1
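/*
 * Hold-loop protocol: each waiting CPU stores a non-zero token to
 * __secondary_hold_acknowledge so the boot CPU knows it has arrived,
 * then spins reading word 0 of memory until the boot CPU writes that
 * CPU's number there, at which point it falls through to
 * __secondary_start.
 */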

/* System reset */
/* core99 pmac starts the secondary here by changing the vector, and
   putting it back to what it was (unknown_async_exception) when done.  */
	EXCEPTION(INTERRUPT_SYSTEM_RESET, Reset, unknown_async_exception)

/* Machine check */
/*
 * On CHRP, this is complicated by the fact that we could get a
 * machine check inside RTAS, and we have no guarantee that certain
 * critical registers will have the values we expect.  The set of
 * registers that might have bad values includes all the GPRs
 * and all the BATs.  We indicate that we are in RTAS by putting
 * a non-zero value, the address of the exception frame to use,
 * in thread.rtas_sp.  The machine check handler checks thread.rtas_sp
 * and uses its value if it is non-zero.
 * (Other exception handlers assume that r1 is a valid kernel stack
 * pointer when we take an exception from supervisor mode.)
 *	-- paulus.
 */
	START_EXCEPTION(INTERRUPT_MACHINE_CHECK, MachineCheck)
	EXCEPTION_PROLOG_0
#ifdef CONFIG_PPC_CHRP
	mtspr	SPRN_SPRG_SCRATCH2,r1
	mfspr	r1, SPRN_SPRG_THREAD
	lwz	r1, RTAS_SP(r1)
	cmpwi	cr1, r1, 0
	bne	cr1, 7f
	mfspr	r1, SPRN_SPRG_SCRATCH2
#endif /* CONFIG_PPC_CHRP */
	EXCEPTION_PROLOG_1
7:	EXCEPTION_PROLOG_2 0x200 MachineCheck
#ifdef CONFIG_PPC_CHRP
	beq	cr1, 1f
	twi	31, 0, 0
#endif
1:	prepare_transfer_to_handler
	bl	machine_check_exception
	b	interrupt_return

/* Data access exception. */
	START_EXCEPTION(INTERRUPT_DATA_STORAGE, DataAccess)
#ifdef CONFIG_PPC_BOOK3S_604
BEGIN_MMU_FTR_SECTION
	mtspr	SPRN_SPRG_SCRATCH2,r10
	mfspr	r10, SPRN_SPRG_THREAD
	stw	r11, THR11(r10)
	mfspr	r10, SPRN_DSISR
	mfcr	r11
	andis.	r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
	mfspr	r10, SPRN_SPRG_THREAD
	beq	hash_page_dsi
.Lhash_page_dsi_cont:
	mtcr	r11
	lwz	r11, THR11(r10)
	mfspr	r10, SPRN_SPRG_SCRATCH2
MMU_FTR_SECTION_ELSE
	b	1f
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
#endif
1:	EXCEPTION_PROLOG_0 handle_dar_dsisr=1
	EXCEPTION_PROLOG_1
	EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataAccess handle_dar_dsisr=1
	prepare_transfer_to_handler
	lwz	r5, _DSISR(r11)
	andis.	r0, r5, DSISR_DABRMATCH@h
	bne-	1f
	bl	do_page_fault
	b	interrupt_return
1:	bl	do_break
	REST_NVGPRS(r1)
	b	interrupt_return
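/*
 * The DSISR test above separates the two causes that land here:
 * a DABR (data breakpoint) match goes to do_break, anything else is
 * treated as an ordinary page fault.
 */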


/* Instruction access exception. */
	START_EXCEPTION(INTERRUPT_INST_STORAGE, InstructionAccess)
	mtspr	SPRN_SPRG_SCRATCH0,r10
	mtspr	SPRN_SPRG_SCRATCH1,r11
	mfspr	r10, SPRN_SPRG_THREAD
	mfspr	r11, SPRN_SRR0
	stw	r11, SRR0(r10)
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
	stw	r11, SRR1(r10)
	mfcr	r10
#ifdef CONFIG_PPC_BOOK3S_604
BEGIN_MMU_FTR_SECTION
	andis.	r11, r11, SRR1_ISI_NOPT@h	/* no pte found? */
	bne	hash_page_isi
.Lhash_page_isi_cont:
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#endif
	andi.	r11, r11, MSR_PR

	EXCEPTION_PROLOG_1
	EXCEPTION_PROLOG_2 INTERRUPT_INST_STORAGE InstructionAccess
	andis.	r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
	stw	r5, _DSISR(r11)
	stw	r12, _DAR(r11)
	prepare_transfer_to_handler
	bl	do_page_fault
	b	interrupt_return

/* External interrupt */
	EXCEPTION(INTERRUPT_EXTERNAL, HardwareInterrupt, do_IRQ)

/* Alignment exception */
	START_EXCEPTION(INTERRUPT_ALIGNMENT, Alignment)
	EXCEPTION_PROLOG INTERRUPT_ALIGNMENT Alignment handle_dar_dsisr=1
	prepare_transfer_to_handler
	bl	alignment_exception
	REST_NVGPRS(r1)
	b	interrupt_return

/* Program check exception */
	START_EXCEPTION(INTERRUPT_PROGRAM, ProgramCheck)
	EXCEPTION_PROLOG INTERRUPT_PROGRAM ProgramCheck
	prepare_transfer_to_handler
	bl	program_check_exception
	REST_NVGPRS(r1)
	b	interrupt_return

/* Floating-point unavailable */
	START_EXCEPTION(0x800, FPUnavailable)
#ifdef CONFIG_PPC_FPU
BEGIN_FTR_SECTION
/*
 * Certain Freescale cores don't have a FPU and treat fp instructions
 * as a FP Unavailable exception.  Redirect to illegal/emulation handling.
 */
	b	ProgramCheck
END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
	EXCEPTION_PROLOG INTERRUPT_FP_UNAVAIL FPUnavailable
	beq	1f
	bl	load_up_fpu		/* if from user, just load it up */
	b	fast_exception_return
1:	prepare_transfer_to_handler
	bl	kernel_fp_unavailable_exception
	b	interrupt_return
#else
	b	ProgramCheck
#endif

/* Decrementer */
	EXCEPTION(INTERRUPT_DECREMENTER, Decrementer, timer_interrupt)

	EXCEPTION(0xa00, Trap_0a, unknown_exception)
	EXCEPTION(0xb00, Trap_0b, unknown_exception)

/* System call */
	START_EXCEPTION(INTERRUPT_SYSCALL, SystemCall)
	SYSCALL_ENTRY	INTERRUPT_SYSCALL

	EXCEPTION(INTERRUPT_TRACE, SingleStep, single_step_exception)
	EXCEPTION(0xe00, Trap_0e, unknown_exception)

/*
 * The Altivec unavailable trap is at 0x0f20.  Foo.
 * We effectively remap it to 0x3000.
 * We include an altivec unavailable exception vector even if
 * not configured for Altivec, so that you can't panic a
 * non-altivec kernel running on a machine with altivec just
 * by executing an altivec instruction.
 */
	START_EXCEPTION(INTERRUPT_PERFMON, PerformanceMonitorTrap)
	b	PerformanceMonitor

	START_EXCEPTION(INTERRUPT_ALTIVEC_UNAVAIL, AltiVecUnavailableTrap)
	b	AltiVecUnavailable

	__HEAD
/*
 * Handle TLB miss for instruction on 603/603e.
 * Note: we get an alternate set of r0 - r3 to use automatically.
 */
	. = INTERRUPT_INST_TLB_MISS_603
InstructionTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_IMISS
#ifdef CONFIG_MODULES
	lis	r1, TASK_SIZE@h		/* check if kernel address */
	cmplw	0,r1,r3
#endif
	mfspr	r2, SPRN_SDR1
	li	r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER
	rlwinm	r2, r2, 28, 0xfffff000
#ifdef CONFIG_MODULES
	bgt-	112f
	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	li	r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
#endif
112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	InstructionAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	InstructionAddressInvalid /* return if access not permitted */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwimi	r0,r0,32-2,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1, r1, 0xe06		/* clear out reserved bits */
	andc	r1, r0, r1		/* PP = user? 1 : 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	tlbli	r3
	mfspr	r3,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi
InstructionAddressInvalid:
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */

	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1	/* (shouldn't be needed) */
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	or	r2,r2,r1
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_IMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	rlwimi	r2,r2,1,30,30	/* change 1 -> 3 */
	xor	r1,r1,r2
	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	InstructionAccess

/*
 * Handle TLB miss for DATA Load operation on 603/603e
 */
	. = INTERRUPT_DATA_LOAD_TLB_MISS_603
DataLoadTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_DMISS
	lis	r1, TASK_SIZE@h		/* check if kernel address */
	cmplw	0,r1,r3
	mfspr	r2, SPRN_SDR1
	li	r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
	rlwinm	r2, r2, 28, 0xfffff000
	bgt-	112f
	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	li	r1, _PAGE_PRESENT | _PAGE_ACCESSED
	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwinm	r1,r0,32-9,30,30	/* _PAGE_RW -> PP msb */
	rlwimi	r0,r0,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r0,r0,32-1,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1,r1,0xe04		/* clear out reserved bits */
	andc	r1,r0,r1		/* PP = user? rw? 1: 3: 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	mfspr	r2,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r2
BEGIN_MMU_FTR_SECTION
	li	r0,1
	mfspr	r1,SPRN_SPRG_603_LRU
	rlwinm	r2,r3,20,27,31		/* Get Address bits 15:19 */
	slw	r0,r0,r2
	xor	r1,r0,r1
	srw	r0,r1,r2
	mtspr	SPRN_SPRG_603_LRU,r1
	mfspr	r2,SPRN_SRR1
	rlwimi	r2,r0,31-14,14,14
	mtspr	SPRN_SRR1,r2
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
	tlbld	r3
	rfi
DataAddressInvalid:
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */
	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_DMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	beq	20f		/* Jump if big endian */
	xori	r1,r1,3
20:	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	DataAccess

/*
 * Handle TLB miss for DATA Store on 603/603e
 */
	. = INTERRUPT_DATA_STORE_TLB_MISS_603
DataStoreTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_DMISS
	lis	r1, TASK_SIZE@h		/* check if kernel address */
	cmplw	0,r1,r3
	mfspr	r2, SPRN_SDR1
	li	r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
	rlwinm	r2, r2, 28, 0xfffff000
	bgt-	112f
	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	li	r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwimi	r0,r0,32-2,31,31	/* _PAGE_USER -> PP lsb */
	li	r1,0xe06		/* clear out reserved bits & PP msb */
	andc	r1,r0,r1		/* PP = user? 1: 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	mfspr	r2,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r2
BEGIN_MMU_FTR_SECTION
	li	r0,1
	mfspr	r1,SPRN_SPRG_603_LRU
	rlwinm	r2,r3,20,27,31		/* Get Address bits 15:19 */
	slw	r0,r0,r2
	xor	r1,r0,r1
	srw	r0,r1,r2
	mtspr	SPRN_SPRG_603_LRU,r1
	mfspr	r2,SPRN_SRR1
	rlwimi	r2,r0,31-14,14,14
	mtspr	SPRN_SRR1,r2
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
	tlbld	r3
	rfi

#ifndef CONFIG_ALTIVEC
#define altivec_assist_exception	unknown_exception
#endif

#ifndef CONFIG_TAU_INT
#define TAUException	unknown_async_exception
#endif

	EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception)
	EXCEPTION(0x1400, SMI, SMIException)
	EXCEPTION(0x1500, Trap_15, unknown_exception)
	EXCEPTION(0x1600, Trap_16, altivec_assist_exception)
	EXCEPTION(0x1700, Trap_17, TAUException)
	EXCEPTION(0x1800, Trap_18, unknown_exception)
	EXCEPTION(0x1900, Trap_19, unknown_exception)
	EXCEPTION(0x1a00, Trap_1a, unknown_exception)
	EXCEPTION(0x1b00, Trap_1b, unknown_exception)
	EXCEPTION(0x1c00, Trap_1c, unknown_exception)
	EXCEPTION(0x1d00, Trap_1d, unknown_exception)
	EXCEPTION(0x1e00, Trap_1e, unknown_exception)
	EXCEPTION(0x1f00, Trap_1f, unknown_exception)
	EXCEPTION(0x2000, RunMode, RunModeException)
	EXCEPTION(0x2100, Trap_21, unknown_exception)
	EXCEPTION(0x2200, Trap_22, unknown_exception)
	EXCEPTION(0x2300, Trap_23, unknown_exception)
	EXCEPTION(0x2400, Trap_24, unknown_exception)
	EXCEPTION(0x2500, Trap_25, unknown_exception)
	EXCEPTION(0x2600, Trap_26, unknown_exception)
	EXCEPTION(0x2700, Trap_27, unknown_exception)
	EXCEPTION(0x2800, Trap_28, unknown_exception)
	EXCEPTION(0x2900, Trap_29, unknown_exception)
	EXCEPTION(0x2a00, Trap_2a, unknown_exception)
	EXCEPTION(0x2b00, Trap_2b, unknown_exception)
	EXCEPTION(0x2c00, Trap_2c, unknown_exception)
	EXCEPTION(0x2d00, Trap_2d, unknown_exception)
	EXCEPTION(0x2e00, Trap_2e, unknown_exception)
	EXCEPTION(0x2f00, Trap_2f, unknown_exception)

	__HEAD
	. = 0x3000

#ifdef CONFIG_PPC_BOOK3S_604
.macro save_regs_thread		thread
	stw	r0, THR0(\thread)
	stw	r3, THR3(\thread)
	stw	r4, THR4(\thread)
	stw	r5, THR5(\thread)
	stw	r6, THR6(\thread)
	stw	r8, THR8(\thread)
	stw	r9, THR9(\thread)
	mflr	r0
	stw	r0, THLR(\thread)
	mfctr	r0
	stw	r0, THCTR(\thread)
.endm

.macro restore_regs_thread	thread
	lwz	r0, THLR(\thread)
	mtlr	r0
	lwz	r0, THCTR(\thread)
	mtctr	r0
	lwz	r0, THR0(\thread)
	lwz	r3, THR3(\thread)
	lwz	r4, THR4(\thread)
	lwz	r5, THR5(\thread)
	lwz	r6, THR6(\thread)
	lwz	r8, THR8(\thread)
	lwz	r9, THR9(\thread)
.endm
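
/*
 * The save_regs_thread/restore_regs_thread macros above spill the
 * volatile GPRs plus LR/CTR into thread_struct (the THR* slots)
 * rather than onto a stack frame: the hash_page fast paths below run
 * before any exception frame has been built, so thread_struct is the
 * only storage safely usable at this point.
 */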

hash_page_dsi:
	save_regs_thread	r10
	mfdsisr	r3
	mfdar	r4
	mfsrr0	r5
	mfsrr1	r9
	rlwinm	r3, r3, 32 - 15, _PAGE_RW	/* DSISR_STORE -> _PAGE_RW */
	bl	hash_page
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	b	.Lhash_page_dsi_cont

hash_page_isi:
	mr	r11, r10
	mfspr	r10, SPRN_SPRG_THREAD
	save_regs_thread	r10
	li	r3, 0
	lwz	r4, SRR0(r10)
	lwz	r9, SRR1(r10)
	bl	hash_page
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	mr	r10, r11
	b	.Lhash_page_isi_cont

	.globl fast_hash_page_return
fast_hash_page_return:
	andis.	r10, r9, SRR1_ISI_NOPT@h	/* Set on ISI, cleared on DSI */
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	bne	1f

	/* DSI */
	mtcr	r11
	lwz	r11, THR11(r10)
	mfspr	r10, SPRN_SPRG_SCRATCH2
	rfi

1:	/* ISI */
	mtcr	r11
	mfspr	r11, SPRN_SPRG_SCRATCH1
	mfspr	r10, SPRN_SPRG_SCRATCH0
	rfi
#endif /* CONFIG_PPC_BOOK3S_604 */

#ifdef CONFIG_VMAP_STACK
	vmap_stack_overflow_exception
#endif

	__HEAD
AltiVecUnavailable:
	EXCEPTION_PROLOG 0xf20 AltiVecUnavailable
#ifdef CONFIG_ALTIVEC
	beq	1f
	bl	load_up_altivec		/* if from user, just load it up */
	b	fast_exception_return
#endif /* CONFIG_ALTIVEC */
1:	prepare_transfer_to_handler
	bl	altivec_unavailable_exception
	b	interrupt_return

	__HEAD
PerformanceMonitor:
	EXCEPTION_PROLOG 0xf00 PerformanceMonitor
	prepare_transfer_to_handler
	bl	performance_monitor_exception
	b	interrupt_return


	__HEAD
/*
 * This code is jumped to from the startup code to copy
 * the kernel image to physical address PHYSICAL_START.
 */
relocate_kernel:
	addis	r9,r26,klimit@ha	/* fetch klimit */
	lwz	r25,klimit@l(r9)
	addis	r25,r25,-KERNELBASE@h
	lis	r3,PHYSICAL_START@h	/* Destination base address */
	li	r6,0			/* Destination offset */
	li	r5,0x4000		/* # bytes of memory to copy */
	bl	copy_and_flush		/* copy the first 0x4000 bytes */
	addi	r0,r3,4f@l		/* jump to the address of 4f */
	mtctr	r0			/* in copy and do the rest. */
	bctr				/* jump to the copy */
4:	mr	r5,r25
	bl	copy_and_flush		/* copy the rest */
	b	turn_on_mmu

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 */
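/*
 * Roughly, as a C sketch (illustrative only):
 *
 *	while (r6 < r5) {
 *		copy L1_CACHE_BYTES from r4+r6 to r3+r6, a word at a time;
 *		dcbst(r3 + r6);		push the new line out to memory
 *		sync();
 *		icbi(r3 + r6);		toss any stale icache copy
 *	}
 *
 * so the relocated kernel text is coherent in the icache before we
 * branch into it.
 */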
_ENTRY(copy_and_flush)
	addi	r5,r5,-4
	addi	r6,r6,-4
4:	li	r0,L1_CACHE_BYTES/4
	mtctr	r0
3:	addi	r6,r6,4			/* copy a cache line */
	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory */
	sync
	icbi	r6,r3			/* flush the icache line */
	cmplw	0,r6,r5
	blt	4b
	sync				/* additional sync needed on g4 */
	isync
	addi	r5,r5,4
	addi	r6,r6,4
	blr

#ifdef CONFIG_SMP
	.globl __secondary_start_mpc86xx
__secondary_start_mpc86xx:
	mfspr	r3, SPRN_PIR
	stw	r3, __secondary_hold_acknowledge@l(0)
	mr	r24, r3			/* cpu # */
	b	__secondary_start

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	li	r24,0
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:
	/* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
	   set to map the 0xf0000000 - 0xffffffff region */
	mfmsr	r0
	rlwinm	r0,r0,0,28,26		/* clear DR (0x10) */
	mtmsr	r0
	isync

	.globl	__secondary_start
__secondary_start:
	/* Copy some CPU settings from CPU 0 */
	bl	__restore_cpu_setup

	lis	r3,-KERNELBASE@h
	mr	r4,r24
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
	lis	r3,-KERNELBASE@h
	bl	init_idle_6xx

	/* get current's stack and current */
	lis	r2,secondary_current@ha
	tophys(r2,r2)
	lwz	r2,secondary_current@l(r2)
	tophys(r1,r2)
	lwz	r1,TASK_STACK(r1)

	/* stack */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	tophys(r3,r1)
	stw	r0,0(r3)

	/* load up the MMU */
	bl	load_segment_registers
	bl	load_up_mmu

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* phys address of our thread_struct */
	mtspr	SPRN_SPRG_THREAD,r4
BEGIN_MMU_FTR_SECTION
	lis	r4, (swapper_pg_dir - PAGE_OFFSET)@h
	ori	r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
	rlwinm	r4, r4, 4, 0xffff01ff
	mtspr	SPRN_SDR1, r4
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)

	/* enable MMU and jump to start_secondary */
	li	r4,MSR_KERNEL
	lis	r3,start_secondary@h
	ori	r3,r3,start_secondary@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfi
#endif /* CONFIG_SMP */

#ifdef CONFIG_KVM_BOOK3S_HANDLER
#include "../kvm/book3s_rmhandlers.S"
#endif

/*
 * Load stuff into the MMU.  Intended to be called with
 * IR=0 and DR=0.
 */
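/*
 * A note on SDR1: it holds the hash table's physical base (HTABORG)
 * in its upper bits and a size mask (HTABMASK) in its low bits, with
 * the table size being (HTABMASK + 1) * 64kB.  So "ori r6, r6, 3"
 * below selects a 256kB early hash table located at early_hash.
 */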
early_hash_table:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
	/* Load the SDR1 register (hash table base & size) */
	lis	r6, early_hash - PAGE_OFFSET@h
	ori	r6, r6, 3	/* 256kB table */
	mtspr	SPRN_SDR1, r6
	blr

load_up_mmu:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
BEGIN_MMU_FTR_SECTION
	/* Load the SDR1 register (hash table base & size) */
	lis	r6,_SDR1@ha
	tophys(r6,r6)
	lwz	r6,_SDR1@l(r6)
	mtspr	SPRN_SDR1,r6
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)

/* Load the BAT registers with the values set up by MMU_init. */
	lis	r3,BATS@ha
	addi	r3,r3,BATS@l
	tophys(r3,r3)
	LOAD_BAT(0,r3,r4,r5)
	LOAD_BAT(1,r3,r4,r5)
	LOAD_BAT(2,r3,r4,r5)
	LOAD_BAT(3,r3,r4,r5)
BEGIN_MMU_FTR_SECTION
	LOAD_BAT(4,r3,r4,r5)
	LOAD_BAT(5,r3,r4,r5)
	LOAD_BAT(6,r3,r4,r5)
	LOAD_BAT(7,r3,r4,r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr

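/*
 * Initialize all 16 segment registers for kernel context 0.  Each
 * segment covers 256MB, and the VSID is bumped by the 0x111 skew from
 * one segment to the next so that adjacent segments hash to different
 * parts of the hash table.  User segments get Nx/Ks as configured by
 * KUEP/KUAP; kernel segments always get Kp = 1.
 */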
_GLOBAL(load_segment_registers)
	li	r0, NUM_USER_SEGMENTS /* load up user segment register values */
	mtctr	r0		/* for context 0 */
	li	r3, 0		/* Kp = 0, Ks = 0, VSID = 0 */
#ifdef CONFIG_PPC_KUEP
	oris	r3, r3, SR_NX@h	/* Set Nx */
#endif
#ifdef CONFIG_PPC_KUAP
	oris	r3, r3, SR_KS@h	/* Set Ks */
#endif
	li	r4, 0
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111	/* increment VSID */
	addis	r4, r4, 0x1000	/* address of next segment */
	bdnz	3b
	li	r0, 16 - NUM_USER_SEGMENTS /* load up kernel segment registers */
	mtctr	r0			/* for context 0 */
	rlwinm	r3, r3, 0, ~SR_NX	/* Nx = 0 */
	rlwinm	r3, r3, 0, ~SR_KS	/* Ks = 0 */
	oris	r3, r3, SR_KP@h		/* Kp = 1 */
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111	/* increment VSID */
	addis	r4, r4, 0x1000	/* address of next segment */
	bdnz	3b
	blr

/*
 * This is where the main kernel code starts.
 */
start_here:
	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l
	/* Set up for using our exception vectors */
	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4
BEGIN_MMU_FTR_SECTION
	lis	r4, (swapper_pg_dir - PAGE_OFFSET)@h
	ori	r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
	rlwinm	r4, r4, 4, 0xffff01ff
	mtspr	SPRN_SDR1, r4
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
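	/*
	 * On hash-less (603-style) CPUs the hardware never reads SDR1,
	 * so the physical address of the pgdir is parked there instead,
	 * rotated left by 4; the software TLB miss handlers recover it
	 * with "rlwinm rN, rN, 28, 0xfffff000" (rotating through the
	 * remaining 28 bits and masking back to a page-aligned address).
	 */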

	/* stack */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
/*
 * Do early platform-specific initialization,
 * and set up the MMU.
 */
#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
	li	r3,0
	mr	r4,r31
	bl	machine_init
	bl	__save_cpu_setup
	bl	MMU_init
	bl	MMU_init_hw_patch

/*
 * Go back to running unmapped so we can load up new values
 * for SDR1 (hash table pointer) and the segment registers
 * and change to using our exception vectors.
 */
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)
	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)

	.align	4
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi
/* Load up the kernel context */
2:	bl	load_up_mmu

#ifdef CONFIG_BDI_SWITCH
	/* Add helper information for the Abatron bdiGDB debugger.
	 * We do this here because we know the mmu is disabled, and
	 * will be enabled for real in just a few instructions.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(0)	/* This must match your Abatron config */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	tophys(r5, r5)
	stw	r6, 0(r5)
#endif /* CONFIG_BDI_SWITCH */

/* Now turn on the MMU for real! */
	li	r4,MSR_KERNEL
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfi

/*
 * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
 *
 * Set up the segment registers for a new context.
 */
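/*
 * The VSID for a context is (context * 897) << 4, with the 0x111 skew
 * added per segment; the multiplier and skew spread the VSIDs of
 * different contexts and adjacent segments across the hash table,
 * presumably to limit collisions.
 */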
_ENTRY(switch_mmu_context)
	lwz	r3,MMCONTEXTID(r4)
	cmpwi	cr0,r3,0
	blt-	4f
	mulli	r3,r3,897	/* multiply context by skew factor */
	rlwinm	r3,r3,4,8,27	/* VSID = (context & 0xfffff) << 4 */
#ifdef CONFIG_PPC_KUEP
	oris	r3, r3, SR_NX@h	/* Set Nx */
#endif
#ifdef CONFIG_PPC_KUAP
	oris	r3, r3, SR_KS@h	/* Set Ks */
#endif
	li	r0,NUM_USER_SEGMENTS
	mtctr	r0

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 */
	lwz	r4, MM_PGD(r4)
	lis	r5, abatron_pteptrs@ha
	stw	r4, abatron_pteptrs@l + 0x4(r5)
#endif
BEGIN_MMU_FTR_SECTION
#ifndef CONFIG_BDI_SWITCH
	lwz	r4, MM_PGD(r4)
#endif
	tophys(r4, r4)
	rlwinm	r4, r4, 4, 0xffff01ff
	mtspr	SPRN_SDR1, r4
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
	li	r4,0
	isync
3:
	mtsrin	r3,r4
	addi	r3,r3,0x111	/* next VSID */
	rlwinm	r3,r3,0,8,3	/* clear out any overflow from VSID field */
	addis	r4,r4,0x1000	/* address of next segment */
	bdnz	3b
	sync
	isync
	blr
4:	trap
	EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
	blr
EXPORT_SYMBOL(switch_mmu_context)

/*
 * An undocumented "feature" of the 604e requires that the v bit
 * be cleared before changing BAT values.
 *
 * Also, newer IBM firmware does not clear BAT3 and BAT4, so
 * this makes sure it's done.
 *  -- Cort
 */
clear_bats:
	li	r10,0

	mtspr	SPRN_DBAT0U,r10
	mtspr	SPRN_DBAT0L,r10
	mtspr	SPRN_DBAT1U,r10
	mtspr	SPRN_DBAT1L,r10
	mtspr	SPRN_DBAT2U,r10
	mtspr	SPRN_DBAT2L,r10
	mtspr	SPRN_DBAT3U,r10
	mtspr	SPRN_DBAT3L,r10
	mtspr	SPRN_IBAT0U,r10
	mtspr	SPRN_IBAT0L,r10
	mtspr	SPRN_IBAT1U,r10
	mtspr	SPRN_IBAT1L,r10
	mtspr	SPRN_IBAT2U,r10
	mtspr	SPRN_IBAT2L,r10
	mtspr	SPRN_IBAT3U,r10
	mtspr	SPRN_IBAT3L,r10
BEGIN_MMU_FTR_SECTION
	/* Here's a tweak: at this point, CPU setup has
	 * not been called yet, so HIGH_BAT_EN may not be
	 * set in HID0 for the 745x processors. However, it
	 * seems that doesn't affect our ability to actually
	 * write to these SPRs.
	 */
	mtspr	SPRN_DBAT4U,r10
	mtspr	SPRN_DBAT4L,r10
	mtspr	SPRN_DBAT5U,r10
	mtspr	SPRN_DBAT5L,r10
	mtspr	SPRN_DBAT6U,r10
	mtspr	SPRN_DBAT6L,r10
	mtspr	SPRN_DBAT7U,r10
	mtspr	SPRN_DBAT7L,r10
	mtspr	SPRN_IBAT4U,r10
	mtspr	SPRN_IBAT4L,r10
	mtspr	SPRN_IBAT5U,r10
	mtspr	SPRN_IBAT5L,r10
	mtspr	SPRN_IBAT6U,r10
	mtspr	SPRN_IBAT6L,r10
	mtspr	SPRN_IBAT7U,r10
	mtspr	SPRN_IBAT7L,r10
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr

_ENTRY(update_bats)
	lis	r4, 1f@h
	ori	r4, r4, 1f@l
	tophys(r4, r4)
	mfmsr	r6
	mflr	r7
	li	r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)
	rlwinm	r0, r6, 0, ~MSR_RI
	rlwinm	r0, r0, 0, ~MSR_EE
	mtmsr	r0

	.align	4
	mtspr	SPRN_SRR0, r4
	mtspr	SPRN_SRR1, r3
	rfi
1:	bl	clear_bats
	lis	r3, BATS@ha
	addi	r3, r3, BATS@l
	tophys(r3, r3)
	LOAD_BAT(0, r3, r4, r5)
	LOAD_BAT(1, r3, r4, r5)
	LOAD_BAT(2, r3, r4, r5)
	LOAD_BAT(3, r3, r4, r5)
BEGIN_MMU_FTR_SECTION
	LOAD_BAT(4, r3, r4, r5)
	LOAD_BAT(5, r3, r4, r5)
	LOAD_BAT(6, r3, r4, r5)
	LOAD_BAT(7, r3, r4, r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	li	r3, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
	mtmsr	r3
	mtspr	SPRN_SRR0, r7
	mtspr	SPRN_SRR1, r6
	rfi

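/*
 * Flush the TLB by issuing tlbie on one page in each 4kB step of the
 * first 4MB of address space (0x400000 down to 0), which is enough to
 * hit every TLB congruence class on these CPUs.
 */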
flush_tlbs:
	lis	r10, 0x40
1:	addic.	r10, r10, -0x1000
	tlbie	r10
	bgt	1b
	sync
	blr

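/*
 * Turn the MMU off and continue at __after_mmu_off, running at its
 * physical address.  Expects r3 = the physical address we are running
 * at (from early_init()); returns directly via blr if translation is
 * already off.
 */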
mmu_off:
	addi	r4, r3, __after_mmu_off - _start
	mfmsr	r3
	andi.	r0,r3,MSR_DR|MSR_IR		/* MMU enabled? */
	beqlr
	andc	r3,r3,r0

	.align	4
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	rfi

/* We use one BAT to map up to 256M of RAM at PAGE_OFFSET */
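/*
 * A BAT pair decodes as: upper word = BEPI (effective base) | BL
 * (block length) | Vs/Vp validity bits; lower word = BRPN (physical
 * base) | WIMG | PP.  Below, r11 becomes the upper word (PAGE_OFFSET,
 * 256MB, valid in supervisor mode) and r8 the lower word (the same
 * address physical, read/write, M-bit set on SMP).
 */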
initial_bats:
	lis	r11,PAGE_OFFSET@h
	tophys(r8,r11)
#ifdef CONFIG_SMP
	ori	r8,r8,0x12		/* R/W access, M=1 */
#else
	ori	r8,r8,2			/* R/W access */
#endif /* CONFIG_SMP */
	ori	r11,r11,BL_256M<<2|0x2	/* set up BAT registers for 604 */

	mtspr	SPRN_DBAT0L,r8		/* N.B. 6xx have valid */
	mtspr	SPRN_DBAT0U,r11		/* bit in upper BAT register */
	mtspr	SPRN_IBAT0L,r8
	mtspr	SPRN_IBAT0U,r11
	isync
	blr

#ifdef CONFIG_BOOTX_TEXT
setup_disp_bat:
	/*
	 * setup the display bat prepared for us in prom.c
	 */
	mflr	r8
	bl	reloc_offset
	mtlr	r8
	addis	r8,r3,disp_BAT@ha
	addi	r8,r8,disp_BAT@l
	cmpwi	cr0,r8,0
	beqlr
	lwz	r11,0(r8)
	lwz	r8,4(r8)
	mtspr	SPRN_DBAT3L,r8
	mtspr	SPRN_DBAT3U,r11
	blr
#endif /* CONFIG_BOOTX_TEXT */

#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
setup_cpm_bat:
	lis	r8, 0xf000
	ori	r8, r8,	0x002a
	mtspr	SPRN_DBAT1L, r8

	lis	r11, 0xf000
	ori	r11, r11, (BL_1M << 2) | 2
	mtspr	SPRN_DBAT1U, r11

	blr
#endif

#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
setup_usbgecko_bat:
	/* prepare a BAT for early io */
#if defined(CONFIG_GAMECUBE)
	lis	r8, 0x0c00
#elif defined(CONFIG_WII)
	lis	r8, 0x0d00
#else
#error Invalid platform for USB Gecko based early debugging.
#endif
	/*
	 * The virtual address used must match the virtual address
	 * associated to the fixmap entry FIX_EARLY_DEBUG_BASE.
	 */
	lis	r11, 0xfffe	/* top 128K */
	ori	r8, r8, 0x002a	/* uncached, guarded, rw */
	ori	r11, r11, 0x2	/* 128K, Vs=1, Vp=0 */
	mtspr	SPRN_DBAT1L, r8
	mtspr	SPRN_DBAT1U, r11
	blr
#endif

#ifdef CONFIG_8260
/* Jump into the system reset for the rom.
 * We first disable the MMU, and then jump to the ROM reset address.
 *
 * r3 is the board info structure, r4 is the location for starting.
 * I use this for building a small kernel that can load other kernels,
 * rather than trying to write or rely on a rom monitor that can tftp load.
 */
	.globl	m8260_gorom
m8260_gorom:
	mfmsr	r0
	rlwinm	r0,r0,0,17,15	/* clear MSR_EE in r0 */
	sync
	mtmsr	r0
	sync
	mfspr	r11, SPRN_HID0
	lis	r10, 0
	ori	r10,r10,HID0_ICE|HID0_DCE
	andc	r11, r11, r10
	mtspr	SPRN_HID0, r11
	isync
	li	r5, MSR_ME|MSR_RI
	lis	r6,2f@h
	addis	r6,r6,-KERNELBASE@h
	ori	r6,r6,2f@l
	mtspr	SPRN_SRR0,r6
	mtspr	SPRN_SRR1,r5
	isync
	sync
	rfi
2:
	mtlr	r4
	blr
#endif


/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.data
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096
EXPORT_SYMBOL(empty_zero_page)

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

/* Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8