xref: /linux/arch/arm64/kernel/relocate_kernel.S (revision eb3d8ea3)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * kexec for arm64
 *
 * Copyright (C) Linaro.
 * Copyright (C) Huawei Futurewei Technologies.
 * Copyright (C) 2021, Microsoft Corporation.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */

#include <linux/kexec.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kexec.h>
#include <asm/page.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

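/*
 * Disable the MMU at EL1 by resetting SCTLR_EL1 to its MMU-off value.
 * pre_disable_mmu_workaround applies any CPU erratum workaround needed
 * before the MMU is turned off, and the ISB ensures the change takes
 * effect before any following instruction.
 */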
.macro turn_off_mmu tmp1, tmp2
	mov_q   \tmp1, INIT_SCTLR_EL1_MMU_OFF
	pre_disable_mmu_workaround
	msr	sctlr_el1, \tmp1
	isb
.endm

.section    ".kexec_relocate.text", "ax"
/*
 * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it.
 *
 * The memory that the old kernel occupies may be overwritten when copying the
 * new image to its final location.  To ensure that arm64_relocate_new_kernel,
 * the routine that performs the copy, is not itself overwritten, all code and
 * data it needs must lie between the symbols arm64_relocate_new_kernel and
 * arm64_relocate_new_kernel_end.  The machine_kexec() routine copies
 * arm64_relocate_new_kernel into kexec-safe memory that has been set up to be
 * preserved during the copy operation.
 */
SYM_CODE_START(arm64_relocate_new_kernel)
	/*
	 * The kimage structure isn't allocated specially and may be clobbered
	 * during relocation. We must load any values we need from it before
	 * any relocation occurs.
	 */
	ldr	x28, [x0, #KIMAGE_START]
	ldr	x27, [x0, #KIMAGE_ARCH_EL2_VECTORS]
	ldr	x26, [x0, #KIMAGE_ARCH_DTB_MEM]

	/* Setup the list loop variables. */
	ldr	x18, [x0, #KIMAGE_ARCH_ZERO_PAGE] /* x18 = zero page for BBM */
	ldr	x17, [x0, #KIMAGE_ARCH_TTBR1]	/* x17 = linear map copy */
	ldr	x16, [x0, #KIMAGE_HEAD]		/* x16 = kimage_head */
	ldr	x22, [x0, #KIMAGE_ARCH_PHYS_OFFSET]	/* x22 = phys_offset */
	raw_dcache_line_size x15, x1		/* x15 = dcache line size */
	break_before_make_ttbr_switch	x18, x17, x1, x2 /* set linear map */
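	/*
	 * Walk the kimage entry list.  Each entry is a page-aligned physical
	 * address with IND_* flags in the low bits: IND_DESTINATION sets the
	 * next destination page, IND_INDIRECTION switches to a new indirection
	 * page of entries, IND_SOURCE copies one source page to the current
	 * destination, and IND_DONE terminates the list.  Addresses are
	 * converted to virtual addresses so they can be accessed through the
	 * linear map copy installed in TTBR1 above.
	 */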
.Lloop:
	and	x12, x16, PAGE_MASK		/* x12 = addr */
	sub	x12, x12, x22			/* Convert x12 to virt */
	/* Test the entry flags. */
.Ltest_source:
	tbz	x16, IND_SOURCE_BIT, .Ltest_indirection

	/* Copy the source page and clean+invalidate the destination to PoC. */
	mov	x19, x13
	copy_page x13, x12, x1, x2, x3, x4, x5, x6, x7, x8
	add	x1, x19, #PAGE_SIZE
	dcache_by_myline_op civac, sy, x19, x1, x15, x20
	b	.Lnext
.Ltest_indirection:
	tbz	x16, IND_INDIRECTION_BIT, .Ltest_destination
	mov	x14, x12			/* ptr = addr */
	b	.Lnext
.Ltest_destination:
	tbz	x16, IND_DESTINATION_BIT, .Lnext
	mov	x13, x12			/* dest = addr */
.Lnext:
	ldr	x16, [x14], #8			/* entry = *ptr++ */
	tbz	x16, IND_DONE_BIT, .Lloop	/* while (!(entry & DONE)) */
	/* wait for writes from copy_page to finish */
	dsb	nsh
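	/* Invalidate the I-cache so the newly copied image is fetched from memory. */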
	ic	iallu
	dsb	nsh
	isb
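	/*
	 * The new image must be entered with the MMU off, as the arm64 boot
	 * protocol requires, so disable it now that the copy and cache
	 * maintenance are complete.
	 */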
	turn_off_mmu x12, x13

	/* Start new image. */
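	/*
	 * If EL2 vectors were installed for kexec, ask the EL2 stub to
	 * soft-restart into the new kernel at EL2, passing the entry point
	 * and DTB address via the HVC_SOFT_RESTART convention.  Otherwise
	 * branch to the new kernel directly at EL1 with the DTB address in
	 * x0 and x1-x3 zeroed, as the boot protocol expects.
	 */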
	cbz	x27, .Lel1
	mov	x1, x28				/* kernel entry point */
	mov	x2, x26				/* dtb address */
	mov	x3, xzr
	mov	x4, xzr
	mov	x0, #HVC_SOFT_RESTART
	hvc	#0				/* Jumps from el2 */
.Lel1:
	mov	x0, x26				/* dtb address */
	mov	x1, xzr
	mov	x2, xzr
	mov	x3, xzr
	br	x28				/* Jumps from el1 */
SYM_CODE_END(arm64_relocate_new_kernel)