xref: /linux/arch/x86/include/asm/kexec.h (revision 52338415)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_KEXEC_H
#define _ASM_X86_KEXEC_H

#ifdef CONFIG_X86_32
# define PA_CONTROL_PAGE	0
# define VA_CONTROL_PAGE	1
# define PA_PGD			2
# define PA_SWAP_PAGE		3
# define PAGES_NR		4
#else
# define PA_CONTROL_PAGE	0
# define VA_CONTROL_PAGE	1
# define PA_TABLE_PAGE		2
# define PA_SWAP_PAGE		3
# define PAGES_NR		4
#endif
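
/*
 * These indices name slots in the page_list array that machine_kexec()
 * passes to relocate_kernel().  A sketch of how the 32-bit path is
 * expected to fill it (the authoritative code lives in
 * arch/x86/kernel/machine_kexec_32.c and machine_kexec_64.c):
 *
 *	page_list[PA_CONTROL_PAGE] = __pa(control_page);
 *	page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
 *	page_list[PA_PGD]          = __pa(image->arch.pgd);
 */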

# define KEXEC_CONTROL_CODE_MAX_SIZE	2048

#ifndef __ASSEMBLY__

#include <linux/string.h>
#include <linux/kernel.h>

#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/bootparam.h>

struct kimage;

/*
 * KEXEC_SOURCE_MEMORY_LIMIT is the maximum page that get_free_page()
 * can return, i.e. the highest page that is mapped directly into
 * kernel memory so that kmap() is not required.
 *
 * So far x86_64 is limited to 40 physical address bits.
 */
#ifdef CONFIG_X86_32
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
# define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE

# define KEXEC_CONTROL_PAGE_SIZE	4096

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_386

/* We can also handle crash dumps from 64-bit kernels. */
# define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)
#else
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT      (MAXMEM-1)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (MAXMEM-1)
/* Maximum address we can use for the control pages */
# define KEXEC_CONTROL_MEMORY_LIMIT     (MAXMEM-1)

/* Allocate one page for the pdp and the second for the code */
# define KEXEC_CONTROL_PAGE_SIZE  (4096UL + 4096UL)

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_X86_64
#endif

/* Memory to back up during crash kdump */
#define KEXEC_BACKUP_SRC_START	(0UL)
#define KEXEC_BACKUP_SRC_END	(640 * 1024UL - 1)	/* 640K */

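/*
 * A sketch of how the kdump load path is expected to record this range
 * in the backup fields of the x86_64 struct kimage_arch further down
 * (roughly what arch/x86/kernel/crash.c does):
 *
 *	image->arch.backup_src_start = KEXEC_BACKUP_SRC_START;
 *	image->arch.backup_src_sz = KEXEC_BACKUP_SRC_END -
 *				    KEXEC_BACKUP_SRC_START + 1;
 */
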
/*
 * This function is responsible for capturing the register state if we
 * are coming via panic(); otherwise it just fixes up ss and sp if we
 * are coming via a kernel-mode exception.
 */
static inline void crash_setup_regs(struct pt_regs *newregs,
				    struct pt_regs *oldregs)
{
	if (oldregs) {
		memcpy(newregs, oldregs, sizeof(*newregs));
	} else {
#ifdef CONFIG_X86_32
		asm volatile("movl %%ebx,%0" : "=m"(newregs->bx));
		asm volatile("movl %%ecx,%0" : "=m"(newregs->cx));
		asm volatile("movl %%edx,%0" : "=m"(newregs->dx));
		asm volatile("movl %%esi,%0" : "=m"(newregs->si));
		asm volatile("movl %%edi,%0" : "=m"(newregs->di));
		asm volatile("movl %%ebp,%0" : "=m"(newregs->bp));
		asm volatile("movl %%eax,%0" : "=m"(newregs->ax));
		asm volatile("movl %%esp,%0" : "=m"(newregs->sp));
		asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
		asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
		asm volatile("movl %%ds, %%eax;" :"=a"(newregs->ds));
		asm volatile("movl %%es, %%eax;" :"=a"(newregs->es));
		asm volatile("pushfl; popl %0" :"=m"(newregs->flags));
#else
		asm volatile("movq %%rbx,%0" : "=m"(newregs->bx));
		asm volatile("movq %%rcx,%0" : "=m"(newregs->cx));
		asm volatile("movq %%rdx,%0" : "=m"(newregs->dx));
		asm volatile("movq %%rsi,%0" : "=m"(newregs->si));
		asm volatile("movq %%rdi,%0" : "=m"(newregs->di));
		asm volatile("movq %%rbp,%0" : "=m"(newregs->bp));
		asm volatile("movq %%rax,%0" : "=m"(newregs->ax));
		asm volatile("movq %%rsp,%0" : "=m"(newregs->sp));
		asm volatile("movq %%r8,%0" : "=m"(newregs->r8));
		asm volatile("movq %%r9,%0" : "=m"(newregs->r9));
		asm volatile("movq %%r10,%0" : "=m"(newregs->r10));
		asm volatile("movq %%r11,%0" : "=m"(newregs->r11));
		asm volatile("movq %%r12,%0" : "=m"(newregs->r12));
		asm volatile("movq %%r13,%0" : "=m"(newregs->r13));
		asm volatile("movq %%r14,%0" : "=m"(newregs->r14));
		asm volatile("movq %%r15,%0" : "=m"(newregs->r15));
		asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
		asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
		asm volatile("pushfq; popq %0" :"=m"(newregs->flags));
#endif
		newregs->ip = _THIS_IP_;
	}
}
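
/*
 * Usage sketch: the generic crash path (__crash_kexec() in
 * kernel/kexec_core.c) captures the faulting context with this helper
 * before shutting the machine down.  Roughly, with regs being NULL
 * when we arrive via panic():
 *
 *	struct pt_regs fixed_regs;
 *
 *	crash_setup_regs(&fixed_regs, regs);
 *	crash_save_vmcoreinfo();
 *	machine_crash_shutdown(&fixed_regs);
 *	machine_kexec(kexec_crash_image);
 */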

#ifdef CONFIG_X86_32
asmlinkage unsigned long
relocate_kernel(unsigned long indirection_page,
		unsigned long control_page,
		unsigned long start_address,
		unsigned int has_pae,
		unsigned int preserve_context);
#else
unsigned long
relocate_kernel(unsigned long indirection_page,
		unsigned long page_list,
		unsigned long start_address,
		unsigned int preserve_context,
		unsigned int sme_active);
#endif
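
/*
 * A sketch of the 64-bit call site in machine_kexec()
 * (arch/x86/kernel/machine_kexec_64.c); page_list is indexed with the
 * PA_* and VA_* constants above:
 *
 *	image->start = relocate_kernel((unsigned long)image->head,
 *				       (unsigned long)page_list,
 *				       image->start,
 *				       image->preserve_context,
 *				       sme_active());
 */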

#define ARCH_HAS_KIMAGE_ARCH

#ifdef CONFIG_X86_32
struct kimage_arch {
	pgd_t *pgd;
#ifdef CONFIG_X86_PAE
	pmd_t *pmd0;
	pmd_t *pmd1;
#endif
	pte_t *pte0;
	pte_t *pte1;
};
#else
struct kimage_arch {
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	/* Details of backup region */
	unsigned long backup_src_start;
	unsigned long backup_src_sz;

	/* Physical address of backup segment */
	unsigned long backup_load_addr;

	/* Core ELF header buffer */
	void *elf_headers;
	unsigned long elf_headers_sz;
	unsigned long elf_load_addr;
};
#endif /* CONFIG_X86_32 */

#ifdef CONFIG_X86_64
/*
 * The number and order of elements in this structure must match those
 * in arch/x86/purgatory/entry64.S. If you make a change here, make the
 * corresponding change in purgatory too.
 */
struct kexec_entry64_regs {
	uint64_t rax;
	uint64_t rcx;
	uint64_t rdx;
	uint64_t rbx;
	uint64_t rsp;
	uint64_t rbp;
	uint64_t rsi;
	uint64_t rdi;
	uint64_t r8;
	uint64_t r9;
	uint64_t r10;
	uint64_t r11;
	uint64_t r12;
	uint64_t r13;
	uint64_t r14;
	uint64_t r15;
	uint64_t rip;
};
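
/*
 * A sketch of how the kexec_file_load() bzImage loader
 * (arch/x86/kernel/kexec-bzimage64.c) is expected to seed this block
 * and pass it to purgatory through the "entry64_regs" symbol.  The
 * names bootparam_load_addr, kernel_load_addr and purgatory_stack are
 * illustrative:
 *
 *	struct kexec_entry64_regs regs64 = { };
 *
 *	regs64.rsi = bootparam_load_addr;
 *	regs64.rip = kernel_load_addr + 0x200;
 *	regs64.rsp = (unsigned long)purgatory_stack;
 *	kexec_purgatory_get_set_symbol(image, "entry64_regs", &regs64,
 *				       sizeof(regs64), false);
 */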

extern int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages,
				       gfp_t gfp);
#define arch_kexec_post_alloc_pages arch_kexec_post_alloc_pages

extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages);
#define arch_kexec_pre_free_pages arch_kexec_pre_free_pages
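
/*
 * When SME is active, the pages kexec hands to the next kernel must be
 * mapped unencrypted, since the new kernel initially accesses them
 * without encryption.  A sketch of what the x86_64 implementation in
 * arch/x86/kernel/machine_kexec_64.c is expected to look like:
 *
 *	int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages,
 *					gfp_t gfp)
 *	{
 *		return set_memory_decrypted((unsigned long)vaddr, pages);
 *	}
 *
 *	void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages)
 *	{
 *		set_memory_encrypted((unsigned long)vaddr, pages);
 *	}
 */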

#endif

typedef void crash_vmclear_fn(void);
extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
extern void kdump_nmi_shootdown_cpus(void);
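
/*
 * crash_vmclear_loaded_vmcss is an RCU-protected hook that KVM's VMX
 * code can install so loaded VMCSs are flushed before the crash kernel
 * takes over.  A sketch of how the crash path is expected to invoke it
 * (see arch/x86/kernel/crash.c):
 *
 *	crash_vmclear_fn *do_vmclear;
 *
 *	rcu_read_lock();
 *	do_vmclear = rcu_dereference(crash_vmclear_loaded_vmcss);
 *	if (do_vmclear)
 *		do_vmclear();
 *	rcu_read_unlock();
 */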

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_KEXEC_H */