// SPDX-License-Identifier: GPL-2.0
/*
 * s390 code for kexec_file_load system call
 *
 * Copyright IBM Corp. 2018
 *
 * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com>
 */

#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/module_signature.h>
#include <linux/verification.h>
#include <asm/boot_data.h>
#include <asm/ipl.h>
#include <asm/setup.h>

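/* Supported image formats; the probe functions are tried in this order. */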
const struct kexec_file_ops * const kexec_file_loaders[] = {
	&s390_kexec_elf_ops,
	&s390_kexec_image_ops,
	NULL,
};

#ifdef CONFIG_KEXEC_SIG
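/*
 * Verify a module-style appended signature on the kernel image.  The
 * expected layout at the end of the buffer is
 *   [ PKCS#7 signature | struct module_signature | MODULE_SIG_STRING ]
 * Verification is skipped entirely when the machine was not secure IPLed.
 */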
int s390_verify_sig(const char *kernel, unsigned long kernel_len)
{
	const unsigned long marker_len = sizeof(MODULE_SIG_STRING) - 1;
	struct module_signature *ms;
	unsigned long sig_len;

	/* Skip signature verification when not secure IPLed. */
	if (!ipl_secure_flag)
		return 0;

	if (marker_len > kernel_len)
		return -EKEYREJECTED;

	if (memcmp(kernel + kernel_len - marker_len, MODULE_SIG_STRING,
		   marker_len))
		return -EKEYREJECTED;
	kernel_len -= marker_len;

	ms = (void *)kernel + kernel_len - sizeof(*ms);
	kernel_len -= sizeof(*ms);

	sig_len = be32_to_cpu(ms->sig_len);
	if (sig_len >= kernel_len)
		return -EKEYREJECTED;
	kernel_len -= sig_len;

	if (ms->id_type != PKEY_ID_PKCS7)
		return -EKEYREJECTED;

	if (ms->algo != 0 ||
	    ms->hash != 0 ||
	    ms->signer_len != 0 ||
	    ms->key_id_len != 0 ||
	    ms->__pad[0] != 0 ||
	    ms->__pad[1] != 0 ||
	    ms->__pad[2] != 0) {
		return -EBADMSG;
	}

	return verify_pkcs7_signature(kernel, kernel_len,
				      kernel + kernel_len, sig_len,
				      VERIFY_USE_PLATFORM_KEYRING,
				      VERIFYING_MODULE_SIGNATURE,
				      NULL, NULL);
}
#endif /* CONFIG_KEXEC_SIG */

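/*
 * Pass the entry point and image type to the purgatory by setting its
 * kernel_entry and kernel_type symbols; for crash kernels also export
 * the start and size of the crash memory region.
 */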
static int kexec_file_update_purgatory(struct kimage *image,
				       struct s390_load_data *data)
{
	u64 entry, type;
	int ret;

	if (image->type == KEXEC_TYPE_CRASH) {
		entry = STARTUP_KDUMP_OFFSET;
		type = KEXEC_TYPE_CRASH;
	} else {
		entry = STARTUP_NORMAL_OFFSET;
		type = KEXEC_TYPE_DEFAULT;
	}

	ret = kexec_purgatory_get_set_symbol(image, "kernel_entry", &entry,
					     sizeof(entry), false);
	if (ret)
		return ret;

	ret = kexec_purgatory_get_set_symbol(image, "kernel_type", &type,
					     sizeof(type), false);
	if (ret)
		return ret;

	if (image->type == KEXEC_TYPE_CRASH) {
		u64 crash_size;

		ret = kexec_purgatory_get_set_symbol(image, "crash_start",
						     &crashk_res.start,
						     sizeof(crashk_res.start),
						     false);
		if (ret)
			return ret;

		crash_size = crashk_res.end - crashk_res.start + 1;
		ret = kexec_purgatory_get_set_symbol(image, "crash_size",
						     &crash_size,
						     sizeof(crash_size),
						     false);
	}
	return ret;
}

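/*
 * Load the purgatory on the next page boundary behind the components
 * added so far and fill in its symbols.
 */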
static int kexec_file_add_purgatory(struct kimage *image,
				    struct s390_load_data *data)
{
	struct kexec_buf buf;
	int ret;

	buf.image = image;

	data->memsz = ALIGN(data->memsz, PAGE_SIZE);
	buf.mem = data->memsz;
	if (image->type == KEXEC_TYPE_CRASH)
		buf.mem += crashk_res.start;

	ret = kexec_load_purgatory(image, &buf);
	if (ret)
		return ret;
	data->memsz += buf.memsz;

	return kexec_file_update_purgatory(image, data);
}

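/*
 * Append the initrd on the next page boundary and record its location
 * and size in the kernel parameter area.
 */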
static int kexec_file_add_initrd(struct kimage *image,
				 struct s390_load_data *data)
{
	struct kexec_buf buf;
	int ret;

	buf.image = image;

	buf.buffer = image->initrd_buf;
	buf.bufsz = image->initrd_buf_len;

	data->memsz = ALIGN(data->memsz, PAGE_SIZE);
	buf.mem = data->memsz;
	if (image->type == KEXEC_TYPE_CRASH)
		buf.mem += crashk_res.start;
	buf.memsz = buf.bufsz;

	data->parm->initrd_start = data->memsz;
	data->parm->initrd_size = buf.memsz;
	data->memsz += buf.memsz;

	ret = kexec_add_buffer(&buf);
	if (ret)
		return ret;

	return ipl_report_add_component(data->report, &buf, 0, 0);
}

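/*
 * Build the IPL report from the IPL parameter block and the platform
 * certificate list, append it to the image and make the next kernel
 * find it through the IPL parameter block pointer in the lowcore.
 */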
static int kexec_file_add_ipl_report(struct kimage *image,
				     struct s390_load_data *data)
{
	__u32 *lc_ipl_parmblock_ptr;
	unsigned int len, ncerts;
	struct kexec_buf buf;
	unsigned long addr;
	void *ptr, *end;

	buf.image = image;

	data->memsz = ALIGN(data->memsz, PAGE_SIZE);
	buf.mem = data->memsz;
	if (image->type == KEXEC_TYPE_CRASH)
		buf.mem += crashk_res.start;

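	/* Count the entries in the platform certificate list. */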
	ptr = (void *)ipl_cert_list_addr;
	end = ptr + ipl_cert_list_size;
	ncerts = 0;
	while (ptr < end) {
		ncerts++;
		len = *(unsigned int *)ptr;
		ptr += sizeof(len);
		ptr += len;
	}

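	/*
	 * The certificate payloads are placed behind the IPL report, after
	 * the certificate entries added for them.  Compute their final load
	 * address and register each certificate with the report.
	 */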
	addr = data->memsz + data->report->size;
	addr += ncerts * sizeof(struct ipl_rb_certificate_entry);
	ptr = (void *)ipl_cert_list_addr;
	while (ptr < end) {
		len = *(unsigned int *)ptr;
		ptr += sizeof(len);
		ipl_report_add_certificate(data->report, ptr, addr, len);
		addr += len;
		ptr += len;
	}

	buf.buffer = ipl_report_finish(data->report);
	buf.bufsz = data->report->size;
	buf.memsz = buf.bufsz;

	data->memsz += buf.memsz;

	lc_ipl_parmblock_ptr =
		data->kernel_buf + offsetof(struct lowcore, ipl_parmblock_ptr);
	*lc_ipl_parmblock_ptr = (__u32)buf.mem;

	return kexec_add_buffer(&buf);
}

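/*
 * Assemble all components of the kexec image: kernel, command line,
 * optional initrd, purgatory and IPL report.  Returns an ERR_PTR() on
 * failure.
 */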
void *kexec_file_add_components(struct kimage *image,
				int (*add_kernel)(struct kimage *image,
						  struct s390_load_data *data))
{
	struct s390_load_data data = {0};
	int ret;

	data.report = ipl_report_init(&ipl_block);
	if (IS_ERR(data.report))
		return data.report;

	ret = add_kernel(image, &data);
	if (ret)
		goto out;

	if (image->cmdline_buf_len >= ARCH_COMMAND_LINE_SIZE) {
		ret = -EINVAL;
		goto out;
	}
	memcpy(data.parm->command_line, image->cmdline_buf,
	       image->cmdline_buf_len);

	if (image->type == KEXEC_TYPE_CRASH) {
		data.parm->oldmem_base = crashk_res.start;
		data.parm->oldmem_size = crashk_res.end - crashk_res.start + 1;
	}

	if (image->initrd_buf) {
		ret = kexec_file_add_initrd(image, &data);
		if (ret)
			goto out;
	}

	ret = kexec_file_add_purgatory(image, &data);
	if (ret)
		goto out;

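	/*
	 * The kernel is loaded at address 0.  Patch a restart PSW that
	 * points to the original entry point into the very first bytes
	 * of the image and start the image at address 0.
	 */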
	if (data.kernel_mem == 0) {
		unsigned long restart_psw = 0x0008000080000000UL;
		restart_psw += image->start;
		memcpy(data.kernel_buf, &restart_psw, sizeof(restart_psw));
		image->start = 0;
	}

	ret = kexec_file_add_ipl_report(image, &data);
out:
	ipl_report_free(data.report);
	return ERR_PTR(ret);
}

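/*
 * Apply RELA relocations to a purgatory section.  The individual
 * relocation types are handled by arch_kexec_do_relocs().
 */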
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
				     Elf_Shdr *section,
				     const Elf_Shdr *relsec,
				     const Elf_Shdr *symtab)
{
	Elf_Rela *relas;
	int i, r_type;

	relas = (void *)pi->ehdr + relsec->sh_offset;

	for (i = 0; i < relsec->sh_size / sizeof(*relas); i++) {
		const Elf_Sym *sym;	/* symbol to relocate */
		unsigned long addr;	/* final location after relocation */
		unsigned long val;	/* relocated symbol value */
		void *loc;		/* tmp location to modify */

		sym = (void *)pi->ehdr + symtab->sh_offset;
		sym += ELF64_R_SYM(relas[i].r_info);

		if (sym->st_shndx == SHN_UNDEF)
			return -ENOEXEC;

		if (sym->st_shndx == SHN_COMMON)
			return -ENOEXEC;

		if (sym->st_shndx >= pi->ehdr->e_shnum &&
		    sym->st_shndx != SHN_ABS)
			return -ENOEXEC;

		loc = pi->purgatory_buf;
		loc += section->sh_offset;
		loc += relas[i].r_offset;

		val = sym->st_value;
		if (sym->st_shndx != SHN_ABS)
			val += pi->sechdrs[sym->st_shndx].sh_addr;
		val += relas[i].r_addend;

		addr = section->sh_addr + relas[i].r_offset;

		r_type = ELF64_R_TYPE(relas[i].r_info);
		arch_kexec_do_relocs(r_type, loc, val, addr);
	}
	return 0;
}

int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
				  unsigned long buf_len)
{
	/* A kernel must be at least large enough to contain head.S. During
	 * load, memory in head.S will be accessed, e.g. to register the next
	 * command line. If the next kernel were smaller, the current kernel
	 * would panic at load.
	 */
	if (buf_len < HEAD_END)
		return -ENOEXEC;

	return kexec_image_probe_default(image, buf, buf_len);
}