// SPDX-License-Identifier: GPL-2.0
/* arch/s390/boot/startup.c (revision 693d41f7) */
#include <linux/string.h>
#include <linux/elf.h>
#include <asm/page-states.h>
#include <asm/boot_data.h>
#include <asm/extmem.h>
#include <asm/sections.h>
#include <asm/maccess.h>
#include <asm/cpu_mf.h>
#include <asm/setup.h>
#include <asm/kasan.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/uv.h>
#include <asm/abs_lowcore.h>
#include <asm/physmem_info.h>
#include "decompressor.h"
#include "boot.h"
#include "uv.h"

struct vm_layout __bootdata_preserved(vm_layout);
unsigned long __bootdata_preserved(__abs_lowcore);
unsigned long __bootdata_preserved(__memcpy_real_area);
pte_t *__bootdata_preserved(memcpy_real_ptep);
unsigned long __bootdata_preserved(VMALLOC_START);
unsigned long __bootdata_preserved(VMALLOC_END);
struct page *__bootdata_preserved(vmemmap);
unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata_preserved(max_mappable);

u64 __bootdata_preserved(stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);

struct machine_info machine;

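/*
 * Print an error message via the early SCLP console and stop in
 * disabled wait - there is no way to recover at this point of boot.
 */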
void error(char *x)
{
	sclp_early_printk("\n\n");
	sclp_early_printk(x);
	sclp_early_printk("\n\n -- System halted");

	disabled_wait();
}

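/*
 * Probe the facility bits this early boot code cares about: facility 8
 * is EDAT-1, facility 78 is EDAT-2 (larger DAT mappings), and facility
 * 130 is instruction-execution protection, recorded here as "nx".
 */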
static void detect_facilities(void)
{
	if (test_facility(8)) {
		machine.has_edat1 = 1;
		local_ctl_set_bit(0, CR0_EDAT_BIT);
	}
	if (test_facility(78))
		machine.has_edat2 = 1;
	if (test_facility(130))
		machine.has_nx = 1;
}

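/*
 * Probe whether the ESSA instruction (used for CMMA) is available.
 * The inline assembly below temporarily replaces the program-check new
 * PSW with one pointing at label 1, then executes ESSA with the
 * ESSA_GET_STATE command via ".insn". If ESSA is not available, the
 * resulting program check continues at label 1 and rc stays 1;
 * otherwise "la %[rc],0" is reached and rc becomes 0. The original
 * program-check PSW is restored on both paths.
 */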
static int cmma_test_essa(void)
{
	unsigned long reg1, reg2, tmp = 0;
	int rc = 1;
	psw_t old;

	/* Test ESSA_GET_STATE */
	asm volatile(
		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
		"	epsw	%[reg1],%[reg2]\n"
		"	st	%[reg1],0(%[psw_pgm])\n"
		"	st	%[reg2],4(%[psw_pgm])\n"
		"	larl	%[reg1],1f\n"
		"	stg	%[reg1],8(%[psw_pgm])\n"
		"	.insn	rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n"
		"	la	%[rc],0\n"
		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
		: [reg1] "=&d" (reg1),
		  [reg2] "=&a" (reg2),
		  [rc] "+&d" (rc),
		  [tmp] "=&d" (tmp),
		  "+Q" (S390_lowcore.program_new_psw),
		  "=Q" (old)
		: [psw_old] "a" (&old),
		  [psw_pgm] "a" (&S390_lowcore.program_new_psw),
		  [cmd] "i" (ESSA_GET_STATE)
		: "cc", "memory");
	return rc;
}

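/*
 * Decide the CMMA mode: cmma_flag stays 0 if CMMA was disabled on the
 * command line or ESSA is unavailable, remains 1 for the basic ESSA
 * states, and is set to 2 when facility 147 is installed, which - as
 * used here - selects the extended ESSA state handling.
 */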
static void cmma_init(void)
{
	if (!cmma_flag)
		return;
	if (cmma_test_essa()) {
		cmma_flag = 0;
		return;
	}
	if (test_facility(147))
		cmma_flag = 2;
}

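/*
 * Set the load-program-parameter (LPP) register if facility 40 is
 * installed, so that hardware measurement samples taken this early can
 * be attributed; the current PID is still 0 at this point.
 */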
static void setup_lpp(void)
{
	S390_lowcore.current_pid = 0;
	S390_lowcore.lpp = LPP_MAGIC;
	if (test_facility(40))
		lpp(&S390_lowcore.lpp);
}

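/*
 * With CONFIG_KERNEL_UNCOMPRESSED there is nothing to decompress - the
 * variants below only account for and move the raw image. The
 * compressed-kernel counterparts are provided by the decompressor code
 * (see decompressor.h).
 */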
#ifdef CONFIG_KERNEL_UNCOMPRESSED
static unsigned long mem_safe_offset(void)
{
	return (unsigned long)_compressed_start;
}

static void deploy_kernel(void *output)
{
	void *uncompressed_start = (void *)_compressed_start;

	if (output == uncompressed_start)
		return;
	memmove(output, uncompressed_start, vmlinux.image_size);
	memset(uncompressed_start, 0, vmlinux.image_size);
}
#endif

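/*
 * Move the initrd unless it already lies completely within the
 * [min, max) range, so that it stays inside usable memory and survives
 * the kernel being deployed.
 */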
static void rescue_initrd(unsigned long min, unsigned long max)
{
	unsigned long old_addr, addr, size;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
		return;
	if (!get_physmem_reserved(RR_INITRD, &addr, &size))
		return;
	if (addr >= min && addr + size <= max)
		return;
	old_addr = addr;
	physmem_free(RR_INITRD);
	addr = physmem_alloc_top_down(RR_INITRD, size, 0);
	memmove((void *)addr, (void *)old_addr, size);
}

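/*
 * Copy the boot data collected by this boot code into the .boot.data
 * and .boot.preserved.data sections of the decompressed kernel, after
 * checking that the section sizes of both images agree.
 */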
static void copy_bootdata(void)
{
	if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
		error(".boot.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
	if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
		error(".boot.preserved.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
}

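/*
 * The .vmlinux.relocs section holds a list of 32-bit image-relative
 * locations of all R_390_64 relocation targets. For each entry, the
 * 64-bit value at phys_offset plus that location gets the KASLR
 * displacement (offset - __START_KERNEL) added.
 */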
static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr,
				unsigned long offset, unsigned long phys_offset)
{
	int *reloc;
	long loc;

	/* Adjust R_390_64 relocations */
	for (reloc = (int *)__vmlinux_relocs_64_start; reloc < (int *)__vmlinux_relocs_64_end; reloc++) {
		loc = (long)*reloc + phys_offset;
		if (loc < min_addr || loc > max_addr)
			error("64-bit relocation outside of kernel!\n");
		*(u64 *)loc += offset - __START_KERNEL;
	}
}

static void kaslr_adjust_got(unsigned long offset)
{
	u64 *entry;

	/*
	 * Even without -fPIE, Clang still uses a global offset table for some
	 * reason. Adjust the GOT entries.
	 */
	for (entry = (u64 *)vmlinux.got_start; entry < (u64 *)vmlinux.got_end; entry++)
		*entry += offset - __START_KERNEL;
}

/*
 * Merge information from several sources into a single ident_map_size value.
 * "ident_map_size" represents the upper limit of physical memory we may ever
 * reach. It covers not only online memory, but may also include standby
 * (offline) memory. "ident_map_size" might be lower than the actual standby
 * or even online memory present, due to limiting factors. We should never go
 * above this limit. It is the size of our identity mapping.
 *
 * Consider the following factors:
 * 1. max_physmem_end - end of physical memory online or standby.
 *    Always >= end of the last online memory range (get_physmem_online_end()).
 * 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
 *    kernel is able to support.
 * 3. "mem=" kernel command line option which limits physical memory usage.
 * 4. OLDMEM_BASE which is a kdump memory limit when the kernel is executed as
 *    crash kernel.
 * 5. "hsa" size which is a memory limit when the kernel is executed during
 *    zfcp/nvme dump.
 */
static void setup_ident_map_size(unsigned long max_physmem_end)
{
	unsigned long hsa_size;

	ident_map_size = max_physmem_end;
	if (memory_limit)
		ident_map_size = min(ident_map_size, memory_limit);
	ident_map_size = min(ident_map_size, 1UL << MAX_PHYSMEM_BITS);

#ifdef CONFIG_CRASH_DUMP
	if (oldmem_data.start) {
		__kaslr_enabled = 0;
		ident_map_size = min(ident_map_size, oldmem_data.size);
	} else if (ipl_block_valid && is_ipl_block_dump()) {
		__kaslr_enabled = 0;
		if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
			ident_map_size = min(ident_map_size, hsa_size);
	}
#endif
}

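/*
 * Size reserved below the vmalloc area for the memcpy-real window and
 * the absolute lowcore mapping, rounded up to the lowcore size - which
 * keeps the absolute lowcore mapping suitably aligned (see the
 * __abs_lowcore calculation below).
 */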
#define FIXMAP_SIZE	round_up(MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE, sizeof(struct lowcore))

static unsigned long get_vmem_size(unsigned long identity_size,
				   unsigned long vmemmap_size,
				   unsigned long vmalloc_size,
				   unsigned long rte_size)
{
	unsigned long max_mappable, vsize;

	max_mappable = max(identity_size, MAX_DCSS_ADDR);
	vsize = round_up(SZ_2G + max_mappable, rte_size) +
		round_up(vmemmap_size, rte_size) +
		FIXMAP_SIZE + MODULES_LEN + KASLR_LEN;
	return size_add(vsize, vmalloc_size);
}

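/*
 * Lay out the kernel virtual address space: choose a 3- or 4-level
 * page table setup (asce_limit), place the kernel image (randomized if
 * KASLR is enabled), and carve the remaining virtual space into the
 * modules, vmalloc, memcpy-real, absolute lowcore, vmemmap and
 * identity-mapping areas. Returns the chosen asce_limit.
 */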
static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
{
	unsigned long vmemmap_start;
	unsigned long kernel_start;
	unsigned long asce_limit;
	unsigned long rte_size;
	unsigned long pages;
	unsigned long vsize;
	unsigned long vmax;

	pages = ident_map_size / PAGE_SIZE;
	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
	vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);

	/* choose kernel address space layout: 4 or 3 levels. */
	BUILD_BUG_ON(!IS_ALIGNED(__START_KERNEL, THREAD_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(__NO_KASLR_START_KERNEL, THREAD_SIZE));
	BUILD_BUG_ON(__NO_KASLR_END_KERNEL > _REGION1_SIZE);
	vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION3_SIZE);
	if (IS_ENABLED(CONFIG_KASAN) || __NO_KASLR_END_KERNEL > _REGION2_SIZE ||
	    (vsize > _REGION2_SIZE && kaslr_enabled())) {
		asce_limit = _REGION1_SIZE;
		if (__NO_KASLR_END_KERNEL > _REGION2_SIZE) {
			rte_size = _REGION2_SIZE;
			vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION2_SIZE);
		} else {
			rte_size = _REGION3_SIZE;
		}
	} else {
		asce_limit = _REGION2_SIZE;
		rte_size = _REGION3_SIZE;
	}

	/*
	 * Force the modules and vmalloc area below the ultravisor secure
	 * storage limit, so that any vmalloc allocation we do can be used
	 * to back secure guest storage.
	 *
	 * Assume the secure storage limit always exceeds _REGION2_SIZE,
	 * otherwise asce_limit and rte_size would have been adjusted.
	 */
	vmax = adjust_to_uv_max(asce_limit);
#ifdef CONFIG_KASAN
	BUILD_BUG_ON(__NO_KASLR_END_KERNEL > KASAN_SHADOW_START);
	/* force vmalloc and modules below kasan shadow */
	vmax = min(vmax, KASAN_SHADOW_START);
#endif
	vsize = min(vsize, vmax);
	if (kaslr_enabled()) {
		unsigned long kernel_end, kaslr_len, slots, pos;

		kaslr_len = max(KASLR_LEN, vmax - vsize);
		slots = DIV_ROUND_UP(kaslr_len - kernel_size, THREAD_SIZE);
		if (get_random(slots, &pos))
			pos = 0;
		kernel_end = vmax - pos * THREAD_SIZE;
		kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE);
	} else if (vmax < __NO_KASLR_END_KERNEL || vsize > __NO_KASLR_END_KERNEL) {
		kernel_start = round_down(vmax - kernel_size, THREAD_SIZE);
		decompressor_printk("The kernel base address is forced to %lx\n", kernel_start);
	} else {
		kernel_start = __NO_KASLR_START_KERNEL;
	}
	__kaslr_offset = kernel_start;

	MODULES_END = round_down(kernel_start, _SEGMENT_SIZE);
	MODULES_VADDR = MODULES_END - MODULES_LEN;
	VMALLOC_END = MODULES_VADDR;

	/* allow the vmalloc area to occupy up to about half of the remaining virtual space */
	vsize = (VMALLOC_END - FIXMAP_SIZE) / 2;
	vsize = round_down(vsize, _SEGMENT_SIZE);
	vmalloc_size = min(vmalloc_size, vsize);
	VMALLOC_START = VMALLOC_END - vmalloc_size;

	__memcpy_real_area = round_down(VMALLOC_START - MEMCPY_REAL_SIZE, PAGE_SIZE);
	__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
				   sizeof(struct lowcore));

	/* split remaining virtual space between 1:1 mapping & vmemmap array */
	pages = __abs_lowcore / (PAGE_SIZE + sizeof(struct page));
	pages = SECTION_ALIGN_UP(pages);
	/* keep vmemmap_start aligned to a top level region table entry */
	vmemmap_start = round_down(__abs_lowcore - pages * sizeof(struct page), rte_size);
	/* make sure the identity map doesn't overlap with vmemmap */
	ident_map_size = min(ident_map_size, vmemmap_start);
	vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
	/* make sure vmemmap doesn't overlap with the absolute lowcore area */
	if (vmemmap_start + vmemmap_size > __abs_lowcore) {
		vmemmap_size = SECTION_ALIGN_DOWN(ident_map_size / PAGE_SIZE) * sizeof(struct page);
		ident_map_size = vmemmap_size / sizeof(struct page) * PAGE_SIZE;
	}
	vmemmap = (struct page *)vmemmap_start;
	/* maximum address for which a linear mapping could be created (DCSS, memory) */
	BUILD_BUG_ON(MAX_DCSS_ADDR > (1UL << MAX_PHYSMEM_BITS));
	max_mappable = max(ident_map_size, MAX_DCSS_ADDR);
	max_mappable = min(max_mappable, vmemmap_start);
	__identity_base = round_down(vmemmap_start - max_mappable, rte_size);

	return asce_limit;
}

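/*
 * Rough sketch of the virtual address space layout established above,
 * from high to low addresses (sizes and gaps not to scale):
 *
 *	vmax (asce_limit, possibly lowered for UV or KASAN)
 *	kernel image		kernel_start .. kernel_start + kernel_size
 *	modules			MODULES_VADDR .. MODULES_END
 *	vmalloc			VMALLOC_START .. VMALLOC_END
 *	memcpy real area	__memcpy_real_area
 *	absolute lowcore	__abs_lowcore
 *	vmemmap array		vmemmap .. vmemmap + vmemmap_size
 *	identity mapping	__identity_base .. __identity_base + ident_map_size
 */
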
/*
 * This function clears the BSS section of the decompressed Linux kernel and NOT the decompressor's.
 */
static void clear_bss_section(unsigned long kernel_start)
{
	memset((void *)kernel_start + vmlinux.image_size, 0, vmlinux.bss_size);
}

/*
 * Set vmalloc area size to an 8th of (potential) physical memory
 * size, unless size has been set by kernel command line parameter.
 */
static void setup_vmalloc_size(void)
{
	unsigned long size;

	if (vmalloc_size_set)
		return;
	size = round_up(ident_map_size / 8, _SEGMENT_SIZE);
	vmalloc_size = max(size, vmalloc_size);
}

static void kaslr_adjust_vmlinux_info(long offset)
{
	vmlinux.bootdata_off += offset;
	vmlinux.bootdata_preserved_off += offset;
	vmlinux.got_start += offset;
	vmlinux.got_end += offset;
	vmlinux.init_mm_off += offset;
	vmlinux.swapper_pg_dir_off += offset;
	vmlinux.invalid_pg_dir_off += offset;
#ifdef CONFIG_KASAN
	vmlinux.kasan_early_shadow_page_off += offset;
	vmlinux.kasan_early_shadow_pte_off += offset;
	vmlinux.kasan_early_shadow_pmd_off += offset;
	vmlinux.kasan_early_shadow_pud_off += offset;
	vmlinux.kasan_early_shadow_p4d_off += offset;
#endif
}

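/*
 * The vmlinux info fields are link-time virtual addresses based at
 * __START_KERNEL. Rebase them to 0 here, so that the KASLR adjustments
 * later in startup_kernel() can simply add the chosen offsets.
 */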
static void fixup_vmlinux_info(void)
{
	vmlinux.entry -= __START_KERNEL;
	kaslr_adjust_vmlinux_info(-__START_KERNEL);
}

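/*
 * Main C entry point of this boot phase: gather system information,
 * compute the physical and virtual memory layout, deploy the kernel
 * image at its (possibly randomized) location, apply relocations, and
 * finally jump to the decompressed kernel's entry point with DAT
 * enabled.
 */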
void startup_kernel(void)
{
	unsigned long kernel_size = vmlinux.image_size + vmlinux.bss_size;
	unsigned long nokaslr_offset_phys, kaslr_large_page_offset;
	unsigned long amode31_lma = 0;
	unsigned long max_physmem_end;
	unsigned long asce_limit;
	unsigned long safe_addr;
	psw_t psw;

	fixup_vmlinux_info();
	setup_lpp();

	/*
	 * The non-randomized kernel physical start address must be
	 * _SEGMENT_SIZE aligned (see below).
	 */
	nokaslr_offset_phys = ALIGN(mem_safe_offset(), _SEGMENT_SIZE);
	safe_addr = PAGE_ALIGN(nokaslr_offset_phys + kernel_size);

	/*
	 * Reserve decompressor memory together with decompression heap,
	 * buffer and memory which might be occupied by uncompressed kernel
	 * (if KASLR is off or failed).
	 */
	physmem_reserve(RR_DECOMPRESSOR, 0, safe_addr);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && parmarea.initrd_size)
		physmem_reserve(RR_INITRD, parmarea.initrd_start, parmarea.initrd_size);
	oldmem_data.start = parmarea.oldmem_base;
	oldmem_data.size = parmarea.oldmem_size;

	store_ipl_parmblock();
	read_ipl_report();
	uv_query_info();
	sclp_early_read_info();
	setup_boot_command_line();
	parse_boot_command_line();
	detect_facilities();
	cmma_init();
	sanitize_prot_virt_host();
	max_physmem_end = detect_max_physmem_end();
	setup_ident_map_size(max_physmem_end);
	setup_vmalloc_size();
	asce_limit = setup_kernel_memory_layout(kernel_size);
	/* ident_map_size is final now, physmem allocations can be performed */
	physmem_set_usable_limit(ident_map_size);
	detect_physmem_online_ranges(max_physmem_end);
	save_ipl_cert_comp_list();
	rescue_initrd(safe_addr, ident_map_size);

	/*
	 * __kaslr_offset_phys must be _SEGMENT_SIZE aligned, so the lower
	 * 20 bits (the offset within a large page) are zero. Copy the last
	 * 20 bits of __kaslr_offset, which is THREAD_SIZE aligned, to
	 * __kaslr_offset_phys.
	 *
	 * With this the last 20 bits of __kaslr_offset_phys and __kaslr_offset
	 * are identical, which is required to allow for large mappings of the
	 * kernel image.
	 */
	kaslr_large_page_offset = __kaslr_offset & ~_SEGMENT_MASK;
	if (kaslr_enabled()) {
		unsigned long end = ident_map_size - kaslr_large_page_offset;

		__kaslr_offset_phys = randomize_within_range(kernel_size, _SEGMENT_SIZE, 0, end);
	}
	if (!__kaslr_offset_phys)
		__kaslr_offset_phys = nokaslr_offset_phys;
	__kaslr_offset_phys |= kaslr_large_page_offset;
	kaslr_adjust_vmlinux_info(__kaslr_offset_phys);
	physmem_reserve(RR_VMLINUX, __kaslr_offset_phys, kernel_size);
	deploy_kernel((void *)__kaslr_offset_phys);

	/* vmlinux decompression is done, shrink reserved low memory */
	physmem_reserve(RR_DECOMPRESSOR, 0, (unsigned long)_decompressor_end);

	/*
	 * In case KASLR is enabled the randomized location of the .amode31
	 * section might overlap with the .vmlinux.relocs section. To avoid
	 * that, the below randomize_within_range() could have been called
	 * with __vmlinux_relocs_64_end as the lower range address. However,
	 * the .amode31 section is written to by the decompressed kernel -
	 * at that time the contents of .vmlinux.relocs are not needed
	 * anymore. Conversely, .vmlinux.relocs is read only by the
	 * decompressor, even before the kernel is started. Therefore, in
	 * case the two sections overlap there is no risk of corrupting
	 * any data.
	 */
	if (kaslr_enabled())
		amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, 0, SZ_2G);
	if (!amode31_lma)
		amode31_lma = __kaslr_offset_phys - vmlinux.amode31_size;
	physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size);

	/*
	 * The order of the following operations is important:
	 *
	 * - kaslr_adjust_relocs() must follow clear_bss_section() to establish
	 *   static memory references to data in .bss to be used by setup_vmem()
	 *   (i.e. init_mm.pgd)
	 *
	 * - setup_vmem() must follow kaslr_adjust_relocs() to be able to use
	 *   static memory references to data in .bss (i.e. init_mm.pgd)
	 *
	 * - copy_bootdata() must follow setup_vmem() to propagate changes
	 *   to bootdata made by setup_vmem()
	 */
	clear_bss_section(__kaslr_offset_phys);
	kaslr_adjust_relocs(__kaslr_offset_phys, __kaslr_offset_phys + vmlinux.image_size,
			    __kaslr_offset, __kaslr_offset_phys);
	kaslr_adjust_got(__kaslr_offset);
	setup_vmem(__kaslr_offset, __kaslr_offset + kernel_size, asce_limit);
	copy_bootdata();

	/*
	 * Save the KASLR offset for early dumps, before vmcore_info is set.
	 * Mark it as an odd value to distinguish it from a real vmcore_info
	 * pointer.
	 */
	S390_lowcore.vmcore_info = __kaslr_offset_phys ? __kaslr_offset_phys | 0x1UL : 0;

	/*
	 * Jump to the decompressed kernel entry point and switch DAT mode on.
	 */
	psw.addr = __kaslr_offset + vmlinux.entry;
	psw.mask = PSW_KERNEL_BITS;
	__load_psw(psw);
}