// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2019
 */
#include <linux/pgtable.h>
#include <asm/mem_detect.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/sclp.h>
#include <asm/kasan.h>
#include "compressed/decompressor.h"
#include "boot.h"

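/* CPACF random number generator modes, ordered weakest to strongest */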
#define PRNG_MODE_TDES	 1
#define PRNG_MODE_SHA512 2
#define PRNG_MODE_TRNG	 3

struct prno_parm {
	u32 res;
	u32 reseed_counter;
	u64 stream_bytes;
	u8  V[112];
	u8  C[112];
};

struct prng_parm {
	u8  parm_block[32];
	u32 reseed_counter;
	u64 byte_counter;
};

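/*
 * Check which CPACF random number generator facilities the CPU provides
 * and pick the strongest one: a true random number generator (TRNG) if
 * available, then the SHA-512 based deterministic generator, and the TDES
 * based generator as a fallback. Returns 0 if no generator is available,
 * in which case KASLR is disabled.
 */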
static int check_prng(void)
{
	if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG)) {
		sclp_early_printk("KASLR disabled: CPU has no PRNG\n");
		return 0;
	}
	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
		return PRNG_MODE_TRNG;
	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN))
		return PRNG_MODE_SHA512;
	return PRNG_MODE_TDES;
}

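/*
 * Generate a random value in the range [0, limit) using the best available
 * random number generator mode. Returns 0 on success and stores the result
 * in *value, or -1 if no generator is available. Note that "random % limit"
 * is slightly biased towards lower values unless limit is a power of two.
 */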
static int get_random(unsigned long limit, unsigned long *value)
{
	struct prng_parm prng = {
		/* initial parameter block for tdes mode, copied from libica */
		.parm_block = {
			0x0F, 0x2B, 0x8E, 0x63, 0x8C, 0x8E, 0xD2, 0x52,
			0x64, 0xB7, 0xA0, 0x7B, 0x75, 0x28, 0xB8, 0xF4,
			0x75, 0x5F, 0xD2, 0xA6, 0x8D, 0x97, 0x11, 0xFF,
			0x49, 0xD8, 0x23, 0xF3, 0x7E, 0x21, 0xEC, 0xA0
		},
	};
	unsigned long seed, random;
	struct prno_parm prno;
	__u64 entropy[4];
	int mode, i;

	mode = check_prng();
	seed = get_tod_clock_fast();
	switch (mode) {
	case PRNG_MODE_TRNG:
		cpacf_trng(NULL, 0, (u8 *) &random, sizeof(random));
		break;
	case PRNG_MODE_SHA512:
		cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED, &prno, NULL, 0,
			   (u8 *) &seed, sizeof(seed));
		cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, &prno, (u8 *) &random,
			   sizeof(random), NULL, 0);
		break;
	case PRNG_MODE_TDES:
		/*
		 * Add entropy: mix the TOD clock value into the parameter
		 * block, then repeatedly encrypt a scratch buffer and fold
		 * the ciphertext back into the parameter block before
		 * generating the final random value.
		 */
		*(unsigned long *) prng.parm_block ^= seed;
		for (i = 0; i < 16; i++) {
			cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block,
				  (u8 *) entropy, (u8 *) entropy,
				  sizeof(entropy));
			memcpy(prng.parm_block, entropy, sizeof(entropy));
		}
		random = seed;
		cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block, (u8 *) &random,
			  (u8 *) &random, sizeof(random));
		break;
	default:
		return -1;
	}
	*value = random % limit;
	return 0;
}

/*
 * To randomize the kernel base address we have to consider several facts:
 * 1. Physical online memory might not be contiguous and can have holes.
 *    The mem_detect info contains the list of online memory ranges we
 *    should consider.
 * 2. Several memory regions are already occupied and must not be
 *    overwritten. Currently safe_addr denotes the boundary below which all
 *    those occupied regions lie; anything above safe_addr is safe to use.
 * 3. An upper limit might apply as well, even if memory above that limit is
 *    online. Currently those limitations are:
 *    3.1. the limit set by the "mem=" kernel command line option,
 *    3.2. memory reserved at the end for kasan initialization.
 * 4. The kernel base address must be aligned to THREAD_SIZE (the kernel
 *    stack size), which is required for CONFIG_CHECK_STACK. Currently
 *    THREAD_SIZE is 4 pages (16 pages when the kernel is built with kasan
 *    enabled).
 * Assumptions:
 * 1. The kernel size (including .bss size) and the upper memory limit are
 *    page aligned.
 * 2. The mem_detect memory region start is THREAD_SIZE aligned / the end is
 *    PAGE_SIZE aligned (in practice the memory configuration granularity on
 *    z/VM and LPAR is 1 MB).
 *
 * To guarantee a uniform distribution of the kernel base address among all
 * suitable addresses we generate a random value just once. For that we need
 * a contiguous range in which every value maps to a suitable address. We
 * build this range by counting all suitable addresses (let's call them
 * positions) which would be valid as kernel base address. To count positions
 * we iterate over the online memory ranges. Each range which is big enough
 * for the kernel image contributes
 * (end - start - kernel_size) / THREAD_SIZE + 1
 * positions. The two functions count_valid_kernel_positions and
 * position_to_address count the positions in a given memory range and
 * convert a position back to an address, respectively.
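 *
 * A worked example with hypothetical numbers: assume THREAD_SIZE = 16KB,
 * a single online range [1MB, 2MB) and kernel_size = 512KB. Then
 * (0x200000 - 0x100000 - 0x80000) / 0x4000 + 1 = 33 positions exist,
 * namely the base addresses 1MB, 1MB + 16KB, ..., 1.5MB (the last
 * position places the kernel end exactly at the 2MB range end).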
 */
static unsigned long count_valid_kernel_positions(unsigned long kernel_size,
						  unsigned long _min,
						  unsigned long _max)
{
	unsigned long start, end, pos = 0;
	int i;

	for_each_mem_detect_block(i, &start, &end) {
		if (_min >= end)
			continue;
		if (start >= _max)
			break;
		start = max(_min, start);
		end = min(_max, end);
		if (end - start < kernel_size)
			continue;
		pos += (end - start - kernel_size) / THREAD_SIZE + 1;
	}

	return pos;
}

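/*
 * Convert a 1-based position (as counted by count_valid_kernel_positions)
 * back into the corresponding kernel base address. The iteration order must
 * match count_valid_kernel_positions exactly. Returns 0 if pos exceeds the
 * number of valid positions.
 */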
static unsigned long position_to_address(unsigned long pos, unsigned long kernel_size,
					 unsigned long _min, unsigned long _max)
{
	unsigned long start, end;
	int i;

	for_each_mem_detect_block(i, &start, &end) {
		if (_min >= end)
			continue;
		if (start >= _max)
			break;
		start = max(_min, start);
		end = min(_max, end);
		if (end - start < kernel_size)
			continue;
		if ((end - start - kernel_size) / THREAD_SIZE + 1 >= pos)
			return start + (pos - 1) * THREAD_SIZE;
		pos -= (end - start - kernel_size) / THREAD_SIZE + 1;
	}

	return 0;
}

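/*
 * Pick a pseudo-random, THREAD_SIZE aligned kernel base address above
 * safe_addr. Returns 0 if KASLR has to be disabled (no random number
 * generator available or not enough suitable memory).
 */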
unsigned long get_random_base(unsigned long safe_addr)
{
	unsigned long memory_limit = get_mem_detect_end();
	unsigned long base_pos, max_pos, kernel_size;

	memory_limit = min(memory_limit, ident_map_size);

	/*
	 * Avoid putting the kernel at the end of physical memory, which
	 * kasan will use for shadow memory and early pgtable mapping
	 * allocations.
	 */
	memory_limit -= kasan_estimate_memory_needs(memory_limit);

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE) {
		if (safe_addr < INITRD_START + INITRD_SIZE)
			safe_addr = INITRD_START + INITRD_SIZE;
	}
	safe_addr = ALIGN(safe_addr, THREAD_SIZE);

	kernel_size = vmlinux.image_size + vmlinux.bss_size;
	if (safe_addr + kernel_size > memory_limit)
		return 0;

	max_pos = count_valid_kernel_positions(kernel_size, safe_addr, memory_limit);
	if (!max_pos) {
		sclp_early_printk("KASLR disabled: not enough memory\n");
		return 0;
	}

	/* we need a value in the range [1, max_pos] inclusive */
	if (get_random(max_pos, &base_pos))
		return 0;
	return position_to_address(base_pos + 1, kernel_size, safe_addr, memory_limit);
}