// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 */

#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/pagemap.h>

#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/epapr_hcalls.h>

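/*
 * The "magic page" is a page of shared vcpu state (struct
 * kvm_vcpu_arch_shared) that the host maps at effective address -4096,
 * i.e. into the last page of the guest's address space. magic_var()
 * yields the effective address of one field in that struct, so guest
 * code can read and write paravirtualized register state with ordinary
 * loads and stores instead of trapping instructions.
 */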
#define KVM_MAGIC_PAGE		(-4096L)
#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)

#define KVM_INST_LWZ		0x80000000
#define KVM_INST_STW		0x90000000
#define KVM_INST_LD		0xe8000000
#define KVM_INST_STD		0xf8000000
#define KVM_INST_NOP		0x60000000
#define KVM_INST_B		0x48000000
#define KVM_INST_B_MASK		0x03ffffff
#define KVM_INST_B_MAX		0x01ffffff
#define KVM_INST_LI		0x38000000

#define KVM_MASK_RT		0x03e00000
#define KVM_RT_30		0x03c00000
#define KVM_MASK_RB		0x0000f800
#define KVM_INST_MFMSR		0x7c0000a6

#define SPR_FROM		0
#define SPR_TO			0x100

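/*
 * mfspr and mtspr encode the 10-bit SPR number as two swapped 5-bit
 * halves in the instruction image; KVM_INST_SPR() builds the encoding
 * the same way. Setting bit 0x100 in the extended opcode turns the
 * mfspr base (31/339, 0x7c0002a6) into mtspr (31/467, 0x7c0003a6).
 * For example, with SPRN_SRR0 == 26:
 *
 *	KVM_INST_MFSPR(SPRN_SRR0) == 0x7c1a02a6  (mfspr r0, SRR0)
 */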
#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
				    (((sprn) & 0x1f) << 16) | \
				    (((sprn) & 0x3e0) << 6) | \
				    (moveto))

#define KVM_INST_MFSPR(sprn)	KVM_INST_SPR(sprn, SPR_FROM)
#define KVM_INST_MTSPR(sprn)	KVM_INST_SPR(sprn, SPR_TO)

#define KVM_INST_TLBSYNC	0x7c00046c
#define KVM_INST_MTMSRD_L0	0x7c000164
#define KVM_INST_MTMSRD_L1	0x7c010164
#define KVM_INST_MTMSR		0x7c000124

#define KVM_INST_WRTEE		0x7c000106
#define KVM_INST_WRTEEI_0	0x7c000146
#define KVM_INST_WRTEEI_1	0x7c008146

#define KVM_INST_MTSRIN		0x7c0001e4

static bool kvm_patching_worked = true;
extern char kvm_tmp[];
extern char kvm_tmp_end[];
static int kvm_tmp_index;

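/*
 * Basic patching primitive: overwrite a single instruction word in the
 * kernel text, then flush the icache range so the new instruction gets
 * fetched.
 */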
static void __init kvm_patch_ins(u32 *inst, u32 new_inst)
{
	*inst = new_inst;
	flush_icache_range((ulong)inst, (ulong)inst + 4);
}

static void __init kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
#endif
}

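/*
 * The shared-page fields are 64 bits wide. 64-bit kernels access them
 * directly with ld/std. The 32-bit kernels this applies to are
 * big-endian, so the least-significant word lives at offset +4, hence
 * the lwz/stw of addr + 4 in the _ld/_std helpers below.
 * kvm_patch_ins_ll() above loads offset 0 instead, which presumably
 * matches the native-width stores the trampoline templates perform on
 * the scratch slots.
 */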
static void __init kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void __init kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
}

static void __init kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void __init kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
}

static void __init kvm_patch_ins_nop(u32 *inst)
{
	kvm_patch_ins(inst, KVM_INST_NOP);
}

static void __init kvm_patch_ins_b(u32 *inst, int addr)
{
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
	/*
	 * On relocatable kernels the interrupt handlers and our code
	 * can be in different regions, so we don't patch them.
	 */
	if ((ulong)inst < (ulong)&__end_interrupts)
		return;
#endif

	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
}

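/*
 * Trampoline space comes from the kvm_tmp[] buffer (reserved outside
 * this file, delimited by kvm_tmp and kvm_tmp_end) via a simple bump
 * allocator. Exhaustion just flags patching as not having worked; boot
 * continues.
 */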
static u32 * __init kvm_alloc(int len)
{
	u32 *p;

	if ((kvm_tmp_index + len) > (kvm_tmp_end - kvm_tmp)) {
		printk(KERN_ERR "KVM: No more space (%d + %d)\n",
				kvm_tmp_index, len);
		kvm_patching_worked = false;
		return NULL;
	}

	p = (void *)&kvm_tmp[kvm_tmp_index];
	kvm_tmp_index += len;

	return p;
}

extern u32 kvm_emulate_mtmsrd_branch_offs;
extern u32 kvm_emulate_mtmsrd_reg_offs;
extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
extern u32 kvm_emulate_mtmsrd_len;
extern u32 kvm_emulate_mtmsrd[];

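/*
 * All the rewrite helpers below follow the same pattern: copy one of
 * the assembly templates (kvm_emulate_*) into kvm_tmp, fix up the
 * template's terminating branch so it lands on the instruction after
 * the patched site, route results destined for r30/r31 through the
 * magic page scratch slots (the templates use those registers as
 * scratch), stash the original instruction in the template as a
 * fallback for cases the fast path cannot handle, flush the icache,
 * and finally replace the original instruction with a branch into the
 * trampoline:
 *
 *	mtmsrd	rX,1	--->	b	trampoline
 *	next insn   <----------	trampoline: emulate via magic page,
 *				then branch back
 *
 * The check against KVM_INST_B_MAX ensures the forward displacement
 * still fits in the 26-bit offset field of a b instruction.
 */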
static void __init kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsrd_reg_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_mtmsr_branch_offs;
extern u32 kvm_emulate_mtmsr_reg1_offs;
extern u32 kvm_emulate_mtmsr_reg2_offs;
extern u32 kvm_emulate_mtmsr_orig_ins_offs;
extern u32 kvm_emulate_mtmsr_len;
extern u32 kvm_emulate_mtmsr[];

static void __init kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;

	/* Make clobbered registers work too */
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch2), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch1), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#ifdef CONFIG_BOOKE

extern u32 kvm_emulate_wrtee_branch_offs;
extern u32 kvm_emulate_wrtee_reg_offs;
extern u32 kvm_emulate_wrtee_orig_ins_offs;
extern u32 kvm_emulate_wrtee_len;
extern u32 kvm_emulate_wrtee[];

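/*
 * wrtee copies the MSR_EE bit from a source register, while wrteei sets
 * it from an immediate. "wrteei 1" has no source register, so for
 * imm_one the template's register-load slot is replaced with
 * "li r30, MSR_EE". "wrteei 0" uses its own, shorter template further
 * below.
 */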
static void __init kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrtee_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
	p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;

	if (imm_one) {
		p[kvm_emulate_wrtee_reg_offs] =
			KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
	} else {
		/* Make clobbered registers work too */
		switch (get_rt(rt)) {
		case 30:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch2), KVM_RT_30);
			break;
		case 31:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch1), KVM_RT_30);
			break;
		default:
			p[kvm_emulate_wrtee_reg_offs] |= rt;
			break;
		}
	}

	p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_wrteei_0_branch_offs;
extern u32 kvm_emulate_wrteei_0_len;
extern u32 kvm_emulate_wrteei_0[];

static void __init kvm_patch_ins_wrteei_0(u32 *inst)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
	p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif

#ifdef CONFIG_PPC_BOOK3S_32

extern u32 kvm_emulate_mtsrin_branch_offs;
extern u32 kvm_emulate_mtsrin_reg1_offs;
extern u32 kvm_emulate_mtsrin_reg2_offs;
extern u32 kvm_emulate_mtsrin_orig_ins_offs;
extern u32 kvm_emulate_mtsrin_len;
extern u32 kvm_emulate_mtsrin[];

static void __init kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
	p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
	p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
	p[kvm_emulate_mtsrin_reg2_offs] |= rt;
	p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif

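/*
 * Ask the host to map the magic page at EA -4096, using the ePAPR
 * hypercall ABI. in[1] carries the same address together with
 * MAGIC_PAGE_FLAG_NOT_MAPPED_NX to describe the desired mapping. The
 * host returns its supported feature bits in out[0]; those gate which
 * SPR accesses get patched later. This runs once per CPU via
 * on_each_cpu().
 */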
static void __init kvm_map_magic_page(void *data)
{
	u32 *features = data;

	ulong in[8] = {0};
	ulong out[8];

	in[0] = KVM_MAGIC_PAGE;
	in[1] = KVM_MAGIC_PAGE | MAGIC_PAGE_FLAG_NOT_MAPPED_NX;

	epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));

	*features = out[0];
}

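/*
 * Decode a single instruction word by masking out its register fields
 * and comparing the remainder with the known encodings: reads of SPRs
 * that are mirrored in the magic page become plain loads (lwz for
 * 32-bit SPRs, the _ld helper for register-width ones), writes become
 * plain stores, tlbsync becomes a nop, and mtmsr/mtmsrd/wrtee/mtsrin
 * are routed through the trampolines above. SPRs beyond the baseline
 * set are only touched when the host advertised the matching feature
 * bit.
 */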
static void __init kvm_check_ins(u32 *inst, u32 features)
{
	u32 _inst = *inst;
	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
	u32 inst_rt = _inst & KVM_MASK_RT;

	switch (inst_no_rt) {
	/* Loads */
	case KVM_INST_MFMSR:
		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG0):
		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG1):
		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG2):
		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG3):
		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR0):
		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR1):
		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_DEAR):
#else
	case KVM_INST_MFSPR(SPRN_DAR):
#endif
		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_DSISR):
		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
		break;

#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MFSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MFSPR(SPRN_SPRG4):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG4R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG5):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG5R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG6):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG6R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG7):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG7R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
		break;
#endif

	case KVM_INST_MFSPR(SPRN_PIR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
		break;

	/* Stores */
	case KVM_INST_MTSPR(SPRN_SPRG0):
		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG1):
		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG2):
		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG3):
		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR0):
		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR1):
		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_DEAR):
#else
	case KVM_INST_MTSPR(SPRN_DAR):
#endif
		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_DSISR):
		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
		break;
#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MTSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MTSPR(SPRN_SPRG4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG5):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
		break;
#endif

	/* Nops */
	case KVM_INST_TLBSYNC:
		kvm_patch_ins_nop(inst);
		break;

	/* Rewrites */
	case KVM_INST_MTMSRD_L1:
		kvm_patch_ins_mtmsrd(inst, inst_rt);
		break;
	case KVM_INST_MTMSR:
	case KVM_INST_MTMSRD_L0:
		kvm_patch_ins_mtmsr(inst, inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEE:
		kvm_patch_ins_wrtee(inst, inst_rt, 0);
		break;
#endif
	}

	switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
	case KVM_INST_MTSRIN:
		if (features & KVM_MAGIC_FEAT_SR) {
			u32 inst_rb = _inst & KVM_MASK_RB;
			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
		}
		break;
#endif
	}

	switch (_inst) {
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEEI_0:
		kvm_patch_ins_wrteei_0(inst);
		break;

	case KVM_INST_WRTEEI_1:
		kvm_patch_ins_wrtee(inst, 0, 1);
		break;
#endif
	}
}

extern u32 kvm_template_start[];
extern u32 kvm_template_end[];

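/*
 * Main driver of the patching: map the magic page on every CPU, check
 * that the mapping is actually readable, then scan the kernel text word
 * by word and patch whatever kvm_check_ins() recognizes. The template
 * code itself is skipped, since it is the source that gets copied into
 * kvm_tmp.
 */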
static void __init kvm_use_magic_page(void)
{
	u32 *p;
	u32 *start, *end;
	u32 features;

	/* Tell the host to map the magic page to -4096 on all CPUs */
	on_each_cpu(kvm_map_magic_page, &features, 1);

	/*
	 * Quick self-test to see if the mapping works.
	 * fault_in_pages_readable() returns 0 on success, so bail out
	 * (leaving the kernel unpatched) only when it reports a fault.
	 */
	if (fault_in_pages_readable((const char *)KVM_MAGIC_PAGE, sizeof(u32))) {
		kvm_patching_worked = false;
		return;
	}

	/* Now loop through all code and find instructions */
	start = (void *)_stext;
	end = (void *)_etext;

	/*
	 * Being interrupted in the middle of patching would
	 * be bad for SPRG4-7, which KVM can't keep in sync
	 * with emulated accesses because reads don't trap.
	 */
	local_irq_disable();

	for (p = start; p < end; p++) {
		/* Avoid patching the template code */
		if (p >= kvm_template_start && p < kvm_template_end) {
			p = kvm_template_end - 1;
			continue;
		}
		kvm_check_ins(p, features);
	}

	local_irq_enable();

	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
			 kvm_patching_worked ? "worked" : "failed");
}

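/*
 * Patch the kernel text from a postcore initcall, so it happens early,
 * and only when we are actually a KVM guest with ePAPR hypercall
 * support and the host offers the magic page feature.
 */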
static int __init kvm_guest_init(void)
{
	if (!kvm_para_available())
		return 0;

	if (!epapr_paravirt_enabled)
		return 0;

	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
		kvm_use_magic_page();

#ifdef CONFIG_PPC_BOOK3S_64
	/* Enable napping */
	powersave_nap = 1;
#endif

	return 0;
}

postcore_initcall(kvm_guest_init);