// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/asm-prototypes.h>
#include <asm/cputable.h>
#include <asm/interrupt.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/opal.h>
#include <asm/smp.h>

#define KVM_CMA_CHUNK_ORDER	18

#include "book3s_xics.h"
#include "book3s_xive.h"
/*
 * The XIVE module will populate these when it loads
 */
unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
		       unsigned long mfrr);
int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipoll);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipi);
EXPORT_SYMBOL_GPL(__xive_vm_h_cppr);
EXPORT_SYMBOL_GPL(__xive_vm_h_eoi);

/*
 * Hash page table alignment on newer CPUs (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);

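/*
 * Allocate physically contiguous pages for a guest hash page table (HPT)
 * from the CMA area reserved in kvm_cma_reserve() below.
 *
 * A hypothetical caller allocating a 16MiB HPT might do:
 *
 *	struct page *page = kvm_alloc_hpt_cma(SZ_16M >> PAGE_SHIFT);
 */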
struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
			 false);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);

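/* Return pages obtained from kvm_alloc_hpt_cma() to the CMA area. */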
void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_free_hpt_cma);

/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the memblock allocator
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	phys_addr_t selected_size;

	/*
	 * We need CMA reservation only when we are in HV mode
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;

	selected_size = PAGE_ALIGN(memblock_phys_mem_size() * kvm_cma_resv_ratio / 100);
	if (selected_size) {
		pr_info("%s: reserving %ld MiB for global area\n", __func__,
			(unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, "kvm_cma",
			&kvm_cma);
	}
}

/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded. If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(ptid, &vc->conferring_threads);
	return rv;
}

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}

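/*
 * hcall_real_table (defined in book3s_hv_rmhandlers.S) has one entry
 * (an offset to the real-mode handler, or 0 if there is none) per
 * possible hcall number.  Hcall numbers are multiples of 4, so the
 * table is indexed by cmd / 4.
 */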
extern int hcall_real_table[], hcall_real_table_end[];

int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);

int kvmppc_hwrng_present(void)
{
	return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);

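/*
 * Handle the H_RANDOM hcall: fetch a random number from the powernv RNG
 * and return it to the guest in GPR4.
 */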
long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
	int r;

	/* Only need to do the expensive mfmsr() on radix */
	if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
		r = powernv_get_random_long(&vcpu->arch.regs.gpr[4]);
	else
		r = powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]);
	if (r)
		return H_SUCCESS;

	return H_HARDWARE;
}

/*
 * Send an interrupt or message to another CPU.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
	void __iomem *xics_phys;
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* For a nested hypervisor, use the XICS via hcall */
	if (kvmhv_on_pseries()) {
		unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

		plpar_hcall_raw(H_IPI, retbuf, get_hard_smp_processor_id(cpu),
				IPI_PRIORITY);
		return;
	}

	/* On POWER9 we can use msgsnd for any destination cpu. */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* On POWER8 for IPIs to threads in the same core, use msgsnd. */
	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
	    cpu_first_thread_sibling(cpu) ==
	    cpu_first_thread_sibling(raw_smp_processor_id())) {
		msg |= cpu_thread_in_core(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* We should never reach this */
	if (WARN_ON_ONCE(xics_on_xive()))
		return;

	/* Else poke the target with an IPI */
	xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys;
	if (xics_phys)
		__raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
	else
		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
}

/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
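/* Interrupt every thread whose bit is set in @active (bit n => vc->pcpu + n). */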
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
	int cpu = vc->pcpu;

	/* Order setting of exit map vs. msgsnd/IPI */
	smp_mb();
	for (; active; active >>= 1, ++cpu)
		if (active & 1)
			kvmhv_rm_send_ipi(cpu);
}

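/*
 * Called when a thread takes an exit that requires the whole vcore to
 * stop running the guest: record our exit in entry_exit_map and, if we
 * are the first thread out, kick the other threads (and, in split mode,
 * the other subcores) out of the guest as well.
 */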
void kvmhv_commence_exit(int trap)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
	int me, ee, i;

	/* Set our bit in the threads-exiting-guest map in the 0xff00
	   bits of vcore->entry_exit_map */
	me = 0x100 << ptid;
	do {
		ee = vc->entry_exit_map;
	} while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

	/* Are we the first here? */
	if ((ee >> 8) != 0)
		return;

	/*
	 * Trigger the other threads in this vcore to exit the guest.
	 * If this is a hypervisor decrementer interrupt then they
	 * will be already on their way out of the guest.
	 */
	if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
		kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

	/*
	 * If we are doing dynamic micro-threading, interrupt the other
	 * subcores to pull them out of their guests too.
	 */
	if (!sip)
		return;

	for (i = 0; i < MAX_SUBCORES; ++i) {
		vc = sip->vc[i];
		if (!vc)
			break;
		do {
			ee = vc->entry_exit_map;
			/* Already asked to exit? */
			if ((ee >> 8) != 0)
				break;
		} while (cmpxchg(&vc->entry_exit_map, ee,
				 ee | VCORE_EXIT_REQ) != ee);
		if ((ee >> 8) == 0)
			kvmhv_interrupt_vcore(vc, ee);
	}
}

struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);

#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
					 u32 xisr)
{
	int i;

	/*
	 * We access the mapped array here without a lock. That
	 * is safe because we never reduce the number of entries
	 * in the array and we never change the v_hwirq field of
	 * an entry once it is set.
	 *
	 * We have also carefully ordered the stores in the writer
	 * and the loads here in the reader, so that if we find a matching
	 * hwirq here, the associated GSI and irq_desc fields are valid.
	 */
	for (i = 0; i < pimap->n_mapped; i++) {
		if (xisr == pimap->mapped[i].r_hwirq) {
			/*
			 * Order subsequent reads in the caller to serialize
			 * with the writer.
			 */
			smp_rmb();
			return &pimap->mapped[i];
		}
	}
	return NULL;
}

/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier
 * saved a copy of the XIRR in the PACA, it will be picked up by
 * the host ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	struct kvmppc_passthru_irqmap *pimap;
	struct kvmppc_irq_map *irq_map;
	struct kvm_vcpu *vcpu;

	vcpu = local_paca->kvm_hstate.kvm_vcpu;
	if (!vcpu)
		return 1;
	pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
	if (!pimap)
		return 1;
	irq_map = get_irqmap(pimap, xisr);
	if (!irq_map)
		return 1;

	/* We're handling this interrupt, generic code doesn't need to */
	local_paca->kvm_hstate.saved_xirr = 0;

	return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	return 1;
}
#endif

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	2 Passthrough that needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is PCI passthrough external interrupt that was handled
 */
static long kvmppc_read_one_intr(bool *again);

long kvmppc_read_intr(void)
{
	long ret = 0;
	long rc;
	bool again;

	if (xive_enabled())
		return 1;

	do {
		again = false;
		rc = kvmppc_read_one_intr(&again);
		if (rc && (ret == 0 || rc > ret))
			ret = rc;
	} while (again);
	return ret;
}

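/*
 * Fetch one interrupt from the XICS presentation controller - via hcall
 * on a nested hypervisor, via the cached real-mode MMIO address, or via
 * OPAL - and classify it (see the return values documented above).
 */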
static long kvmppc_read_one_intr(bool *again)
{
	void __iomem *xics_phys;
	u32 h_xirr;
	__be32 xirr;
	u32 xisr;
	u8 host_ipi;
	int64_t rc;

	if (xive_enabled())
		return 1;

	/* see if a host IPI is pending */
	host_ipi = local_paca->kvm_hstate.host_ipi;
	if (host_ipi)
		return 1;

	/* Now read the interrupt from the ICP */
	if (kvmhv_on_pseries()) {
		unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

		rc = plpar_hcall_raw(H_XIRR, retbuf, 0xFF);
		xirr = cpu_to_be32(retbuf[0]);
	} else {
		xics_phys = local_paca->kvm_hstate.xics_phys;
		rc = 0;
		if (!xics_phys)
			rc = opal_int_get_xirr(&xirr, false);
		else
			xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
	}
	if (rc < 0)
		return 1;

	/*
	 * Save XIRR for later. Since we get control in reverse endian
	 * on LE systems, save it byte reversed and fetch it back in
	 * host endian. Note that xirr is the value read from the
	 * XIRR register, while h_xirr is the host endian version.
	 */
	h_xirr = be32_to_cpu(xirr);
	local_paca->kvm_hstate.saved_xirr = h_xirr;
	xisr = h_xirr & 0xffffff;
	/*
	 * Ensure that the store/load complete to guarantee all side
	 * effects of loading from XIRR have completed
	 */
	smp_mb();

	/* if nothing pending in the ICP */
	if (!xisr)
		return 0;

	/* We found something in the ICP...
	 *
	 * If it is an IPI, clear the MFRR and EOI it.
	 */
	if (xisr == XICS_IPI) {
		rc = 0;
		if (kvmhv_on_pseries()) {
			unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

			plpar_hcall_raw(H_IPI, retbuf,
					hard_smp_processor_id(), 0xff);
			plpar_hcall_raw(H_EOI, retbuf, h_xirr);
		} else if (xics_phys) {
			__raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
			__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
		} else {
			opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
			rc = opal_int_eoi(h_xirr);
		}
		/* If rc > 0, there is another interrupt pending */
		*again = rc > 0;

		/*
		 * Need to ensure side effects of above stores
		 * complete before proceeding.
		 */
		smp_mb();

		/*
		 * We need to re-check host IPI now in case it got set in the
		 * meantime. If it's clear, we bounce the interrupt to the
		 * guest
		 */
		host_ipi = local_paca->kvm_hstate.host_ipi;
		if (unlikely(host_ipi != 0)) {
			/* We raced with the host,
			 * we need to resend that IPI, bummer
			 */
			if (kvmhv_on_pseries()) {
				unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

				plpar_hcall_raw(H_IPI, retbuf,
						hard_smp_processor_id(),
						IPI_PRIORITY);
			} else if (xics_phys)
				__raw_rm_writeb(IPI_PRIORITY,
						xics_phys + XICS_MFRR);
			else
				opal_int_set_mfrr(hard_smp_processor_id(),
						  IPI_PRIORITY);
			/* Let side effects complete */
			smp_mb();
			return 1;
		}

		/* OK, it's an IPI for us */
		local_paca->kvm_hstate.saved_xirr = 0;
		return -1;
	}

	return kvmppc_check_passthru(xisr, xirr, again);
}

#ifdef CONFIG_KVM_XICS
static inline bool is_rm(void)
{
	return !(mfmsr() & MSR_DR);
}

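/*
 * The hcall wrappers below dispatch each XICS hcall to the real-mode
 * XIVE handler, to the virtual-mode XIVE handler installed by the XIVE
 * module (see the __xive_vm_* hooks above), or to the XICS emulation,
 * depending on the MMU state and which interrupt controller is in use.
 */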
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_xirr(vcpu);
		if (unlikely(!__xive_vm_h_xirr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_xirr(vcpu);
	} else
		return xics_rm_h_xirr(vcpu);
}

unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	vcpu->arch.regs.gpr[5] = get_tb();
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_xirr(vcpu);
		if (unlikely(!__xive_vm_h_xirr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_xirr(vcpu);
	} else
		return xics_rm_h_xirr(vcpu);
}

unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_ipoll(vcpu, server);
		if (unlikely(!__xive_vm_h_ipoll))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_ipoll(vcpu, server);
	} else
		return H_TOO_HARD;
}

int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_ipi(vcpu, server, mfrr);
		if (unlikely(!__xive_vm_h_ipi))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_ipi(vcpu, server, mfrr);
	} else
		return xics_rm_h_ipi(vcpu, server, mfrr);
}

int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_cppr(vcpu, cppr);
		if (unlikely(!__xive_vm_h_cppr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_cppr(vcpu, cppr);
	} else
		return xics_rm_h_cppr(vcpu, cppr);
}

int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_eoi(vcpu, xirr);
		if (unlikely(!__xive_vm_h_eoi))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_eoi(vcpu, xirr);
	} else
		return xics_rm_h_eoi(vcpu, xirr);
}
#endif /* CONFIG_KVM_XICS */

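/*
 * Handle an interrupt taken while we were in the KVM guest entry/exit
 * path with the MMU off; only a system reset (0x100) or machine check
 * (0x200) can legitimately occur there, and even those are fatal to the
 * host once handled.
 */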
void kvmppc_bad_interrupt(struct pt_regs *regs)
{
	/*
	 * 100 could happen at any time, 200 can happen due to invalid real
	 * address access for example (or any time due to a hardware problem).
	 */
	if (TRAP(regs) == 0x100) {
		get_paca()->in_nmi++;
		system_reset_exception(regs);
		get_paca()->in_nmi--;
	} else if (TRAP(regs) == 0x200) {
		machine_check_exception(regs);
	} else {
		die("Bad interrupt in KVM entry/exit code", regs, SIGABRT);
	}
	panic("Bad KVM trap");
}

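/* Wake the vcpu out of H_CEDE: clear the ceded flag and cancel the
 * decrementer wakeup timer if it is running. */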
static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
	vcpu->arch.ceded = 0;
	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
}

void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
{
	/* Guest must always run with ME enabled, HV disabled. */
	msr = (msr | MSR_ME) & ~MSR_HV;

	/*
	 * Check for illegal transactional state bit combination
	 * and if we find it, force the TS field to a safe state.
	 */
	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
		msr &= ~MSR_TS_MASK;
	vcpu->arch.shregs.msr = msr;
	kvmppc_end_cede(vcpu);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr_hv);

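/*
 * Synthesize an interrupt for the guest: save its PC and MSR in SRR0/1,
 * point the PC at the interrupt vector, and build the new MSR from
 * vcpu->arch.intr_msr, honouring LPCR[AIL] and transactional state.
 */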
static void inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
	unsigned long msr, pc, new_msr, new_pc;

	msr = kvmppc_get_msr(vcpu);
	pc = kvmppc_get_pc(vcpu);
	new_msr = vcpu->arch.intr_msr;
	new_pc = vec;

	/* If transactional, change to suspend mode on IRQ delivery */
	if (MSR_TM_TRANSACTIONAL(msr))
		new_msr |= MSR_TS_S;
	else
		new_msr |= msr & MSR_TS_MASK;

	/*
	 * Perform MSR and PC adjustment for LPCR[AIL]=3 if it is set and
	 * applicable. AIL=2 is not supported.
	 *
	 * AIL does not apply to SRESET, MCE, or HMI (which is never
	 * delivered to the guest), and does not apply if IR=0 or DR=0.
	 */
	if (vec != BOOK3S_INTERRUPT_SYSTEM_RESET &&
	    vec != BOOK3S_INTERRUPT_MACHINE_CHECK &&
	    (vcpu->arch.vcore->lpcr & LPCR_AIL) == LPCR_AIL_3 &&
	    (msr & (MSR_IR|MSR_DR)) == (MSR_IR|MSR_DR)) {
		new_msr |= MSR_IR | MSR_DR;
		new_pc += 0xC000000000004000ULL;
	}

	kvmppc_set_srr0(vcpu, pc);
	kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
	kvmppc_set_pc(vcpu, new_pc);
	vcpu->arch.shregs.msr = new_msr;
}

void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
	inject_interrupt(vcpu, vec, srr1_flags);
	kvmppc_end_cede(vcpu);
}
EXPORT_SYMBOL_GPL(kvmppc_inject_interrupt_hv);

/*
 * Is there a PRIV_DOORBELL pending for the guest (on POWER9)?
 * Can we inject a Decrementer or an External interrupt?
 */
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
{
	int ext;
	unsigned long lpcr;

	/* Insert EXTERNAL bit into LPCR at the MER bit position */
	ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1;
	lpcr = mfspr(SPRN_LPCR);
	lpcr |= ext << LPCR_MER_SH;
	mtspr(SPRN_LPCR, lpcr);
	isync();

	if (vcpu->arch.shregs.msr & MSR_EE) {
		if (ext) {
			inject_interrupt(vcpu, BOOK3S_INTERRUPT_EXTERNAL, 0);
		} else {
			long int dec = mfspr(SPRN_DEC);
			if (!(lpcr & LPCR_LD))
				dec = (int) dec;
			if (dec < 0)
				inject_interrupt(vcpu,
					BOOK3S_INTERRUPT_DECREMENTER, 0);
		}
	}

	if (vcpu->arch.doorbell_request) {
		mtspr(SPRN_DPDES, 1);
		vcpu->arch.vcore->dpdes = 1;
		smp_wmb();
		vcpu->arch.doorbell_request = 0;
	}
}

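/*
 * Flush the whole guest TLB on this core: one tlbiel per congruence
 * class (TLB set), with IS = 2 in rb so that all entries for the
 * guest's LPID are invalidated, followed by an ERAT flush.
 */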
static void flush_guest_tlb(struct kvm *kvm)
{
	unsigned long rb, set;

	rb = PPC_BIT(52);	/* IS = 2 */
	if (kvm_is_radix(kvm)) {
		/* R=1 PRS=1 RIC=2 */
		asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
			     : : "r" (rb), "i" (1), "i" (1), "i" (2),
			       "r" (0) : "memory");
		for (set = 1; set < kvm->arch.tlb_sets; ++set) {
			rb += PPC_BIT(51);	/* increment set number */
			/* R=1 PRS=1 RIC=0 */
			asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
				     : : "r" (rb), "i" (1), "i" (1), "i" (0),
				       "r" (0) : "memory");
		}
		asm volatile("ptesync": : :"memory");
		asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
	} else {
		for (set = 0; set < kvm->arch.tlb_sets; ++set) {
			/* R=0 PRS=0 RIC=0 */
			asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
				     : : "r" (rb), "i" (0), "i" (0), "i" (0),
				       "r" (0) : "memory");
			rb += PPC_BIT(51);	/* increment set number */
		}
		asm volatile("ptesync": : :"memory");
		asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
	}
}

void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
				 struct kvm_nested_guest *nested)
{
	cpumask_t *need_tlb_flush;

	/*
	 * On POWER9, individual threads can come in here, but the
	 * TLB is shared between the 4 threads in a core, hence
	 * invalidating on one thread invalidates for all.
	 * Thus we make all 4 threads use the same bit.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		pcpu = cpu_first_thread_sibling(pcpu);

	if (nested)
		need_tlb_flush = &nested->need_tlb_flush;
	else
		need_tlb_flush = &kvm->arch.need_tlb_flush;

	if (cpumask_test_cpu(pcpu, need_tlb_flush)) {
		flush_guest_tlb(kvm);

		/* Clear the bit after the TLB flush */
		cpumask_clear_cpu(pcpu, need_tlb_flush);
	}
}
EXPORT_SYMBOL_GPL(kvmppc_check_need_tlb_flush);