xref: /qemu/target/s390x/kvm/kvm.c (revision ec6f3fc3)
1 /*
2  * QEMU S390x KVM implementation
3  *
4  * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
5  * Copyright IBM Corp. 2012
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include <sys/ioctl.h>
23 
24 #include <linux/kvm.h>
25 #include <asm/ptrace.h>
26 
27 #include "cpu.h"
28 #include "s390x-internal.h"
29 #include "kvm_s390x.h"
30 #include "sysemu/kvm_int.h"
31 #include "qemu/cutils.h"
32 #include "qapi/error.h"
33 #include "qemu/error-report.h"
34 #include "qemu/timer.h"
35 #include "qemu/units.h"
36 #include "qemu/main-loop.h"
37 #include "qemu/mmap-alloc.h"
38 #include "qemu/log.h"
39 #include "sysemu/sysemu.h"
40 #include "sysemu/hw_accel.h"
41 #include "sysemu/runstate.h"
42 #include "sysemu/device_tree.h"
43 #include "exec/gdbstub.h"
44 #include "exec/ram_addr.h"
45 #include "trace.h"
46 #include "hw/s390x/s390-pci-inst.h"
47 #include "hw/s390x/s390-pci-bus.h"
48 #include "hw/s390x/ipl.h"
49 #include "hw/s390x/ebcdic.h"
50 #include "exec/memattrs.h"
51 #include "hw/s390x/s390-virtio-ccw.h"
52 #include "hw/s390x/s390-virtio-hcall.h"
53 #include "target/s390x/kvm/pv.h"
54 
55 #define kvm_vm_check_mem_attr(s, attr) \
56     kvm_vm_check_attr(s, KVM_S390_VM_MEM_CTRL, attr)
57 
58 #define IPA0_DIAG                       0x8300
59 #define IPA0_SIGP                       0xae00
60 #define IPA0_B2                         0xb200
61 #define IPA0_B9                         0xb900
62 #define IPA0_EB                         0xeb00
63 #define IPA0_E3                         0xe300
64 
65 #define PRIV_B2_SCLP_CALL               0x20
66 #define PRIV_B2_CSCH                    0x30
67 #define PRIV_B2_HSCH                    0x31
68 #define PRIV_B2_MSCH                    0x32
69 #define PRIV_B2_SSCH                    0x33
70 #define PRIV_B2_STSCH                   0x34
71 #define PRIV_B2_TSCH                    0x35
72 #define PRIV_B2_TPI                     0x36
73 #define PRIV_B2_SAL                     0x37
74 #define PRIV_B2_RSCH                    0x38
75 #define PRIV_B2_STCRW                   0x39
76 #define PRIV_B2_STCPS                   0x3a
77 #define PRIV_B2_RCHP                    0x3b
78 #define PRIV_B2_SCHM                    0x3c
79 #define PRIV_B2_CHSC                    0x5f
80 #define PRIV_B2_SIGA                    0x74
81 #define PRIV_B2_XSCH                    0x76
82 
83 #define PRIV_EB_SQBS                    0x8a
84 #define PRIV_EB_PCISTB                  0xd0
85 #define PRIV_EB_SIC                     0xd1
86 
87 #define PRIV_B9_EQBS                    0x9c
88 #define PRIV_B9_CLP                     0xa0
89 #define PRIV_B9_PTF                     0xa2
90 #define PRIV_B9_PCISTG                  0xd0
91 #define PRIV_B9_PCILG                   0xd2
92 #define PRIV_B9_RPCIT                   0xd3
93 
94 #define PRIV_E3_MPCIFC                  0xd0
95 #define PRIV_E3_STPCIFC                 0xd4
96 
97 #define DIAG_TIMEREVENT                 0x288
98 #define DIAG_IPL                        0x308
99 #define DIAG_SET_CONTROL_PROGRAM_CODES  0x318
100 #define DIAG_KVM_HYPERCALL              0x500
101 #define DIAG_KVM_BREAKPOINT             0x501
102 
103 #define ICPT_INSTRUCTION                0x04
104 #define ICPT_PROGRAM                    0x08
105 #define ICPT_EXT_INT                    0x14
106 #define ICPT_WAITPSW                    0x1c
107 #define ICPT_SOFT_INTERCEPT             0x24
108 #define ICPT_CPU_STOP                   0x28
109 #define ICPT_OPEREXC                    0x2c
110 #define ICPT_IO                         0x40
111 #define ICPT_PV_INSTR                   0x68
112 #define ICPT_PV_INSTR_NOTIFICATION      0x6c
113 
114 #define NR_LOCAL_IRQS 32
115 /*
116  * Needs to be big enough to contain max_cpus emergency signals
117  * plus an additional NR_LOCAL_IRQS interrupts
118  */
119 #define VCPU_IRQ_BUF_SIZE(max_cpus) (sizeof(struct kvm_s390_irq) * \
120                                      (max_cpus + NR_LOCAL_IRQS))
121 /*
122  * KVM only supports memory slots of up to KVM_MEM_MAX_NR_PAGES pages,
123  * as the dirty bitmap must be managed by bitops that take an int as
124  * position indicator. This would end at an unaligned address
125  * (0x7fffff00000). As future variants might provide larger pages,
126  * and to make all addresses properly aligned, let us split at 4TB.
127  */
128 #define KVM_SLOT_MAX_BYTES (4UL * TiB)
129 
130 static CPUWatchpoint hw_watchpoint;
131 /*
132  * We don't use a list because this structure is also used to transmit the
133  * hardware breakpoints to the kernel.
134  */
135 static struct kvm_hw_breakpoint *hw_breakpoints;
136 static int nb_hw_breakpoints;
137 
138 const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
139     KVM_CAP_LAST_INFO
140 };
141 
142 static int cap_async_pf;
143 static int cap_mem_op;
144 static int cap_mem_op_extension;
145 static int cap_s390_irq;
146 static int cap_ri;
147 static int cap_hpage_1m;
148 static int cap_vcpu_resets;
149 static int cap_protected;
150 static int cap_zpci_op;
151 static int cap_protected_dump;
152 
153 static bool mem_op_storage_key_support;
154 
155 static int active_cmma;
156 
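/* Query the VM's memory limit via the KVM_S390_VM_MEM_LIMIT_SIZE attribute. */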
157 static int kvm_s390_query_mem_limit(uint64_t *memory_limit)
158 {
159     struct kvm_device_attr attr = {
160         .group = KVM_S390_VM_MEM_CTRL,
161         .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
162         .addr = (uint64_t) memory_limit,
163     };
164 
165     return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
166 }
167 
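/*
 * Set the VM memory limit. Returns 0 when the kernel doesn't support the
 * attribute, and -E2BIG when the hardware limit (reported back in
 * *hw_limit) is below the requested limit.
 */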
168 int kvm_s390_set_mem_limit(uint64_t new_limit, uint64_t *hw_limit)
169 {
170     int rc;
171 
172     struct kvm_device_attr attr = {
173         .group = KVM_S390_VM_MEM_CTRL,
174         .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
175         .addr = (uint64_t) &new_limit,
176     };
177 
178     if (!kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_LIMIT_SIZE)) {
179         return 0;
180     }
181 
182     rc = kvm_s390_query_mem_limit(hw_limit);
183     if (rc) {
184         return rc;
185     } else if (*hw_limit < new_limit) {
186         return -E2BIG;
187     }
188 
189     return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
190 }
191 
192 int kvm_s390_cmma_active(void)
193 {
194     return active_cmma;
195 }
196 
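/* CMMA is only considered available if the kernel can both enable and clear it. */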
197 static bool kvm_s390_cmma_available(void)
198 {
199     static bool initialized, value;
200 
201     if (!initialized) {
202         initialized = true;
203         value = kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_ENABLE_CMMA) &&
204                 kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_CLR_CMMA);
205     }
206     return value;
207 }
208 
209 void kvm_s390_cmma_reset(void)
210 {
211     int rc;
212     struct kvm_device_attr attr = {
213         .group = KVM_S390_VM_MEM_CTRL,
214         .attr = KVM_S390_VM_MEM_CLR_CMMA,
215     };
216 
217     if (!kvm_s390_cmma_active()) {
218         return;
219     }
220 
221     rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
222     trace_kvm_clear_cmma(rc);
223 }
224 
225 static void kvm_s390_enable_cmma(void)
226 {
227     int rc;
228     struct kvm_device_attr attr = {
229         .group = KVM_S390_VM_MEM_CTRL,
230         .attr = KVM_S390_VM_MEM_ENABLE_CMMA,
231     };
232 
233     if (cap_hpage_1m) {
234         warn_report("CMM will not be enabled because it is not "
235                     "compatible with huge memory backings.");
236         return;
237     }
238     rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
239     active_cmma = !rc;
240     trace_kvm_enable_cmma(rc);
241 }
242 
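/* Set one KVM_S390_VM_CRYPTO attribute; failures are reported but not fatal. */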
243 static void kvm_s390_set_crypto_attr(uint64_t attr)
244 {
245     struct kvm_device_attr attribute = {
246         .group = KVM_S390_VM_CRYPTO,
247         .attr  = attr,
248     };
249 
250     int ret = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attribute);
251 
252     if (ret) {
253         error_report("Failed to set crypto device attribute %lu: %s",
254                      attr, strerror(-ret));
255     }
256 }
257 
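/*
 * Enable or disable AES key wrapping according to the machine's
 * "aes-key-wrap" property, if the kernel exposes the attribute.
 */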
258 static void kvm_s390_init_aes_kw(void)
259 {
260     uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_AES_KW;
261 
262     if (object_property_get_bool(OBJECT(qdev_get_machine()), "aes-key-wrap",
263                                  NULL)) {
264         attr = KVM_S390_VM_CRYPTO_ENABLE_AES_KW;
265     }
266 
267     if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
268         kvm_s390_set_crypto_attr(attr);
269     }
270 }
271 
272 static void kvm_s390_init_dea_kw(void)
273 {
274     uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_DEA_KW;
275 
276     if (object_property_get_bool(OBJECT(qdev_get_machine()), "dea-key-wrap",
277                                  NULL)) {
278         attr = KVM_S390_VM_CRYPTO_ENABLE_DEA_KW;
279     }
280 
281     if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
282         kvm_s390_set_crypto_attr(attr);
283     }
284 }
285 
286 void kvm_s390_crypto_reset(void)
287 {
288     if (s390_has_feat(S390_FEAT_MSA_EXT_3)) {
289         kvm_s390_init_aes_kw();
290         kvm_s390_init_dea_kw();
291     }
292 }
293 
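/*
 * Validate the requested backing page size: 4k always works, 1M needs
 * KVM_CAP_S390_HPAGE_1M, and anything larger is unsupported by KVM.
 */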
294 void kvm_s390_set_max_pagesize(uint64_t pagesize, Error **errp)
295 {
296     if (pagesize == 4 * KiB) {
297         return;
298     }
299 
300     if (!hpage_1m_allowed()) {
301         error_setg(errp, "This QEMU machine does not support huge page "
302                    "mappings");
303         return;
304     }
305 
306     if (pagesize != 1 * MiB) {
307         error_setg(errp, "Memory backing with 2G pages was specified, "
308                    "but KVM does not support this memory backing");
309         return;
310     }
311 
312     if (kvm_vm_enable_cap(kvm_state, KVM_CAP_S390_HPAGE_1M, 0)) {
313         error_setg(errp, "Memory backing with 1M pages was specified, "
314                    "but KVM does not support this memory backing");
315         return;
316     }
317 
318     cap_hpage_1m = 1;
319 }
320 
321 int kvm_s390_get_hpage_1m(void)
322 {
323     return cap_hpage_1m;
324 }
325 
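/* With KVM, every CCW machine type defaults to the "host" CPU model. */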
326 static void ccw_machine_class_foreach(ObjectClass *oc, void *opaque)
327 {
328     MachineClass *mc = MACHINE_CLASS(oc);
329 
330     mc->default_cpu_type = S390_CPU_TYPE_NAME("host");
331 }
332 
333 int kvm_arch_get_default_type(MachineState *ms)
334 {
335     return 0;
336 }
337 
338 int kvm_arch_init(MachineState *ms, KVMState *s)
339 {
340     int required_caps[] = {
341         KVM_CAP_DEVICE_CTRL,
342         KVM_CAP_SYNC_REGS,
343     };
344 
345     for (int i = 0; i < ARRAY_SIZE(required_caps); i++) {
346         if (!kvm_check_extension(s, required_caps[i])) {
347             error_report("KVM is missing capability #%d - "
348                          "please use kernel 3.15 or newer", required_caps[i]);
349             return -1;
350         }
351     }
352 
353     object_class_foreach(ccw_machine_class_foreach, TYPE_S390_CCW_MACHINE,
354                          false, NULL);
355 
356     if (!kvm_check_extension(s, KVM_CAP_S390_COW)) {
357         error_report("KVM is missing capability KVM_CAP_S390_COW - "
358                      "unsupported environment");
359         return -1;
360     }
361 
362     cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);
363     cap_mem_op = kvm_check_extension(s, KVM_CAP_S390_MEM_OP);
364     cap_mem_op_extension = kvm_check_extension(s, KVM_CAP_S390_MEM_OP_EXTENSION);
365     mem_op_storage_key_support = cap_mem_op_extension > 0;
366     cap_s390_irq = kvm_check_extension(s, KVM_CAP_S390_INJECT_IRQ);
367     cap_vcpu_resets = kvm_check_extension(s, KVM_CAP_S390_VCPU_RESETS);
368     cap_protected = kvm_check_extension(s, KVM_CAP_S390_PROTECTED);
369     cap_zpci_op = kvm_check_extension(s, KVM_CAP_S390_ZPCI_OP);
370     cap_protected_dump = kvm_check_extension(s, KVM_CAP_S390_PROTECTED_DUMP);
371 
372     kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0);
373     kvm_vm_enable_cap(s, KVM_CAP_S390_VECTOR_REGISTERS, 0);
374     kvm_vm_enable_cap(s, KVM_CAP_S390_USER_STSI, 0);
375     kvm_vm_enable_cap(s, KVM_CAP_S390_CPU_TOPOLOGY, 0);
376     if (ri_allowed()) {
377         if (kvm_vm_enable_cap(s, KVM_CAP_S390_RI, 0) == 0) {
378             cap_ri = 1;
379         }
380     }
381     if (cpu_model_allowed()) {
382         kvm_vm_enable_cap(s, KVM_CAP_S390_GS, 0);
383     }
384 
385     /*
386      * The migration interface for ais was introduced with kernel 4.13
387      * but the capability itself had been active since 4.12. As migration
388      * support is considered necessary, we only try to enable this for
389      * newer machine types if KVM_CAP_S390_AIS_MIGRATION is available.
390      */
391     if (cpu_model_allowed() && kvm_kernel_irqchip_allowed() &&
392         kvm_check_extension(s, KVM_CAP_S390_AIS_MIGRATION)) {
393         kvm_vm_enable_cap(s, KVM_CAP_S390_AIS, 0);
394     }
395 
396     kvm_set_max_memslot_size(KVM_SLOT_MAX_BYTES);
397     return 0;
398 }
399 
400 int kvm_arch_irqchip_create(KVMState *s)
401 {
402     return 0;
403 }
404 
405 unsigned long kvm_arch_vcpu_id(CPUState *cpu)
406 {
407     return cpu->cpu_index;
408 }
409 
410 int kvm_arch_init_vcpu(CPUState *cs)
411 {
412     unsigned int max_cpus = MACHINE(qdev_get_machine())->smp.max_cpus;
413     S390CPU *cpu = S390_CPU(cs);
414     kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state);
415     cpu->irqstate = g_malloc0(VCPU_IRQ_BUF_SIZE(max_cpus));
416     return 0;
417 }
418 
419 int kvm_arch_destroy_vcpu(CPUState *cs)
420 {
421     S390CPU *cpu = S390_CPU(cs);
422 
423     g_free(cpu->irqstate);
424     cpu->irqstate = NULL;
425 
426     return 0;
427 }
428 
429 static void kvm_s390_reset_vcpu(S390CPU *cpu, unsigned long type)
430 {
431     CPUState *cs = CPU(cpu);
432 
433     /*
434      * The reset call is needed here to reset in-kernel vcpu data that
435      * we can't access directly from QEMU (i.e. with older kernels
436      * which don't support sync_regs/ONE_REG).  Before this ioctl
437      * cpu_synchronize_state() is called in common kvm code
438      * (kvm-all).
439      */
440     if (kvm_vcpu_ioctl(cs, type)) {
441         error_report("CPU reset failed on CPU %i type %lx",
442                      cs->cpu_index, type);
443     }
444 }
445 
446 void kvm_s390_reset_vcpu_initial(S390CPU *cpu)
447 {
448     kvm_s390_reset_vcpu(cpu, KVM_S390_INITIAL_RESET);
449 }
450 
451 void kvm_s390_reset_vcpu_clear(S390CPU *cpu)
452 {
453     if (cap_vcpu_resets) {
454         kvm_s390_reset_vcpu(cpu, KVM_S390_CLEAR_RESET);
455     } else {
456         kvm_s390_reset_vcpu(cpu, KVM_S390_INITIAL_RESET);
457     }
458 }
459 
460 void kvm_s390_reset_vcpu_normal(S390CPU *cpu)
461 {
462     if (cap_vcpu_resets) {
463         kvm_s390_reset_vcpu(cpu, KVM_S390_NORMAL_RESET);
464     }
465 }
466 
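/* Check whether the kernel can synchronize all register sets in @regs via kvm_run. */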
467 static int can_sync_regs(CPUState *cs, int regs)
468 {
469     return (cs->kvm_run->kvm_valid_regs & regs) == regs;
470 }
471 
472 #define KVM_SYNC_REQUIRED_REGS (KVM_SYNC_GPRS | KVM_SYNC_ACRS | \
473                                 KVM_SYNC_CRS | KVM_SYNC_PREFIX)
474 
475 int kvm_arch_put_registers(CPUState *cs, int level)
476 {
477     S390CPU *cpu = S390_CPU(cs);
478     CPUS390XState *env = &cpu->env;
479     struct kvm_fpu fpu = {};
480     int r;
481     int i;
482 
483     g_assert(can_sync_regs(cs, KVM_SYNC_REQUIRED_REGS));
484 
485     /* always save the PSW and the GPRs */
486     cs->kvm_run->psw_addr = env->psw.addr;
487     cs->kvm_run->psw_mask = env->psw.mask;
488 
489     memcpy(cs->kvm_run->s.regs.gprs, env->regs, sizeof(cs->kvm_run->s.regs.gprs));
490     cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
491 
492     if (can_sync_regs(cs, KVM_SYNC_VRS)) {
493         for (i = 0; i < 32; i++) {
494             cs->kvm_run->s.regs.vrs[i][0] = env->vregs[i][0];
495             cs->kvm_run->s.regs.vrs[i][1] = env->vregs[i][1];
496         }
497         cs->kvm_run->s.regs.fpc = env->fpc;
498         cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_VRS;
499     } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) {
500         for (i = 0; i < 16; i++) {
501             cs->kvm_run->s.regs.fprs[i] = *get_freg(env, i);
502         }
503         cs->kvm_run->s.regs.fpc = env->fpc;
504         cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_FPRS;
505     } else {
506         /* Floating point */
507         for (i = 0; i < 16; i++) {
508             fpu.fprs[i] = *get_freg(env, i);
509         }
510         fpu.fpc = env->fpc;
511 
512         r = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu);
513         if (r < 0) {
514             return r;
515         }
516     }
517 
518     /* Do we need to save more than that? */
519     if (level == KVM_PUT_RUNTIME_STATE) {
520         return 0;
521     }
522 
523     /*
524      * Access registers, control registers and the prefix - these are
525      * always available via kvm_sync_regs in the kernels that we support
526      */
527     memcpy(cs->kvm_run->s.regs.acrs, env->aregs, sizeof(cs->kvm_run->s.regs.acrs));
528     memcpy(cs->kvm_run->s.regs.crs, env->cregs, sizeof(cs->kvm_run->s.regs.crs));
529     cs->kvm_run->s.regs.prefix = env->psa;
530     cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS | KVM_SYNC_CRS | KVM_SYNC_PREFIX;
531 
532     if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
533         cs->kvm_run->s.regs.cputm = env->cputm;
534         cs->kvm_run->s.regs.ckc = env->ckc;
535         cs->kvm_run->s.regs.todpr = env->todpr;
536         cs->kvm_run->s.regs.gbea = env->gbea;
537         cs->kvm_run->s.regs.pp = env->pp;
538         cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ARCH0;
539     } else {
540         /*
541          * These ONE_REGS are not protected by a capability. As they are only
542          * necessary for migration, we just trace a possible error, but don't
543          * return an error code.
544          */
545         kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
546         kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
547         kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
548         kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
549         kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp);
550     }
551 
552     if (can_sync_regs(cs, KVM_SYNC_RICCB)) {
553         memcpy(cs->kvm_run->s.regs.riccb, env->riccb, 64);
554         cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_RICCB;
555     }
556 
557     /* pfault parameters */
558     if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
559         cs->kvm_run->s.regs.pft = env->pfault_token;
560         cs->kvm_run->s.regs.pfs = env->pfault_select;
561         cs->kvm_run->s.regs.pfc = env->pfault_compare;
562         cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PFAULT;
563     } else if (cap_async_pf) {
564         r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
565         if (r < 0) {
566             return r;
567         }
568         r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
569         if (r < 0) {
570             return r;
571         }
572         r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
573         if (r < 0) {
574             return r;
575         }
576     }
577 
578     if (can_sync_regs(cs, KVM_SYNC_GSCB)) {
579         memcpy(cs->kvm_run->s.regs.gscb, env->gscb, 32);
580         cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GSCB;
581     }
582 
583     if (can_sync_regs(cs, KVM_SYNC_BPBC)) {
584         cs->kvm_run->s.regs.bpbc = env->bpbc;
585         cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_BPBC;
586     }
587 
588     if (can_sync_regs(cs, KVM_SYNC_ETOKEN)) {
589         cs->kvm_run->s.regs.etoken = env->etoken;
590         cs->kvm_run->s.regs.etoken_extension = env->etoken_extension;
591         cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ETOKEN;
592     }
593 
594     if (can_sync_regs(cs, KVM_SYNC_DIAG318)) {
595         cs->kvm_run->s.regs.diag318 = env->diag318_info;
596         cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_DIAG318;
597     }
598 
599     return 0;
600 }
601 
602 int kvm_arch_get_registers(CPUState *cs)
603 {
604     S390CPU *cpu = S390_CPU(cs);
605     CPUS390XState *env = &cpu->env;
606     struct kvm_fpu fpu;
607     int i, r;
608 
609     /* get the PSW */
610     env->psw.addr = cs->kvm_run->psw_addr;
611     env->psw.mask = cs->kvm_run->psw_mask;
612 
613     /* the GPRS, ACRS and CRS */
614     g_assert(can_sync_regs(cs, KVM_SYNC_REQUIRED_REGS));
615     memcpy(env->regs, cs->kvm_run->s.regs.gprs, sizeof(env->regs));
616     memcpy(env->aregs, cs->kvm_run->s.regs.acrs, sizeof(env->aregs));
617     memcpy(env->cregs, cs->kvm_run->s.regs.crs, sizeof(env->cregs));
618 
619     /* The prefix */
620     env->psa = cs->kvm_run->s.regs.prefix;
621 
622     /* Floating point and vector registers */
623     if (can_sync_regs(cs, KVM_SYNC_VRS)) {
624         for (i = 0; i < 32; i++) {
625             env->vregs[i][0] = cs->kvm_run->s.regs.vrs[i][0];
626             env->vregs[i][1] = cs->kvm_run->s.regs.vrs[i][1];
627         }
628         env->fpc = cs->kvm_run->s.regs.fpc;
629     } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) {
630         for (i = 0; i < 16; i++) {
631             *get_freg(env, i) = cs->kvm_run->s.regs.fprs[i];
632         }
633         env->fpc = cs->kvm_run->s.regs.fpc;
634     } else {
635         r = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu);
636         if (r < 0) {
637             return r;
638         }
639         for (i = 0; i < 16; i++) {
640             *get_freg(env, i) = fpu.fprs[i];
641         }
642         env->fpc = fpu.fpc;
643     }
644 
645     if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
646         env->cputm = cs->kvm_run->s.regs.cputm;
647         env->ckc = cs->kvm_run->s.regs.ckc;
648         env->todpr = cs->kvm_run->s.regs.todpr;
649         env->gbea = cs->kvm_run->s.regs.gbea;
650         env->pp = cs->kvm_run->s.regs.pp;
651     } else {
652         /*
653          * These ONE_REGS are not protected by a capability. As they are only
654          * necessary for migration, we just trace a possible error, but don't
655          * return an error code.
656          */
657         kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
658         kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
659         kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
660         kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
661         kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp);
662     }
663 
664     if (can_sync_regs(cs, KVM_SYNC_RICCB)) {
665         memcpy(env->riccb, cs->kvm_run->s.regs.riccb, 64);
666     }
667 
668     if (can_sync_regs(cs, KVM_SYNC_GSCB)) {
669         memcpy(env->gscb, cs->kvm_run->s.regs.gscb, 32);
670     }
671 
672     if (can_sync_regs(cs, KVM_SYNC_BPBC)) {
673         env->bpbc = cs->kvm_run->s.regs.bpbc;
674     }
675 
676     if (can_sync_regs(cs, KVM_SYNC_ETOKEN)) {
677         env->etoken = cs->kvm_run->s.regs.etoken;
678         env->etoken_extension = cs->kvm_run->s.regs.etoken_extension;
679     }
680 
681     /* pfault parameters */
682     if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
683         env->pfault_token = cs->kvm_run->s.regs.pft;
684         env->pfault_select = cs->kvm_run->s.regs.pfs;
685         env->pfault_compare = cs->kvm_run->s.regs.pfc;
686     } else if (cap_async_pf) {
687         r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
688         if (r < 0) {
689             return r;
690         }
691         r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
692         if (r < 0) {
693             return r;
694         }
695         r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
696         if (r < 0) {
697             return r;
698         }
699     }
700 
701     if (can_sync_regs(cs, KVM_SYNC_DIAG318)) {
702         env->diag318_info = cs->kvm_run->s.regs.diag318;
703     }
704 
705     return 0;
706 }
707 
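/* Read the guest TOD clock via the KVM_S390_VM_TOD device attributes. */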
708 int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
709 {
710     int r;
711     struct kvm_device_attr attr = {
712         .group = KVM_S390_VM_TOD,
713         .attr = KVM_S390_VM_TOD_LOW,
714         .addr = (uint64_t)tod_low,
715     };
716 
717     r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
718     if (r) {
719         return r;
720     }
721 
722     attr.attr = KVM_S390_VM_TOD_HIGH;
723     attr.addr = (uint64_t)tod_high;
724     return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
725 }
726 
727 int kvm_s390_get_clock_ext(uint8_t *tod_high, uint64_t *tod_low)
728 {
729     int r;
730     struct kvm_s390_vm_tod_clock gtod;
731     struct kvm_device_attr attr = {
732         .group = KVM_S390_VM_TOD,
733         .attr = KVM_S390_VM_TOD_EXT,
734         .addr = (uint64_t)&gtod,
735     };
736 
737     r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
738     *tod_high = gtod.epoch_idx;
739     *tod_low  = gtod.tod;
740 
741     return r;
742 }
743 
744 int kvm_s390_set_clock(uint8_t tod_high, uint64_t tod_low)
745 {
746     int r;
747     struct kvm_device_attr attr = {
748         .group = KVM_S390_VM_TOD,
749         .attr = KVM_S390_VM_TOD_LOW,
750         .addr = (uint64_t)&tod_low,
751     };
752 
753     r = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
754     if (r) {
755         return r;
756     }
757 
758     attr.attr = KVM_S390_VM_TOD_HIGH;
759     attr.addr = (uint64_t)&tod_high;
760     return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
761 }
762 
763 int kvm_s390_set_clock_ext(uint8_t tod_high, uint64_t tod_low)
764 {
765     struct kvm_s390_vm_tod_clock gtod = {
766         .epoch_idx = tod_high,
767         .tod  = tod_low,
768     };
769     struct kvm_device_attr attr = {
770         .group = KVM_S390_VM_TOD,
771         .attr = KVM_S390_VM_TOD_EXT,
772         .addr = (uint64_t)&gtod,
773     };
774 
775     return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
776 }
777 
778 /**
779  * kvm_s390_mem_op:
780  * @addr:      the logical start address in guest memory
781  * @ar:        the access register number
782  * @hostbuf:   buffer in host memory. NULL = do only checks w/o copying
783  * @len:       length that should be transferred
784  * @is_write:  true = write, false = read
785  * Returns:    0 on success, non-zero if an exception or error occurred
786  *
787  * Use KVM ioctl to read/write from/to guest memory. An access exception
788  * is injected into the vCPU in case of translation errors.
789  */
790 int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
791                     int len, bool is_write)
792 {
793     struct kvm_s390_mem_op mem_op = {
794         .gaddr = addr,
795         .flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION,
796         .size = len,
797         .op = is_write ? KVM_S390_MEMOP_LOGICAL_WRITE
798                        : KVM_S390_MEMOP_LOGICAL_READ,
799         .buf = (uint64_t)hostbuf,
800         .ar = ar,
801         .key = (cpu->env.psw.mask & PSW_MASK_KEY) >> PSW_SHIFT_KEY,
802     };
803     int ret;
804 
805     if (!cap_mem_op) {
806         return -ENOSYS;
807     }
808     if (!hostbuf) {
809         mem_op.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
810     }
811     if (mem_op_storage_key_support) {
812         mem_op.flags |= KVM_S390_MEMOP_F_SKEY_PROTECTION;
813     }
814 
815     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op);
816     if (ret < 0) {
817         warn_report("KVM_S390_MEM_OP failed: %s", strerror(-ret));
818     }
819     return ret;
820 }
821 
822 int kvm_s390_mem_op_pv(S390CPU *cpu, uint64_t offset, void *hostbuf,
823                        int len, bool is_write)
824 {
825     struct kvm_s390_mem_op mem_op = {
826         .sida_offset = offset,
827         .size = len,
828         .op = is_write ? KVM_S390_MEMOP_SIDA_WRITE
829                        : KVM_S390_MEMOP_SIDA_READ,
830         .buf = (uint64_t)hostbuf,
831     };
832     int ret;
833 
834     if (!cap_mem_op || !cap_protected) {
835         return -ENOSYS;
836     }
837 
838     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op);
839     if (ret < 0) {
840         error_report("KVM_S390_MEM_OP failed: %s", strerror(-ret));
841         abort();
842     }
843     return ret;
844 }
845 
846 static uint8_t const *sw_bp_inst;
847 static uint8_t sw_bp_ilen;
848 
849 static void determine_sw_breakpoint_instr(void)
850 {
851     /* DIAG 501 is used for sw breakpoints with old kernels */
852     static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};
853     /* Instruction 0x0000 is used for sw breakpoints with recent kernels */
854     static const uint8_t instr_0x0000[] = {0x00, 0x00};
855 
856     if (sw_bp_inst) {
857         return;
858     }
859     if (kvm_vm_enable_cap(kvm_state, KVM_CAP_S390_USER_INSTR0, 0)) {
860         sw_bp_inst = diag_501;
861         sw_bp_ilen = sizeof(diag_501);
862         trace_kvm_sw_breakpoint(4);
863     } else {
864         sw_bp_inst = instr_0x0000;
865         sw_bp_ilen = sizeof(instr_0x0000);
866         trace_kvm_sw_breakpoint(2);
867     }
868 }
869 
870 int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
871 {
872     determine_sw_breakpoint_instr();
873 
874     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
875                             sw_bp_ilen, 0) ||
876         cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)sw_bp_inst, sw_bp_ilen, 1)) {
877         return -EINVAL;
878     }
879     return 0;
880 }
881 
882 int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
883 {
884     uint8_t t[MAX_ILEN];
885 
886     if (cpu_memory_rw_debug(cs, bp->pc, t, sw_bp_ilen, 0)) {
887         return -EINVAL;
888     } else if (memcmp(t, sw_bp_inst, sw_bp_ilen)) {
889         return -EINVAL;
890     } else if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
891                                    sw_bp_ilen, 1)) {
892         return -EINVAL;
893     }
894 
895     return 0;
896 }
897 
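/* Look up a hardware breakpoint by address and type; len == -1 matches any length. */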
898 static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr,
899                                                     int len, int type)
900 {
901     int n;
902 
903     for (n = 0; n < nb_hw_breakpoints; n++) {
904         if (hw_breakpoints[n].addr == addr && hw_breakpoints[n].type == type &&
905             (hw_breakpoints[n].len == len || len == -1)) {
906             return &hw_breakpoints[n];
907         }
908     }
909 
910     return NULL;
911 }
912 
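/*
 * Append a new entry to the flat hw_breakpoints array; its layout doubles
 * as the data handed to the kernel in kvm_arch_update_guest_debug().
 */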
913 static int insert_hw_breakpoint(target_ulong addr, int len, int type)
914 {
915     int size;
916 
917     if (find_hw_breakpoint(addr, len, type)) {
918         return -EEXIST;
919     }
920 
921     size = (nb_hw_breakpoints + 1) * sizeof(struct kvm_hw_breakpoint);
922 
923     if (!hw_breakpoints) {
924         nb_hw_breakpoints = 0;
925         hw_breakpoints = (struct kvm_hw_breakpoint *)g_try_malloc(size);
926     } else {
927         hw_breakpoints =
928             (struct kvm_hw_breakpoint *)g_try_realloc(hw_breakpoints, size);
929     }
930 
931     if (!hw_breakpoints) {
932         nb_hw_breakpoints = 0;
933         return -ENOMEM;
934     }
935 
936     hw_breakpoints[nb_hw_breakpoints].addr = addr;
937     hw_breakpoints[nb_hw_breakpoints].len = len;
938     hw_breakpoints[nb_hw_breakpoints].type = type;
939 
940     nb_hw_breakpoints++;
941 
942     return 0;
943 }
944 
945 int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
946 {
947     switch (type) {
948     case GDB_BREAKPOINT_HW:
949         type = KVM_HW_BP;
950         break;
951     case GDB_WATCHPOINT_WRITE:
952         if (len < 1) {
953             return -EINVAL;
954         }
955         type = KVM_HW_WP_WRITE;
956         break;
957     default:
958         return -ENOSYS;
959     }
960     return insert_hw_breakpoint(addr, len, type);
961 }
962 
963 int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
964 {
965     int size;
966     struct kvm_hw_breakpoint *bp = find_hw_breakpoint(addr, len, type);
967 
968     if (bp == NULL) {
969         return -ENOENT;
970     }
971 
972     nb_hw_breakpoints--;
973     if (nb_hw_breakpoints > 0) {
974         /*
975          * In order to trim the array, move the last element to the position to
976          * be removed - if necessary.
977          */
978         if (bp != &hw_breakpoints[nb_hw_breakpoints]) {
979             *bp = hw_breakpoints[nb_hw_breakpoints];
980         }
981         size = nb_hw_breakpoints * sizeof(struct kvm_hw_breakpoint);
982         hw_breakpoints =
983             g_realloc(hw_breakpoints, size);
984     } else {
985         g_free(hw_breakpoints);
986         hw_breakpoints = NULL;
987     }
988 
989     return 0;
990 }
991 
992 void kvm_arch_remove_all_hw_breakpoints(void)
993 {
994     nb_hw_breakpoints = 0;
995     g_free(hw_breakpoints);
996     hw_breakpoints = NULL;
997 }
998 
999 void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
1000 {
1001     int i;
1002 
1003     if (nb_hw_breakpoints > 0) {
1004         dbg->arch.nr_hw_bp = nb_hw_breakpoints;
1005         dbg->arch.hw_bp = hw_breakpoints;
1006 
1007         for (i = 0; i < nb_hw_breakpoints; ++i) {
1008             hw_breakpoints[i].phys_addr = s390_cpu_get_phys_addr_debug(cpu,
1009                                                        hw_breakpoints[i].addr);
1010         }
1011         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
1012     } else {
1013         dbg->arch.nr_hw_bp = 0;
1014         dbg->arch.hw_bp = NULL;
1015     }
1016 }
1017 
1018 void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
1019 {
1020 }
1021 
1022 MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
1023 {
1024     return MEMTXATTRS_UNSPECIFIED;
1025 }
1026 
1027 int kvm_arch_process_async_events(CPUState *cs)
1028 {
1029     return cs->halted;
1030 }
1031 
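/* Translate a struct kvm_s390_irq into the legacy struct kvm_s390_interrupt. */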
1032 static int s390_kvm_irq_to_interrupt(struct kvm_s390_irq *irq,
1033                                      struct kvm_s390_interrupt *interrupt)
1034 {
1035     int r = 0;
1036 
1037     interrupt->type = irq->type;
1038     switch (irq->type) {
1039     case KVM_S390_INT_VIRTIO:
1040         interrupt->parm = irq->u.ext.ext_params;
1041         /* fall through */
1042     case KVM_S390_INT_PFAULT_INIT:
1043     case KVM_S390_INT_PFAULT_DONE:
1044         interrupt->parm64 = irq->u.ext.ext_params2;
1045         break;
1046     case KVM_S390_PROGRAM_INT:
1047         interrupt->parm = irq->u.pgm.code;
1048         break;
1049     case KVM_S390_SIGP_SET_PREFIX:
1050         interrupt->parm = irq->u.prefix.address;
1051         break;
1052     case KVM_S390_INT_SERVICE:
1053         interrupt->parm = irq->u.ext.ext_params;
1054         break;
1055     case KVM_S390_MCHK:
1056         interrupt->parm = irq->u.mchk.cr14;
1057         interrupt->parm64 = irq->u.mchk.mcic;
1058         break;
1059     case KVM_S390_INT_EXTERNAL_CALL:
1060         interrupt->parm = irq->u.extcall.code;
1061         break;
1062     case KVM_S390_INT_EMERGENCY:
1063         interrupt->parm = irq->u.emerg.code;
1064         break;
1065     case KVM_S390_SIGP_STOP:
1066     case KVM_S390_RESTART:
1067         break; /* These types have no parameters */
1068     case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1069         interrupt->parm = irq->u.io.subchannel_id << 16;
1070         interrupt->parm |= irq->u.io.subchannel_nr;
1071         interrupt->parm64 = (uint64_t)irq->u.io.io_int_parm << 32;
1072         interrupt->parm64 |= irq->u.io.io_int_word;
1073         break;
1074     default:
1075         r = -EINVAL;
1076         break;
1077     }
1078     return r;
1079 }
1080 
1081 static void inject_vcpu_irq_legacy(CPUState *cs, struct kvm_s390_irq *irq)
1082 {
1083     struct kvm_s390_interrupt kvmint = {};
1084     int r;
1085 
1086     r = s390_kvm_irq_to_interrupt(irq, &kvmint);
1087     if (r < 0) {
1088         fprintf(stderr, "%s called with bogus interrupt\n", __func__);
1089         exit(1);
1090     }
1091 
1092     r = kvm_vcpu_ioctl(cs, KVM_S390_INTERRUPT, &kvmint);
1093     if (r < 0) {
1094         fprintf(stderr, "KVM failed to inject interrupt\n");
1095         exit(1);
1096     }
1097 }
1098 
1099 void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq)
1100 {
1101     CPUState *cs = CPU(cpu);
1102     int r;
1103 
1104     if (cap_s390_irq) {
1105         r = kvm_vcpu_ioctl(cs, KVM_S390_IRQ, irq);
1106         if (!r) {
1107             return;
1108         }
1109         error_report("KVM failed to inject interrupt %llx", irq->type);
1110         exit(1);
1111     }
1112 
1113     inject_vcpu_irq_legacy(cs, irq);
1114 }
1115 
1116 void kvm_s390_floating_interrupt_legacy(struct kvm_s390_irq *irq)
1117 {
1118     struct kvm_s390_interrupt kvmint = {};
1119     int r;
1120 
1121     r = s390_kvm_irq_to_interrupt(irq, &kvmint);
1122     if (r < 0) {
1123         fprintf(stderr, "%s called with bogus interrupt\n", __func__);
1124         exit(1);
1125     }
1126 
1127     r = kvm_vm_ioctl(kvm_state, KVM_S390_INTERRUPT, &kvmint);
1128     if (r < 0) {
1129         fprintf(stderr, "KVM failed to inject interrupt\n");
1130         exit(1);
1131     }
1132 }
1133 
1134 void kvm_s390_program_interrupt(S390CPU *cpu, uint16_t code)
1135 {
1136     struct kvm_s390_irq irq = {
1137         .type = KVM_S390_PROGRAM_INT,
1138         .u.pgm.code = code,
1139     };
1140     qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
1141                   cpu->env.psw.addr);
1142     kvm_s390_vcpu_interrupt(cpu, &irq);
1143 }
1144 
1145 void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code)
1146 {
1147     struct kvm_s390_irq irq = {
1148         .type = KVM_S390_PROGRAM_INT,
1149         .u.pgm.code = code,
1150         .u.pgm.trans_exc_code = te_code,
1151         .u.pgm.exc_access_id = te_code & 3,
1152     };
1153 
1154     kvm_s390_vcpu_interrupt(cpu, &irq);
1155 }
1156 
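/*
 * Handle an intercepted SERVICE CALL. For protected guests, the
 * Ultravisor mediates the SCCB and also sets the condition code.
 */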
1157 static void kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run,
1158                                  uint16_t ipbh0)
1159 {
1160     CPUS390XState *env = &cpu->env;
1161     uint64_t sccb;
1162     uint32_t code;
1163     int r;
1164 
1165     sccb = env->regs[ipbh0 & 0xf];
1166     code = env->regs[(ipbh0 & 0xf0) >> 4];
1167 
1168     switch (run->s390_sieic.icptcode) {
1169     case ICPT_PV_INSTR_NOTIFICATION:
1170         g_assert(s390_is_pv());
1171         /* The notification intercepts are currently handled by KVM */
1172         error_report("unexpected SCLP PV notification");
1173         exit(1);
1174         break;
1175     case ICPT_PV_INSTR:
1176         g_assert(s390_is_pv());
1177         sclp_service_call_protected(cpu, sccb, code);
1178         /* Setting the CC is done by the Ultravisor. */
1179         break;
1180     case ICPT_INSTRUCTION:
1181         g_assert(!s390_is_pv());
1182         r = sclp_service_call(cpu, sccb, code);
1183         if (r < 0) {
1184             kvm_s390_program_interrupt(cpu, -r);
1185             return;
1186         }
1187         setcc(cpu, r);
1188     }
1189 }
1190 
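/* Handle privileged instructions with opcode 0xb2xx, mostly channel I/O. */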
1191 static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
1192 {
1193     CPUS390XState *env = &cpu->env;
1194     int rc = 0;
1195     uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16;
1196 
1197     switch (ipa1) {
1198     case PRIV_B2_XSCH:
1199         ioinst_handle_xsch(cpu, env->regs[1], RA_IGNORED);
1200         break;
1201     case PRIV_B2_CSCH:
1202         ioinst_handle_csch(cpu, env->regs[1], RA_IGNORED);
1203         break;
1204     case PRIV_B2_HSCH:
1205         ioinst_handle_hsch(cpu, env->regs[1], RA_IGNORED);
1206         break;
1207     case PRIV_B2_MSCH:
1208         ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
1209         break;
1210     case PRIV_B2_SSCH:
1211         ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
1212         break;
1213     case PRIV_B2_STCRW:
1214         ioinst_handle_stcrw(cpu, run->s390_sieic.ipb, RA_IGNORED);
1215         break;
1216     case PRIV_B2_STSCH:
1217         ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
1218         break;
1219     case PRIV_B2_TSCH:
1220         /* We should only get tsch via KVM_EXIT_S390_TSCH. */
1221         fprintf(stderr, "Spurious tsch intercept\n");
1222         break;
1223     case PRIV_B2_CHSC:
1224         ioinst_handle_chsc(cpu, run->s390_sieic.ipb, RA_IGNORED);
1225         break;
1226     case PRIV_B2_TPI:
1227         /* This should have been handled by kvm already. */
1228         fprintf(stderr, "Spurious tpi intercept\n");
1229         break;
1230     case PRIV_B2_SCHM:
1231         ioinst_handle_schm(cpu, env->regs[1], env->regs[2],
1232                            run->s390_sieic.ipb, RA_IGNORED);
1233         break;
1234     case PRIV_B2_RSCH:
1235         ioinst_handle_rsch(cpu, env->regs[1], RA_IGNORED);
1236         break;
1237     case PRIV_B2_RCHP:
1238         ioinst_handle_rchp(cpu, env->regs[1], RA_IGNORED);
1239         break;
1240     case PRIV_B2_STCPS:
1241         /* We do not provide this instruction, it is suppressed. */
1242         break;
1243     case PRIV_B2_SAL:
1244         ioinst_handle_sal(cpu, env->regs[1], RA_IGNORED);
1245         break;
1246     case PRIV_B2_SIGA:
1247         /* Not provided, set CC = 3 for subchannel not operational */
1248         setcc(cpu, 3);
1249         break;
1250     case PRIV_B2_SCLP_CALL:
1251         kvm_sclp_service_call(cpu, run, ipbh0);
1252         break;
1253     default:
1254         rc = -1;
1255         trace_kvm_insn_unhandled_priv(ipa1);
1256         break;
1257     }
1258 
1259     return rc;
1260 }
1261 
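/*
 * Compute the effective address of an RXY-format instruction from its
 * base, index and signed 20-bit displacement; *ar is set to the access
 * register number (the base register field).
 */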
1262 static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run,
1263                                   uint8_t *ar)
1264 {
1265     CPUS390XState *env = &cpu->env;
1266     uint32_t x2 = (run->s390_sieic.ipa & 0x000f);
1267     uint32_t base2 = run->s390_sieic.ipb >> 28;
1268     uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
1269                      ((run->s390_sieic.ipb & 0xff00) << 4);
1270 
1271     if (disp2 & 0x80000) {
1272         disp2 += 0xfff00000;
1273     }
1274     if (ar) {
1275         *ar = base2;
1276     }
1277 
1278     return (base2 ? env->regs[base2] : 0) +
1279            (x2 ? env->regs[x2] : 0) + (long)(int)disp2;
1280 }
1281 
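/* Same as above, for RSY-format instructions (no index register). */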
1282 static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run,
1283                                   uint8_t *ar)
1284 {
1285     CPUS390XState *env = &cpu->env;
1286     uint32_t base2 = run->s390_sieic.ipb >> 28;
1287     uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
1288                      ((run->s390_sieic.ipb & 0xff00) << 4);
1289 
1290     if (disp2 & 0x80000) {
1291         disp2 += 0xfff00000;
1292     }
1293     if (ar) {
1294         *ar = base2;
1295     }
1296 
1297     return (base2 ? env->regs[base2] : 0) + (long)(int)disp2;
1298 }
1299 
1300 static int kvm_clp_service_call(S390CPU *cpu, struct kvm_run *run)
1301 {
1302     uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
1303 
1304     if (s390_has_feat(S390_FEAT_ZPCI)) {
1305         return clp_service_call(cpu, r2, RA_IGNORED);
1306     } else {
1307         return -1;
1308     }
1309 }
1310 
1311 static int kvm_pcilg_service_call(S390CPU *cpu, struct kvm_run *run)
1312 {
1313     uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
1314     uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
1315 
1316     if (s390_has_feat(S390_FEAT_ZPCI)) {
1317         return pcilg_service_call(cpu, r1, r2, RA_IGNORED);
1318     } else {
1319         return -1;
1320     }
1321 }
1322 
1323 static int kvm_pcistg_service_call(S390CPU *cpu, struct kvm_run *run)
1324 {
1325     uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
1326     uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
1327 
1328     if (s390_has_feat(S390_FEAT_ZPCI)) {
1329         return pcistg_service_call(cpu, r1, r2, RA_IGNORED);
1330     } else {
1331         return -1;
1332     }
1333 }
1334 
1335 static int kvm_stpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
1336 {
1337     uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
1338     uint64_t fiba;
1339     uint8_t ar;
1340 
1341     if (s390_has_feat(S390_FEAT_ZPCI)) {
1342         fiba = get_base_disp_rxy(cpu, run, &ar);
1343 
1344         return stpcifc_service_call(cpu, r1, fiba, ar, RA_IGNORED);
1345     } else {
1346         return -1;
1347     }
1348 }
1349 
1350 static int kvm_sic_service_call(S390CPU *cpu, struct kvm_run *run)
1351 {
1352     CPUS390XState *env = &cpu->env;
1353     uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
1354     uint8_t r3 = run->s390_sieic.ipa & 0x000f;
1355     uint8_t isc;
1356     uint16_t mode;
1357     int r;
1358 
1359     mode = env->regs[r1] & 0xffff;
1360     isc = (env->regs[r3] >> 27) & 0x7;
1361     r = css_do_sic(cpu, isc, mode);
1362     if (r) {
1363         kvm_s390_program_interrupt(cpu, -r);
1364     }
1365 
1366     return 0;
1367 }
1368 
1369 static int kvm_rpcit_service_call(S390CPU *cpu, struct kvm_run *run)
1370 {
1371     uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
1372     uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
1373 
1374     if (s390_has_feat(S390_FEAT_ZPCI)) {
1375         return rpcit_service_call(cpu, r1, r2, RA_IGNORED);
1376     } else {
1377         return -1;
1378     }
1379 }
1380 
1381 static int kvm_pcistb_service_call(S390CPU *cpu, struct kvm_run *run)
1382 {
1383     uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
1384     uint8_t r3 = run->s390_sieic.ipa & 0x000f;
1385     uint64_t gaddr;
1386     uint8_t ar;
1387 
1388     if (s390_has_feat(S390_FEAT_ZPCI)) {
1389         gaddr = get_base_disp_rsy(cpu, run, &ar);
1390 
1391         return pcistb_service_call(cpu, r1, r3, gaddr, ar, RA_IGNORED);
1392     } else {
1393         return -1;
1394     }
1395 }
1396 
1397 static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
1398 {
1399     uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
1400     uint64_t fiba;
1401     uint8_t ar;
1402 
1403     if (s390_has_feat(S390_FEAT_ZPCI)) {
1404         fiba = get_base_disp_rxy(cpu, run, &ar);
1405 
1406         return mpcifc_service_call(cpu, r1, fiba, ar, RA_IGNORED);
1407     } else {
1408         return -1;
1409     }
1410 }
1411 
1412 static void kvm_handle_ptf(S390CPU *cpu, struct kvm_run *run)
1413 {
1414     uint8_t r1 = (run->s390_sieic.ipb >> 20) & 0x0f;
1415 
1416     s390_handle_ptf(cpu, r1, RA_IGNORED);
1417 }
1418 
1419 static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
1420 {
1421     int r = 0;
1422 
1423     switch (ipa1) {
1424     case PRIV_B9_CLP:
1425         r = kvm_clp_service_call(cpu, run);
1426         break;
1427     case PRIV_B9_PCISTG:
1428         r = kvm_pcistg_service_call(cpu, run);
1429         break;
1430     case PRIV_B9_PCILG:
1431         r = kvm_pcilg_service_call(cpu, run);
1432         break;
1433     case PRIV_B9_RPCIT:
1434         r = kvm_rpcit_service_call(cpu, run);
1435         break;
1436     case PRIV_B9_PTF:
1437         kvm_handle_ptf(cpu, run);
1438         break;
1439     case PRIV_B9_EQBS:
1440         /* just inject exception */
1441         r = -1;
1442         break;
1443     default:
1444         r = -1;
1445         trace_kvm_insn_unhandled_priv(ipa1);
1446         break;
1447     }
1448 
1449     return r;
1450 }
1451 
1452 static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
1453 {
1454     int r = 0;
1455 
1456     switch (ipbl) {
1457     case PRIV_EB_PCISTB:
1458         r = kvm_pcistb_service_call(cpu, run);
1459         break;
1460     case PRIV_EB_SIC:
1461         r = kvm_sic_service_call(cpu, run);
1462         break;
1463     case PRIV_EB_SQBS:
1464         /* just inject exception */
1465         r = -1;
1466         break;
1467     default:
1468         r = -1;
1469         trace_kvm_insn_unhandled_priv(ipbl);
1470         break;
1471     }
1472 
1473     return r;
1474 }
1475 
1476 static int handle_e3(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
1477 {
1478     int r = 0;
1479 
1480     switch (ipbl) {
1481     case PRIV_E3_MPCIFC:
1482         r = kvm_mpcifc_service_call(cpu, run);
1483         break;
1484     case PRIV_E3_STPCIFC:
1485         r = kvm_stpcifc_service_call(cpu, run);
1486         break;
1487     default:
1488         r = -1;
1489         trace_kvm_insn_unhandled_priv(ipbl);
1490         break;
1491     }
1492 
1493     return r;
1494 }
1495 
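/* diag 0x500: virtio hypercall; an invalid command becomes a specification exception. */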
1496 static int handle_hypercall(S390CPU *cpu, struct kvm_run *run)
1497 {
1498     CPUS390XState *env = &cpu->env;
1499     int ret;
1500 
1501     ret = s390_virtio_hypercall(env);
1502     if (ret == -EINVAL) {
1503         kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
1504         return 0;
1505     }
1506 
1507     return ret;
1508 }
1509 
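/* diag 0x288: watchdog timer manipulation */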
1510 static void kvm_handle_diag_288(S390CPU *cpu, struct kvm_run *run)
1511 {
1512     uint64_t r1, r3;
1513     int rc;
1514 
1515     r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
1516     r3 = run->s390_sieic.ipa & 0x000f;
1517     rc = handle_diag_288(&cpu->env, r1, r3);
1518     if (rc) {
1519         kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
1520     }
1521 }
1522 
1523 static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run)
1524 {
1525     uint64_t r1, r3;
1526 
1527     r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
1528     r3 = run->s390_sieic.ipa & 0x000f;
1529     handle_diag_308(&cpu->env, r1, r3, RA_IGNORED);
1530 }
1531 
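/*
 * The PSW already points past the breakpoint instruction; rewind it and
 * report EXCP_DEBUG if this is one of our software breakpoints.
 */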
1532 static int handle_sw_breakpoint(S390CPU *cpu, struct kvm_run *run)
1533 {
1534     CPUS390XState *env = &cpu->env;
1535     unsigned long pc;
1536 
1537     pc = env->psw.addr - sw_bp_ilen;
1538     if (kvm_find_sw_breakpoint(CPU(cpu), pc)) {
1539         env->psw.addr = pc;
1540         return EXCP_DEBUG;
1541     }
1542 
1543     return -ENOENT;
1544 }
1545 
1546 void kvm_s390_set_diag318(CPUState *cs, uint64_t diag318_info)
1547 {
1548     CPUS390XState *env = &S390_CPU(cs)->env;
1549 
1550     /* Feat bit is set only if KVM supports sync for diag318 */
1551     if (s390_has_feat(S390_FEAT_DIAG_318)) {
1552         env->diag318_info = diag318_info;
1553         cs->kvm_run->s.regs.diag318 = diag318_info;
1554         cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_DIAG318;
1555         /*
1556          * diag 318 info is zeroed during a clear reset and by the
1557          * diag 308 IPL subcodes.
1558          */
1559     }
1560 }
1561 
1562 static void handle_diag_318(S390CPU *cpu, struct kvm_run *run)
1563 {
1564     uint64_t reg = (run->s390_sieic.ipa & 0x00f0) >> 4;
1565     uint64_t diag318_info = run->s.regs.gprs[reg];
1566     CPUState *t;
1567 
1568     /*
1569      * DIAG 318 can only be enabled with KVM support. As such, let's
1570      * ensure a guest cannot execute this instruction erroneously.
1571      */
1572     if (!s390_has_feat(S390_FEAT_DIAG_318)) {
1573         kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
1574         return;
1575     }
1576 
1577     CPU_FOREACH(t) {
1578         run_on_cpu(t, s390_do_cpu_set_diag318,
1579                    RUN_ON_CPU_HOST_ULONG(diag318_info));
1580     }
1581 }
1582 
1583 #define DIAG_KVM_CODE_MASK 0x000000000000ffff
1584 
1585 static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
1586 {
1587     int r = 0;
1588     uint16_t func_code;
1589 
1590     /*
1591      * For any diagnose call we support, bits 48-63 of the resulting
1592      * address specify the function code; the remainder is ignored.
1593      */
1594     func_code = decode_basedisp_rs(&cpu->env, ipb, NULL) & DIAG_KVM_CODE_MASK;
1595     switch (func_code) {
1596     case DIAG_TIMEREVENT:
1597         kvm_handle_diag_288(cpu, run);
1598         break;
1599     case DIAG_IPL:
1600         kvm_handle_diag_308(cpu, run);
1601         break;
1602     case DIAG_SET_CONTROL_PROGRAM_CODES:
1603         handle_diag_318(cpu, run);
1604         break;
1605     case DIAG_KVM_HYPERCALL:
1606         r = handle_hypercall(cpu, run);
1607         break;
1608     case DIAG_KVM_BREAKPOINT:
1609         r = handle_sw_breakpoint(cpu, run);
1610         break;
1611     default:
1612         trace_kvm_insn_diag(func_code);
1613         kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
1614         break;
1615     }
1616 
1617     return r;
1618 }
1619 
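/* Decode the SIGP order code from the second-operand address and handle the order. */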
1620 static int kvm_s390_handle_sigp(S390CPU *cpu, uint8_t ipa1, uint32_t ipb)
1621 {
1622     CPUS390XState *env = &cpu->env;
1623     const uint8_t r1 = ipa1 >> 4;
1624     const uint8_t r3 = ipa1 & 0x0f;
1625     int ret;
1626     uint8_t order;
1627 
1628     /* get order code */
1629     order = decode_basedisp_rs(env, ipb, NULL) & SIGP_ORDER_MASK;
1630 
1631     ret = handle_sigp(env, order, r1, r3);
1632     setcc(cpu, ret);
1633     return 0;
1634 }
1635 
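/* Dispatch an instruction intercept by major opcode; unhandled opcodes turn into an operation exception. */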
1636 static int handle_instruction(S390CPU *cpu, struct kvm_run *run)
1637 {
1638     unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00);
1639     uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff;
1640     int r = -1;
1641 
1642     trace_kvm_insn(run->s390_sieic.ipa, run->s390_sieic.ipb);
1643     switch (ipa0) {
1644     case IPA0_B2:
1645         r = handle_b2(cpu, run, ipa1);
1646         break;
1647     case IPA0_B9:
1648         r = handle_b9(cpu, run, ipa1);
1649         break;
1650     case IPA0_EB:
1651         r = handle_eb(cpu, run, run->s390_sieic.ipb & 0xff);
1652         break;
1653     case IPA0_E3:
1654         r = handle_e3(cpu, run, run->s390_sieic.ipb & 0xff);
1655         break;
1656     case IPA0_DIAG:
1657         r = handle_diag(cpu, run, run->s390_sieic.ipb);
1658         break;
1659     case IPA0_SIGP:
1660         r = kvm_s390_handle_sigp(cpu, ipa1, run->s390_sieic.ipb);
1661         break;
1662     }
1663 
1664     if (r < 0) {
1665         r = 0;
1666         kvm_s390_program_interrupt(cpu, PGM_OPERATION);
1667     }
1668 
1669     return r;
1670 }
1671 
1672 static void unmanageable_intercept(S390CPU *cpu, S390CrashReason reason,
1673                                    int pswoffset)
1674 {
1675     CPUState *cs = CPU(cpu);
1676 
1677     s390_cpu_halt(cpu);
1678     cpu->env.crash_reason = reason;
1679     qemu_system_guest_panicked(cpu_get_crash_info(cs));
1680 }
1681 
1682 /* try to detect pgm check loops */
1683 static int handle_oper_loop(S390CPU *cpu, struct kvm_run *run)
1684 {
1685     CPUState *cs = CPU(cpu);
1686     PSW oldpsw, newpsw;
1687 
1688     newpsw.mask = ldq_phys(cs->as, cpu->env.psa +
1689                            offsetof(LowCore, program_new_psw));
1690     newpsw.addr = ldq_phys(cs->as, cpu->env.psa +
1691                            offsetof(LowCore, program_new_psw) + 8);
1692     oldpsw.mask  = run->psw_mask;
1693     oldpsw.addr  = run->psw_addr;
1694     /*
1695      * Avoid endless loops of operation exceptions if the pgm new
1696      * PSW will cause a new operation exception.
1697      * The heuristic checks whether the pgm new PSW is within 6 bytes before
1698      * the faulting PSW address (with the same DAT and AS settings), the
1699      * new PSW is not a wait PSW, and the fault was not triggered from
1700      * problem state. In that case, go into the crashed state.
1701      */
1702 
1703     if (oldpsw.addr - newpsw.addr <= 6 &&
1704         !(newpsw.mask & PSW_MASK_WAIT) &&
1705         !(oldpsw.mask & PSW_MASK_PSTATE) &&
1706         (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) &&
1707         (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT)) {
1708         unmanageable_intercept(cpu, S390_CRASH_REASON_OPINT_LOOP,
1709                                offsetof(LowCore, program_new_psw));
1710         return EXCP_HALTED;
1711     }
1712     return 0;
1713 }
1714 
1715 static int handle_intercept(S390CPU *cpu)
1716 {
1717     CPUState *cs = CPU(cpu);
1718     struct kvm_run *run = cs->kvm_run;
1719     int icpt_code = run->s390_sieic.icptcode;
1720     int r = 0;
1721 
1722     trace_kvm_intercept(icpt_code, (long)run->psw_addr);
1723     switch (icpt_code) {
1724         case ICPT_INSTRUCTION:
1725         case ICPT_PV_INSTR:
1726         case ICPT_PV_INSTR_NOTIFICATION:
1727             r = handle_instruction(cpu, run);
1728             break;
1729         case ICPT_PROGRAM:
1730             unmanageable_intercept(cpu, S390_CRASH_REASON_PGMINT_LOOP,
1731                                    offsetof(LowCore, program_new_psw));
1732             r = EXCP_HALTED;
1733             break;
1734         case ICPT_EXT_INT:
1735             unmanageable_intercept(cpu, S390_CRASH_REASON_EXTINT_LOOP,
1736                                    offsetof(LowCore, external_new_psw));
1737             r = EXCP_HALTED;
1738             break;
1739         case ICPT_WAITPSW:
1740             /* disabled wait, since enabled wait is handled in kernel */
1741             s390_handle_wait(cpu);
1742             r = EXCP_HALTED;
1743             break;
1744         case ICPT_CPU_STOP:
1745             do_stop_interrupt(&cpu->env);
1746             r = EXCP_HALTED;
1747             break;
1748         case ICPT_OPEREXC:
1749             /* check for break points */
1750             r = handle_sw_breakpoint(cpu, run);
1751             if (r == -ENOENT) {
1752                 /* Then check for potential pgm check loops */
1753                 r = handle_oper_loop(cpu, run);
1754                 if (r == 0) {
1755                     kvm_s390_program_interrupt(cpu, PGM_OPERATION);
1756                 }
1757             }
1758             break;
1759         case ICPT_SOFT_INTERCEPT:
1760             fprintf(stderr, "KVM unimplemented icpt SOFT\n");
1761             exit(1);
1762             break;
1763         case ICPT_IO:
1764             fprintf(stderr, "KVM unimplemented icpt IO\n");
1765             exit(1);
1766             break;
1767         default:
1768             fprintf(stderr, "Unknown intercept code: %d\n", icpt_code);
1769             exit(1);
1770             break;
1771     }
1772 
1773     return r;
1774 }
1775 
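/*
 * TEST SUBCHANNEL was intercepted; the kernel may already have dequeued
 * a matching I/O interrupt. Emulate the instruction via the common css
 * code and, if that fails, reinject a dequeued interrupt so it is not
 * lost.
 */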
1776 static int handle_tsch(S390CPU *cpu)
1777 {
1778     CPUState *cs = CPU(cpu);
1779     struct kvm_run *run = cs->kvm_run;
1780     int ret;
1781 
1782     ret = ioinst_handle_tsch(cpu, cpu->env.regs[1], run->s390_tsch.ipb,
1783                              RA_IGNORED);
1784     if (ret < 0) {
1785         /*
1786          * Failure.
1787          * If an I/O interrupt had been dequeued, we have to reinject it.
1788          */
1789         if (run->s390_tsch.dequeued) {
1790             s390_io_interrupt(run->s390_tsch.subchannel_id,
1791                               run->s390_tsch.subchannel_nr,
1792                               run->s390_tsch.io_int_parm,
1793                               run->s390_tsch.io_int_word);
1794         }
1795         ret = 0;
1796     }
1797     return ret;
1798 }
1799 
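/*
 * Fill in the VM description (SYSIB 3.2.2) for STORE SYSTEM INFORMATION:
 * CPU counts, the machine name in EBCDIC, the Extended Name in UTF-8 and
 * the instance UUID, layered on top of whatever lower hypervisor levels
 * already reported.
 */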
1800 static void insert_stsi_3_2_2(S390CPU *cpu, __u64 addr, uint8_t ar)
1801 {
1802     const MachineState *ms = MACHINE(qdev_get_machine());
1803     uint16_t conf_cpus = 0, reserved_cpus = 0;
1804     SysIB_322 sysib;
1805     int del, i;
1806 
1807     if (s390_is_pv()) {
1808         s390_cpu_pv_mem_read(cpu, 0, &sysib, sizeof(sysib));
1809     } else if (s390_cpu_virt_mem_read(cpu, addr, ar, &sysib, sizeof(sysib))) {
1810         return;
1811     }
1812     /* Shift the stack of Extended Names to prepare for our own data */
1813     memmove(&sysib.ext_names[1], &sysib.ext_names[0],
1814             sizeof(sysib.ext_names[0]) * (sysib.count - 1));
1815     /* The first virt level that doesn't provide Extended Names delimits the
1816      * stack; assume it cannot manage Extended Names for lower levels either.
1817      */
1818     for (del = 1; del < sysib.count; del++) {
1819         if (!sysib.vm[del].ext_name_encoding || !sysib.ext_names[del][0]) {
1820             break;
1821         }
1822     }
1823     if (del < sysib.count) {
1824         memset(sysib.ext_names[del], 0,
1825                sizeof(sysib.ext_names[0]) * (sysib.count - del));
1826     }
1827 
1828     /* count the cpus and split them into configured and reserved ones */
1829     for (i = 0; i < ms->possible_cpus->len; i++) {
1830         if (ms->possible_cpus->cpus[i].cpu) {
1831             conf_cpus++;
1832         } else {
1833             reserved_cpus++;
1834         }
1835     }
1836     sysib.vm[0].total_cpus = conf_cpus + reserved_cpus;
1837     sysib.vm[0].conf_cpus = conf_cpus;
1838     sysib.vm[0].reserved_cpus = reserved_cpus;
1839 
1840     /* Insert short machine name in EBCDIC, padded with blanks */
1841     if (qemu_name) {
1842         memset(sysib.vm[0].name, 0x40, sizeof(sysib.vm[0].name));
1843         ebcdic_put(sysib.vm[0].name, qemu_name, MIN(sizeof(sysib.vm[0].name),
1844                                                     strlen(qemu_name)));
1845     }
1846     sysib.vm[0].ext_name_encoding = 2; /* 2 = UTF-8 */
1847     /* If a hypervisor specifies a zero Extended Name in the STSI 3.2.2
1848      * SYSIB, s390 considers it incapable of providing any Extended Name.
1849      * Therefore, if no name was given on the QEMU invocation, we go with
1850      * the same "KVMguest" default that KVM filled into the short name field.
1851      */
1852     strpadcpy((char *)sysib.ext_names[0],
1853               sizeof(sysib.ext_names[0]),
1854               qemu_name ?: "KVMguest", '\0');
1855 
1856     /* Insert UUID */
1857     memcpy(sysib.vm[0].uuid, &qemu_uuid, sizeof(sysib.vm[0].uuid));
1858 
1859     if (s390_is_pv()) {
1860         s390_cpu_pv_mem_write(cpu, 0, &sysib, sizeof(sysib));
1861     } else {
1862         s390_cpu_virt_mem_write(cpu, addr, ar, &sysib, sizeof(sysib));
1863     }
1864 }
1865 
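/*
 * Complete the STSI instances that the kernel hands to user space:
 * function code 3, selectors 2.2 (the VM description, filled in above)
 * and function code 15 (CPU topology). Anything else is ignored.
 */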
1866 static int handle_stsi(S390CPU *cpu)
1867 {
1868     CPUState *cs = CPU(cpu);
1869     struct kvm_run *run = cs->kvm_run;
1870 
1871     switch (run->s390_stsi.fc) {
1872     case 3:
1873         if (run->s390_stsi.sel1 != 2 || run->s390_stsi.sel2 != 2) {
1874             return 0;
1875         }
1876         insert_stsi_3_2_2(cpu, run->s390_stsi.addr, run->s390_stsi.ar);
1877         return 0;
1878     case 15:
1879         insert_stsi_15_1_x(cpu, run->s390_stsi.sel2, run->s390_stsi.addr,
1880                            run->s390_stsi.ar, RA_IGNORED);
1881         return 0;
1882     default:
1883         return 0;
1884     }
1885 }
1886 
1887 static int kvm_arch_handle_debug_exit(S390CPU *cpu)
1888 {
1889     CPUState *cs = CPU(cpu);
1890     struct kvm_run *run = cs->kvm_run;
1891 
1892     int ret = 0;
1893     struct kvm_debug_exit_arch *arch_info = &run->debug.arch;
1894 
1895     switch (arch_info->type) {
1896     case KVM_HW_WP_WRITE:
1897         if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
1898             cs->watchpoint_hit = &hw_watchpoint;
1899             hw_watchpoint.vaddr = arch_info->addr;
1900             hw_watchpoint.flags = BP_MEM_WRITE;
1901             ret = EXCP_DEBUG;
1902         }
1903         break;
1904     case KVM_HW_BP:
1905         if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
1906             ret = EXCP_DEBUG;
1907         }
1908         break;
1909     case KVM_SINGLESTEP:
1910         if (cs->singlestep_enabled) {
1911             ret = EXCP_DEBUG;
1912         }
1913         break;
1914     default:
1915         ret = -ENOSYS;
1916     }
1917 
1918     return ret;
1919 }
1920 
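/*
 * Top-level KVM exit handler. The vcpu state is synchronized and the
 * iothread lock is held across all handlers; a return value of 0 is
 * translated to EXCP_INTERRUPT before returning to the vcpu loop.
 */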
1921 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
1922 {
1923     S390CPU *cpu = S390_CPU(cs);
1924     int ret = 0;
1925 
1926     qemu_mutex_lock_iothread();
1927 
1928     kvm_cpu_synchronize_state(cs);
1929 
1930     switch (run->exit_reason) {
1931         case KVM_EXIT_S390_SIEIC:
1932             ret = handle_intercept(cpu);
1933             break;
1934         case KVM_EXIT_S390_RESET:
1935             s390_ipl_reset_request(cs, S390_RESET_REIPL);
1936             break;
1937         case KVM_EXIT_S390_TSCH:
1938             ret = handle_tsch(cpu);
1939             break;
1940         case KVM_EXIT_S390_STSI:
1941             ret = handle_stsi(cpu);
1942             break;
1943         case KVM_EXIT_DEBUG:
1944             ret = kvm_arch_handle_debug_exit(cpu);
1945             break;
1946         default:
1947             fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
1948             break;
1949     }
1950     qemu_mutex_unlock_iothread();
1951 
1952     if (ret == 0) {
1953         ret = EXCP_INTERRUPT;
1954     }
1955     return ret;
1956 }
1957 
1958 bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
1959 {
1960     return true;
1961 }
1962 
1963 void kvm_s390_enable_css_support(S390CPU *cpu)
1964 {
1965     int r;
1966 
1967     /* Activate host kernel channel subsystem support. */
1968     r = kvm_vcpu_enable_cap(CPU(cpu), KVM_CAP_S390_CSS_SUPPORT, 0);
1969     assert(r == 0);
1970 }
1971 
1972 void kvm_arch_init_irq_routing(KVMState *s)
1973 {
1974     /*
1975      * Note that while irqchip capabilities generally imply that cpustates
1976      * are handled in-kernel, it is not true for s390 (yet); therefore, we
1977      * have to override the common code kvm_halt_in_kernel_allowed setting.
1978      */
1979     if (kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
1980         kvm_gsi_routing_allowed = true;
1981         kvm_halt_in_kernel_allowed = false;
1982     }
1983 }
1984 
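/*
 * (De)register an ioeventfd for a virtio-ccw notification: the kernel
 * matches on the subchannel id (addr) and the virtqueue index
 * (datamatch) and signals the eventfd without a return to user space.
 */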
1985 int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
1986                                     int vq, bool assign)
1987 {
1988     struct kvm_ioeventfd kick = {
1989         .flags = KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY |
1990         KVM_IOEVENTFD_FLAG_DATAMATCH,
1991         .fd = event_notifier_get_fd(notifier),
1992         .datamatch = vq,
1993         .addr = sch,
1994         .len = 8,
1995     };
1996     trace_kvm_assign_subch_ioeventfd(kick.fd, kick.addr, assign,
1997                                      kick.datamatch);
1998     if (!kvm_check_extension(kvm_state, KVM_CAP_IOEVENTFD)) {
1999         return -ENOSYS;
2000     }
2001     if (!assign) {
2002         kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
2003     }
2004     return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
2005 }
2006 
2007 int kvm_s390_get_protected_dump(void)
2008 {
2009     return cap_protected_dump;
2010 }
2011 
2012 int kvm_s390_get_ri(void)
2013 {
2014     return cap_ri;
2015 }
2016 
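/*
 * Propagate a QEMU CPU state change (stopped, check-stop, operating,
 * load) to the in-kernel mp_state, so KVM and QEMU agree on whether the
 * vcpu is runnable.
 */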
2017 int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state)
2018 {
2019     struct kvm_mp_state mp_state = {};
2020     int ret;
2021 
2022     /* the kvm part might not have been initialized yet */
2023     if (CPU(cpu)->kvm_state == NULL) {
2024         return 0;
2025     }
2026 
2027     switch (cpu_state) {
2028     case S390_CPU_STATE_STOPPED:
2029         mp_state.mp_state = KVM_MP_STATE_STOPPED;
2030         break;
2031     case S390_CPU_STATE_CHECK_STOP:
2032         mp_state.mp_state = KVM_MP_STATE_CHECK_STOP;
2033         break;
2034     case S390_CPU_STATE_OPERATING:
2035         mp_state.mp_state = KVM_MP_STATE_OPERATING;
2036         break;
2037     case S390_CPU_STATE_LOAD:
2038         mp_state.mp_state = KVM_MP_STATE_LOAD;
2039         break;
2040     default:
2041         error_report("Requested CPU state is not a valid S390 CPU state: %u",
2042                      cpu_state);
2043         exit(1);
2044     }
2045 
2046     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
2047     if (ret) {
2048         trace_kvm_failed_cpu_state_set(CPU(cpu)->cpu_index, cpu_state,
2049                                        strerror(-ret));
2050     }
2051 
2052     return ret;
2053 }
2054 
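/*
 * Save the vcpu's pending local interrupt state into cpu->irqstate for
 * migration. The buffer is sized for the machine's maximum CPU count,
 * e.g. because an emergency signal can be pending from every other CPU.
 */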
2055 void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu)
2056 {
2057     unsigned int max_cpus = MACHINE(qdev_get_machine())->smp.max_cpus;
2058     struct kvm_s390_irq_state irq_state = {
2059         .buf = (uint64_t) cpu->irqstate,
2060         .len = VCPU_IRQ_BUF_SIZE(max_cpus),
2061     };
2062     CPUState *cs = CPU(cpu);
2063     int32_t bytes;
2064 
2065     if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) {
2066         return;
2067     }
2068 
2069     bytes = kvm_vcpu_ioctl(cs, KVM_S390_GET_IRQ_STATE, &irq_state);
2070     if (bytes < 0) {
2071         cpu->irqstate_saved_size = 0;
2072         error_report("Migration of interrupt state failed");
2073         return;
2074     }
2075 
2076     cpu->irqstate_saved_size = bytes;
2077 }
2078 
2079 int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu)
2080 {
2081     CPUState *cs = CPU(cpu);
2082     struct kvm_s390_irq_state irq_state = {
2083         .buf = (uint64_t) cpu->irqstate,
2084         .len = cpu->irqstate_saved_size,
2085     };
2086     int r;
2087 
2088     if (cpu->irqstate_saved_size == 0) {
2089         return 0;
2090     }
2091 
2092     if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) {
2093         return -ENOSYS;
2094     }
2095 
2096     r = kvm_vcpu_ioctl(cs, KVM_S390_SET_IRQ_STATE, &irq_state);
2097     if (r) {
2098         error_report("Setting interrupt state failed %d", r);
2099     }
2100     return r;
2101 }
2102 
2103 int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
2104                              uint64_t address, uint32_t data, PCIDevice *dev)
2105 {
2106     S390PCIBusDevice *pbdev;
2107     uint32_t vec = data & ZPCI_MSI_VEC_MASK;
2108 
2109     if (!dev) {
2110         trace_kvm_msi_route_fixup("no pci device");
2111         return -ENODEV;
2112     }
2113 
2114     pbdev = s390_pci_find_dev_by_target(s390_get_phb(), DEVICE(dev)->id);
2115     if (!pbdev) {
2116         trace_kvm_msi_route_fixup("no zpci device");
2117         return -ENODEV;
2118     }
2119 
2120     route->type = KVM_IRQ_ROUTING_S390_ADAPTER;
2121     route->flags = 0;
2122     route->u.adapter.summary_addr = pbdev->routes.adapter.summary_addr;
2123     route->u.adapter.ind_addr = pbdev->routes.adapter.ind_addr;
2124     route->u.adapter.summary_offset = pbdev->routes.adapter.summary_offset;
2125     route->u.adapter.ind_offset = pbdev->routes.adapter.ind_offset + vec;
2126     route->u.adapter.adapter_id = pbdev->routes.adapter.adapter_id;
2127     return 0;
2128 }
2129 
2130 int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
2131                                 int vector, PCIDevice *dev)
2132 {
2133     return 0;
2134 }
2135 
2136 int kvm_arch_release_virq_post(int virq)
2137 {
2138     return 0;
2139 }
2140 
2141 int kvm_arch_msi_data_to_gsi(uint32_t data)
2142 {
2143     abort();
2144 }
2145 
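/*
 * Retrieve the installed subfunctions (PLO, PTFF, the MSA KM* family,
 * SORTL, DFLTCC, ...) from the KVM machine model. Each prop block
 * mirrors the bit layout returned by the corresponding query function.
 */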
2146 static int query_cpu_subfunc(S390FeatBitmap features)
2147 {
2148     struct kvm_s390_vm_cpu_subfunc prop = {};
2149     struct kvm_device_attr attr = {
2150         .group = KVM_S390_VM_CPU_MODEL,
2151         .attr = KVM_S390_VM_CPU_MACHINE_SUBFUNC,
2152         .addr = (uint64_t) &prop,
2153     };
2154     int rc;
2155 
2156     rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
2157     if (rc) {
2158         return rc;
2159     }
2160 
2161     /*
2162      * Add all subfunctions now, provided the corresponding feature that
2163      * unlocks the query function is available.
2164      */
2165     s390_add_from_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo);
2166     if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) {
2167         s390_add_from_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff);
2168     }
2169     if (test_bit(S390_FEAT_MSA, features)) {
2170         s390_add_from_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac);
2171         s390_add_from_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc);
2172         s390_add_from_feat_block(features, S390_FEAT_TYPE_KM, prop.km);
2173         s390_add_from_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd);
2174         s390_add_from_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd);
2175     }
2176     if (test_bit(S390_FEAT_MSA_EXT_3, features)) {
2177         s390_add_from_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo);
2178     }
2179     if (test_bit(S390_FEAT_MSA_EXT_4, features)) {
2180         s390_add_from_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr);
2181         s390_add_from_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf);
2182         s390_add_from_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo);
2183         s390_add_from_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc);
2184     }
2185     if (test_bit(S390_FEAT_MSA_EXT_5, features)) {
2186         s390_add_from_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno);
2187     }
2188     if (test_bit(S390_FEAT_MSA_EXT_8, features)) {
2189         s390_add_from_feat_block(features, S390_FEAT_TYPE_KMA, prop.kma);
2190     }
2191     if (test_bit(S390_FEAT_MSA_EXT_9, features)) {
2192         s390_add_from_feat_block(features, S390_FEAT_TYPE_KDSA, prop.kdsa);
2193     }
2194     if (test_bit(S390_FEAT_ESORT_BASE, features)) {
2195         s390_add_from_feat_block(features, S390_FEAT_TYPE_SORTL, prop.sortl);
2196     }
2197     if (test_bit(S390_FEAT_DEFLATE_BASE, features)) {
2198         s390_add_from_feat_block(features, S390_FEAT_TYPE_DFLTCC, prop.dfltcc);
2199     }
2200     return 0;
2201 }
2202 
2203 static int configure_cpu_subfunc(const S390FeatBitmap features)
2204 {
2205     struct kvm_s390_vm_cpu_subfunc prop = {};
2206     struct kvm_device_attr attr = {
2207         .group = KVM_S390_VM_CPU_MODEL,
2208         .attr = KVM_S390_VM_CPU_PROCESSOR_SUBFUNC,
2209         .addr = (uint64_t) &prop,
2210     };
2211 
2212     if (!kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
2213                            KVM_S390_VM_CPU_PROCESSOR_SUBFUNC)) {
2214         /* hardware support might be missing, IBC will handle most of this */
2215         return 0;
2216     }
2217 
2218     s390_fill_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo);
2219     if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) {
2220         s390_fill_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff);
2221     }
2222     if (test_bit(S390_FEAT_MSA, features)) {
2223         s390_fill_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac);
2224         s390_fill_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc);
2225         s390_fill_feat_block(features, S390_FEAT_TYPE_KM, prop.km);
2226         s390_fill_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd);
2227         s390_fill_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd);
2228     }
2229     if (test_bit(S390_FEAT_MSA_EXT_3, features)) {
2230         s390_fill_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo);
2231     }
2232     if (test_bit(S390_FEAT_MSA_EXT_4, features)) {
2233         s390_fill_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr);
2234         s390_fill_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf);
2235         s390_fill_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo);
2236         s390_fill_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc);
2237     }
2238     if (test_bit(S390_FEAT_MSA_EXT_5, features)) {
2239         s390_fill_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno);
2240     }
2241     if (test_bit(S390_FEAT_MSA_EXT_8, features)) {
2242         s390_fill_feat_block(features, S390_FEAT_TYPE_KMA, prop.kma);
2243     }
2244     if (test_bit(S390_FEAT_MSA_EXT_9, features)) {
2245         s390_fill_feat_block(features, S390_FEAT_TYPE_KDSA, prop.kdsa);
2246     }
2247     if (test_bit(S390_FEAT_ESORT_BASE, features)) {
2248         s390_fill_feat_block(features, S390_FEAT_TYPE_SORTL, prop.sortl);
2249     }
2250     if (test_bit(S390_FEAT_DEFLATE_BASE, features)) {
2251         s390_fill_feat_block(features, S390_FEAT_TYPE_DFLTCC, prop.dfltcc);
2252     }
2253     return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
2254 }
2255 
2256 static bool ap_available(void)
2257 {
2258     return kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO,
2259                              KVM_S390_VM_CRYPTO_ENABLE_APIE);
2260 }
2261 
2262 static bool ap_enabled(const S390FeatBitmap features)
2263 {
2264     return test_bit(S390_FEAT_AP, features);
2265 }
2266 
2267 static bool uv_feat_supported(void)
2268 {
2269     return kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
2270                              KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST);
2271 }
2272 
2273 static int query_uv_feat_guest(S390FeatBitmap features)
2274 {
2275     struct kvm_s390_vm_cpu_uv_feat prop = {};
2276     struct kvm_device_attr attr = {
2277         .group = KVM_S390_VM_CPU_MODEL,
2278         .attr = KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST,
2279         .addr = (uint64_t) &prop,
2280     };
2281     int rc;
2282 
2283     /* AP support check is currently the only user of the UV feature test */
2284     if (!(uv_feat_supported() && ap_available())) {
2285         return 0;
2286     }
2287 
2288     rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
2289     if (rc) {
2290         return rc;
2291     }
2292 
2293     if (prop.ap) {
2294         set_bit(S390_FEAT_UV_FEAT_AP, features);
2295     }
2296     if (prop.ap_intr) {
2297         set_bit(S390_FEAT_UV_FEAT_AP_INTR, features);
2298     }
2299 
2300     return 0;
2301 }
2302 
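/* Mapping between KVM CPU-model feature bits and QEMU's S390Feat enum */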
2303 static int kvm_to_feat[][2] = {
2304     { KVM_S390_VM_CPU_FEAT_ESOP, S390_FEAT_ESOP },
2305     { KVM_S390_VM_CPU_FEAT_SIEF2, S390_FEAT_SIE_F2 },
2306     { KVM_S390_VM_CPU_FEAT_64BSCAO, S390_FEAT_SIE_64BSCAO },
2307     { KVM_S390_VM_CPU_FEAT_SIIF, S390_FEAT_SIE_SIIF },
2308     { KVM_S390_VM_CPU_FEAT_GPERE, S390_FEAT_SIE_GPERE },
2309     { KVM_S390_VM_CPU_FEAT_GSLS, S390_FEAT_SIE_GSLS },
2310     { KVM_S390_VM_CPU_FEAT_IB, S390_FEAT_SIE_IB },
2311     { KVM_S390_VM_CPU_FEAT_CEI, S390_FEAT_SIE_CEI },
2312     { KVM_S390_VM_CPU_FEAT_IBS, S390_FEAT_SIE_IBS },
2313     { KVM_S390_VM_CPU_FEAT_SKEY, S390_FEAT_SIE_SKEY },
2314     { KVM_S390_VM_CPU_FEAT_CMMA, S390_FEAT_SIE_CMMA },
2315     { KVM_S390_VM_CPU_FEAT_PFMFI, S390_FEAT_SIE_PFMFI },
2316     { KVM_S390_VM_CPU_FEAT_SIGPIF, S390_FEAT_SIE_SIGPIF },
2317     { KVM_S390_VM_CPU_FEAT_KSS, S390_FEAT_SIE_KSS },
2318 };
2319 
2320 static int query_cpu_feat(S390FeatBitmap features)
2321 {
2322     struct kvm_s390_vm_cpu_feat prop = {};
2323     struct kvm_device_attr attr = {
2324         .group = KVM_S390_VM_CPU_MODEL,
2325         .attr = KVM_S390_VM_CPU_MACHINE_FEAT,
2326         .addr = (uint64_t) &prop,
2327     };
2328     int rc;
2329     int i;
2330 
2331     rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
2332     if (rc) {
2333         return rc;
2334     }
2335 
2336     for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) {
2337         if (test_be_bit(kvm_to_feat[i][0], (uint8_t *) prop.feat)) {
2338             set_bit(kvm_to_feat[i][1], features);
2339         }
2340     }
2341     return 0;
2342 }
2343 
2344 static int configure_cpu_feat(const S390FeatBitmap features)
2345 {
2346     struct kvm_s390_vm_cpu_feat prop = {};
2347     struct kvm_device_attr attr = {
2348         .group = KVM_S390_VM_CPU_MODEL,
2349         .attr = KVM_S390_VM_CPU_PROCESSOR_FEAT,
2350         .addr = (uint64_t) &prop,
2351     };
2352     int i;
2353 
2354     for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) {
2355         if (test_bit(kvm_to_feat[i][1], features)) {
2356             set_be_bit(kvm_to_feat[i][0], (uint8_t *) prop.feat);
2357         }
2358     }
2359     return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
2360 }
2361 
2362 bool kvm_s390_cpu_models_supported(void)
2363 {
2364     if (!cpu_model_allowed()) {
2365         /* compatibility machines interfere with the cpu model */
2366         return false;
2367     }
2368     return kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
2369                              KVM_S390_VM_CPU_MACHINE) &&
2370            kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
2371                              KVM_S390_VM_CPU_PROCESSOR) &&
2372            kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
2373                              KVM_S390_VM_CPU_MACHINE_FEAT) &&
2374            kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
2375                              KVM_S390_VM_CPU_PROCESSOR_FEAT) &&
2376            kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
2377                              KVM_S390_VM_CPU_MACHINE_SUBFUNC);
2378 }
2379 
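/*
 * Build the host CPU model: query cpuid/IBC and the facility mask from
 * KVM, merge in features and subfunctions, apply QEMU-side fixups
 * (zPCI, CMM, unpack, ...) and look up the matching CPU definition.
 */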
2380 void kvm_s390_get_host_cpu_model(S390CPUModel *model, Error **errp)
2381 {
2382     struct kvm_s390_vm_cpu_machine prop = {};
2383     struct kvm_device_attr attr = {
2384         .group = KVM_S390_VM_CPU_MODEL,
2385         .attr = KVM_S390_VM_CPU_MACHINE,
2386         .addr = (uint64_t) &prop,
2387     };
2388     uint16_t unblocked_ibc = 0, cpu_type = 0;
2389     int rc;
2390 
2391     memset(model, 0, sizeof(*model));
2392 
2393     if (!kvm_s390_cpu_models_supported()) {
2394         error_setg(errp, "KVM doesn't support CPU models");
2395         return;
2396     }
2397 
2398     /* query the basic cpu model properties */
2399     rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
2400     if (rc) {
2401         error_setg(errp, "KVM: Error querying host CPU model: %d", rc);
2402         return;
2403     }
2404 
2405     cpu_type = cpuid_type(prop.cpuid);
2406     if (has_ibc(prop.ibc)) {
2407         model->lowest_ibc = lowest_ibc(prop.ibc);
2408         unblocked_ibc = unblocked_ibc(prop.ibc);
2409     }
2410     model->cpu_id = cpuid_id(prop.cpuid);
2411     model->cpu_id_format = cpuid_format(prop.cpuid);
2412     model->cpu_ver = 0xff;
2413 
2414     /* get supported cpu features indicated via STFL(E) */
2415     s390_add_from_feat_block(model->features, S390_FEAT_TYPE_STFL,
2416                              (uint8_t *) prop.fac_mask);
2417     /* dat-enhancement facility 2 has no bit but was introduced with stfle */
2418     if (test_bit(S390_FEAT_STFLE, model->features)) {
2419         set_bit(S390_FEAT_DAT_ENH_2, model->features);
2420     }
2421     /* get supported cpu features indicated e.g. via SCLP */
2422     rc = query_cpu_feat(model->features);
2423     if (rc) {
2424         error_setg(errp, "KVM: Error querying CPU features: %d", rc);
2425         return;
2426     }
2427     /* get supported cpu subfunctions indicated via query / test bit */
2428     rc = query_cpu_subfunc(model->features);
2429     if (rc) {
2430         error_setg(errp, "KVM: Error querying CPU subfunctions: %d", rc);
2431         return;
2432     }
2433 
2434     /* PTFF subfunctions might be indicated although kernel support is missing */
2435     if (!test_bit(S390_FEAT_MULTIPLE_EPOCH, model->features)) {
2436         clear_bit(S390_FEAT_PTFF_QSIE, model->features);
2437         clear_bit(S390_FEAT_PTFF_QTOUE, model->features);
2438         clear_bit(S390_FEAT_PTFF_STOE, model->features);
2439         clear_bit(S390_FEAT_PTFF_STOUE, model->features);
2440     }
2441 
2442     /* with cpu model support, CMM is only indicated if really available */
2443     if (kvm_s390_cmma_available()) {
2444         set_bit(S390_FEAT_CMM, model->features);
2445     } else {
2446         /* no cmm -> no cmm nt */
2447         clear_bit(S390_FEAT_CMM_NT, model->features);
2448     }
2449 
2450     /* bpb needs kernel support for migration, VSIE and reset */
2451     if (!kvm_check_extension(kvm_state, KVM_CAP_S390_BPB)) {
2452         clear_bit(S390_FEAT_BPB, model->features);
2453     }
2454 
2455     /*
2456      * If we have support for protected virtualization, indicate
2457      * the protected virtualization IPL unpack facility.
2458      */
2459     if (cap_protected) {
2460         set_bit(S390_FEAT_UNPACK, model->features);
2461     }
2462 
2463     /*
2464      * If we have kernel support for CPU Topology indicate the
2465      * configuration-topology facility.
2466      */
2467     if (kvm_check_extension(kvm_state, KVM_CAP_S390_CPU_TOPOLOGY)) {
2468         set_bit(S390_FEAT_CONFIGURATION_TOPOLOGY, model->features);
2469     }
2470 
2471     /* We emulate a zPCI bus and AEN, therefore we don't need HW support */
2472     set_bit(S390_FEAT_ZPCI, model->features);
2473     set_bit(S390_FEAT_ADAPTER_EVENT_NOTIFICATION, model->features);
2474 
2475     if (s390_known_cpu_type(cpu_type)) {
2476         /* we want the exact model, even if some features are missing */
2477         model->def = s390_find_cpu_def(cpu_type, ibc_gen(unblocked_ibc),
2478                                        ibc_ec_ga(unblocked_ibc), NULL);
2479     } else {
2480         /* model unknown, e.g. too new - search using features */
2481         model->def = s390_find_cpu_def(0, ibc_gen(unblocked_ibc),
2482                                        ibc_ec_ga(unblocked_ibc),
2483                                        model->features);
2484     }
2485     if (!model->def) {
2486         error_setg(errp, "KVM: host CPU model could not be identified");
2487         return;
2488     }
2489     /* for now, we can only provide the AP feature with HW support */
2490     if (ap_available()) {
2491         set_bit(S390_FEAT_AP, model->features);
2492     }
2493 
2494     /*
2495      * Extended-Length SCCB is handled entirely within QEMU.
2496      * For PV guests this is completely fenced by the Ultravisor, as Service
2497      * Call error checking and STFLE interpretation are handled via SIE.
2498      */
2499     set_bit(S390_FEAT_EXTENDED_LENGTH_SCCB, model->features);
2500 
2501     if (kvm_check_extension(kvm_state, KVM_CAP_S390_DIAG318)) {
2502         set_bit(S390_FEAT_DIAG_318, model->features);
2503     }
2504 
2505     /* Test for Ultravisor features that influence secure guest behavior */
2506     query_uv_feat_guest(model->features);
2507 
2508     /* strip off features that are not part of the maximum model */
2509     bitmap_and(model->features, model->features, model->def->full_feat,
2510                S390_FEAT_MAX);
2511 }
2512 
2513 static int configure_uv_feat_guest(const S390FeatBitmap features)
2514 {
2515     struct kvm_s390_vm_cpu_uv_feat uv_feat = {};
2516     struct kvm_device_attr attribute = {
2517         .group = KVM_S390_VM_CPU_MODEL,
2518         .attr = KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST,
2519         .addr = (__u64) &uv_feat,
2520     };
2521 
2522     /* AP support check is currently the only user of the UV feature test */
2523     if (!(uv_feat_supported() && ap_enabled(features))) {
2524         return 0;
2525     }
2526 
2527     if (test_bit(S390_FEAT_UV_FEAT_AP, features)) {
2528         uv_feat.ap = 1;
2529     }
2530     if (test_bit(S390_FEAT_UV_FEAT_AP_INTR, features)) {
2531         uv_feat.ap_intr = 1;
2532     }
2533 
2534     return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attribute);
2535 }
2536 
2537 static void kvm_s390_configure_apie(bool interpret)
2538 {
2539     uint64_t attr = interpret ? KVM_S390_VM_CRYPTO_ENABLE_APIE :
2540                                 KVM_S390_VM_CRYPTO_DISABLE_APIE;
2541 
2542     if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
2543         kvm_s390_set_crypto_attr(attr);
2544     }
2545 }
2546 
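/*
 * Push the configured CPU model into KVM: cpuid, IBC and the facility
 * list first, then features, subfunctions and the optional CMM, APIE and
 * UV guest-feature knobs.
 */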
2547 void kvm_s390_apply_cpu_model(const S390CPUModel *model, Error **errp)
2548 {
2549     struct kvm_s390_vm_cpu_processor prop = {
2550         .fac_list = { 0 },
2551     };
2552     struct kvm_device_attr attr = {
2553         .group = KVM_S390_VM_CPU_MODEL,
2554         .attr = KVM_S390_VM_CPU_PROCESSOR,
2555         .addr = (uint64_t) &prop,
2556     };
2557     int rc;
2558 
2559     if (!model) {
2560         /* compatibility handling if cpu models are disabled */
2561         if (kvm_s390_cmma_available()) {
2562             kvm_s390_enable_cmma();
2563         }
2564         return;
2565     }
2566     if (!kvm_s390_cpu_models_supported()) {
2567         error_setg(errp, "KVM doesn't support CPU models");
2568         return;
2569     }
2570     prop.cpuid = s390_cpuid_from_cpu_model(model);
2571     prop.ibc = s390_ibc_from_cpu_model(model);
2572     /* configure cpu features indicated via STFL(e) */
2573     /* configure cpu features indicated via STFL(E) */
2574                          (uint8_t *) prop.fac_list);
2575     rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
2576     if (rc) {
2577         error_setg(errp, "KVM: Error configuring the CPU model: %d", rc);
2578         return;
2579     }
2580     /* configure cpu features indicated e.g. via SCLP */
2581     rc = configure_cpu_feat(model->features);
2582     if (rc) {
2583         error_setg(errp, "KVM: Error configuring CPU features: %d", rc);
2584         return;
2585     }
2586     /* configure cpu subfunctions indicated via query / test bit */
2587     rc = configure_cpu_subfunc(model->features);
2588     if (rc) {
2589         error_setg(errp, "KVM: Error configuring CPU subfunctions: %d", rc);
2590         return;
2591     }
2592     /* enable CMM via CMMA */
2593     if (test_bit(S390_FEAT_CMM, model->features)) {
2594         kvm_s390_enable_cmma();
2595     }
2596 
2597     if (ap_enabled(model->features)) {
2598         kvm_s390_configure_apie(true);
2599     }
2600 
2601     /* configure UV-features for the guest indicated via query / test_bit */
2602     rc = configure_uv_feat_guest(model->features);
2603     if (rc) {
2604         error_setg(errp, "KVM: Error configuring CPU UV features: %d", rc);
2605         return;
2606     }
2607 }
2608 
2609 void kvm_s390_restart_interrupt(S390CPU *cpu)
2610 {
2611     struct kvm_s390_irq irq = {
2612         .type = KVM_S390_RESTART,
2613     };
2614 
2615     kvm_s390_vcpu_interrupt(cpu, &irq);
2616 }
2617 
2618 void kvm_s390_stop_interrupt(S390CPU *cpu)
2619 {
2620     struct kvm_s390_irq irq = {
2621         .type = KVM_S390_SIGP_STOP,
2622     };
2623 
2624     kvm_s390_vcpu_interrupt(cpu, &irq);
2625 }
2626 
2627 bool kvm_arch_cpu_check_are_resettable(void)
2628 {
2629     return true;
2630 }
2631 
2632 int kvm_s390_get_zpci_op(void)
2633 {
2634     return cap_zpci_op;
2635 }
2636 
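/*
 * Set or clear the Modified Topology Change Report (MTCR) via the
 * KVM_S390_VM_CPU_TOPOLOGY attribute; a no-op without the
 * configuration-topology facility.
 */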
2637 int kvm_s390_topology_set_mtcr(uint64_t attr)
2638 {
2639     struct kvm_device_attr attribute = {
2640         .group = KVM_S390_VM_CPU_TOPOLOGY,
2641         .attr  = attr,
2642     };
2643 
2644     if (!s390_has_feat(S390_FEAT_CONFIGURATION_TOPOLOGY)) {
2645         return 0;
2646     }
2647     if (!kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_TOPOLOGY, attr)) {
2648         return -ENOTSUP;
2649     }
2650 
2651     return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attribute);
2652 }
2653 
2654 void kvm_arch_accel_class_init(ObjectClass *oc)
2655 {
2656 }
2657