xref: /qemu/target/s390x/tcg/misc_helper.c (revision d051d0e1)
/*
 *  S/390 misc helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "exec/memory.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "qapi/error.h"
#include "tcg_s390x.h"
#include "s390-tod.h"

#if !defined(CONFIG_USER_ONLY)
#include "sysemu/cpus.h"
#include "sysemu/sysemu.h"
#include "hw/s390x/ebcdic.h"
#include "hw/s390x/s390-virtio-hcall.h"
#include "hw/s390x/sclp.h"
#include "hw/s390x/s390_flic.h"
#include "hw/s390x/ioinst.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/boards.h"
#include "hw/s390x/tod.h"
#endif

/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif

/* Raise an exception statically from a TB.  */
void HELPER(exception)(CPUS390XState *env, uint32_t excp)
{
    CPUState *cs = env_cpu(env);

    HELPER_LOG("%s: exception %d\n", __func__, excp);
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Store CPU Timer (also used for EXTRACT CPU TIME) */
uint64_t HELPER(stpt)(CPUS390XState *env)
{
#if defined(CONFIG_USER_ONLY)
    /*
     * Fake a descending CPU timer. We could get negative values here,
     * but we don't care, as it is up to the OS when to process that
     * interrupt and to reset the timer to a value > 0.
     */
    return UINT64_MAX - (uint64_t)cpu_get_host_ticks();
#else
    return time2tod(env->cputm - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
#endif
}

/* Store Clock */
uint64_t HELPER(stck)(CPUS390XState *env)
{
#ifdef CONFIG_USER_ONLY
    struct timespec ts;
    uint64_t ns;

    clock_gettime(CLOCK_REALTIME, &ts);
    ns = ts.tv_sec * NANOSECONDS_PER_SECOND + ts.tv_nsec;

    return TOD_UNIX_EPOCH + time2tod(ns);
#else
    S390TODState *td = s390_get_todstate();
    S390TODClass *tdc = S390_TOD_GET_CLASS(td);
    S390TOD tod;

    tdc->get(td, &tod, &error_abort);
    return tod.low;
#endif
}

#ifndef CONFIG_USER_ONLY
/* SCLP service call */
uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
    qemu_mutex_lock_iothread();
    int r = sclp_service_call(env, r1, r2);
    qemu_mutex_unlock_iothread();
    if (r < 0) {
        tcg_s390_program_interrupt(env, -r, GETPC());
    }
    return r;
}

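/* DIAGNOSE: dispatch on the diagnose function code */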
void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num)
{
    uint64_t r;

    switch (num) {
    case 0x500:
        /* KVM hypercall */
        qemu_mutex_lock_iothread();
        r = s390_virtio_hypercall(env);
        qemu_mutex_unlock_iothread();
        break;
    case 0x44:
        /* yield */
        r = 0;
        break;
    case 0x308:
        /* ipl */
        qemu_mutex_lock_iothread();
        handle_diag_308(env, r1, r3, GETPC());
        qemu_mutex_unlock_iothread();
        r = 0;
        break;
    case 0x288:
        /* time bomb (watchdog) */
        r = handle_diag_288(env, r1, r3);
        break;
    default:
        r = -1;
        break;
    }

    if (r) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
    }
}

/* Set Prefix */
void HELPER(spx)(CPUS390XState *env, uint64_t a1)
{
    const uint32_t prefix = a1 & 0x7fffe000;
    const uint32_t old_prefix = env->psa;
    CPUState *cs = env_cpu(env);

    if (prefix == old_prefix) {
        return;
    }

    env->psa = prefix;
    HELPER_LOG("prefix: %#x\n", prefix);
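    /*
     * Prefixing swaps the first 8KB of real storage with the 8KB at the
     * prefix address, so drop any cached translations for the first two
     * pages and for both the old and the new prefix area.
     */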
    tlb_flush_page(cs, 0);
    tlb_flush_page(cs, TARGET_PAGE_SIZE);
    if (prefix != 0) {
        tlb_flush_page(cs, prefix);
        tlb_flush_page(cs, prefix + TARGET_PAGE_SIZE);
    }
    if (old_prefix != 0) {
        tlb_flush_page(cs, old_prefix);
        tlb_flush_page(cs, old_prefix + TARGET_PAGE_SIZE);
    }
}

static void update_ckc_timer(CPUS390XState *env)
{
    S390TODState *td = s390_get_todstate();
    uint64_t time;

    /* stop the timer and remove pending CKC IRQs */
    timer_del(env->tod_timer);
    g_assert(qemu_mutex_iothread_locked());
    env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;

    /* the TOD has to exceed the CKC; this can never happen if the CKC is all 1's */
    if (env->ckc == -1ULL) {
        return;
    }

    /* difference between origins */
    time = env->ckc - td->base.low;

    /* nanoseconds */
    time = tod2time(time);

    timer_mod(env->tod_timer, time);
}

/* Set Clock Comparator */
void HELPER(sckc)(CPUS390XState *env, uint64_t ckc)
{
    env->ckc = ckc;

    qemu_mutex_lock_iothread();
    update_ckc_timer(env);
    qemu_mutex_unlock_iothread();
}

void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque)
{
    S390CPU *cpu = S390_CPU(cs);

    update_ckc_timer(&cpu->env);
}

/* Set Clock */
uint32_t HELPER(sck)(CPUS390XState *env, uint64_t tod_low)
{
    S390TODState *td = s390_get_todstate();
    S390TODClass *tdc = S390_TOD_GET_CLASS(td);
    S390TOD tod = {
        .high = 0,
        .low = tod_low,
    };

    qemu_mutex_lock_iothread();
    tdc->set(td, &tod, &error_abort);
    qemu_mutex_unlock_iothread();
    return 0;
}

/* Set TOD Programmable Field */
void HELPER(sckpf)(CPUS390XState *env, uint64_t r0)
{
    uint32_t val = r0;

    if (val & 0xffff0000) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
    }
    env->todpr = val;
}

/* Store Clock Comparator */
uint64_t HELPER(stckc)(CPUS390XState *env)
{
    return env->ckc;
}

/* Set CPU Timer */
void HELPER(spt)(CPUS390XState *env, uint64_t time)
{
    if (time == -1ULL) {
        return;
    }

    /* nanoseconds */
    time = tod2time(time);

    env->cputm = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + time;

    timer_mod(env->cpu_timer, env->cputm);
}

/* Store System Information */
uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0, uint64_t r0, uint64_t r1)
{
    const uintptr_t ra = GETPC();
    const uint32_t sel1 = r0 & STSI_R0_SEL1_MASK;
    const uint32_t sel2 = r1 & STSI_R1_SEL2_MASK;
    const MachineState *ms = MACHINE(qdev_get_machine());
    uint16_t total_cpus = 0, conf_cpus = 0, reserved_cpus = 0;
    S390CPU *cpu = env_archcpu(env);
    SysIB sysib = { };
    int i, cc = 0;

    if ((r0 & STSI_R0_FC_MASK) > STSI_R0_FC_LEVEL_3) {
        /* invalid function code: no other checks are performed */
        return 3;
    }

    if ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK)) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    if ((r0 & STSI_R0_FC_MASK) == STSI_R0_FC_CURRENT) {
        /* query the current level: no further checks are performed */
        env->regs[0] = STSI_R0_FC_LEVEL_3;
        return 0;
    }

    if (a0 & ~TARGET_PAGE_MASK) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    /* count the cpus and split them into configured and reserved ones */
    for (i = 0; i < ms->possible_cpus->len; i++) {
        total_cpus++;
        if (ms->possible_cpus->cpus[i].cpu) {
            conf_cpus++;
        } else {
            reserved_cpus++;
        }
    }

    /*
     * In theory, we could report Level 1 / Level 2 as current. However,
     * the Linux kernel will detect this as running under LPAR and assume
     * that we have an SCLP linemode console (which is always present on
     * LPAR, but not the default for QEMU), and will therefore not display
     * boot messages, making it harder to boot a Linux kernel under TCG.
     *
     * For now we fake the same SMP configuration on all levels.
     *
     * TODO: We could later make the level configurable via the machine
     *       and change defaults (linemode console) based on machine type
     *       and accelerator.
     */
    switch (r0 & STSI_R0_FC_MASK) {
    case STSI_R0_FC_LEVEL_1:
        if ((sel1 == 1) && (sel2 == 1)) {
            /* Basic Machine Configuration */
            char type[5] = {};

            ebcdic_put(sysib.sysib_111.manuf, "QEMU            ", 16);
            /* same as machine type number in STORE CPU ID, but in EBCDIC */
            snprintf(type, ARRAY_SIZE(type), "%X", cpu->model->def->type);
            ebcdic_put(sysib.sysib_111.type, type, 4);
            /* model number (not stored in STORE CPU ID for z/Architecture) */
            ebcdic_put(sysib.sysib_111.model, "QEMU            ", 16);
            ebcdic_put(sysib.sysib_111.sequence, "QEMU            ", 16);
            ebcdic_put(sysib.sysib_111.plant, "QEMU", 4);
        } else if ((sel1 == 2) && (sel2 == 1)) {
            /* Basic Machine CPU */
            ebcdic_put(sysib.sysib_121.sequence, "QEMUQEMUQEMUQEMU", 16);
            ebcdic_put(sysib.sysib_121.plant, "QEMU", 4);
            sysib.sysib_121.cpu_addr = cpu_to_be16(env->core_id);
        } else if ((sel1 == 2) && (sel2 == 2)) {
            /* Basic Machine CPUs */
            sysib.sysib_122.capability = cpu_to_be32(0x443afc29);
            sysib.sysib_122.total_cpus = cpu_to_be16(total_cpus);
            sysib.sysib_122.conf_cpus = cpu_to_be16(conf_cpus);
            sysib.sysib_122.reserved_cpus = cpu_to_be16(reserved_cpus);
        } else {
            cc = 3;
        }
        break;
    case STSI_R0_FC_LEVEL_2:
        if ((sel1 == 2) && (sel2 == 1)) {
            /* LPAR CPU */
            ebcdic_put(sysib.sysib_221.sequence, "QEMUQEMUQEMUQEMU", 16);
            ebcdic_put(sysib.sysib_221.plant, "QEMU", 4);
            sysib.sysib_221.cpu_addr = cpu_to_be16(env->core_id);
        } else if ((sel1 == 2) && (sel2 == 2)) {
            /* LPAR CPUs */
            sysib.sysib_222.lcpuc = 0x80; /* dedicated */
            sysib.sysib_222.total_cpus = cpu_to_be16(total_cpus);
            sysib.sysib_222.conf_cpus = cpu_to_be16(conf_cpus);
            sysib.sysib_222.reserved_cpus = cpu_to_be16(reserved_cpus);
            ebcdic_put(sysib.sysib_222.name, "QEMU    ", 8);
            sysib.sysib_222.caf = cpu_to_be32(1000);
            sysib.sysib_222.dedicated_cpus = cpu_to_be16(conf_cpus);
        } else {
            cc = 3;
        }
        break;
    case STSI_R0_FC_LEVEL_3:
        if ((sel1 == 2) && (sel2 == 2)) {
            /* VM CPUs */
            sysib.sysib_322.count = 1;
            sysib.sysib_322.vm[0].total_cpus = cpu_to_be16(total_cpus);
            sysib.sysib_322.vm[0].conf_cpus = cpu_to_be16(conf_cpus);
            sysib.sysib_322.vm[0].reserved_cpus = cpu_to_be16(reserved_cpus);
            sysib.sysib_322.vm[0].caf = cpu_to_be32(1000);
            /* Linux kernel uses this to distinguish us from z/VM */
            ebcdic_put(sysib.sysib_322.vm[0].cpi, "KVM/Linux       ", 16);
            sysib.sysib_322.vm[0].ext_name_encoding = 2; /* UTF-8 */

            /* If our VM has a name, use the real name */
            if (qemu_name) {
                memset(sysib.sysib_322.vm[0].name, 0x40,
                       sizeof(sysib.sysib_322.vm[0].name));
                ebcdic_put(sysib.sysib_322.vm[0].name, qemu_name,
                           MIN(sizeof(sysib.sysib_322.vm[0].name),
                               strlen(qemu_name)));
                strpadcpy((char *)sysib.sysib_322.ext_names[0],
                          sizeof(sysib.sysib_322.ext_names[0]),
                          qemu_name, '\0');

            } else {
                ebcdic_put(sysib.sysib_322.vm[0].name, "TCGguest", 8);
                strcpy((char *)sysib.sysib_322.ext_names[0], "TCGguest");
            }

            /* add the uuid */
            memcpy(sysib.sysib_322.vm[0].uuid, &qemu_uuid,
                   sizeof(sysib.sysib_322.vm[0].uuid));
        } else {
            cc = 3;
        }
        break;
    }

    if (cc == 0) {
        if (s390_cpu_virt_mem_write(cpu, a0, 0, &sysib, sizeof(sysib))) {
            s390_cpu_virt_mem_handle_exc(cpu, ra);
        }
    }

    return cc;
}

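/* SIGNAL PROCESSOR */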
uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1,
                      uint32_t r3)
{
    int cc;

    /* TODO: needed to inject interrupts - push further down */
    qemu_mutex_lock_iothread();
    cc = handle_sigp(env, order_code & SIGP_ORDER_MASK, r1, r3);
    qemu_mutex_unlock_iothread();

    return cc;
}
#endif

#ifndef CONFIG_USER_ONLY
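/* CANCEL SUBCHANNEL */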
void HELPER(xsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_xsch(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

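/* CLEAR SUBCHANNEL */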
void HELPER(csch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_csch(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

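/* HALT SUBCHANNEL */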
void HELPER(hsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_hsch(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

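/* MODIFY SUBCHANNEL */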
void HELPER(msch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_msch(cpu, r1, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

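/* RESET CHANNEL PATH */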
void HELPER(rchp)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_rchp(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

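/* RESUME SUBCHANNEL */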
void HELPER(rsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_rsch(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

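/* SET ADDRESS LIMIT */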
void HELPER(sal)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    ioinst_handle_sal(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

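/* SET CHANNEL MONITOR */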
void HELPER(schm)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    ioinst_handle_schm(cpu, r1, r2, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

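/* START SUBCHANNEL */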
void HELPER(ssch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_ssch(cpu, r1, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

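/* STORE CHANNEL REPORT WORD */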
void HELPER(stcrw)(CPUS390XState *env, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    ioinst_handle_stcrw(cpu, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

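/* STORE SUBCHANNEL */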
void HELPER(stsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_stsch(cpu, r1, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

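/* TEST PENDING INTERRUPTION */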
uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr)
{
    const uintptr_t ra = GETPC();
    S390CPU *cpu = env_archcpu(env);
    QEMUS390FLICState *flic = s390_get_qemu_flic(s390_get_flic());
    QEMUS390FlicIO *io = NULL;
    LowCore *lowcore;

    if (addr & 0x3) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    qemu_mutex_lock_iothread();
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    if (!io) {
        qemu_mutex_unlock_iothread();
        return 0;
    }

    if (addr) {
        struct {
            uint16_t id;
            uint16_t nr;
            uint32_t parm;
        } intc = {
            .id = cpu_to_be16(io->id),
            .nr = cpu_to_be16(io->nr),
            .parm = cpu_to_be32(io->parm),
        };

        if (s390_cpu_virt_mem_write(cpu, addr, 0, &intc, sizeof(intc))) {
            /* writing failed, reinject and properly clean up */
            s390_io_interrupt(io->id, io->nr, io->parm, io->word);
            qemu_mutex_unlock_iothread();
            g_free(io);
            s390_cpu_virt_mem_handle_exc(cpu, ra);
            return 0;
        }
    } else {
        /* no protection applies */
        lowcore = cpu_map_lowcore(env);
        lowcore->subchannel_id = cpu_to_be16(io->id);
        lowcore->subchannel_nr = cpu_to_be16(io->nr);
        lowcore->io_int_parm = cpu_to_be32(io->parm);
        lowcore->io_int_word = cpu_to_be32(io->word);
        cpu_unmap_lowcore(lowcore);
    }

    g_free(io);
    qemu_mutex_unlock_iothread();
    return 1;
}

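/* TEST SUBCHANNEL */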
void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_tsch(cpu, r1, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

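/* CHANNEL SUBSYSTEM CALL */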
void HELPER(chsc)(CPUS390XState *env, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_chsc(cpu, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}
#endif

#ifndef CONFIG_USER_ONLY
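/* Raise a PER program interrupt if a PER event was recorded for this instruction */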
void HELPER(per_check_exception)(CPUS390XState *env)
{
    if (env->per_perc_atmid) {
        tcg_s390_program_interrupt(env, PGM_PER, GETPC());
    }
}

/* Check if an address is between the PER starting address and the PER
   ending address.  The address range might wrap around.  */
static inline bool get_per_in_range(CPUS390XState *env, uint64_t addr)
{
    if (env->cregs[10] <= env->cregs[11]) {
        return env->cregs[10] <= addr && addr <= env->cregs[11];
    } else {
        return env->cregs[10] <= addr || addr <= env->cregs[11];
    }
}

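/* PER: successful-branching event */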
void HELPER(per_branch)(CPUS390XState *env, uint64_t from, uint64_t to)
{
    if ((env->cregs[9] & PER_CR9_EVENT_BRANCH)) {
        if (!(env->cregs[9] & PER_CR9_CONTROL_BRANCH_ADDRESS)
            || get_per_in_range(env, to)) {
            env->per_address = from;
            env->per_perc_atmid = PER_CODE_EVENT_BRANCH | get_per_atmid(env);
        }
    }
}

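/* PER: instruction-fetching event */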
void HELPER(per_ifetch)(CPUS390XState *env, uint64_t addr)
{
    if ((env->cregs[9] & PER_CR9_EVENT_IFETCH) && get_per_in_range(env, addr)) {
        env->per_address = addr;
        env->per_perc_atmid = PER_CODE_EVENT_IFETCH | get_per_atmid(env);

        /* If the instruction has to be nullified, trigger the
           exception immediately. */
        if (env->cregs[9] & PER_CR9_EVENT_NULLIFICATION) {
            CPUState *cs = env_cpu(env);

            env->per_perc_atmid |= PER_CODE_EVENT_NULLIFICATION;
            env->int_pgm_code = PGM_PER;
            env->int_pgm_ilen = get_ilen(cpu_ldub_code(env, addr));

            cs->exception_index = EXCP_PGM;
            cpu_loop_exit(cs);
        }
    }
}

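/* PER: store-using-real-address event */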
void HELPER(per_store_real)(CPUS390XState *env)
{
    if ((env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper.  */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}
#endif

static uint8_t stfl_bytes[2048];
static unsigned int used_stfl_bytes;

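/* Fill stfl_bytes once and remember how many bytes of it are actually used */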
static void prepare_stfl(void)
{
    static bool initialized;
    int i;

    /* racy, but we don't care: the same values are always written */
    if (initialized) {
        return;
    }

    s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
    for (i = 0; i < sizeof(stfl_bytes); i++) {
        if (stfl_bytes[i]) {
            used_stfl_bytes = i + 1;
        }
    }
    initialized = true;
}

#ifndef CONFIG_USER_ONLY
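/* STORE FACILITY LIST */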
void HELPER(stfl)(CPUS390XState *env)
{
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);
    prepare_stfl();
    memcpy(&lowcore->stfl_fac_list, stfl_bytes, sizeof(lowcore->stfl_fac_list));
    cpu_unmap_lowcore(lowcore);
}
#endif

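/* STORE FACILITY LIST EXTENDED */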
uint32_t HELPER(stfle)(CPUS390XState *env, uint64_t addr)
{
    const uintptr_t ra = GETPC();
    const int count_bytes = ((env->regs[0] & 0xff) + 1) * 8;
    int max_bytes;
    int i;

    if (addr & 0x7) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    prepare_stfl();
    max_bytes = ROUND_UP(used_stfl_bytes, 8);

    /*
     * The PoP says that doublewords beyond the highest-numbered facility
     * bit may or may not be stored.  However, existing hardware appears
     * not to store those words, and existing software depends on that.
     */
    for (i = 0; i < MIN(count_bytes, max_bytes); ++i) {
        cpu_stb_data_ra(env, addr + i, stfl_bytes[i], ra);
    }

    env->regs[0] = deposit64(env->regs[0], 0, 8, (max_bytes / 8) - 1);
    return count_bytes >= max_bytes ? 0 : 3;
}

#ifndef CONFIG_USER_ONLY
/*
 * Note: we ignore any return code of the functions called for the pci
 * instructions, as the only time they return !0 is when the stub is
 * called, and in that case we didn't even offer the zpci facility.
 * The only exception is SIC, where program checks need to be handled
 * by the caller.
 */
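/* CALL LOGICAL PROCESSOR */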
void HELPER(clp)(CPUS390XState *env, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    clp_service_call(cpu, r2, GETPC());
    qemu_mutex_unlock_iothread();
}

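/* PCI LOAD */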
void HELPER(pcilg)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    pcilg_service_call(cpu, r1, r2, GETPC());
    qemu_mutex_unlock_iothread();
}

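/* PCI STORE */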
void HELPER(pcistg)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    pcistg_service_call(cpu, r1, r2, GETPC());
    qemu_mutex_unlock_iothread();
}

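/* STORE PCI FUNCTION CONTROLS */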
void HELPER(stpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
                     uint32_t ar)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    stpcifc_service_call(cpu, r1, fiba, ar, GETPC());
    qemu_mutex_unlock_iothread();
}

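/* SET INTERRUPTION CONTROLS */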
void HELPER(sic)(CPUS390XState *env, uint64_t r1, uint64_t r3)
{
    int r;

    qemu_mutex_lock_iothread();
    r = css_do_sic(env, (r3 >> 27) & 0x7, r1 & 0xffff);
    qemu_mutex_unlock_iothread();
    /* css_do_sic() may actually return a PGM_xxx value to inject */
    if (r) {
        tcg_s390_program_interrupt(env, -r, GETPC());
    }
}

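/* REFRESH PCI TRANSLATIONS */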
void HELPER(rpcit)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    rpcit_service_call(cpu, r1, r2, GETPC());
    qemu_mutex_unlock_iothread();
}

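/* PCI STORE BLOCK */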
void HELPER(pcistb)(CPUS390XState *env, uint32_t r1, uint32_t r3,
                    uint64_t gaddr, uint32_t ar)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    pcistb_service_call(cpu, r1, r3, gaddr, ar, GETPC());
    qemu_mutex_unlock_iothread();
}

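/* MODIFY PCI FUNCTION CONTROLS */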
void HELPER(mpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
                    uint32_t ar)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    mpcifc_service_call(cpu, r1, fiba, ar, GETPC());
    qemu_mutex_unlock_iothread();
}
#endif