/* qemu/target/s390x/tcg/excp_helper.c (revision b83a80e8) */
/*
 * s390x exception / interrupt helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "exec/helper-proto.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#include "exec/address-spaces.h"
#include "tcg_s390x.h"
#ifndef CONFIG_USER_ONLY
#include "hw/s390x/s390_flic.h"
#include "hw/boards.h"
#endif

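/*
 * Deliver a program interruption from TCG-generated code.  'ra' is the
 * host return address of the generated code (as obtained via GETPC() in
 * a helper, see HELPER(data_exception) below); it is used to restore the
 * guest state of the faulting instruction before the pending exception
 * is recorded and the cpu loop is left.
 */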
void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env,
                                              uint32_t code, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, ra, true);
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  env->psw.addr);
    trigger_pgm_exception(env, code);
    cpu_loop_exit(cs);
}

void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc,
                                           uintptr_t ra)
{
    g_assert(dxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Store the DXC into the lowcore */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), dxc);
#endif

    /* Store the DXC into the FPC if AFP is enabled */
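    /*
     * deposit32(fpc, 8, 8, dxc) returns the FPC with the 8-bit field
     * starting at (LSB-based) bit 8 replaced by dxc -- the architected
     * DXC byte of the floating-point-control register.  E.g. fpc = 0,
     * dxc = 0xfe yields 0x0000fe00.
     */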
    if (env->cregs[0] & CR0_AFP) {
        env->fpc = deposit32(env->fpc, 8, 8, dxc);
    }
    tcg_s390_program_interrupt(env, PGM_DATA, ra);
}

void QEMU_NORETURN tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc,
                                             uintptr_t ra)
{
    g_assert(vxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Always store the VXC into the lowcore; without AFP it is undefined */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), vxc);
#endif

    /* Always store the VXC into the FPC; without AFP it is undefined */
    env->fpc = deposit32(env->fpc, 8, 8, vxc);
    tcg_s390_program_interrupt(env, PGM_VECTOR_PROCESSING, ra);
}

void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc)
{
    tcg_s390_data_exception(env, dxc, GETPC());
}

/*
 * Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
 * this is only for the atomic operations, for which we want to raise a
 * specification exception.
 */
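/*
 * E.g. a COMPARE AND SWAP whose second operand is not word aligned ends
 * up here: the interlocked-update instructions architecturally require
 * aligned operands and raise a specification exception otherwise.
 */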
static void QEMU_NORETURN do_unaligned_access(CPUState *cs, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr);
}

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

void s390_cpu_record_sigsegv(CPUState *cs, vaddr address,
                             MMUAccessType access_type,
                             bool maperr, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, maperr ? PGM_ADDRESSING : PGM_PROTECTION);
    /*
     * On real machines this value is dropped into LowMem. Since this
     * is userland, simply put this someplace that cpu_loop can find it.
     * S390 only gives the page of the fault, not the exact address.
     * Cf. the construction of the TEC in mmu_translate().
     */
    cpu->env.__excp_addr = address & TARGET_PAGE_MASK;
    cpu_loop_exit_restore(cs, retaddr);
}

void s390_cpu_record_sigbus(CPUState *cs, vaddr address,
                            MMUAccessType access_type, uintptr_t retaddr)
{
    do_unaligned_access(cs, retaddr);
}

#else /* !CONFIG_USER_ONLY */

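/*
 * Map a TCG MMU index to the address-space-control (ASC) value it models.
 * The real-address MMU indexes carry no ASC and must never reach here.
 */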
static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}

bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong vaddr, raddr;
    uint64_t asc, tec;
    int prot, excp;

    qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    vaddr = address;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, &tec);
    } else if (mmu_idx == MMU_REAL_IDX) {
        /* 31-bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate_real(env, vaddr, access_type, &raddr, &prot, &tec);
    } else {
        g_assert_not_reached();
    }

    env->tlb_fill_exc = excp;
    env->tlb_fill_tec = tec;

    if (!excp) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                      __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
        tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
    if (probe) {
        return false;
    }

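    /*
     * An addressing exception has no architected translation-exception
     * code, so the lowcore field is left untouched in that case.
     */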
    if (excp != PGM_ADDRESSING) {
        stq_phys(env_cpu(env)->as,
                 env->psa + offsetof(LowCore, trans_exc_code), tec);
    }

    /*
     * For data accesses, ILEN will be filled in from the unwind info,
     * within cpu_loop_exit_restore.  For code accesses, retaddr == 0,
     * and so unwinding will not occur.  However, ILEN is also undefined
     * for that case -- we choose to set ILEN = 2.
     */
    env->int_pgm_ilen = 2;
    trigger_pgm_exception(env, excp);
    cpu_loop_exit_restore(cs, retaddr);
}

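/*
 * Present a pending program interruption: advance the PSW past the
 * instruction unless the exception is nullifying, store the old PSW,
 * interruption code and ILEN into the lowcore, and load the program
 * new PSW from the lowcore.
 */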
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT,
                  "%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n",
                  __func__, env->int_pgm_code, ilen, env->psw.mask,
                  env->psw.addr);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

static void do_ext_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = env_archcpu(env);
    uint64_t mask, addr;
    uint16_t cpu_addr;
    LowCore *lowcore;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

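    /*
     * Pending conditions are polled in fixed priority order -- emergency
     * signal, external call, clock comparator, CPU timer, service signal --
     * each gated by the corresponding subclass-mask bit in CR0.
     */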
    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        MachineState *ms = MACHINE(qdev_get_machine());
        unsigned int max_cpus = ms->smp.max_cpus;

        lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
        cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
        g_assert(cpu_addr < S390_MAX_CPUS);
        lowcore->cpu_addr = cpu_to_be16(cpu_addr);
        clear_bit(cpu_addr, env->emergency_signals);
        if (bitmap_empty(env->emergency_signals, max_cpus)) {
            env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
        }
    } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
               (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
        lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
        env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
    } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
               (env->cregs[0] & CR0_CKC_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
               (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if (qemu_s390_flic_has_service(flic) &&
               (env->cregs[0] & CR0_SERVICE_SC)) {
        uint32_t param;

        param = qemu_s390_flic_dequeue_service(flic);
        lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
        lowcore->ext_params = cpu_to_be32(param);
        lowcore->cpu_addr = 0;
    } else {
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

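/*
 * Dequeue one floating I/O interruption whose subclass is enabled in CR6
 * and present it through the lowcore I/O old/new PSW pair.
 */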
static void do_io_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    QEMUS390FlicIO *io;
    LowCore *lowcore;

    g_assert(env->psw.mask & PSW_MASK_IO);
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    g_assert(io);

    lowcore = cpu_map_lowcore(env);

    lowcore->subchannel_id = cpu_to_be16(io->id);
    lowcore->subchannel_nr = cpu_to_be16(io->nr);
    lowcore->io_int_parm = cpu_to_be32(io->parm);
    lowcore->io_int_word = cpu_to_be32(io->word);
    lowcore->io_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->io_new_psw.mask);
    addr = be64_to_cpu(lowcore->io_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    g_free(io);

    s390_cpu_set_psw(env, mask, addr);
}

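/*
 * Machine-check extended save area: the 32 vector registers (128 bits
 * each, 32 * 16 = 512 bytes), padded up to the architected 1KiB size.
 */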
typedef struct MchkExtSaveArea {
    uint64_t    vregs[32][2];                     /* 0x0000 */
    uint8_t     pad_0x0200[0x0400 - 0x0200];      /* 0x0200 */
} MchkExtSaveArea;
QEMU_BUILD_BUG_ON(sizeof(MchkExtSaveArea) != 1024);

static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao)
{
    hwaddr len = sizeof(MchkExtSaveArea);
    MchkExtSaveArea *sa;
    int i;

    sa = cpu_physical_memory_map(mcesao, &len, true);
    if (!sa) {
        return -EFAULT;
    }
    if (len != sizeof(MchkExtSaveArea)) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    for (i = 0; i < 32; i++) {
        sa->vregs[i][0] = cpu_to_be64(env->vregs[i][0]);
        sa->vregs[i][1] = cpu_to_be64(env->vregs[i][1]);
    }

    cpu_physical_memory_unmap(sa, len, 1, len);
    return 0;
}

static void do_mchk_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mcic = s390_build_validity_mcic() | MCIC_SC_CP;
    uint64_t mask, addr, mcesao = 0;
    LowCore *lowcore;
    int i;

    /* for now we only support channel report machine checks (floating) */
    g_assert(env->psw.mask & PSW_MASK_MCHECK);
    g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);

    qemu_s390_flic_dequeue_crw_mchk(flic);

    lowcore = cpu_map_lowcore(env);

    /* extended save area */
    if (mcic & MCIC_VB_VR) {
        /* length and alignment are 1024 bytes */
        mcesao = be64_to_cpu(lowcore->mcesad) & ~0x3ffull;
    }

    /* try to store vector registers */
    if (!mcesao || mchk_store_vregs(env, mcesao)) {
        mcic &= ~MCIC_VB_VR;
    }

    /* we are always in z/Architecture mode */
    lowcore->ar_access_id = 1;

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(*get_freg(env, i));
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
    lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);

    lowcore->mcic = cpu_to_be64(mcic);
    lowcore->mcck_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

void s390_cpu_do_interrupt(CPUState *cs)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    bool stopped = false;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at psw=%" PRIx64 ":%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.mask, env->psw.addr);

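    /*
     * Conditions are polled highest-priority-first; after one has been
     * delivered we jump back here to pick up any remaining deliverable
     * conditions before resuming the guest.
     */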
try_deliver:
    /* handle machine checks */
    if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
        cs->exception_index = EXCP_MCHK;
    }
    /* handle external interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
        cs->exception_index = EXCP_IO;
    }
    /* RESTART interrupt */
    if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
        cs->exception_index = EXCP_RESTART;
    }
    /* STOP interrupt has least priority */
    if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
        cs->exception_index = EXCP_STOP;
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    case EXCP_RESTART:
        do_restart_interrupt(env);
        break;
    case EXCP_STOP:
        do_stop_interrupt(env);
        stopped = true;
        break;
    }

    if (cs->exception_index != -1 && !stopped) {
        /* check if there are more pending interrupts to deliver */
        cs->exception_index = -1;
        goto try_deliver;
    }
    cs->exception_index = -1;

    /* we might still have pending interrupts, but not deliverable */
    if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }

    /* WAIT PSW during interrupt injection or STOP interrupt */
    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
    } else if (cs->halted) {
        /* unhalt if we had a WAIT PSW somewhere in our injection chain */
        s390_cpu_unhalt(cpu);
    }
}

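/*
 * Hook called from the cpu-exec loop when CPU_INTERRUPT_HARD is pending;
 * returns true if an interruption was actually delivered.
 */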
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn.  */
            return false;
        }
        if (s390_cpu_has_int(cpu)) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
        if (env->psw.mask & PSW_MASK_WAIT) {
            /* Woken up because of a floating interrupt but it has already
             * been delivered. Go back to sleep. */
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
        }
    }
    return false;
}

void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now just assume it is the
           current default ASC.  This turns out to be true except when the
           MVCP and MVCS instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /*
         * Remove all watchpoints to re-execute the code.  A PER exception
         * will be triggered; it will call s390_cpu_set_psw, which will
         * recompute the watchpoints.
         */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    do_unaligned_access(cs, retaddr);
}

static void QEMU_NORETURN monitor_event(CPUS390XState *env,
                                        uint64_t monitor_code,
                                        uint8_t monitor_class, uintptr_t ra)
{
    /* Store the Monitor Code and the Monitor Class Number into the lowcore */
    stq_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, monitor_code), monitor_code);
    stw_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, mon_class_num), monitor_class);

    tcg_s390_program_interrupt(env, PGM_MONITOR, ra);
}

void HELPER(monitor_call)(CPUS390XState *env, uint64_t monitor_code,
                          uint32_t monitor_class)
{
    g_assert(monitor_class <= 0xff);

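    /*
     * CR8 bits 48-63 hold the monitor masks for classes 0-15;
     * 0x8000 >> monitor_class selects the bit for this class within that
     * 16-bit field.  E.g. class 3 -> 0x1000, i.e. CR8 bit 51 in IBM
     * (MSB-first) numbering.
     */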
    if (env->cregs[8] & (0x8000 >> monitor_class)) {
        monitor_event(env, monitor_code, monitor_class, GETPC());
    }
}

#endif /* !CONFIG_USER_ONLY */