xref: /qemu/target/s390x/helper.c (revision e79ea67a)
/*
 *  S/390 helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "exec/gdbstub.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#endif

//#define DEBUG_S390
//#define DEBUG_S390_STDOUT

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) qemu_log(fmt, ##__VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif


#ifndef CONFIG_USER_ONLY
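/* Timer callbacks for the TOD clock comparator and the CPU timer: mark
   the interrupt as pending and kick the CPU so that it gets delivered.  */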
void s390x_tod_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_TOD;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}

void s390x_cpu_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_CPUTIMER;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}
#endif

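/* Create a new S390CPU object from a "model[,features...]" string: look up
   the CPU class by model name and instantiate it.  The feature list is
   parsed only once, on the first call.  Returns NULL and sets errp on
   failure.  */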
S390CPU *cpu_s390x_create(const char *cpu_model, Error **errp)
{
    static bool features_parsed;
    char *name, *features;
    const char *typename;
    ObjectClass *oc;
    CPUClass *cc;

    name = g_strdup(cpu_model);
    features = strchr(name, ',');
    if (features) {
        features[0] = 0;
        features++;
    }

    oc = cpu_class_by_name(TYPE_S390_CPU, name);
    if (!oc) {
        error_setg(errp, "Unknown CPU definition '%s'", name);
        g_free(name);
        return NULL;
    }
    typename = object_class_get_name(oc);

    if (!features_parsed) {
        features_parsed = true;
        cc = CPU_CLASS(oc);
        cc->parse_features(typename, features, errp);
    }
    g_free(name);

    if (*errp) {
        return NULL;
    }
    return S390_CPU(CPU(object_new(typename)));
}

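/* Create and realize a CPU with the given id.  On failure, the error is
   propagated to errp and the half-constructed object is released.  */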
S390CPU *s390x_new_cpu(const char *cpu_model, int64_t id, Error **errp)
{
    S390CPU *cpu;
    Error *err = NULL;

    cpu = cpu_s390x_create(cpu_model, &err);
    if (err != NULL) {
        goto out;
    }

    object_property_set_int(OBJECT(cpu), id, "id", &err);
    if (err != NULL) {
        goto out;
    }
    object_property_set_bool(OBJECT(cpu), true, "realized", &err);

out:
    if (err) {
        error_propagate(errp, err);
        object_unref(OBJECT(cpu));
        cpu = NULL;
    }
    return cpu;
}

S390CPU *cpu_s390x_init(const char *cpu_model)
{
    Error *err = NULL;
    S390CPU *cpu;
    /* Used to track the CPU ID for linux-user only */
    static int64_t next_cpu_id;

    cpu = s390x_new_cpu(cpu_model, next_cpu_id++, &err);
    if (err) {
        error_report_err(err);
    }
    return cpu;
}

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    cs->exception_index = EXCP_PGM;
    cpu->env.int_pgm_code = PGM_ADDRESSING;
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    return 1;
}

#else /* !CONFIG_USER_ONLY */

/* Make sure to exit the TB after this call! */
void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    cs->exception_index = EXCP_PGM;
    env->int_pgm_code = code;
    env->int_pgm_ilen = ilen;
}

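/* Handle a TLB miss: translate the access through the S390 MMU, check that
   the result lies within guest RAM and install the mapping into QEMU's
   TLB.  Returns 0 on success, 1 if a program exception was triggered.  */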
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    uint64_t asc = cpu_mmu_idx_to_asc(mmu_idx);
    target_ulong vaddr, raddr;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    orig_vaddr &= TARGET_PAGE_MASK;
    vaddr = orig_vaddr;

    /* 31-bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
        /* Translation ended in an exception */
        return 1;
    }

    /* check for out-of-RAM accesses */
    if (raddr > ram_size) {
        DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_AUTO);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
            __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}

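/* Debug (gdbstub) address translation: walk the MMU without raising any
   exceptions on failure.  */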
hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong raddr;
    int prot;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;

    /* 31-bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, MMU_INST_FETCH, asc, &raddr, &prot, false)) {
        return -1;
    }
    return raddr;
}

hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)
{
    hwaddr phys_addr;
    target_ulong page;

    page = vaddr & TARGET_PAGE_MASK;
    phys_addr = cpu_get_phys_page_debug(cs, page);
    phys_addr += (vaddr & ~TARGET_PAGE_MASK);

    return phys_addr;
}

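/* Install a new PSW.  Also resynchronizes the TCG condition code, updates
   the PER watchpoints when the PER bit changes, and halts the CPU when a
   wait PSW is loaded (requesting a shutdown once the last CPU stops).  */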
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    uint64_t old_mask = env->psw.mask;

    env->psw.addr = addr;
    env->psw.mask = mask;
    if (tcg_enabled()) {
        env->cc_op = (mask >> 44) & 3;
    }

    if ((old_mask ^ mask) & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(s390_env_get_cpu(env)));
    }

    if (mask & PSW_MASK_WAIT) {
        S390CPU *cpu = s390_env_get_cpu(env);
        if (s390_cpu_halt(cpu) == 0) {
#ifndef CONFIG_USER_ONLY
            qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
#endif
        }
    }
}

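/* Return the PSW mask with the lazily computed condition code folded back
   into its CC field (bits 18 and 19).  */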
static uint64_t get_psw_mask(CPUS390XState *env)
{
    uint64_t r = env->psw.mask;

    if (tcg_enabled()) {
        env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
                             env->cc_vr);

        r &= ~PSW_MASK_CC;
        assert(!(env->cc_op & ~3));
        r |= (uint64_t)env->cc_op << 44;
    }

    return r;
}

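/* Map the lowcore (prefix area) at env->psa for direct host access; the
   CPU is aborted if the mapping fails.  */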
static LowCore *cpu_map_lowcore(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    hwaddr len = sizeof(LowCore);

    lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    if (len < sizeof(LowCore)) {
        cpu_abort(CPU(cpu), "Could not map lowcore\n");
    }

    return lowcore;
}

static void cpu_unmap_lowcore(LowCore *lowcore)
{
    cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
}

void do_restart_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->restart_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->restart_new_psw.mask);
    addr = be64_to_cpu(lowcore->restart_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}

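/* Deliver a program interrupt: compute the instruction length if needed,
   advance the PSW past non-nullifying exceptions, store the interruption
   code and old PSW in the lowcore and load the program-new PSW.  */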
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    if (ilen == ILEN_AUTO) {
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
    }
    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}

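/* Deliver a SUPERVISOR CALL interrupt through the lowcore, followed
   immediately by a PER exception if a PER event is pending.  */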
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

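/* Deliver the most recently queued external interrupt through the lowcore
   and pop it off the queue.  */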
static void do_ext_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    ExtQueue *q;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    if (env->ext_index < 0 || env->ext_index >= MAX_EXT_QUEUE) {
        cpu_abort(CPU(cpu), "Ext queue overrun: %d\n", env->ext_index);
    }

    q = &env->ext_queue[env->ext_index];
    lowcore = cpu_map_lowcore(env);

    lowcore->ext_int_code = cpu_to_be16(q->code);
    lowcore->ext_params = cpu_to_be32(q->param);
    lowcore->ext_params2 = cpu_to_be64(q->param64);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
    lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->ext_index--;
    if (env->ext_index == -1) {
        env->pending_int &= ~INTERRUPT_EXT;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

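/* Deliver an I/O interrupt: scan the per-ISC queues and deliver the first
   pending interrupt whose subclass is enabled in CR6.  The pending flag is
   cleared once no deliverable interrupts remain queued.  */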
static void do_io_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    IOIntQueue *q;
    uint8_t isc;
    int disable = 1;
    int found = 0;

    if (!(env->psw.mask & PSW_MASK_IO)) {
        cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
    }

    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
        uint64_t isc_bits;

        if (env->io_index[isc] < 0) {
            continue;
        }
        if (env->io_index[isc] >= MAX_IO_QUEUE) {
            cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
                      isc, env->io_index[isc]);
        }

        q = &env->io_queue[env->io_index[isc]][isc];
        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
        if (!(env->cregs[6] & isc_bits)) {
            disable = 0;
            continue;
        }
        if (!found) {
            uint64_t mask, addr;

            found = 1;
            lowcore = cpu_map_lowcore(env);

            lowcore->subchannel_id = cpu_to_be16(q->id);
            lowcore->subchannel_nr = cpu_to_be16(q->nr);
            lowcore->io_int_parm = cpu_to_be32(q->parm);
            lowcore->io_int_word = cpu_to_be32(q->word);
            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
            mask = be64_to_cpu(lowcore->io_new_psw.mask);
            addr = be64_to_cpu(lowcore->io_new_psw.addr);

            cpu_unmap_lowcore(lowcore);

            env->io_index[isc]--;

            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
                    env->psw.mask, env->psw.addr);
            load_psw(env, mask, addr);
        }
        if (env->io_index[isc] >= 0) {
            disable = 0;
        }
    }

    if (disable) {
        env->pending_int &= ~INTERRUPT_IO;
    }
}

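/* Deliver a (channel-report-pending) machine-check interrupt: save the
   register state into the lowcore and load the mcck-new PSW.  */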
static void do_mchk_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    MchkQueue *q;
    int i;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
    }

    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
        cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
    }

    q = &env->mchk_queue[env->mchk_index];

    if (q->type != 1) {
        /* Don't know how to handle this... */
        cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
    }
    if (!(env->cregs[14] & (1 << 28))) {
        /* CRW machine checks disabled */
        return;
    }

    lowcore = cpu_map_lowcore(env);

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);

    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->mchk_index--;
    if (env->mchk_index == -1) {
        env->pending_int &= ~INTERRUPT_MCHK;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

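/* Central interrupt delivery: convert pending machine-check, external and
   I/O interrupts into an exception_index, then dispatch to the matching
   handler above.  */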
void s390_cpu_do_interrupt(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
    /* handle machine checks */
    if ((env->psw.mask & PSW_MASK_MCHECK) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_MCHK) {
            cs->exception_index = EXCP_MCHK;
        }
    }
    /* handle external interrupts */
    if ((env->psw.mask & PSW_MASK_EXT) &&
        cs->exception_index == -1) {
        if (env->pending_int & INTERRUPT_EXT) {
            /* code is already in env */
            cs->exception_index = EXCP_EXT;
        } else if (env->pending_int & INTERRUPT_TOD) {
            cpu_inject_ext(cpu, 0x1004, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        } else if (env->pending_int & INTERRUPT_CPUTIMER) {
            cpu_inject_ext(cpu, 0x1005, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_CPUTIMER;
        }
    }
    /* handle I/O interrupts */
    if ((env->psw.mask & PSW_MASK_IO) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_IO) {
            cs->exception_index = EXCP_IO;
        }
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    }
    cs->exception_index = -1;

    if (!env->pending_int) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}

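/* Called from the cpu_exec loop: deliver a pending hard interrupt, unless
   we are in the middle of an EXECUTE or external interrupts are disabled
   in the PSW.  */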
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn.  */
            return false;
        }
        if (env->psw.mask & PSW_MASK_EXT) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

void s390_cpu_recompute_watchpoints(CPUState *cs)
{
    const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS;
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    /* We are called when the watchpoints have changed. First
       remove them all.  */
    cpu_watchpoint_remove_all(cs, BP_CPU);

    /* Return if PER is not enabled */
    if (!(env->psw.mask & PSW_MASK_PER)) {
        return;
    }

    /* Return if the storage-alteration event is not enabled.  */
    if (!(env->cregs[9] & PER_CR9_EVENT_STORE)) {
        return;
    }

    if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
        /* We can't create a watchpoint spanning the whole memory range, so
           split it into two parts.  */
        cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
        cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
    } else if (env->cregs[10] > env->cregs[11]) {
        /* The address range wraps around, create two watchpoints.  */
        cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
                              wp_flags, NULL);
        cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);
    } else {
        /* Default case, create a single watchpoint.  */
        cpu_watchpoint_insert(cs, env->cregs[10],
                              env->cregs[11] - env->cregs[10] + 1,
                              wp_flags, NULL);
    }
}

void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now just assume it is the
           current default ASC.  This turns out to be true except when the
           MVCP and MVCS instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code.  A PER exception
           will be triggered, it will call load_psw which will recompute
           the watchpoints.  */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

/* Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
   this is only for the atomic operations, for which we want to raise a
   specification exception.  */
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    if (retaddr) {
        cpu_restore_state(cs, retaddr);
    }
    program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO);
}
#endif /* CONFIG_USER_ONLY */