1 /*
2 * s390x exception / interrupt helpers
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2011 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "s390x-internal.h"
24 #include "exec/helper-proto.h"
25 #include "qemu/timer.h"
26 #include "exec/exec-all.h"
27 #include "exec/cpu_ldst.h"
28 #include "hw/s390x/ioinst.h"
29 #include "exec/address-spaces.h"
30 #include "tcg_s390x.h"
31 #ifndef CONFIG_USER_ONLY
32 #include "hw/s390x/s390_flic.h"
33 #include "hw/boards.h"
34 #endif
35
tcg_s390_program_interrupt(CPUS390XState * env,uint32_t code,uintptr_t ra)36 void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env,
37 uint32_t code, uintptr_t ra)
38 {
39 CPUState *cs = env_cpu(env);
40
41 cpu_restore_state(cs, ra, true);
42 qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
43 env->psw.addr);
44 trigger_pgm_exception(env, code);
45 cpu_loop_exit(cs);
46 }
47
tcg_s390_data_exception(CPUS390XState * env,uint32_t dxc,uintptr_t ra)48 void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc,
49 uintptr_t ra)
50 {
51 g_assert(dxc <= 0xff);
52 #if !defined(CONFIG_USER_ONLY)
53 /* Store the DXC into the lowcore */
54 stl_phys(env_cpu(env)->as,
55 env->psa + offsetof(LowCore, data_exc_code), dxc);
56 #endif
57
58 /* Store the DXC into the FPC if AFP is enabled */
59 if (env->cregs[0] & CR0_AFP) {
60 env->fpc = deposit32(env->fpc, 8, 8, dxc);
61 }
62 tcg_s390_program_interrupt(env, PGM_DATA, ra);
63 }
64
tcg_s390_vector_exception(CPUS390XState * env,uint32_t vxc,uintptr_t ra)65 void QEMU_NORETURN tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc,
66 uintptr_t ra)
67 {
68 g_assert(vxc <= 0xff);
69 #if !defined(CONFIG_USER_ONLY)
70 /* Always store the VXC into the lowcore, without AFP it is undefined */
71 stl_phys(env_cpu(env)->as,
72 env->psa + offsetof(LowCore, data_exc_code), vxc);
73 #endif
74
75 /* Always store the VXC into the FPC, without AFP it is undefined */
76 env->fpc = deposit32(env->fpc, 8, 8, vxc);
77 tcg_s390_program_interrupt(env, PGM_VECTOR_PROCESSING, ra);
78 }
79
/* TCG helper entry point: raise a data exception from generated code. */
void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc)
{
    tcg_s390_data_exception(env, dxc, GETPC());
}
84
85 #if defined(CONFIG_USER_ONLY)
86
/*
 * User-only emulation: there is no interrupt delivery; just mark the
 * pending exception as handled.
 */
void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}
91
/*
 * User-only TLB fill: any fault becomes an addressing exception.
 * Does not return (exits the CPU loop).
 */
bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);

    /*
     * On real machines this value is dropped into LowMem.  Since this
     * is userland, simply put it someplace that cpu_loop can find it.
     */
    cpu->env.__excp_addr = address;
    trigger_pgm_exception(&cpu->env, PGM_ADDRESSING);
    cpu_loop_exit_restore(cs, retaddr);
}
104
105 #else /* !CONFIG_USER_ONLY */
106
cpu_mmu_idx_to_asc(int mmu_idx)107 static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
108 {
109 switch (mmu_idx) {
110 case MMU_PRIMARY_IDX:
111 return PSW_ASC_PRIMARY;
112 case MMU_SECONDARY_IDX:
113 return PSW_ASC_SECONDARY;
114 case MMU_HOME_IDX:
115 return PSW_ASC_HOME;
116 default:
117 abort();
118 }
119 }
120
/*
 * System-emulation TLB fill: translate @address for @mmu_idx and either
 * install the mapping or deliver the resulting program exception.
 *
 * Returns true when a TLB entry was installed.  With @probe set, returns
 * false on translation failure instead of raising an exception; otherwise
 * this function does not return on failure (cpu_loop_exit_restore).
 */
bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong vaddr, raddr;
    uint64_t asc, tec;
    int prot, excp;

    qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    vaddr = address;

    if (mmu_idx < MMU_REAL_IDX) {
        /* Virtual translation under the ASC selected by the MMU index. */
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, &tec);
    } else if (mmu_idx == MMU_REAL_IDX) {
        /* Real-mode access: only prefixing/low-address checks apply. */
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate_real(env, vaddr, access_type, &raddr, &prot, &tec);
    } else {
        g_assert_not_reached();
    }

    /* Latch the outcome so instruction helpers can inspect it. */
    env->tlb_fill_exc = excp;
    env->tlb_fill_tec = tec;

    if (!excp) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                      __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
        tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
    if (probe) {
        return false;
    }

    /* Addressing exceptions store no translation-exception code. */
    if (excp != PGM_ADDRESSING) {
        stq_phys(env_cpu(env)->as,
                 env->psa + offsetof(LowCore, trans_exc_code), tec);
    }

    /*
     * For data accesses, ILEN will be filled in from the unwind info,
     * within cpu_loop_exit_restore. For code accesses, retaddr == 0,
     * and so unwinding will not occur. However, ILEN is also undefined
     * for that case -- we choose to set ILEN = 2.
     */
    env->int_pgm_ilen = 2;
    trigger_pgm_exception(env, excp);
    cpu_loop_exit_restore(cs, retaddr);
}
183
/*
 * Deliver the pending program interrupt: advance the PSW for
 * non-nullifying exceptions, store code/ilen/old PSW (and PER data if
 * pending) into the lowcore, then load the program-new PSW.
 */
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        /* A nullifying PER event leaves the PSW at the instruction. */
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT,
                  "%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n",
                  __func__, env->int_pgm_code, ilen, env->psw.mask,
                  env->psw.addr);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception. */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    /* Store the interruption data and the old PSW, fetch the new PSW. */
    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}
252
/*
 * Deliver a SUPERVISOR CALL interruption: store code/ilen/old PSW into
 * the lowcore and load the SVC-new PSW.  A pending PER event is
 * delivered immediately afterwards.
 */
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    /* The old PSW points past the SVC instruction. */
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one. */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}
279
280 #define VIRTIO_SUBCODE_64 0x0D00
281
/*
 * Deliver the highest-priority pending external interruption that is
 * enabled by CR0: emergency signal, external call, clock comparator,
 * CPU timer, then service signal.  Stores the interruption code and
 * old PSW into the lowcore and loads the external-new PSW.
 */
static void do_ext_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = env_archcpu(env);
    uint64_t mask, addr;
    uint16_t cpu_addr;
    LowCore *lowcore;

    /* Callers must only deliver when external interrupts are enabled. */
    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        MachineState *ms = MACHINE(qdev_get_machine());
        unsigned int max_cpus = ms->smp.max_cpus;

        /* Deliver one emergency signal per interruption, lowest CPU first. */
        lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
        cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
        g_assert(cpu_addr < S390_MAX_CPUS);
        lowcore->cpu_addr = cpu_to_be16(cpu_addr);
        clear_bit(cpu_addr, env->emergency_signals);
        /* Keep the indicator set while further signals remain queued. */
        if (bitmap_empty(env->emergency_signals, max_cpus)) {
            env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
        }
    } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
               (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
        lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
        env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
    } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
               (env->cregs[0] & CR0_CKC_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
               (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if (qemu_s390_flic_has_service(flic) &&
               (env->cregs[0] & CR0_SERVICE_SC)) {
        uint32_t param;

        /* Floating service interrupt; the parameter comes from the FLIC. */
        param = qemu_s390_flic_dequeue_service(flic);
        lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
        lowcore->ext_params = cpu_to_be32(param);
        lowcore->cpu_addr = 0;
    } else {
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}
345
/*
 * Deliver one pending I/O interruption from the FLIC: store the
 * subchannel identification and old PSW into the lowcore and load the
 * I/O-new PSW.
 */
static void do_io_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    QEMUS390FlicIO *io;
    LowCore *lowcore;
    uint64_t mask, addr;

    g_assert(env->psw.mask & PSW_MASK_IO);
    /* CR6 selects which I/O interruption subclasses may be dequeued. */
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    g_assert(io);

    lowcore = cpu_map_lowcore(env);

    lowcore->io_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
    lowcore->subchannel_id = cpu_to_be16(io->id);
    lowcore->subchannel_nr = cpu_to_be16(io->nr);
    lowcore->io_int_parm = cpu_to_be32(io->parm);
    lowcore->io_int_word = cpu_to_be32(io->word);
    mask = be64_to_cpu(lowcore->io_new_psw.mask);
    addr = be64_to_cpu(lowcore->io_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    g_free(io);

    s390_cpu_set_psw(env, mask, addr);
}
373
/* Machine-check extended save area: 1KiB holding the vector registers. */
typedef struct MchkExtSaveArea {
    uint64_t vregs[32][2];                     /* 0x0000 */
    uint8_t pad_0x0200[0x0400 - 0x0200];       /* 0x0200 */
} MchkExtSaveArea;
QEMU_BUILD_BUG_ON(sizeof(MchkExtSaveArea) != 1024);
379
/*
 * Store all 32 vector registers into the machine-check extended save
 * area at guest-physical address @mcesao.
 * Returns 0 on success, -EFAULT if the area cannot be mapped in full.
 */
static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao)
{
    hwaddr maplen = sizeof(MchkExtSaveArea);
    MchkExtSaveArea *area;
    int reg;

    area = cpu_physical_memory_map(mcesao, &maplen, true);
    if (!area) {
        return -EFAULT;
    }
    if (maplen != sizeof(MchkExtSaveArea)) {
        /* Partial mapping: nothing was written; undo and fail. */
        cpu_physical_memory_unmap(area, maplen, 1, 0);
        return -EFAULT;
    }

    for (reg = 0; reg < 32; reg++) {
        area->vregs[reg][0] = cpu_to_be64(env->vregs[reg][0]);
        area->vregs[reg][1] = cpu_to_be64(env->vregs[reg][1]);
    }

    cpu_physical_memory_unmap(area, maplen, 1, maplen);
    return 0;
}
403
/*
 * Deliver a channel-report machine check: save the architected register
 * state (and, if possible, the vector registers) into the lowcore /
 * extended save area, store the MCIC and old PSW, and load the
 * machine-check-new PSW.
 */
static void do_mchk_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mcic = s390_build_validity_mcic() | MCIC_SC_CP;
    uint64_t mask, addr, mcesao = 0;
    LowCore *lowcore;
    int i;

    /* for now we only support channel report machine checks (floating) */
    g_assert(env->psw.mask & PSW_MASK_MCHECK);
    g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);

    qemu_s390_flic_dequeue_crw_mchk(flic);

    lowcore = cpu_map_lowcore(env);

    /* extended save area */
    if (mcic & MCIC_VB_VR) {
        /* length and alignment is 1024 bytes */
        mcesao = be64_to_cpu(lowcore->mcesad) & ~0x3ffull;
    }

    /* try to store vector registers */
    if (!mcesao || mchk_store_vregs(env, mcesao)) {
        /* Could not store them: report the vector registers as invalid. */
        mcic &= ~MCIC_VB_VR;
    }

    /* we are always in z/Architecture mode */
    lowcore->ar_access_id = 1;

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(*get_freg(env, i));
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
    lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);

    lowcore->mcic = cpu_to_be64(mcic);
    lowcore->mcck_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}
456
/*
 * Top-level interrupt/exception delivery.  Delivers the exception in
 * cs->exception_index if set; otherwise selects the highest-priority
 * deliverable interrupt (mchk > ext > I/O > restart > stop) and keeps
 * delivering until no more are pending or a STOP was handled.
 */
void s390_cpu_do_interrupt(CPUState *cs)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    bool stopped = false;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at psw=%" PRIx64 ":%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.mask, env->psw.addr);

 try_deliver:
    /* handle machine checks */
    if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
        cs->exception_index = EXCP_MCHK;
    }
    /* handle external interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
        cs->exception_index = EXCP_IO;
    }
    /* RESTART interrupt */
    if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
        cs->exception_index = EXCP_RESTART;
    }
    /* STOP interrupt has least priority */
    if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
        cs->exception_index = EXCP_STOP;
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    case EXCP_RESTART:
        do_restart_interrupt(env);
        break;
    case EXCP_STOP:
        do_stop_interrupt(env);
        stopped = true;
        break;
    }

    if (cs->exception_index != -1 && !stopped) {
        /* check if there are more pending interrupts to deliver */
        cs->exception_index = -1;
        goto try_deliver;
    }
    cs->exception_index = -1;

    /* we might still have pending interrupts, but not deliverable */
    if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }

    /* WAIT PSW during interrupt injection or STOP interrupt */
    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
    } else if (cs->halted) {
        /* unhalt if we had a WAIT PSW somewhere in our injection chain */
        s390_cpu_unhalt(cpu);
    }
}
535
s390_cpu_exec_interrupt(CPUState * cs,int interrupt_request)536 bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
537 {
538 if (interrupt_request & CPU_INTERRUPT_HARD) {
539 S390CPU *cpu = S390_CPU(cs);
540 CPUS390XState *env = &cpu->env;
541
542 if (env->ex_value) {
543 /* Execution of the target insn is indivisible from
544 the parent EXECUTE insn. */
545 return false;
546 }
547 if (s390_cpu_has_int(cpu)) {
548 s390_cpu_do_interrupt(cs);
549 return true;
550 }
551 if (env->psw.mask & PSW_MASK_WAIT) {
552 /* Woken up because of a floating interrupt but it has already
553 * been delivered. Go back to sleep. */
554 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
555 }
556 }
557 return false;
558 }
559
/*
 * Debug-exception hook: convert a hit CPU watchpoint into a PER
 * storage-alteration event and re-execute the instruction so the PER
 * exception is raised through the normal path.
 */
void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set. We have no way to detect that with the current
           watchpoint code. */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint. For now just consider it is the
           current default ASC. This turns out to be true except when MVCP
           and MVCS instructions are not used. */
        /*
         * Extract the ASC bits from the PSW mask into the ATMID.  Note
         * the parentheses: '>>' binds tighter than '&', so without them
         * the expression would mask with (PSW_MASK_ASC >> 46) and never
         * pick up the ASC bits at all.
         */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /*
         * Remove all watchpoints to re-execute the code. A PER exception
         * will be triggered, it will call s390_cpu_set_psw which will
         * recompute the watchpoints.
         */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}
591
592 /* Unaligned accesses are only diagnosed with MO_ALIGN. At the moment,
593 this is only for the atomic operations, for which we want to raise a
594 specification exception. */
s390x_cpu_do_unaligned_access(CPUState * cs,vaddr addr,MMUAccessType access_type,int mmu_idx,uintptr_t retaddr)595 void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
596 MMUAccessType access_type,
597 int mmu_idx, uintptr_t retaddr)
598 {
599 S390CPU *cpu = S390_CPU(cs);
600 CPUS390XState *env = &cpu->env;
601
602 tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr);
603 }
604
monitor_event(CPUS390XState * env,uint64_t monitor_code,uint8_t monitor_class,uintptr_t ra)605 static void QEMU_NORETURN monitor_event(CPUS390XState *env,
606 uint64_t monitor_code,
607 uint8_t monitor_class, uintptr_t ra)
608 {
609 /* Store the Monitor Code and the Monitor Class Number into the lowcore */
610 stq_phys(env_cpu(env)->as,
611 env->psa + offsetof(LowCore, monitor_code), monitor_code);
612 stw_phys(env_cpu(env)->as,
613 env->psa + offsetof(LowCore, mon_class_num), monitor_class);
614
615 tcg_s390_program_interrupt(env, PGM_MONITOR, ra);
616 }
617
HELPER(monitor_call)618 void HELPER(monitor_call)(CPUS390XState *env, uint64_t monitor_code,
619 uint32_t monitor_class)
620 {
621 g_assert(monitor_class <= 0xff);
622
623 if (env->cregs[8] & (0x8000 >> monitor_class)) {
624 monitor_event(env, monitor_code, monitor_class, GETPC());
625 }
626 }
627
628 #endif /* !CONFIG_USER_ONLY */
629