1 /*
2  * s390x exception / interrupt helpers
3  *
4  *  Copyright (c) 2009 Ulrich Hecht
5  *  Copyright (c) 2011 Alexander Graf
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "internal.h"
24 #include "exec/helper-proto.h"
25 #include "qemu/timer.h"
26 #include "exec/exec-all.h"
27 #include "exec/cpu_ldst.h"
28 #include "hw/s390x/ioinst.h"
29 #include "exec/address-spaces.h"
30 #include "tcg_s390x.h"
31 #ifndef CONFIG_USER_ONLY
32 #include "sysemu/sysemu.h"
33 #include "hw/s390x/s390_flic.h"
34 #endif
35 
/*
 * Deliver a program interrupt from TCG-generated code.
 *
 * @code: program-interruption code (PGM_*)
 * @ilen: instruction length, or ILEN_AUTO to derive it later
 * @ra:   host return address of the TCG call site, used to recover the
 *        guest CPU state (psw.addr etc.) before logging/queueing
 *
 * Queues the exception via trigger_pgm_exception() and longjmps out of
 * the CPU loop; never returns.
 */
void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env, uint32_t code,
                                              int ilen, uintptr_t ra)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    /* Restore guest state (including psw.addr) for the faulting insn */
    cpu_restore_state(cs, ra, true);
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  env->psw.addr);
    trigger_pgm_exception(env, code, ilen);
    cpu_loop_exit(cs);
}
47 
/*
 * Raise a DATA program exception with data-exception code @dxc.
 *
 * The DXC (one byte) is recorded in the lowcore (system emulation only)
 * and, when the AFP-register control (CR0 bit) is set, also in byte 2
 * of the FPC.  Ends by raising PGM_DATA; never returns.
 */
void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc,
                                           uintptr_t ra)
{
    g_assert(dxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Store the DXC into the lowcore (relative to the prefix, env->psa) */
    stl_phys(CPU(s390_env_get_cpu(env))->as,
             env->psa + offsetof(LowCore, data_exc_code), dxc);
#endif

    /* Store the DXC into the FPC if AFP is enabled */
    if (env->cregs[0] & CR0_AFP) {
        env->fpc = deposit32(env->fpc, 8, 8, dxc);
    }
    tcg_s390_program_interrupt(env, PGM_DATA, ILEN_AUTO, ra);
}
64 
/* TCG helper entry point: raise a DATA exception from generated code. */
void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc)
{
    tcg_s390_data_exception(env, dxc, GETPC());
}
69 
70 #if defined(CONFIG_USER_ONLY)
71 
/*
 * User-only build: there is no interrupt delivery; simply clear the
 * pending exception so the cpu loop can continue.
 */
void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}
76 
/*
 * User-only MMU fault handler: every fault is turned into an addressing
 * exception that linux-user's cpu_loop converts into a signal.
 * Always returns 1 (fault not resolved).
 */
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, PGM_ADDRESSING, ILEN_AUTO);
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    return 1;
}
88 
89 #else /* !CONFIG_USER_ONLY */
90 
cpu_mmu_idx_to_asc(int mmu_idx)91 static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
92 {
93     switch (mmu_idx) {
94     case MMU_PRIMARY_IDX:
95         return PSW_ASC_PRIMARY;
96     case MMU_SECONDARY_IDX:
97         return PSW_ASC_SECONDARY;
98     case MMU_HOME_IDX:
99         return PSW_ASC_HOME;
100     default:
101         abort();
102     }
103 }
104 
/*
 * System-emulation MMU fault handler.
 *
 * Translates @orig_vaddr according to @mmu_idx (dynamic translation for
 * the primary/secondary/home spaces, real-address handling for
 * MMU_REAL_IDX), checks the resulting absolute address is backed by
 * valid memory, and installs a TLB entry.
 *
 * Returns 0 on success; returns 1 when translation or the memory check
 * failed (a program exception has then already been queued, either here
 * or by the mmu_translate* callee).
 */
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr, int size,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong vaddr, raddr;
    uint64_t asc;
    int prot;

    qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, orig_vaddr, rw, mmu_idx);

    vaddr = orig_vaddr;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        /* Translation failure: the callee raises the proper exception */
        if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
            return 1;
        }
    } else if (mmu_idx == MMU_REAL_IDX) {
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        if (mmu_translate_real(env, vaddr, rw, &raddr, &prot)) {
            return 1;
        }
    } else {
        abort();
    }

    /* check out of RAM access */
    if (!address_space_access_valid(&address_space_memory, raddr,
                                    TARGET_PAGE_SIZE, rw,
                                    MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n",
                      __func__, (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_AUTO);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
            __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    /* Note: the untranslated (but page-aligned) vaddr keys the TLB entry */
    tlb_set_page(cs, orig_vaddr & TARGET_PAGE_MASK, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}
159 
/*
 * Deliver the pending program interrupt (env->int_pgm_code/ilen):
 * advance the PSW for non-nullifying exceptions, store the old PSW,
 * code, ilen and PER data in the lowcore, and load the program-new PSW.
 */
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    /* Derive the instruction length from the opcode if not known yet */
    if (ilen == ILEN_AUTO) {
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
    }
    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT,
                  "%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n",
                  __func__, env->int_pgm_code, ilen, env->psw.mask,
                  env->psw.addr);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    /* Old PSW, code and ilen go to the lowcore; new PSW comes from it */
    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}
231 
/*
 * Deliver the pending SUPERVISOR CALL interrupt: store the SVC code,
 * ilen and old PSW (advanced past the SVC insn) in the lowcore and load
 * the svc-new PSW.  A pending PER event is delivered right afterwards.
 */
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    /* the old PSW points past the SVC instruction */
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}
258 
#define VIRTIO_SUBCODE_64 0x0D00

/*
 * Deliver one pending external interrupt, honoring CR0 subclass masks.
 * The sources are checked in fixed (priority) order: emergency signal,
 * external call, clock comparator, CPU timer, service signal.  The
 * selected source's data goes to the lowcore and the external-new PSW
 * is loaded.  Calling this without a deliverable source is a bug.
 */
static void do_ext_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    uint16_t cpu_addr;
    LowCore *lowcore;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
        /* deliver one signalling CPU at a time, lowest address first */
        cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
        g_assert(cpu_addr < S390_MAX_CPUS);
        lowcore->cpu_addr = cpu_to_be16(cpu_addr);
        clear_bit(cpu_addr, env->emergency_signals);
        /* only clear the pending bit once all signals are consumed */
        if (bitmap_empty(env->emergency_signals, max_cpus)) {
            env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
        }
    } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
               (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
        lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
        env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
    } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
               (env->cregs[0] & CR0_CKC_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
               (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if (qemu_s390_flic_has_service(flic) &&
               (env->cregs[0] & CR0_SERVICE_SC)) {
        uint32_t param;

        param = qemu_s390_flic_dequeue_service(flic);
        lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
        lowcore->ext_params = cpu_to_be32(param);
        lowcore->cpu_addr = 0;
    } else {
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}
321 
/*
 * Deliver one pending I/O interrupt: dequeue it from the flic (filtered
 * by the subclasses enabled in CR6), store the interruption data and
 * old PSW in the lowcore, and load the io-new PSW.  Calling this with
 * no matching queued I/O interrupt is a bug.
 */
static void do_io_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    QEMUS390FlicIO *io;
    LowCore *lowcore;

    g_assert(env->psw.mask & PSW_MASK_IO);
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    g_assert(io);

    lowcore = cpu_map_lowcore(env);

    lowcore->subchannel_id = cpu_to_be16(io->id);
    lowcore->subchannel_nr = cpu_to_be16(io->nr);
    lowcore->io_int_parm = cpu_to_be32(io->parm);
    lowcore->io_int_word = cpu_to_be32(io->word);
    lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->io_new_psw.mask);
    addr = be64_to_cpu(lowcore->io_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    /* the dequeued entry is owned by us now */
    g_free(io);

    load_psw(env, mask, addr);
}
349 
/*
 * Deliver a machine-check interrupt.  Only floating channel-report
 * machine checks are supported: dequeue the CRW mchk from the flic,
 * store the full register/state save areas and the MCIC in the lowcore,
 * and load the mcck-new PSW.
 */
static void do_mchk_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    LowCore *lowcore;
    int i;

    /* for now we only support channel report machine checks (floating) */
    g_assert(env->psw.mask & PSW_MASK_MCHECK);
    g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);

    qemu_s390_flic_dequeue_crw_mchk(flic);

    lowcore = cpu_map_lowcore(env);

    /* we are always in z/Architecture mode */
    lowcore->ar_access_id = 1;

    /* store the architected save areas: FPRs, GPRs, ARs, CRs */
    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
    lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);

    lowcore->mcic = cpu_to_be64(s390_build_validity_mcic() | MCIC_SC_CP);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}
390 
/*
 * Top-level interrupt delivery for system emulation.
 *
 * Starting from cs->exception_index (a synchronous PGM/SVC exception,
 * or -1), picks the highest-priority deliverable interrupt (machine
 * check > external > I/O > restart > stop), delivers it, and loops to
 * deliver any further pending interrupts the new PSW allows.  Finally
 * updates CPU_INTERRUPT_HARD and the halted state to match what is
 * still pending.
 */
void s390_cpu_do_interrupt(CPUState *cs)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    bool stopped = false;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at psw=%" PRIx64 ":%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.mask, env->psw.addr);

try_deliver:
    /* handle machine checks */
    if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
        cs->exception_index = EXCP_MCHK;
    }
    /* handle external interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
        cs->exception_index = EXCP_IO;
    }
    /* RESTART interrupt */
    if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
        cs->exception_index = EXCP_RESTART;
    }
    /* STOP interrupt has least priority */
    if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
        cs->exception_index = EXCP_STOP;
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    case EXCP_RESTART:
        do_restart_interrupt(env);
        break;
    case EXCP_STOP:
        do_stop_interrupt(env);
        stopped = true;
        break;
    }

    if (cs->exception_index != -1 && !stopped) {
        /* check if there are more pending interrupts to deliver */
        cs->exception_index = -1;
        goto try_deliver;
    }
    cs->exception_index = -1;

    /* we might still have pending interrupts, but not deliverable */
    if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }

    /* WAIT PSW during interrupt injection or STOP interrupt */
    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
    } else if (cs->halted) {
        /* unhalt if we had a WAIT PSW somewhere in our injection chain */
        s390_cpu_unhalt(cpu);
    }
}
469 
/*
 * cc->cpu_exec_interrupt hook: called from the cpu-exec loop when
 * CPU_INTERRUPT_HARD is raised.  Returns true if an interrupt was
 * delivered, false otherwise.
 */
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn.  */
            return false;
        }
        if (s390_cpu_has_int(cpu)) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
        if (env->psw.mask & PSW_MASK_WAIT) {
            /* Woken up because of a floating interrupt but it has already
             * been delivered. Go back to sleep. */
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
        }
    }
    return false;
}
493 
/*
 * Debug exception handler: convert a hit CPU watchpoint into a PER
 * storage-alteration event and re-execute the faulting code so the PER
 * program interrupt can be raised through the normal path.
 */
void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now just consider it is the
           current default ASC.  This turns out to be true except when MVCP
           and MVCS instructions are used.  */
        /* Extract the ASC bits from the PSW mask, then shift them down.
           Note the parentheses: '>>' binds tighter than '&', so the
           previous form masked with (PSW_MASK_ASC >> 46) == 3 and never
           recorded the ASC.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code.  A PER exception
           will be triggered, it will call load_psw which will recompute
           the watchpoints.  */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}
523 
524 /* Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
525    this is only for the atomic operations, for which we want to raise a
526    specification exception.  */
/* Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
   this is only for the atomic operations, for which we want to raise a
   specification exception.  */
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    /* restores the guest state via @retaddr and never returns */
    s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, retaddr);
}
536 
537 #endif /* CONFIG_USER_ONLY */
538