xref: /qemu/target/s390x/helper.c (revision 609f45ea)
1 /*
2  *  S/390 helpers
3  *
4  *  Copyright (c) 2009 Ulrich Hecht
5  *  Copyright (c) 2011 Alexander Graf
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "internal.h"
24 #include "exec/gdbstub.h"
25 #include "qemu/timer.h"
26 #include "hw/s390x/ioinst.h"
27 #include "sysemu/hw_accel.h"
28 #ifndef CONFIG_USER_ONLY
29 #include "sysemu/sysemu.h"
30 #endif
31 
32 #ifndef CONFIG_USER_ONLY
33 void s390x_tod_timer(void *opaque)
34 {
35     cpu_inject_clock_comparator((S390CPU *) opaque);
36 }
37 
38 void s390x_cpu_timer(void *opaque)
39 {
40     cpu_inject_cpu_timer((S390CPU *) opaque);
41 }
42 #endif
43 
44 #ifndef CONFIG_USER_ONLY
45 
46 hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
47 {
48     S390CPU *cpu = S390_CPU(cs);
49     CPUS390XState *env = &cpu->env;
50     target_ulong raddr;
51     int prot;
52     uint64_t asc = env->psw.mask & PSW_MASK_ASC;
53 
54     /* 31-Bit mode */
55     if (!(env->psw.mask & PSW_MASK_64)) {
56         vaddr &= 0x7fffffff;
57     }
58 
59     if (mmu_translate(env, vaddr, MMU_INST_FETCH, asc, &raddr, &prot, false)) {
60         return -1;
61     }
62     return raddr;
63 }
64 
65 hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)
66 {
67     hwaddr phys_addr;
68     target_ulong page;
69 
70     page = vaddr & TARGET_PAGE_MASK;
71     phys_addr = cpu_get_phys_page_debug(cs, page);
72     phys_addr += (vaddr & ~TARGET_PAGE_MASK);
73 
74     return phys_addr;
75 }
76 
/* A wait PSW with address 0xfff is the "signal quiesce" convention. */
static inline bool is_special_wait_psw(uint64_t psw_addr)
{
    return psw_addr == 0xfffULL;
}
82 
83 void s390_handle_wait(S390CPU *cpu)
84 {
85     CPUState *cs = CPU(cpu);
86 
87     if (s390_cpu_halt(cpu) == 0) {
88 #ifndef CONFIG_USER_ONLY
89         if (is_special_wait_psw(cpu->env.psw.addr)) {
90             qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
91         } else {
92             cpu->env.crash_reason = S390_CRASH_REASON_DISABLED_WAIT;
93             qemu_system_guest_panicked(cpu_get_crash_info(cs));
94         }
95 #endif
96     }
97 }
98 
99 void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
100 {
101     uint64_t old_mask = env->psw.mask;
102 
103     env->psw.addr = addr;
104     env->psw.mask = mask;
105 
106     /* KVM will handle all WAITs and trigger a WAIT exit on disabled_wait */
107     if (!tcg_enabled()) {
108         return;
109     }
110     env->cc_op = (mask >> 44) & 3;
111 
112     if ((old_mask ^ mask) & PSW_MASK_PER) {
113         s390_cpu_recompute_watchpoints(CPU(s390_env_get_cpu(env)));
114     }
115 
116     if (mask & PSW_MASK_WAIT) {
117         s390_handle_wait(s390_env_get_cpu(env));
118     }
119 }
120 
121 uint64_t get_psw_mask(CPUS390XState *env)
122 {
123     uint64_t r = env->psw.mask;
124 
125     if (tcg_enabled()) {
126         env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
127                              env->cc_vr);
128 
129         r &= ~PSW_MASK_CC;
130         assert(!(env->cc_op & ~3));
131         r |= (uint64_t)env->cc_op << 44;
132     }
133 
134     return r;
135 }
136 
137 LowCore *cpu_map_lowcore(CPUS390XState *env)
138 {
139     S390CPU *cpu = s390_env_get_cpu(env);
140     LowCore *lowcore;
141     hwaddr len = sizeof(LowCore);
142 
143     lowcore = cpu_physical_memory_map(env->psa, &len, 1);
144 
145     if (len < sizeof(LowCore)) {
146         cpu_abort(CPU(cpu), "Could not map lowcore\n");
147     }
148 
149     return lowcore;
150 }
151 
/* Release a mapping obtained via cpu_map_lowcore(), marking the whole
 * LowCore as written back. */
void cpu_unmap_lowcore(LowCore *lowcore)
{
    cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
}
156 
157 void do_restart_interrupt(CPUS390XState *env)
158 {
159     uint64_t mask, addr;
160     LowCore *lowcore;
161 
162     lowcore = cpu_map_lowcore(env);
163 
164     lowcore->restart_old_psw.mask = cpu_to_be64(get_psw_mask(env));
165     lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr);
166     mask = be64_to_cpu(lowcore->restart_new_psw.mask);
167     addr = be64_to_cpu(lowcore->restart_new_psw.addr);
168 
169     cpu_unmap_lowcore(lowcore);
170     env->pending_int &= ~INTERRUPT_RESTART;
171 
172     load_psw(env, mask, addr);
173 }
174 
175 void s390_cpu_recompute_watchpoints(CPUState *cs)
176 {
177     const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS;
178     S390CPU *cpu = S390_CPU(cs);
179     CPUS390XState *env = &cpu->env;
180 
181     /* We are called when the watchpoints have changed. First
182        remove them all.  */
183     cpu_watchpoint_remove_all(cs, BP_CPU);
184 
185     /* Return if PER is not enabled */
186     if (!(env->psw.mask & PSW_MASK_PER)) {
187         return;
188     }
189 
190     /* Return if storage-alteration event is not enabled.  */
191     if (!(env->cregs[9] & PER_CR9_EVENT_STORE)) {
192         return;
193     }
194 
195     if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
196         /* We can't create a watchoint spanning the whole memory range, so
197            split it in two parts.   */
198         cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
199         cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
200     } else if (env->cregs[10] > env->cregs[11]) {
201         /* The address range loops, create two watchpoints.  */
202         cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
203                               wp_flags, NULL);
204         cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);
205 
206     } else {
207         /* Default case, create a single watchpoint.  */
208         cpu_watchpoint_insert(cs, env->cregs[10],
209                               env->cregs[11] - env->cregs[10] + 1,
210                               wp_flags, NULL);
211     }
212 }
213 
/* Layout of the architected store-status save area (SIGP STORE STATUS).
 * Offsets in the comments are relative to the start of the area; the
 * build-time assertion below pins the total size to 512 bytes. */
struct sigp_save_area {
    uint64_t    fprs[16];                       /* 0x0000 */
    uint64_t    grs[16];                        /* 0x0080 */
    PSW         psw;                            /* 0x0100 */
    uint8_t     pad_0x0110[0x0118 - 0x0110];    /* 0x0110 */
    uint32_t    prefix;                         /* 0x0118 */
    uint32_t    fpc;                            /* 0x011c */
    uint8_t     pad_0x0120[0x0124 - 0x0120];    /* 0x0120 */
    uint32_t    todpr;                          /* 0x0124 */
    uint64_t    cputm;                          /* 0x0128 */
    uint64_t    ckc;                            /* 0x0130 */
    uint8_t     pad_0x0138[0x0140 - 0x0138];    /* 0x0138 */
    uint32_t    ars[16];                        /* 0x0140 */
    uint64_t    crs[16];                        /* 0x0180 */
};
QEMU_BUILD_BUG_ON(sizeof(struct sigp_save_area) != 512);
230 
231 int s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
232 {
233     static const uint8_t ar_id = 1;
234     struct sigp_save_area *sa;
235     hwaddr len = sizeof(*sa);
236     int i;
237 
238     sa = cpu_physical_memory_map(addr, &len, 1);
239     if (!sa) {
240         return -EFAULT;
241     }
242     if (len != sizeof(*sa)) {
243         cpu_physical_memory_unmap(sa, len, 1, 0);
244         return -EFAULT;
245     }
246 
247     if (store_arch) {
248         cpu_physical_memory_write(offsetof(LowCore, ar_access_id), &ar_id, 1);
249     }
250     for (i = 0; i < 16; ++i) {
251         sa->fprs[i] = cpu_to_be64(get_freg(&cpu->env, i)->ll);
252     }
253     for (i = 0; i < 16; ++i) {
254         sa->grs[i] = cpu_to_be64(cpu->env.regs[i]);
255     }
256     sa->psw.addr = cpu_to_be64(cpu->env.psw.addr);
257     sa->psw.mask = cpu_to_be64(get_psw_mask(&cpu->env));
258     sa->prefix = cpu_to_be32(cpu->env.psa);
259     sa->fpc = cpu_to_be32(cpu->env.fpc);
260     sa->todpr = cpu_to_be32(cpu->env.todpr);
261     sa->cputm = cpu_to_be64(cpu->env.cputm);
262     sa->ckc = cpu_to_be64(cpu->env.ckc >> 8);
263     for (i = 0; i < 16; ++i) {
264         sa->ars[i] = cpu_to_be32(cpu->env.aregs[i]);
265     }
266     for (i = 0; i < 16; ++i) {
267         sa->crs[i] = cpu_to_be64(cpu->env.cregs[i]);
268     }
269 
270     cpu_physical_memory_unmap(sa, len, 1, len);
271 
272     return 0;
273 }
274 
275 #define ADTL_GS_OFFSET   1024 /* offset of GS data in adtl save area */
276 #define ADTL_GS_MIN_SIZE 2048 /* minimal size of adtl save area for GS */
277 int s390_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len)
278 {
279     hwaddr save = len;
280     void *mem;
281 
282     mem = cpu_physical_memory_map(addr, &save, 1);
283     if (!mem) {
284         return -EFAULT;
285     }
286     if (save != len) {
287         cpu_physical_memory_unmap(mem, len, 1, 0);
288         return -EFAULT;
289     }
290 
291     /* FIXME: as soon as TCG supports these features, convert cpu->be */
292     if (s390_has_feat(S390_FEAT_VECTOR)) {
293         memcpy(mem, &cpu->env.vregs, 512);
294     }
295     if (s390_has_feat(S390_FEAT_GUARDED_STORAGE) && len >= ADTL_GS_MIN_SIZE) {
296         memcpy(mem + ADTL_GS_OFFSET, &cpu->env.gscb, 32);
297     }
298 
299     cpu_physical_memory_unmap(mem, len, 1, len);
300 
301     return 0;
302 }
303 #endif /* CONFIG_USER_ONLY */
304 
305 void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
306                          int flags)
307 {
308     S390CPU *cpu = S390_CPU(cs);
309     CPUS390XState *env = &cpu->env;
310     int i;
311 
312     if (env->cc_op > 3) {
313         cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
314                     env->psw.mask, env->psw.addr, cc_name(env->cc_op));
315     } else {
316         cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
317                     env->psw.mask, env->psw.addr, env->cc_op);
318     }
319 
320     for (i = 0; i < 16; i++) {
321         cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
322         if ((i % 4) == 3) {
323             cpu_fprintf(f, "\n");
324         } else {
325             cpu_fprintf(f, " ");
326         }
327     }
328 
329     if (flags & CPU_DUMP_FPU) {
330         if (s390_has_feat(S390_FEAT_VECTOR)) {
331             for (i = 0; i < 32; i++) {
332                 cpu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64 "%c",
333                             i, env->vregs[i][0].ll, env->vregs[i][1].ll,
334                             i % 2 ? '\n' : ' ');
335             }
336         } else {
337             for (i = 0; i < 16; i++) {
338                 cpu_fprintf(f, "F%02d=%016" PRIx64 "%c",
339                             i, get_freg(env, i)->ll,
340                             (i % 4) == 3 ? '\n' : ' ');
341             }
342         }
343     }
344 
345 #ifndef CONFIG_USER_ONLY
346     for (i = 0; i < 16; i++) {
347         cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
348         if ((i % 4) == 3) {
349             cpu_fprintf(f, "\n");
350         } else {
351             cpu_fprintf(f, " ");
352         }
353     }
354 #endif
355 
356 #ifdef DEBUG_INLINE_BRANCHES
357     for (i = 0; i < CC_OP_MAX; i++) {
358         cpu_fprintf(f, "  %15s = %10ld\t%10ld\n", cc_name(i),
359                     inline_branch_miss[i], inline_branch_hit[i]);
360     }
361 #endif
362 
363     cpu_fprintf(f, "\n");
364 }
365 
/* Return the symbolic name of a cc_op value, for state dumps and
 * tracing.  The table is indexed directly by the enum value; callers
 * must pass a valid cc_op. */
const char *cc_name(enum cc_op cc_op)
{
    static const char * const cc_names[] = {
        [CC_OP_CONST0]    = "CC_OP_CONST0",
        [CC_OP_CONST1]    = "CC_OP_CONST1",
        [CC_OP_CONST2]    = "CC_OP_CONST2",
        [CC_OP_CONST3]    = "CC_OP_CONST3",
        [CC_OP_DYNAMIC]   = "CC_OP_DYNAMIC",
        [CC_OP_STATIC]    = "CC_OP_STATIC",
        [CC_OP_NZ]        = "CC_OP_NZ",
        [CC_OP_LTGT_32]   = "CC_OP_LTGT_32",
        [CC_OP_LTGT_64]   = "CC_OP_LTGT_64",
        [CC_OP_LTUGTU_32] = "CC_OP_LTUGTU_32",
        [CC_OP_LTUGTU_64] = "CC_OP_LTUGTU_64",
        [CC_OP_LTGT0_32]  = "CC_OP_LTGT0_32",
        [CC_OP_LTGT0_64]  = "CC_OP_LTGT0_64",
        [CC_OP_ADD_64]    = "CC_OP_ADD_64",
        [CC_OP_ADDU_64]   = "CC_OP_ADDU_64",
        [CC_OP_ADDC_64]   = "CC_OP_ADDC_64",
        [CC_OP_SUB_64]    = "CC_OP_SUB_64",
        [CC_OP_SUBU_64]   = "CC_OP_SUBU_64",
        [CC_OP_SUBB_64]   = "CC_OP_SUBB_64",
        [CC_OP_ABS_64]    = "CC_OP_ABS_64",
        [CC_OP_NABS_64]   = "CC_OP_NABS_64",
        [CC_OP_ADD_32]    = "CC_OP_ADD_32",
        [CC_OP_ADDU_32]   = "CC_OP_ADDU_32",
        [CC_OP_ADDC_32]   = "CC_OP_ADDC_32",
        [CC_OP_SUB_32]    = "CC_OP_SUB_32",
        [CC_OP_SUBU_32]   = "CC_OP_SUBU_32",
        [CC_OP_SUBB_32]   = "CC_OP_SUBB_32",
        [CC_OP_ABS_32]    = "CC_OP_ABS_32",
        [CC_OP_NABS_32]   = "CC_OP_NABS_32",
        [CC_OP_COMP_32]   = "CC_OP_COMP_32",
        [CC_OP_COMP_64]   = "CC_OP_COMP_64",
        [CC_OP_TM_32]     = "CC_OP_TM_32",
        [CC_OP_TM_64]     = "CC_OP_TM_64",
        [CC_OP_NZ_F32]    = "CC_OP_NZ_F32",
        [CC_OP_NZ_F64]    = "CC_OP_NZ_F64",
        [CC_OP_NZ_F128]   = "CC_OP_NZ_F128",
        [CC_OP_ICM]       = "CC_OP_ICM",
        [CC_OP_SLA_32]    = "CC_OP_SLA_32",
        [CC_OP_SLA_64]    = "CC_OP_SLA_64",
        [CC_OP_FLOGR]     = "CC_OP_FLOGR",
    };

    return cc_names[cc_op];
}
413