#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "hw/hw.h"
#include "hw/boards.h"
#include "sysemu/kvm.h"
#include "helper_regs.h"
#include "mmu-hash64.h"
#include "migration/cpu.h"
#include "qapi/error.h"
#include "kvm_ppc.h"
#include "exec/helper-proto.h"

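/*
 * Loader for old-style (version 4) "cpu" sections, which were written
 * out field by field rather than through a VMStateDescription.  The
 * read order here must match the order in which those QEMU versions
 * saved the fields.
 */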
static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    unsigned int i, j;
    target_ulong sdr1;
    uint32_t fpscr, vscr;
#if defined(TARGET_PPC64)
    int32_t slb_nr;
#endif
    target_ulong xer;

    for (i = 0; i < 32; i++) {
        qemu_get_betls(f, &env->gpr[i]);
    }
#if !defined(TARGET_PPC64)
    for (i = 0; i < 32; i++) {
        qemu_get_betls(f, &env->gprh[i]);
    }
#endif
    qemu_get_betls(f, &env->lr);
    qemu_get_betls(f, &env->ctr);
    for (i = 0; i < 8; i++) {
        qemu_get_be32s(f, &env->crf[i]);
    }
    qemu_get_betls(f, &xer);
    cpu_write_xer(env, xer);
    qemu_get_betls(f, &env->reserve_addr);
    qemu_get_betls(f, &env->msr);
    for (i = 0; i < 4; i++) {
        qemu_get_betls(f, &env->tgpr[i]);
    }
    for (i = 0; i < 32; i++) {
        union {
            float64 d;
            uint64_t l;
        } u;
        u.l = qemu_get_be64(f);
        *cpu_fpr_ptr(env, i) = u.d;
    }
    qemu_get_be32s(f, &fpscr);
    env->fpscr = fpscr;
    qemu_get_sbe32s(f, &env->access_type);
#if defined(TARGET_PPC64)
    qemu_get_betls(f, &env->spr[SPR_ASR]);
    qemu_get_sbe32s(f, &slb_nr);
#endif
    qemu_get_betls(f, &sdr1);
    for (i = 0; i < 32; i++) {
        qemu_get_betls(f, &env->sr[i]);
    }
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 8; j++) {
            qemu_get_betls(f, &env->DBAT[i][j]);
        }
    }
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 8; j++) {
            qemu_get_betls(f, &env->IBAT[i][j]);
        }
    }
    qemu_get_sbe32s(f, &env->nb_tlb);
    qemu_get_sbe32s(f, &env->tlb_per_way);
    qemu_get_sbe32s(f, &env->nb_ways);
    qemu_get_sbe32s(f, &env->last_way);
    qemu_get_sbe32s(f, &env->id_tlbs);
    qemu_get_sbe32s(f, &env->nb_pids);
    if (env->tlb.tlb6) {
        /* XXX: assumes 6xx */
        for (i = 0; i < env->nb_tlb; i++) {
            qemu_get_betls(f, &env->tlb.tlb6[i].pte0);
            qemu_get_betls(f, &env->tlb.tlb6[i].pte1);
            qemu_get_betls(f, &env->tlb.tlb6[i].EPN);
        }
    }
    for (i = 0; i < 4; i++) {
        qemu_get_betls(f, &env->pb[i]);
    }
    for (i = 0; i < 1024; i++) {
        qemu_get_betls(f, &env->spr[i]);
    }
    if (!cpu->vhyp) {
        ppc_store_sdr1(env, sdr1);
    }
    qemu_get_be32s(f, &vscr);
    helper_mtvscr(env, vscr);
    qemu_get_be64s(f, &env->spe_acc);
    qemu_get_be32s(f, &env->spe_fscr);
    qemu_get_betls(f, &env->msr_mask);
    qemu_get_be32s(f, &env->flags);
    qemu_get_sbe32s(f, &env->error_code);
    qemu_get_be32s(f, &env->pending_interrupts);
    qemu_get_be32s(f, &env->irq_input_state);
    for (i = 0; i < POWERPC_EXCP_NB; i++) {
        qemu_get_betls(f, &env->excp_vectors[i]);
    }
    qemu_get_betls(f, &env->excp_prefix);
    qemu_get_betls(f, &env->ivor_mask);
    qemu_get_betls(f, &env->ivpr_mask);
    qemu_get_betls(f, &env->hreset_vector);
    qemu_get_betls(f, &env->nip);
    qemu_get_betls(f, &env->hflags);
    qemu_get_betls(f, &env->hflags_nmsr);
    qemu_get_sbe32(f); /* Discard unused mmu_idx */
    qemu_get_sbe32(f); /* Discard unused power_mode */

    /* Recompute mmu indices */
    hreg_compute_mem_idx(env);

    return 0;
}

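/*
 * One Altivec register is 128 bits wide: it travels as two big-endian
 * 64-bit halves, u64[0] first.  The AVRs occupy indices 32..63 of the
 * unified env.vsr[] array, hence the sub-array offset of 32 in
 * VMSTATE_AVR_ARRAY_V below.
 */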
static int get_avr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field)
{
    ppc_avr_t *v = pv;

    v->u64[0] = qemu_get_be64(f);
    v->u64[1] = qemu_get_be64(f);

    return 0;
}

static int put_avr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field, QJSON *vmdesc)
{
    ppc_avr_t *v = pv;

    qemu_put_be64(f, v->u64[0]);
    qemu_put_be64(f, v->u64[1]);
    return 0;
}

static const VMStateInfo vmstate_info_avr = {
    .name = "avr",
    .get  = get_avr,
    .put  = put_avr,
};

#define VMSTATE_AVR_ARRAY_V(_f, _s, _n, _v)                       \
    VMSTATE_SUB_ARRAY(_f, _s, 32, _n, _v, vmstate_info_avr, ppc_avr_t)

#define VMSTATE_AVR_ARRAY(_f, _s, _n)                             \
    VMSTATE_AVR_ARRAY_V(_f, _s, _n, 0)

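/*
 * The FPRs are kept in the upper halves of the unified env.vsr[]
 * array, so the "cpu/fpu" subsection only carries VsrD(0) of each
 * ppc_vsr_t.
 */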
static int get_fpr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field)
{
    ppc_vsr_t *v = pv;

    v->VsrD(0) = qemu_get_be64(f);

    return 0;
}

static int put_fpr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field, QJSON *vmdesc)
{
    ppc_vsr_t *v = pv;

    qemu_put_be64(f, v->VsrD(0));
    return 0;
}

static const VMStateInfo vmstate_info_fpr = {
    .name = "fpr",
    .get  = get_fpr,
    .put  = put_fpr,
};

#define VMSTATE_FPR_ARRAY_V(_f, _s, _n, _v)                       \
    VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_fpr, ppc_vsr_t)

#define VMSTATE_FPR_ARRAY(_f, _s, _n)                             \
    VMSTATE_FPR_ARRAY_V(_f, _s, _n, 0)

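/*
 * The "cpu/vsx" subsection carries the lower halves (VsrD(1)) of
 * VSRs 0..31; together with "cpu/fpu" this migrates the full 128-bit
 * registers.
 */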
static int get_vsr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field)
{
    ppc_vsr_t *v = pv;

    v->VsrD(1) = qemu_get_be64(f);

    return 0;
}

static int put_vsr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field, QJSON *vmdesc)
{
    ppc_vsr_t *v = pv;

    qemu_put_be64(f, v->VsrD(1));
    return 0;
}

static const VMStateInfo vmstate_info_vsr = {
    .name = "vsr",
    .get  = get_vsr,
    .put  = put_vsr,
};

#define VMSTATE_VSR_ARRAY_V(_f, _s, _n, _v)                       \
    VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_vsr, ppc_vsr_t)

#define VMSTATE_VSR_ARRAY(_f, _s, _n)                             \
    VMSTATE_VSR_ARRAY_V(_f, _s, _n, 0)

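/*
 * Predicates for the VMSTATE_*_TEST fields below: the extra
 * compatibility fields are only put on the wire when migrating to a
 * QEMU old enough to expect them.
 */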
static bool cpu_pre_2_8_migration(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;

    return cpu->pre_2_8_migration;
}

#if defined(TARGET_PPC64)
static bool cpu_pre_3_0_migration(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;

    return cpu->pre_3_0_migration;
}
#endif

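/*
 * Mirror the architected state that is kept outside env->spr[] (LR,
 * CTR, XER, CFAR, SPEFSCR, the BATs) into the SPR array before it is
 * saved, and compute the compatibility fields needed by pre-2.8 and
 * pre-3.0 destinations.
 */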
static int cpu_pre_save(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t insns_compat_mask =
        PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB
        | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES
        | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES
        | PPC_FLOAT_STFIWX | PPC_FLOAT_EXT
        | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ
        | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC
        | PPC_64B | PPC_64BX | PPC_ALTIVEC
        | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD;
    uint64_t insns_compat_mask2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX
        | PPC2_PERM_ISA206 | PPC2_DIVE_ISA206
        | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206
        | PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207
        | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207
        | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM;

    env->spr[SPR_LR] = env->lr;
    env->spr[SPR_CTR] = env->ctr;
    env->spr[SPR_XER] = cpu_read_xer(env);
#if defined(TARGET_PPC64)
    env->spr[SPR_CFAR] = env->cfar;
#endif
    env->spr[SPR_BOOKE_SPEFSCR] = env->spe_fscr;

    for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
        env->spr[SPR_DBAT0U + 2 * i] = env->DBAT[0][i];
        env->spr[SPR_DBAT0U + 2 * i + 1] = env->DBAT[1][i];
        env->spr[SPR_IBAT0U + 2 * i] = env->IBAT[0][i];
        env->spr[SPR_IBAT0U + 2 * i + 1] = env->IBAT[1][i];
    }
    for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) {
        env->spr[SPR_DBAT4U + 2 * i] = env->DBAT[0][i + 4];
        env->spr[SPR_DBAT4U + 2 * i + 1] = env->DBAT[1][i + 4];
        env->spr[SPR_IBAT4U + 2 * i] = env->IBAT[0][i + 4];
        env->spr[SPR_IBAT4U + 2 * i + 1] = env->IBAT[1][i + 4];
    }

    /* Hacks for migration compatibility between 2.6, 2.7 & 2.8 */
    if (cpu->pre_2_8_migration) {
        /*
         * Mask out bits that got added to msr_mask since the versions
         * which stupidly included it in the migration stream.
         */
        target_ulong metamask = 0
#if defined(TARGET_PPC64)
            | (1ULL << MSR_TS0)
            | (1ULL << MSR_TS1)
#endif
            ;
        cpu->mig_msr_mask = env->msr_mask & ~metamask;
        cpu->mig_insns_flags = env->insns_flags & insns_compat_mask;
        /*
         * CPU models supported by old machines all have PPC_MEM_TLBIE,
         * so we set it unconditionally to allow backward migration from
         * a POWER9 host to a POWER8 host.
         */
        cpu->mig_insns_flags |= PPC_MEM_TLBIE;
        cpu->mig_insns_flags2 = env->insns_flags2 & insns_compat_mask2;
        cpu->mig_nb_BATs = env->nb_BATs;
    }
    if (cpu->pre_3_0_migration) {
        if (cpu->hash64_opts) {
            cpu->mig_slb_nr = cpu->hash64_opts->slb_size;
        }
    }

    return 0;
}

/*
 * Determine if a given PVR is a "close enough" match to the CPU
 * object.  For TCG and KVM PR it would probably be sufficient to
 * require an exact PVR match.  However for KVM HV the user is
 * restricted to a PVR exactly matching the host CPU.  The correct way
 * to handle this is to put the guest into an architected
 * compatibility mode.  However, to allow a more forgiving transition
 * and migration from before this was widely done, we allow migration
 * between sufficiently similar PVRs, as determined by the CPU class's
 * pvr_match() hook.
 */
static bool pvr_match(PowerPCCPU *cpu, uint32_t pvr)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (pvr == pcc->pvr) {
        return true;
    }
    return pcc->pvr_match(pcc, pvr);
}

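/*
 * Inverse of cpu_pre_save(): scatter the SPR array back into the
 * dedicated env fields, restore SDR1 and the MSR, and recompute the
 * derived state.
 */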
static int cpu_post_load(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;
    target_ulong msr;

    /*
     * If we're operating in compat mode, we should be ok as long as
     * the destination supports the same compatibility mode.
     *
     * Otherwise, however, we require that the destination has exactly
     * the same CPU model as the source.
     */

#if defined(TARGET_PPC64)
    if (cpu->compat_pvr) {
        uint32_t compat_pvr = cpu->compat_pvr;
        Error *local_err = NULL;

        cpu->compat_pvr = 0;
        ppc_set_compat(cpu, compat_pvr, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    } else
#endif
    {
        if (!pvr_match(cpu, env->spr[SPR_PVR])) {
            return -1;
        }
    }

    /*
     * If we're running with KVM HV, there is a chance that the guest
     * kernel does not have the capability of dealing with a PVR other
     * than the exact host PVR in KVM_SET_SREGS.  If that happens, the
     * guest freezes after migration.
     *
     * kvmppc_pvr_workaround_required() checks for that situation: the
     * kernel lacks the PVR-compat capability and we are not running
     * KVM-PR (so, it is running KVM-HV).  When that is the case, make
     * sure KVM_SET_SREGS receives the PVR the host expects.
     */
#if defined(CONFIG_KVM)
    if (kvmppc_pvr_workaround_required(cpu)) {
        env->spr[SPR_PVR] = env->spr_cb[SPR_PVR].default_value;
    }
#endif

    env->lr = env->spr[SPR_LR];
    env->ctr = env->spr[SPR_CTR];
    cpu_write_xer(env, env->spr[SPR_XER]);
#if defined(TARGET_PPC64)
    env->cfar = env->spr[SPR_CFAR];
#endif
    env->spe_fscr = env->spr[SPR_BOOKE_SPEFSCR];

    for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
        env->DBAT[0][i] = env->spr[SPR_DBAT0U + 2 * i];
        env->DBAT[1][i] = env->spr[SPR_DBAT0U + 2 * i + 1];
        env->IBAT[0][i] = env->spr[SPR_IBAT0U + 2 * i];
        env->IBAT[1][i] = env->spr[SPR_IBAT0U + 2 * i + 1];
    }
    for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) {
        env->DBAT[0][i + 4] = env->spr[SPR_DBAT4U + 2 * i];
        env->DBAT[1][i + 4] = env->spr[SPR_DBAT4U + 2 * i + 1];
        env->IBAT[0][i + 4] = env->spr[SPR_IBAT4U + 2 * i];
        env->IBAT[1][i + 4] = env->spr[SPR_IBAT4U + 2 * i + 1];
    }

    if (!cpu->vhyp) {
        ppc_store_sdr1(env, env->spr[SPR_SDR1]);
    }

    /*
     * Invalidate all supported msr bits except MSR_TGPR/MSR_HVB
     * before restoring.
     */
    msr = env->msr;
    env->msr ^= env->msr_mask & ~((1ULL << MSR_TGPR) | MSR_HVB);
    ppc_store_msr(env, msr);

    hreg_compute_mem_idx(env);

    return 0;
}

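/*
 * Each optional piece of state below lives in a subsection of the
 * "cpu" section; a subsection is only put on the wire when its
 * .needed hook returns true for this CPU.
 */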
static bool fpu_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return (cpu->env.insns_flags & PPC_FLOAT);
}

static const VMStateDescription vmstate_fpu = {
    .name = "cpu/fpu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = fpu_needed,
    .fields = (VMStateField[]) {
        VMSTATE_FPR_ARRAY(env.vsr, PowerPCCPU, 32),
        VMSTATE_UINTTL(env.fpscr, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};

static bool altivec_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return (cpu->env.insns_flags & PPC_ALTIVEC);
}

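/*
 * The VSCR is migrated in its architected 32-bit form; helper_mtvscr()
 * and helper_mfvscr() convert to and from the internally expanded
 * representation.  See the hand-rolled field in vmstate_altivec for
 * why the opaque pointer is the whole CPU rather than a field.
 */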
static int get_vscr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field)
{
    PowerPCCPU *cpu = opaque;
    helper_mtvscr(&cpu->env, qemu_get_be32(f));
    return 0;
}

static int put_vscr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field, QJSON *vmdesc)
{
    PowerPCCPU *cpu = opaque;
    qemu_put_be32(f, helper_mfvscr(&cpu->env));
    return 0;
}

static const VMStateInfo vmstate_vscr = {
    .name = "cpu/altivec/vscr",
    .get = get_vscr,
    .put = put_vscr,
};

static const VMStateDescription vmstate_altivec = {
    .name = "cpu/altivec",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = altivec_needed,
    .fields = (VMStateField[]) {
        VMSTATE_AVR_ARRAY(env.vsr, PowerPCCPU, 32),
        /*
         * Save the architecture value of the vscr, not the internally
         * expanded version.  Since this architecture value does not
         * exist in memory to be stored, this requires a bit of hoop
         * jumping.  We want OFFSET=0 so that we effectively pass CPU
         * to the helper functions.
         */
        {
            .name = "vscr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_vscr,
            .flags = VMS_SINGLE,
            .offset = 0
        },
        VMSTATE_END_OF_LIST()
    },
};

static bool vsx_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return (cpu->env.insns_flags2 & PPC2_VSX);
}

static const VMStateDescription vmstate_vsx = {
    .name = "cpu/vsx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vsx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_VSR_ARRAY(env.vsr, PowerPCCPU, 32),
        VMSTATE_END_OF_LIST()
    },
};

#ifdef TARGET_PPC64
/* Transactional memory state */
static bool tm_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    return msr_ts;
}

static const VMStateDescription vmstate_tm = {
    .name = "cpu/tm",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .needed = tm_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.tm_gpr, PowerPCCPU, 32),
        VMSTATE_AVR_ARRAY(env.tm_vsr, PowerPCCPU, 64),
        VMSTATE_UINT64(env.tm_cr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_lr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_ctr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_fpscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_amr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_ppr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_vrsave, PowerPCCPU),
        VMSTATE_UINT32(env.tm_vscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_dscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_tar, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};
#endif

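/*
 * Segment registers exist only on CPUs without the 64-bit hash MMU;
 * CPUs with POWERPC_MMU_64 use the SLB instead (see below).
 */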
static bool sr_needed(void *opaque)
{
#ifdef TARGET_PPC64
    PowerPCCPU *cpu = opaque;

    return !(cpu->env.mmu_model & POWERPC_MMU_64);
#else
    return true;
#endif
}

static const VMStateDescription vmstate_sr = {
    .name = "cpu/sr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = sr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.sr, PowerPCCPU, 32),
        VMSTATE_END_OF_LIST()
    },
};

#ifdef TARGET_PPC64
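/* An SLB entry migrates as its raw ESID/VSID pair of 64-bit words. */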
static int get_slbe(QEMUFile *f, void *pv, size_t size,
                    const VMStateField *field)
{
    ppc_slb_t *v = pv;

    v->esid = qemu_get_be64(f);
    v->vsid = qemu_get_be64(f);

    return 0;
}

static int put_slbe(QEMUFile *f, void *pv, size_t size,
                    const VMStateField *field, QJSON *vmdesc)
{
    ppc_slb_t *v = pv;

    qemu_put_be64(f, v->esid);
    qemu_put_be64(f, v->vsid);
    return 0;
}

static const VMStateInfo vmstate_info_slbe = {
    .name = "slbe",
    .get  = get_slbe,
    .put  = put_slbe,
};

#define VMSTATE_SLB_ARRAY_V(_f, _s, _n, _v)                       \
    VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_slbe, ppc_slb_t)

#define VMSTATE_SLB_ARRAY(_f, _s, _n)                             \
    VMSTATE_SLB_ARRAY_V(_f, _s, _n, 0)

static bool slb_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    /* We don't support any of the old segment table based 64-bit CPUs */
    return (cpu->env.mmu_model & POWERPC_MMU_64);
}

static int slb_post_load(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;

    /*
     * We've pulled in the raw esid and vsid values from the migration
     * stream, but we need to recompute the page size pointers.
     */
    for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
        if (ppc_store_slb(cpu, i, env->slb[i].esid, env->slb[i].vsid) < 0) {
            /* Migration source had bad values in its SLB */
            return -1;
        }
    }

    return 0;
}

static const VMStateDescription vmstate_slb = {
    .name = "cpu/slb",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = slb_needed,
    .post_load = slb_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_TEST(mig_slb_nr, PowerPCCPU, cpu_pre_3_0_migration),
        VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES),
        VMSTATE_END_OF_LIST()
    }
};
#endif /* TARGET_PPC64 */

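/*
 * Software-managed TLB state.  Exactly one of the tlb6xx/tlbemb/tlbmas
 * variants below applies to a given CPU, selected by env->tlb_type in
 * each subsection's .needed hook.
 */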
static const VMStateDescription vmstate_tlb6xx_entry = {
    .name = "cpu/tlb6xx_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(pte0, ppc6xx_tlb_t),
        VMSTATE_UINTTL(pte1, ppc6xx_tlb_t),
        VMSTATE_UINTTL(EPN, ppc6xx_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlb6xx_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_6XX);
}

static const VMStateDescription vmstate_tlb6xx = {
    .name = "cpu/tlb6xx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlb6xx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlb6, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlb6xx_entry,
                                            ppc6xx_tlb_t),
        VMSTATE_UINTTL_ARRAY(env.tgpr, PowerPCCPU, 4),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_tlbemb_entry = {
    .name = "cpu/tlbemb_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(RPN, ppcemb_tlb_t),
        VMSTATE_UINTTL(EPN, ppcemb_tlb_t),
        VMSTATE_UINTTL(PID, ppcemb_tlb_t),
        VMSTATE_UINTTL(size, ppcemb_tlb_t),
        VMSTATE_UINT32(prot, ppcemb_tlb_t),
        VMSTATE_UINT32(attr, ppcemb_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlbemb_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_EMB);
}

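/* The 403 family is identified by PVRs of the form 0x0020xxxx. */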
static bool pbr403_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    uint32_t pvr = cpu->env.spr[SPR_PVR];

    return (pvr & 0xffff0000) == 0x00200000;
}

static const VMStateDescription vmstate_pbr403 = {
    .name = "cpu/pbr403",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pbr403_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.pb, PowerPCCPU, 4),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_tlbemb = {
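    /*
     * XXX: this name duplicates vmstate_tlb6xx's .name above; changing
     * it now would break the migration stream, so it is left as-is.
     */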
    .name = "cpu/tlb6xx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlbemb_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbe, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlbemb_entry,
                                            ppcemb_tlb_t),
        /* 403 protection registers */
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_pbr403,
        NULL
    }
};

static const VMStateDescription vmstate_tlbmas_entry = {
    .name = "cpu/tlbmas_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(mas8, ppcmas_tlb_t),
        VMSTATE_UINT32(mas1, ppcmas_tlb_t),
        VMSTATE_UINT64(mas2, ppcmas_tlb_t),
        VMSTATE_UINT64(mas7_3, ppcmas_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlbmas_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_MAS);
}

static const VMStateDescription vmstate_tlbmas = {
    .name = "cpu/tlbmas",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlbmas_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbm, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlbmas_entry,
                                            ppcmas_tlb_t),
        VMSTATE_END_OF_LIST()
    }
};

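/*
 * The compat PVR is only sent when one is actually set, and never to a
 * pre-2.10 destination, which would not recognise this subsection.
 */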
static bool compat_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    assert(!(cpu->compat_pvr && !cpu->vhyp));
    return !cpu->pre_2_10_migration && cpu->compat_pvr != 0;
}

static const VMStateDescription vmstate_compat = {
    .name = "cpu/compat",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = compat_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(compat_pvr, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    }
};

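/*
 * Top-level "cpu" section.  Version 5 streams are described by the
 * fields below; version 4 streams from older QEMUs are handled by
 * cpu_load_old() instead.
 */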
const VMStateDescription vmstate_ppc_cpu = {
    .name = "cpu",
    .version_id = 5,
    .minimum_version_id = 5,
    .minimum_version_id_old = 4,
    .load_state_old = cpu_load_old,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UNUSED(sizeof(target_ulong)), /* was _EQUAL(env.spr[SPR_PVR]) */

        /* User mode architected state */
        VMSTATE_UINTTL_ARRAY(env.gpr, PowerPCCPU, 32),
#if !defined(TARGET_PPC64)
        VMSTATE_UINTTL_ARRAY(env.gprh, PowerPCCPU, 32),
#endif
        VMSTATE_UINT32_ARRAY(env.crf, PowerPCCPU, 8),
        VMSTATE_UINTTL(env.nip, PowerPCCPU),

        /* SPRs */
        VMSTATE_UINTTL_ARRAY(env.spr, PowerPCCPU, 1024),
        VMSTATE_UINT64(env.spe_acc, PowerPCCPU),

        /* Reservation */
        VMSTATE_UINTTL(env.reserve_addr, PowerPCCPU),

        /* Supervisor mode architected state */
        VMSTATE_UINTTL(env.msr, PowerPCCPU),

        /* Internal state */
        VMSTATE_UINTTL(env.hflags_nmsr, PowerPCCPU),
        /* FIXME: access_type? */

        /* Sanity checking */
        VMSTATE_UINTTL_TEST(mig_msr_mask, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_UINT64_TEST(mig_insns_flags, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_UINT64_TEST(mig_insns_flags2, PowerPCCPU,
                            cpu_pre_2_8_migration),
        VMSTATE_UINT32_TEST(mig_nb_BATs, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_fpu,
        &vmstate_altivec,
        &vmstate_vsx,
        &vmstate_sr,
#ifdef TARGET_PPC64
        &vmstate_tm,
        &vmstate_slb,
#endif /* TARGET_PPC64 */
        &vmstate_tlb6xx,
        &vmstate_tlbemb,
        &vmstate_tlbmas,
        &vmstate_compat,
        NULL
    }
};
839