xref: /qemu/hw/ppc/spapr.c (revision 19f9c044)
/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * Copyright (c) 2004-2007 Fabrice Bellard
 * Copyright (c) 2007 Jocelyn Mayer
 * Copyright (c) 2010 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/datadir.h"
#include "qemu/memalign.h"
#include "qemu/guest-random.h"
#include "qapi/error.h"
#include "qapi/qapi-events-machine.h"
#include "qapi/qapi-events-qdev.h"
#include "qapi/visitor.h"
#include "sysemu/sysemu.h"
#include "sysemu/hostmem.h"
#include "sysemu/numa.h"
#include "sysemu/qtest.h"
#include "sysemu/reset.h"
#include "sysemu/runstate.h"
#include "qemu/log.h"
#include "hw/fw-path-provider.h"
#include "elf.h"
#include "net/net.h"
#include "sysemu/device_tree.h"
#include "sysemu/cpus.h"
#include "sysemu/hw_accel.h"
#include "kvm_ppc.h"
#include "migration/misc.h"
#include "migration/qemu-file-types.h"
#include "migration/global_state.h"
#include "migration/register.h"
#include "migration/blocker.h"
#include "mmu-hash64.h"
#include "mmu-book3s-v3.h"
#include "cpu-models.h"
#include "hw/core/cpu.h"

#include "hw/ppc/ppc.h"
#include "hw/loader.h"

#include "hw/ppc/fdt.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_nested.h"
#include "hw/ppc/spapr_vio.h"
#include "hw/ppc/vof.h"
#include "hw/qdev-properties.h"
#include "hw/pci-host/spapr.h"
#include "hw/pci/msi.h"

#include "hw/pci/pci.h"
#include "hw/scsi/scsi.h"
#include "hw/virtio/virtio-scsi.h"
#include "hw/virtio/vhost-scsi-common.h"

#include "exec/ram_addr.h"
#include "hw/usb.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "hw/nmi.h"
#include "hw/intc/intc.h"

#include "hw/ppc/spapr_cpu_core.h"
#include "hw/mem/memory-device.h"
#include "hw/ppc/spapr_tpm_proxy.h"
#include "hw/ppc/spapr_nvdimm.h"
#include "hw/ppc/spapr_numa.h"
#include "hw/ppc/pef.h"

#include "monitor/monitor.h"

#include <libfdt.h>

/* SLOF memory layout:
 *
 * The SLOF raw image is loaded at 0, copies its romfs right below the
 * flat device-tree, then positions SLOF itself 31M below that.
 *
 * So we set FW_OVERHEAD to 40MB, which should account for all of that
 * and more.
 *
 * We load our kernel at 4M, leaving space for the SLOF initial image.
 */
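/*
 * Illustrative arithmetic for the defaults below (not normative): the
 * kernel goes at KERNEL_LOAD_ADDR = FW_MAX_SIZE = 4 MiB, firmware plus
 * its romfs are assumed to fit within FW_OVERHEAD = 0x2800000 (40 MiB)
 * below the flattened device tree, and the device tree itself must
 * stay below FDT_MAX_ADDR = 2 GiB.
 */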
#define FDT_MAX_ADDR            0x80000000 /* FDT must stay below that */
#define FW_MAX_SIZE             0x400000
#define FW_FILE_NAME            "slof.bin"
#define FW_FILE_NAME_VOF        "vof.bin"
#define FW_OVERHEAD             0x2800000
#define KERNEL_LOAD_ADDR        FW_MAX_SIZE

#define MIN_RMA_SLOF            (128 * MiB)

#define PHANDLE_INTC            0x00001111

/* These two functions implement the VCPU id numbering: one to compute them
 * all and one to identify thread 0 of a VCORE. Any change to the first one
 * is likely to have an impact on the second one, so let's keep them close.
 */
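/*
 * Worked example (illustrative): with smp.threads = 4 and vsmt = 8,
 * cpu_index 0..3 map to VCPU ids 0..3 and cpu_index 4..7 map to VCPU
 * ids 8..11, so thread 0 of each vcore gets a VCPU id that is a
 * multiple of vsmt.
 */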
static int spapr_vcpu_id(SpaprMachineState *spapr, int cpu_index)
{
    MachineState *ms = MACHINE(spapr);
    unsigned int smp_threads = ms->smp.threads;

    assert(spapr->vsmt);
    return
        (cpu_index / smp_threads) * spapr->vsmt + cpu_index % smp_threads;
}
static bool spapr_is_thread0_in_vcore(SpaprMachineState *spapr,
                                      PowerPCCPU *cpu)
{
    assert(spapr->vsmt);
    return spapr_get_vcpu_id(cpu) % spapr->vsmt == 0;
}

static bool pre_2_10_vmstate_dummy_icp_needed(void *opaque)
{
    /* Dummy entries correspond to unused ICPState objects in older QEMUs,
     * and newer QEMUs don't even have them. In both cases, we don't want
     * to send anything on the wire.
     */
    return false;
}

static const VMStateDescription pre_2_10_vmstate_dummy_icp = {
    /*
     * Hack ahead.  We can't have two devices with the same name and
     * instance id.  So I rename this to pass make check.
     * Real help from people who know the hardware is needed.
     */
    .name = "icp/server",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pre_2_10_vmstate_dummy_icp_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UNUSED(4), /* uint32_t xirr */
        VMSTATE_UNUSED(1), /* uint8_t pending_priority */
        VMSTATE_UNUSED(1), /* uint8_t mfrr */
        VMSTATE_END_OF_LIST()
    },
};

/*
 * See comment in hw/intc/xics.c:icp_realize()
 *
 * You have to remove vmstate_replace_hack_for_ppc() when you remove
 * the machine types that need the following function.
 */
static void pre_2_10_vmstate_register_dummy_icp(int i)
{
    vmstate_register(NULL, i, &pre_2_10_vmstate_dummy_icp,
                     (void *)(uintptr_t) i);
}

/*
 * See comment in hw/intc/xics.c:icp_realize()
 *
 * You have to remove vmstate_replace_hack_for_ppc() when you remove
 * the machine types that need the following function.
 */
static void pre_2_10_vmstate_unregister_dummy_icp(int i)
{
    /*
     * This used to be:
     *
     *    vmstate_unregister(NULL, &pre_2_10_vmstate_dummy_icp,
     *                      (void *)(uintptr_t) i);
     */
}

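/*
 * Example (illustrative): with smp.max_cpus = 16, smp.threads = 4 and
 * vsmt = 8, the interrupt controller must cover
 * DIV_ROUND_UP(16 * 8, 4) = 32 possible interrupt servers.
 */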
int spapr_max_server_number(SpaprMachineState *spapr)
{
    MachineState *ms = MACHINE(spapr);

    assert(spapr->vsmt);
    return DIV_ROUND_UP(ms->smp.max_cpus * spapr->vsmt, ms->smp.threads);
}

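/*
 * Sketch of the properties built below (illustrative): for a CPU whose
 * first thread has VCPU id 8 and smt_threads = 4,
 * "ibm,ppc-interrupt-server#s" becomes <8 9 10 11> and
 * "ibm,ppc-interrupt-gserver#s" pairs each server with group queue 0:
 * <8 0 9 0 10 0 11 0>.
 */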
static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu,
                                  int smt_threads)
{
    int i, ret = 0;
    g_autofree uint32_t *servers_prop = g_new(uint32_t, smt_threads);
    g_autofree uint32_t *gservers_prop = g_new(uint32_t, smt_threads * 2);
    int index = spapr_get_vcpu_id(cpu);

    if (cpu->compat_pvr) {
        ret = fdt_setprop_cell(fdt, offset, "cpu-version", cpu->compat_pvr);
        if (ret < 0) {
            return ret;
        }
    }

    /* Build interrupt servers and gservers properties */
    for (i = 0; i < smt_threads; i++) {
        servers_prop[i] = cpu_to_be32(index + i);
        /* Hack, direct the group queues back to cpu 0 */
        gservers_prop[i*2] = cpu_to_be32(index + i);
        gservers_prop[i*2 + 1] = 0;
    }
    ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s",
                      servers_prop, sizeof(*servers_prop) * smt_threads);
    if (ret < 0) {
        return ret;
    }
    ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-gserver#s",
                      gservers_prop, sizeof(*gservers_prop) * smt_threads * 2);

    return ret;
}

static void spapr_dt_pa_features(SpaprMachineState *spapr,
                                 PowerPCCPU *cpu,
                                 void *fdt, int offset)
{
    uint8_t pa_features_206[] = { 6, 0,
        0xf6, 0x1f, 0xc7, 0x00, 0x80, 0xc0 };
    uint8_t pa_features_207[] = { 24, 0,
        0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0,
        0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x80, 0x00,
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00 };
    uint8_t pa_features_300[] = { 66, 0,
        /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */
        /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, SSO, 5: LE|CFAR|EB|LSQ */
        0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0, /* 0 - 5 */
        /* 6: DS207 */
        0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */
        /* 16: Vector */
        0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */
        /* 18: Vec. Scalar, 20: Vec. XOR, 22: HTM */
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */
        /* 24: Ext. Dec, 26: 64 bit ftrs, 28: PM ftrs */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */
        /* 30: MMR, 32: LE atomic, 34: EBB + ext EBB */
        0x80, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */
        /* 36: SPR SO, 38: Copy/Paste, 40: Radix MMU */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 36 - 41 */
        /* 42: PM, 44: PC RA, 46: SC vec'd */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */
        /* 48: SIMD, 50: QP BFP, 52: String */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */
        /* 54: DecFP, 56: DecI, 58: SHA */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */
        /* 60: NM atomic, 62: RNG */
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */
    };
    uint8_t *pa_features = NULL;
    size_t pa_size;

    if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_06, 0, cpu->compat_pvr)) {
        pa_features = pa_features_206;
        pa_size = sizeof(pa_features_206);
    }
    if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_07, 0, cpu->compat_pvr)) {
        pa_features = pa_features_207;
        pa_size = sizeof(pa_features_207);
    }
    if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, cpu->compat_pvr)) {
        pa_features = pa_features_300;
        pa_size = sizeof(pa_features_300);
    }
    if (!pa_features) {
        return;
    }

    if (ppc_hash64_has(cpu, PPC_HASH64_CI_LARGEPAGE)) {
        /*
         * Note: we keep CI large pages off by default because a 64K capable
         * guest provisioned with large pages might otherwise try to map a qemu
         * framebuffer (or other kind of memory mapped PCI BAR) using 64K pages
         * even if that qemu runs on a 4k host.
         * We add this bit back here if we are confident this is not an issue.
         */
        pa_features[3] |= 0x20;
    }
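    /*
     * Note on indexing: the arrays above start with a two-byte header
     * (attribute byte count and a zero), so pa_features[N + 2] touches
     * attribute byte N. The radix workaround below spells this out as
     * pa_features[40 + 2]; the HTM tweak's pa_features[24] is attribute
     * byte 22 ("22: HTM" in the table above).
     */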
    if ((spapr_get_cap(spapr, SPAPR_CAP_HTM) != 0) && pa_size > 24) {
        pa_features[24] |= 0x80;    /* Transactional memory support */
    }
    if (spapr->cas_pre_isa3_guest && pa_size > 40) {
        /* Workaround for broken kernels that attempt (guest) radix
         * mode when they can't handle it, if they see the radix bit set
         * in pa-features. So hide it from them. */
        pa_features[40 + 2] &= ~0x80; /* Radix MMU */
    }

    _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", pa_features, pa_size)));
}

static hwaddr spapr_node0_size(MachineState *machine)
{
    if (machine->numa_state->num_nodes) {
        int i;
        for (i = 0; i < machine->numa_state->num_nodes; ++i) {
            if (machine->numa_state->nodes[i].node_mem) {
                return MIN(pow2floor(machine->numa_state->nodes[i].node_mem),
                           machine->ram_size);
            }
        }
    }
    return machine->ram_size;
}

static void add_str(GString *s, const gchar *s1)
{
    g_string_append_len(s, s1, strlen(s1) + 1);
}

static int spapr_dt_memory_node(SpaprMachineState *spapr, void *fdt, int nodeid,
                                hwaddr start, hwaddr size)
{
    char mem_name[32];
    uint64_t mem_reg_property[2];
    int off;

    mem_reg_property[0] = cpu_to_be64(start);
    mem_reg_property[1] = cpu_to_be64(size);

    sprintf(mem_name, "memory@%" HWADDR_PRIx, start);
    off = fdt_add_subnode(fdt, 0, mem_name);
    _FDT(off);
    _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
    _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property,
                      sizeof(mem_reg_property))));
    spapr_numa_write_associativity_dt(spapr, fdt, off, nodeid);
    return off;
}

static uint32_t spapr_pc_dimm_node(MemoryDeviceInfoList *list, ram_addr_t addr)
{
    MemoryDeviceInfoList *info;

    for (info = list; info; info = info->next) {
        MemoryDeviceInfo *value = info->value;

        if (value && value->type == MEMORY_DEVICE_INFO_KIND_DIMM) {
            PCDIMMDeviceInfo *pcdimm_info = value->u.dimm.data;

            if (addr >= pcdimm_info->addr &&
                addr < (pcdimm_info->addr + pcdimm_info->size)) {
                return pcdimm_info->node;
            }
        }
    }

    return -1;
}

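/*
 * One entry of the ibm,dynamic-memory-v2 property. For example
 * (illustrative), the first cell queued in spapr_dt_dynamic_memory_v2()
 * below covers all boot-time LMBs: { seq_lmbs = nr_boot_lmbs,
 * base_addr = 0, drc_index = 0, aa_index = -1,
 * flags = RESERVED | DRC_INVALID }.
 */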
struct sPAPRDrconfCellV2 {
    uint32_t seq_lmbs;
    uint64_t base_addr;
    uint32_t drc_index;
    uint32_t aa_index;
    uint32_t flags;
} QEMU_PACKED;

typedef struct DrconfCellQueue {
    struct sPAPRDrconfCellV2 cell;
    QSIMPLEQ_ENTRY(DrconfCellQueue) entry;
} DrconfCellQueue;

static DrconfCellQueue *
spapr_get_drconf_cell(uint32_t seq_lmbs, uint64_t base_addr,
                      uint32_t drc_index, uint32_t aa_index,
                      uint32_t flags)
{
    DrconfCellQueue *elem;

    elem = g_malloc0(sizeof(*elem));
    elem->cell.seq_lmbs = cpu_to_be32(seq_lmbs);
    elem->cell.base_addr = cpu_to_be64(base_addr);
    elem->cell.drc_index = cpu_to_be32(drc_index);
    elem->cell.aa_index = cpu_to_be32(aa_index);
    elem->cell.flags = cpu_to_be32(flags);

    return elem;
}

static int spapr_dt_dynamic_memory_v2(SpaprMachineState *spapr, void *fdt,
                                      int offset, MemoryDeviceInfoList *dimms)
{
    MachineState *machine = MACHINE(spapr);
    uint8_t *int_buf, *cur_index;
    int ret;
    uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
    uint64_t addr, cur_addr, size;
    uint32_t nr_boot_lmbs = (machine->device_memory->base / lmb_size);
    uint64_t mem_end = machine->device_memory->base +
                       memory_region_size(&machine->device_memory->mr);
    uint32_t node, buf_len, nr_entries = 0;
    SpaprDrc *drc;
    DrconfCellQueue *elem, *next;
    MemoryDeviceInfoList *info;
    QSIMPLEQ_HEAD(, DrconfCellQueue) drconf_queue
        = QSIMPLEQ_HEAD_INITIALIZER(drconf_queue);

    /* Entry to cover RAM and the gap area */
    elem = spapr_get_drconf_cell(nr_boot_lmbs, 0, 0, -1,
                                 SPAPR_LMB_FLAGS_RESERVED |
                                 SPAPR_LMB_FLAGS_DRC_INVALID);
    QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
    nr_entries++;

    cur_addr = machine->device_memory->base;
    for (info = dimms; info; info = info->next) {
        PCDIMMDeviceInfo *di = info->value->u.dimm.data;

        addr = di->addr;
        size = di->size;
        node = di->node;

        /*
         * The NVDIMM area is hotpluggable after the NVDIMM is unplugged. The
         * area is marked hotpluggable in the next iteration for the bigger
         * chunk including the NVDIMM occupied area.
         */
        if (info->value->type == MEMORY_DEVICE_INFO_KIND_NVDIMM) {
            continue;
        }

        /* Entry for hot-pluggable area */
        if (cur_addr < addr) {
            drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, cur_addr / lmb_size);
            g_assert(drc);
            elem = spapr_get_drconf_cell((addr - cur_addr) / lmb_size,
                                         cur_addr, spapr_drc_index(drc), -1, 0);
            QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
            nr_entries++;
        }

        /* Entry for DIMM */
        drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, addr / lmb_size);
        g_assert(drc);
        elem = spapr_get_drconf_cell(size / lmb_size, addr,
                                     spapr_drc_index(drc), node,
                                     (SPAPR_LMB_FLAGS_ASSIGNED |
                                      SPAPR_LMB_FLAGS_HOTREMOVABLE));
        QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
        nr_entries++;
        cur_addr = addr + size;
    }

    /* Entry for remaining hotpluggable area */
    if (cur_addr < mem_end) {
        drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, cur_addr / lmb_size);
        g_assert(drc);
        elem = spapr_get_drconf_cell((mem_end - cur_addr) / lmb_size,
                                     cur_addr, spapr_drc_index(drc), -1, 0);
        QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
        nr_entries++;
    }

    buf_len = nr_entries * sizeof(struct sPAPRDrconfCellV2) + sizeof(uint32_t);
    int_buf = cur_index = g_malloc0(buf_len);
    *(uint32_t *)int_buf = cpu_to_be32(nr_entries);
    cur_index += sizeof(nr_entries);

    QSIMPLEQ_FOREACH_SAFE(elem, &drconf_queue, entry, next) {
        memcpy(cur_index, &elem->cell, sizeof(elem->cell));
        cur_index += sizeof(elem->cell);
        QSIMPLEQ_REMOVE(&drconf_queue, elem, DrconfCellQueue, entry);
        g_free(elem);
    }

    ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory-v2", int_buf, buf_len);
    g_free(int_buf);
    if (ret < 0) {
        return -1;
    }
    return 0;
}

static int spapr_dt_dynamic_memory(SpaprMachineState *spapr, void *fdt,
                                   int offset, MemoryDeviceInfoList *dimms)
{
    MachineState *machine = MACHINE(spapr);
    int i, ret;
    uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
    uint32_t device_lmb_start = machine->device_memory->base / lmb_size;
    uint32_t nr_lmbs = (machine->device_memory->base +
                       memory_region_size(&machine->device_memory->mr)) /
                       lmb_size;
    uint32_t *int_buf, *cur_index, buf_len;

    /*
     * Allocate enough buffer size to fit in ibm,dynamic-memory
     */
    buf_len = (nr_lmbs * SPAPR_DR_LMB_LIST_ENTRY_SIZE + 1) * sizeof(uint32_t);
    cur_index = int_buf = g_malloc0(buf_len);
    int_buf[0] = cpu_to_be32(nr_lmbs);
    cur_index++;
    for (i = 0; i < nr_lmbs; i++) {
        uint64_t addr = i * lmb_size;
        uint32_t *dynamic_memory = cur_index;

        if (i >= device_lmb_start) {
            SpaprDrc *drc;

            drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, i);
            g_assert(drc);

            dynamic_memory[0] = cpu_to_be32(addr >> 32);
            dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
            dynamic_memory[2] = cpu_to_be32(spapr_drc_index(drc));
            dynamic_memory[3] = cpu_to_be32(0); /* reserved */
            dynamic_memory[4] = cpu_to_be32(spapr_pc_dimm_node(dimms, addr));
            if (memory_region_present(get_system_memory(), addr)) {
                dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_ASSIGNED);
            } else {
                dynamic_memory[5] = cpu_to_be32(0);
            }
        } else {
            /*
             * LMB information for RMA, boot time RAM and gap b/n RAM and
             * device memory region -- all these are marked as reserved
             * and as having no valid DRC.
             */
            dynamic_memory[0] = cpu_to_be32(addr >> 32);
            dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
            dynamic_memory[2] = cpu_to_be32(0);
            dynamic_memory[3] = cpu_to_be32(0); /* reserved */
            dynamic_memory[4] = cpu_to_be32(-1);
            dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_RESERVED |
                                            SPAPR_LMB_FLAGS_DRC_INVALID);
        }

        cur_index += SPAPR_DR_LMB_LIST_ENTRY_SIZE;
    }
    ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory", int_buf, buf_len);
    g_free(int_buf);
    if (ret < 0) {
        return -1;
    }
    return 0;
}

/*
 * Adds ibm,dynamic-reconfiguration-memory node.
 * Refer to docs/specs/ppc-spapr-hotplug.txt for the documentation
 * of this device tree node.
 */
static int spapr_dt_dynamic_reconfiguration_memory(SpaprMachineState *spapr,
                                                   void *fdt)
{
    MachineState *machine = MACHINE(spapr);
    int ret, offset;
    uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
    uint32_t prop_lmb_size[] = {cpu_to_be32(lmb_size >> 32),
                                cpu_to_be32(lmb_size & 0xffffffff)};
    MemoryDeviceInfoList *dimms = NULL;

    /* Don't create the node if there is no device memory. */
    if (!machine->device_memory) {
        return 0;
    }

    offset = fdt_add_subnode(fdt, 0, "ibm,dynamic-reconfiguration-memory");

    ret = fdt_setprop(fdt, offset, "ibm,lmb-size", prop_lmb_size,
                    sizeof(prop_lmb_size));
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop_cell(fdt, offset, "ibm,memory-flags-mask", 0xff);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop_cell(fdt, offset, "ibm,memory-preservation-time", 0x0);
    if (ret < 0) {
        return ret;
    }

    /* ibm,dynamic-memory or ibm,dynamic-memory-v2 */
    dimms = qmp_memory_device_list();
    if (spapr_ovec_test(spapr->ov5_cas, OV5_DRMEM_V2)) {
        ret = spapr_dt_dynamic_memory_v2(spapr, fdt, offset, dimms);
    } else {
        ret = spapr_dt_dynamic_memory(spapr, fdt, offset, dimms);
    }
    qapi_free_MemoryDeviceInfoList(dimms);

    if (ret < 0) {
        return ret;
    }

    ret = spapr_numa_write_assoc_lookup_arrays(spapr, fdt, offset);

    return ret;
}

static int spapr_dt_memory(SpaprMachineState *spapr, void *fdt)
{
    MachineState *machine = MACHINE(spapr);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    hwaddr mem_start, node_size;
    int i, nb_nodes = machine->numa_state->num_nodes;
    NodeInfo *nodes = machine->numa_state->nodes;

    for (i = 0, mem_start = 0; i < nb_nodes; ++i) {
        if (!nodes[i].node_mem) {
            continue;
        }
        if (mem_start >= machine->ram_size) {
            node_size = 0;
        } else {
            node_size = nodes[i].node_mem;
            if (node_size > machine->ram_size - mem_start) {
                node_size = machine->ram_size - mem_start;
            }
        }
        if (!mem_start) {
            /* spapr_machine_init() checks for rma_size <= node0_size
             * already */
            spapr_dt_memory_node(spapr, fdt, i, 0, spapr->rma_size);
            mem_start += spapr->rma_size;
            node_size -= spapr->rma_size;
        }
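        /*
         * Emit the rest of the node as power-of-two sized chunks that
         * are naturally aligned at mem_start. Example (illustrative):
         * mem_start = 0x30000000 with node_size = 0x30000000 first
         * emits a 0x10000000 chunk (limited by the alignment of
         * mem_start), then a 0x20000000 chunk at the now better-aligned
         * 0x40000000.
         */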
        for ( ; node_size; ) {
            hwaddr sizetmp = pow2floor(node_size);

            /* mem_start != 0 here */
            if (ctzl(mem_start) < ctzl(sizetmp)) {
                sizetmp = 1ULL << ctzl(mem_start);
            }

            spapr_dt_memory_node(spapr, fdt, i, mem_start, sizetmp);
            node_size -= sizetmp;
            mem_start += sizetmp;
        }
    }

    /* Generate ibm,dynamic-reconfiguration-memory node if required */
    if (spapr_ovec_test(spapr->ov5_cas, OV5_DRCONF_MEMORY)) {
        int ret;

        g_assert(smc->dr_lmb_enabled);
        ret = spapr_dt_dynamic_reconfiguration_memory(spapr, fdt);
        if (ret) {
            return ret;
        }
    }

    return 0;
}

static void spapr_dt_cpu(CPUState *cs, void *fdt, int offset,
                         SpaprMachineState *spapr)
{
    MachineState *ms = MACHINE(spapr);
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
    int index = spapr_get_vcpu_id(cpu);
    uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40),
                       0xffffffff, 0xffffffff};
    uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq()
        : SPAPR_TIMEBASE_FREQ;
    uint32_t cpufreq = kvm_enabled() ? kvmppc_get_clockfreq() : 1000000000;
    uint32_t page_sizes_prop[64];
    size_t page_sizes_prop_size;
    unsigned int smp_threads = ms->smp.threads;
    uint32_t vcpus_per_socket = smp_threads * ms->smp.cores;
    uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};
    int compat_smt = MIN(smp_threads, ppc_compat_max_vthreads(cpu));
    SpaprDrc *drc;
    int drc_index;
    uint32_t radix_AP_encodings[PPC_PAGE_SIZES_MAX_SZ];
    int i;

    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, index);
    if (drc) {
        drc_index = spapr_drc_index(drc);
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index)));
    }

    _FDT((fdt_setprop_cell(fdt, offset, "reg", index)));
    _FDT((fdt_setprop_string(fdt, offset, "device_type", "cpu")));

    _FDT((fdt_setprop_cell(fdt, offset, "cpu-version", env->spr[SPR_PVR])));
    _FDT((fdt_setprop_cell(fdt, offset, "d-cache-block-size",
                           env->dcache_line_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "d-cache-line-size",
                           env->dcache_line_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "i-cache-block-size",
                           env->icache_line_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "i-cache-line-size",
                           env->icache_line_size)));

    if (pcc->l1_dcache_size) {
        _FDT((fdt_setprop_cell(fdt, offset, "d-cache-size",
                               pcc->l1_dcache_size)));
    } else {
        warn_report("Unknown L1 dcache size for cpu");
    }
    if (pcc->l1_icache_size) {
        _FDT((fdt_setprop_cell(fdt, offset, "i-cache-size",
                               pcc->l1_icache_size)));
    } else {
        warn_report("Unknown L1 icache size for cpu");
    }

    _FDT((fdt_setprop_cell(fdt, offset, "timebase-frequency", tbfreq)));
    _FDT((fdt_setprop_cell(fdt, offset, "clock-frequency", cpufreq)));
    _FDT((fdt_setprop_cell(fdt, offset, "slb-size", cpu->hash64_opts->slb_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "ibm,slb-size", cpu->hash64_opts->slb_size)));
    _FDT((fdt_setprop_string(fdt, offset, "status", "okay")));
    _FDT((fdt_setprop(fdt, offset, "64-bit", NULL, 0)));

    if (ppc_has_spr(cpu, SPR_PURR)) {
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,purr", 1)));
    }
    if (ppc_has_spr(cpu, SPR_SPURR)) {
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,spurr", 1)));
    }

    if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)) {
        _FDT((fdt_setprop(fdt, offset, "ibm,processor-segment-sizes",
                          segs, sizeof(segs))));
    }

    /* Advertise VSX (vector extensions) if available
     *   1               == VMX / Altivec available
     *   2               == VSX available
     *
     * Only CPUs for which we create core types in spapr_cpu_core.c
     * are possible, and all of those have VMX */
    if (env->insns_flags & PPC_ALTIVEC) {
        if (spapr_get_cap(spapr, SPAPR_CAP_VSX) != 0) {
            _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 2)));
        } else {
            _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 1)));
        }
    }

    /* Advertise DFP (Decimal Floating Point) if available
     *   0 / no property == no DFP
     *   1               == DFP available */
    if (spapr_get_cap(spapr, SPAPR_CAP_DFP) != 0) {
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,dfp", 1)));
    }

    page_sizes_prop_size = ppc_create_page_sizes_prop(cpu, page_sizes_prop,
                                                      sizeof(page_sizes_prop));
    if (page_sizes_prop_size) {
        _FDT((fdt_setprop(fdt, offset, "ibm,segment-page-sizes",
                          page_sizes_prop, page_sizes_prop_size)));
    }

    spapr_dt_pa_features(spapr, cpu, fdt, offset);

    _FDT((fdt_setprop_cell(fdt, offset, "ibm,chip-id",
                           cs->cpu_index / vcpus_per_socket)));

    _FDT((fdt_setprop(fdt, offset, "ibm,pft-size",
                      pft_size_prop, sizeof(pft_size_prop))));

    if (ms->numa_state->num_nodes > 1) {
        _FDT(spapr_numa_fixup_cpu_dt(spapr, fdt, offset, cpu));
    }

    _FDT(spapr_fixup_cpu_smt_dt(fdt, offset, cpu, compat_smt));

    if (pcc->radix_page_info) {
        for (i = 0; i < pcc->radix_page_info->count; i++) {
            radix_AP_encodings[i] =
                cpu_to_be32(pcc->radix_page_info->entries[i]);
        }
        _FDT((fdt_setprop(fdt, offset, "ibm,processor-radix-AP-encodings",
                          radix_AP_encodings,
                          pcc->radix_page_info->count *
                          sizeof(radix_AP_encodings[0]))));
    }

    /*
     * We set this property to let the guest know that it can use the large
     * decrementer and its width in bits.
     */
    if (spapr_get_cap(spapr, SPAPR_CAP_LARGE_DECREMENTER) != SPAPR_CAP_OFF) {
        _FDT((fdt_setprop_u32(fdt, offset, "ibm,dec-bits",
                              pcc->lrg_decr_bits)));
    }
}

static void spapr_dt_one_cpu(void *fdt, SpaprMachineState *spapr, CPUState *cs,
                             int cpus_offset)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    int index = spapr_get_vcpu_id(cpu);
    DeviceClass *dc = DEVICE_GET_CLASS(cs);
    g_autofree char *nodename = NULL;
    int offset;

    if (!spapr_is_thread0_in_vcore(spapr, cpu)) {
        return;
    }

    nodename = g_strdup_printf("%s@%x", dc->fw_name, index);
    offset = fdt_add_subnode(fdt, cpus_offset, nodename);
    _FDT(offset);
    spapr_dt_cpu(cs, fdt, offset, spapr);
}

static void spapr_dt_cpus(void *fdt, SpaprMachineState *spapr)
{
    CPUState **rev;
    CPUState *cs;
    int n_cpus;
    int cpus_offset;
    int i;

    cpus_offset = fdt_add_subnode(fdt, 0, "cpus");
    _FDT(cpus_offset);
    _FDT((fdt_setprop_cell(fdt, cpus_offset, "#address-cells", 0x1)));
    _FDT((fdt_setprop_cell(fdt, cpus_offset, "#size-cells", 0x0)));

    /*
     * We walk the CPUs in reverse order to ensure that CPU DT nodes
     * created by fdt_add_subnode() end up in the right order in FDT
     * for the guest kernel to enumerate the CPUs correctly.
     *
     * The CPU list cannot be traversed in reverse order, so we need
     * to do extra work.
     */
    n_cpus = 0;
    rev = NULL;
    CPU_FOREACH(cs) {
        rev = g_renew(CPUState *, rev, n_cpus + 1);
        rev[n_cpus++] = cs;
    }

    for (i = n_cpus - 1; i >= 0; i--) {
        spapr_dt_one_cpu(fdt, spapr, rev[i], cpus_offset);
    }

    g_free(rev);
}

static int spapr_dt_rng(void *fdt)
{
    int node;
    int ret;

    node = qemu_fdt_add_subnode(fdt, "/ibm,platform-facilities");
    if (node <= 0) {
        return -1;
    }
    ret = fdt_setprop_string(fdt, node, "device_type",
                             "ibm,platform-facilities");
    ret |= fdt_setprop_cell(fdt, node, "#address-cells", 0x1);
    ret |= fdt_setprop_cell(fdt, node, "#size-cells", 0x0);

    node = fdt_add_subnode(fdt, node, "ibm,random-v1");
    if (node <= 0) {
        return -1;
    }
    ret |= fdt_setprop_string(fdt, node, "compatible", "ibm,random");

    return ret ? -1 : 0;
}

static void spapr_dt_rtas(SpaprMachineState *spapr, void *fdt)
{
    MachineState *ms = MACHINE(spapr);
    int rtas;
    GString *hypertas = g_string_sized_new(256);
    GString *qemu_hypertas = g_string_sized_new(256);
    uint32_t lrdr_capacity[] = {
        0,
        0,
        cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE >> 32),
        cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE & 0xffffffff),
        cpu_to_be32(ms->smp.max_cpus / ms->smp.threads),
    };

    /* Do we have device memory? */
    if (MACHINE(spapr)->device_memory) {
        uint64_t max_device_addr = MACHINE(spapr)->device_memory->base +
            memory_region_size(&MACHINE(spapr)->device_memory->mr);

        lrdr_capacity[0] = cpu_to_be32(max_device_addr >> 32);
        lrdr_capacity[1] = cpu_to_be32(max_device_addr & 0xffffffff);
    }

    _FDT(rtas = fdt_add_subnode(fdt, 0, "rtas"));

    /* hypertas */
    add_str(hypertas, "hcall-pft");
    add_str(hypertas, "hcall-term");
    add_str(hypertas, "hcall-dabr");
    add_str(hypertas, "hcall-interrupt");
    add_str(hypertas, "hcall-tce");
    add_str(hypertas, "hcall-vio");
    add_str(hypertas, "hcall-splpar");
    add_str(hypertas, "hcall-join");
    add_str(hypertas, "hcall-bulk");
    add_str(hypertas, "hcall-set-mode");
    add_str(hypertas, "hcall-sprg0");
    add_str(hypertas, "hcall-copy");
    add_str(hypertas, "hcall-debug");
    add_str(hypertas, "hcall-vphn");
    if (spapr_get_cap(spapr, SPAPR_CAP_RPT_INVALIDATE) == SPAPR_CAP_ON) {
        add_str(hypertas, "hcall-rpt-invalidate");
    }

    add_str(qemu_hypertas, "hcall-memop1");

    if (!kvm_enabled() || kvmppc_spapr_use_multitce()) {
        add_str(hypertas, "hcall-multi-tce");
    }

    if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) {
        add_str(hypertas, "hcall-hpt-resize");
    }

    add_str(hypertas, "hcall-watchdog");

    _FDT(fdt_setprop(fdt, rtas, "ibm,hypertas-functions",
                     hypertas->str, hypertas->len));
    g_string_free(hypertas, TRUE);
    _FDT(fdt_setprop(fdt, rtas, "qemu,hypertas-functions",
                     qemu_hypertas->str, qemu_hypertas->len));
    g_string_free(qemu_hypertas, TRUE);

    spapr_numa_write_rtas_dt(spapr, fdt, rtas);

    /*
     * FWNMI reserves RTAS_ERROR_LOG_MAX for the machine check error log,
     * and 16 bytes per CPU for system reset error log plus an extra 8 bytes.
     *
     * The system reset requirements are driven by existing Linux and PowerVM
     * implementation which (contrary to PAPR) saves r3 in the error log
     * structure like machine check, so Linux expects to find the saved r3
     * value at the address in r3 upon FWNMI-enabled sreset interrupt (and
     * does not look at the error value).
     *
     * System reset interrupts are not subject to interlock like machine
     * check, so this memory area could be corrupted if the sreset is
     * interrupted by a machine check (or vice versa) if it was shared. To
     * prevent this, system reset uses per-CPU areas for the sreset save
     * area. A system reset that interrupts a system reset handler could
     * still overwrite this area, but Linux doesn't try to recover in that
     * case anyway.
     *
     * The extra 8 bytes is required because Linux's FWNMI error log check
     * is off-by-one.
     *
     * RTAS_MIN_SIZE is required for the RTAS blob itself.
     */
    _FDT(fdt_setprop_cell(fdt, rtas, "rtas-size", RTAS_MIN_SIZE +
                          RTAS_ERROR_LOG_MAX +
                          ms->smp.max_cpus * sizeof(uint64_t) * 2 +
                          sizeof(uint64_t)));
    _FDT(fdt_setprop_cell(fdt, rtas, "rtas-error-log-max",
                          RTAS_ERROR_LOG_MAX));
    _FDT(fdt_setprop_cell(fdt, rtas, "rtas-event-scan-rate",
                          RTAS_EVENT_SCAN_RATE));

    g_assert(msi_nonbroken);
    _FDT(fdt_setprop(fdt, rtas, "ibm,change-msix-capable", NULL, 0));

    /*
     * According to PAPR, the RTAS ibm,os-term call does not guarantee a
     * return back to the guest cpu. The additional ibm,extended-os-term
     * property indicates that an RTAS call return will always occur. Set
     * this property.
     */
    _FDT(fdt_setprop(fdt, rtas, "ibm,extended-os-term", NULL, 0));

    _FDT(fdt_setprop(fdt, rtas, "ibm,lrdr-capacity",
                     lrdr_capacity, sizeof(lrdr_capacity)));

    spapr_dt_rtas_tokens(fdt, rtas);
}

/*
 * Prepare ibm,arch-vec-5-platform-support, which indicates the MMU
 * and the XIVE features that the guest may request and thus the valid
 * values for bytes 23..26 of option vector 5:
 */
static void spapr_dt_ov5_platform_support(SpaprMachineState *spapr, void *fdt,
                                          int chosen)
{
    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);

    char val[2 * 4] = {
        23, 0x00, /* XICS / XIVE mode */
        24, 0x00, /* Hash/Radix, filled in below. */
        25, 0x00, /* Hash options: Segment Tables == no, GTSE == no. */
        26, 0x40, /* Radix options: GTSE == yes. */
    };

    if (spapr->irq->xics && spapr->irq->xive) {
        val[1] = SPAPR_OV5_XIVE_BOTH;
    } else if (spapr->irq->xive) {
        val[1] = SPAPR_OV5_XIVE_EXPLOIT;
    } else {
        assert(spapr->irq->xics);
        val[1] = SPAPR_OV5_XIVE_LEGACY;
    }

    if (!ppc_check_compat(first_ppc_cpu, CPU_POWERPC_LOGICAL_3_00, 0,
                          first_ppc_cpu->compat_pvr)) {
        /*
         * If we're in a pre POWER9 compat mode then the guest should
         * do hash and use the legacy interrupt mode
         */
        val[1] = SPAPR_OV5_XIVE_LEGACY; /* XICS */
        val[3] = 0x00; /* Hash */
        spapr_check_mmu_mode(false);
    } else if (kvm_enabled()) {
        if (kvmppc_has_cap_mmu_radix() && kvmppc_has_cap_mmu_hash_v3()) {
            val[3] = 0x80; /* OV5_MMU_BOTH */
        } else if (kvmppc_has_cap_mmu_radix()) {
            val[3] = 0x40; /* OV5_MMU_RADIX_300 */
        } else {
            val[3] = 0x00; /* Hash */
        }
    } else {
        /* V3 MMU supports both hash and radix in tcg (with dynamic switching) */
        val[3] = 0xC0;
    }
    _FDT(fdt_setprop(fdt, chosen, "ibm,arch-vec-5-platform-support",
                     val, sizeof(val)));
}

static void spapr_dt_chosen(SpaprMachineState *spapr, void *fdt, bool reset)
{
    MachineState *machine = MACHINE(spapr);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
    int chosen;

    _FDT(chosen = fdt_add_subnode(fdt, 0, "chosen"));

    if (reset) {
        const char *boot_device = spapr->boot_device;
        g_autofree char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus);
        size_t cb = 0;
        g_autofree char *bootlist = get_boot_devices_list(&cb);

        if (machine->kernel_cmdline && machine->kernel_cmdline[0]) {
            _FDT(fdt_setprop_string(fdt, chosen, "bootargs",
                                    machine->kernel_cmdline));
        }

        if (spapr->initrd_size) {
            _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-start",
                                  spapr->initrd_base));
            _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-end",
                                  spapr->initrd_base + spapr->initrd_size));
        }

        if (spapr->kernel_size) {
            uint64_t kprop[2] = { cpu_to_be64(spapr->kernel_addr),
                                  cpu_to_be64(spapr->kernel_size) };

            _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel",
                         &kprop, sizeof(kprop)));
            if (spapr->kernel_le) {
                _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel-le", NULL, 0));
            }
        }
        if (machine->boot_config.has_menu && machine->boot_config.menu) {
            _FDT((fdt_setprop_cell(fdt, chosen, "qemu,boot-menu", true)));
        }
        _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-width", graphic_width));
        _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-height", graphic_height));
        _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-depth", graphic_depth));

        if (cb && bootlist) {
            int i;

            for (i = 0; i < cb; i++) {
                if (bootlist[i] == '\n') {
                    bootlist[i] = ' ';
                }
            }
            _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-list", bootlist));
        }

        if (boot_device && strlen(boot_device)) {
            _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-device", boot_device));
        }

        if (spapr->want_stdout_path && stdout_path) {
            /*
             * The "linux,stdout-path" and "stdout" properties are
             * deprecated by the Linux kernel. New platforms should only
             * use the "stdout-path" property. Set the new property and
             * continue using the older property to remain compatible
             * with existing firmware.
             */
            _FDT(fdt_setprop_string(fdt, chosen, "linux,stdout-path", stdout_path));
            _FDT(fdt_setprop_string(fdt, chosen, "stdout-path", stdout_path));
        }

        /*
         * We can deal with BAR reallocation just fine, advertise it
         * to the guest
         */
        if (smc->linux_pci_probe) {
            _FDT(fdt_setprop_cell(fdt, chosen, "linux,pci-probe-only", 0));
        }

        spapr_dt_ov5_platform_support(spapr, fdt, chosen);
    }

    _FDT(fdt_setprop(fdt, chosen, "rng-seed", spapr->fdt_rng_seed, 32));

    _FDT(spapr_dt_ovec(fdt, chosen, spapr->ov5_cas, "ibm,architecture-vec-5"));
}

static void spapr_dt_hypervisor(SpaprMachineState *spapr, void *fdt)
{
    /* The /hypervisor node isn't in PAPR - this is a hack to allow PR
     * KVM to work under pHyp with some guest co-operation */
    int hypervisor;
    uint8_t hypercall[16];

    _FDT(hypervisor = fdt_add_subnode(fdt, 0, "hypervisor"));
    /* indicate KVM hypercall interface */
    _FDT(fdt_setprop_string(fdt, hypervisor, "compatible", "linux,kvm"));
    if (kvmppc_has_cap_fixup_hcalls()) {
        /*
         * Older KVM versions with older guest kernels were broken
         * with the magic page, don't allow the guest to map it.
         */
        if (!kvmppc_get_hypercall(cpu_env(first_cpu), hypercall,
                                  sizeof(hypercall))) {
            _FDT(fdt_setprop(fdt, hypervisor, "hcall-instructions",
                             hypercall, sizeof(hypercall)));
        }
    }
}

void *spapr_build_fdt(SpaprMachineState *spapr, bool reset, size_t space)
{
    MachineState *machine = MACHINE(spapr);
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
    uint32_t root_drc_type_mask = 0;
    int ret;
    void *fdt;
    SpaprPhbState *phb;
    char *buf;

    fdt = g_malloc0(space);
    _FDT((fdt_create_empty_tree(fdt, space)));

    /* Root node */
    _FDT(fdt_setprop_string(fdt, 0, "device_type", "chrp"));
    _FDT(fdt_setprop_string(fdt, 0, "model", "IBM pSeries (emulated by qemu)"));
    _FDT(fdt_setprop_string(fdt, 0, "compatible", "qemu,pseries"));

    /* Guest UUID & Name */
    buf = qemu_uuid_unparse_strdup(&qemu_uuid);
    _FDT(fdt_setprop_string(fdt, 0, "vm,uuid", buf));
    if (qemu_uuid_set) {
        _FDT(fdt_setprop_string(fdt, 0, "system-id", buf));
    }
    g_free(buf);

    if (qemu_get_vm_name()) {
        _FDT(fdt_setprop_string(fdt, 0, "ibm,partition-name",
                                qemu_get_vm_name()));
    }

    /* Host Model & Serial Number */
    if (spapr->host_model) {
        _FDT(fdt_setprop_string(fdt, 0, "host-model", spapr->host_model));
    } else if (smc->broken_host_serial_model && kvmppc_get_host_model(&buf)) {
        _FDT(fdt_setprop_string(fdt, 0, "host-model", buf));
        g_free(buf);
    }

    if (spapr->host_serial) {
        _FDT(fdt_setprop_string(fdt, 0, "host-serial", spapr->host_serial));
    } else if (smc->broken_host_serial_model && kvmppc_get_host_serial(&buf)) {
        _FDT(fdt_setprop_string(fdt, 0, "host-serial", buf));
        g_free(buf);
    }

    _FDT(fdt_setprop_cell(fdt, 0, "#address-cells", 2));
    _FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2));

    /* /interrupt controller */
    spapr_irq_dt(spapr, spapr_max_server_number(spapr), fdt, PHANDLE_INTC);

    ret = spapr_dt_memory(spapr, fdt);
    if (ret < 0) {
        error_report("couldn't setup memory nodes in fdt");
        exit(1);
    }

    /* /vdevice */
    spapr_dt_vdevice(spapr->vio_bus, fdt);

    if (object_resolve_path_type("", TYPE_SPAPR_RNG, NULL)) {
        ret = spapr_dt_rng(fdt);
        if (ret < 0) {
            error_report("could not set up rng device in the fdt");
            exit(1);
        }
    }

    QLIST_FOREACH(phb, &spapr->phbs, list) {
        ret = spapr_dt_phb(spapr, phb, PHANDLE_INTC, fdt, NULL);
        if (ret < 0) {
            error_report("couldn't setup PCI devices in fdt");
            exit(1);
        }
    }

    spapr_dt_cpus(fdt, spapr);

    /* ibm,drc-indexes and friends */
    if (smc->dr_lmb_enabled) {
        root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_LMB;
    }
    if (smc->dr_phb_enabled) {
        root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_PHB;
    }
    if (mc->nvdimm_supported) {
        root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_PMEM;
    }
    if (root_drc_type_mask) {
        _FDT(spapr_dt_drc(fdt, 0, NULL, root_drc_type_mask));
    }

    if (mc->has_hotpluggable_cpus) {
        int offset = fdt_path_offset(fdt, "/cpus");
        ret = spapr_dt_drc(fdt, offset, NULL, SPAPR_DR_CONNECTOR_TYPE_CPU);
        if (ret < 0) {
            error_report("Couldn't set up CPU DR device tree properties");
            exit(1);
        }
    }

    /* /event-sources */
    spapr_dt_events(spapr, fdt);

    /* /rtas */
    spapr_dt_rtas(spapr, fdt);

    /* /chosen */
    spapr_dt_chosen(spapr, fdt, reset);

    /* /hypervisor */
    if (kvm_enabled()) {
        spapr_dt_hypervisor(spapr, fdt);
    }

    /* Build memory reserve map */
    if (reset) {
        if (spapr->kernel_size) {
            _FDT((fdt_add_mem_rsv(fdt, spapr->kernel_addr,
                                  spapr->kernel_size)));
        }
        if (spapr->initrd_size) {
            _FDT((fdt_add_mem_rsv(fdt, spapr->initrd_base,
                                  spapr->initrd_size)));
        }
    }

    /* NVDIMM devices */
    if (mc->nvdimm_supported) {
        spapr_dt_persistent_memory(spapr, fdt);
    }

    return fdt;
}

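/*
 * Illustrative: the mask below keeps the low 28 bits of the ELF load
 * address, so a kernel linked at 0xc000000000000000 translates to
 * offset 0 and is placed at spapr->kernel_addr (KERNEL_LOAD_ADDR,
 * i.e. 4 MiB, unless changed via the kernel-addr machine property).
 */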
static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
{
    SpaprMachineState *spapr = opaque;

    return (addr & 0x0fffffff) + spapr->kernel_addr;
}

static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp,
                                    PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    /* The TCG path should also be holding the BQL at this point */
    g_assert(bql_locked());

    g_assert(!vhyp_cpu_in_nested(cpu));

    if (FIELD_EX64(env->msr, MSR, PR)) {
        hcall_dprintf("Hypercall made with MSR[PR]=1\n");
        env->gpr[3] = H_PRIVILEGE;
    } else {
        env->gpr[3] = spapr_hypercall(cpu, env->gpr[3], &env->gpr[4]);
    }
}

struct LPCRSyncState {
    target_ulong value;
    target_ulong mask;
};

static void do_lpcr_sync(CPUState *cs, run_on_cpu_data arg)
{
    struct LPCRSyncState *s = arg.host_ptr;
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    target_ulong lpcr;

    cpu_synchronize_state(cs);
    lpcr = env->spr[SPR_LPCR];
    lpcr &= ~s->mask;
    lpcr |= s->value;
    ppc_store_lpcr(cpu, lpcr);
}

void spapr_set_all_lpcrs(target_ulong value, target_ulong mask)
{
    CPUState *cs;
    struct LPCRSyncState s = {
        .value = value,
        .mask = mask
    };
    CPU_FOREACH(cs) {
        run_on_cpu(cs, do_lpcr_sync, RUN_ON_CPU_HOST_PTR(&s));
    }
}

/* May be used when the machine is not running */
void spapr_init_all_lpcrs(target_ulong value, target_ulong mask)
{
    CPUState *cs;
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *env = &cpu->env;
        target_ulong lpcr;

        lpcr = env->spr[SPR_LPCR];
        lpcr &= ~(LPCR_HR | LPCR_UPRT);
        ppc_store_lpcr(cpu, lpcr);
    }
}

static bool spapr_get_pate(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu,
                           target_ulong lpid, ppc_v3_pate_t *entry)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    if (!spapr_cpu->in_nested) {
        assert(lpid == 0);

        /* Copy PATE1:GR into PATE0:HR */
        entry->dw0 = spapr->patb_entry & PATE0_HR;
        entry->dw1 = spapr->patb_entry;

    } else {
        uint64_t patb, pats;

        assert(lpid != 0);

        patb = spapr->nested_ptcr & PTCR_PATB;
        pats = spapr->nested_ptcr & PTCR_PATS;
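        /*
         * Illustrative reading of the checks below: PTCR_PATS encodes
         * the partition table size as 2^(12 + pats) bytes, and each
         * entry is 16 bytes, hence the 2^(12 + pats - 4) entry count.
         */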

        /* Check if partition table is properly aligned */
        if (patb & MAKE_64BIT_MASK(0, pats + 12)) {
            return false;
        }

        /* Calculate number of entries */
        pats = 1ull << (pats + 12 - 4);
        if (pats <= lpid) {
            return false;
        }

        /* Grab entry */
        patb += 16 * lpid;
        entry->dw0 = ldq_phys(CPU(cpu)->as, patb);
        entry->dw1 = ldq_phys(CPU(cpu)->as, patb + 8);
    }

    return true;
}

#define HPTE(_table, _i)   (void *)(((uint64_t *)(_table)) + ((_i) * 2))
#define HPTE_VALID(_hpte)  (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_VALID)
#define HPTE_DIRTY(_hpte)  (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_HPTE_DIRTY)
#define CLEAN_HPTE(_hpte)  ((*(uint64_t *)(_hpte)) &= tswap64(~HPTE64_V_HPTE_DIRTY))
#define DIRTY_HPTE(_hpte)  ((*(uint64_t *)(_hpte)) |= tswap64(HPTE64_V_HPTE_DIRTY))
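/*
 * Each HPTE is two 64-bit doublewords, stored big-endian in the guest
 * hash table (hence the tswap64() above); the macros operate on the
 * first doubleword, which holds the architected valid bit and the
 * QEMU-private dirty bit used when migrating the HPT.
 */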
1412 
1413 /*
1414  * Get the fd to access the kernel htab, re-opening it if necessary
1415  */
1416 static int get_htab_fd(SpaprMachineState *spapr)
1417 {
1418     Error *local_err = NULL;
1419 
1420     if (spapr->htab_fd >= 0) {
1421         return spapr->htab_fd;
1422     }
1423 
1424     spapr->htab_fd = kvmppc_get_htab_fd(false, 0, &local_err);
1425     if (spapr->htab_fd < 0) {
1426         error_report_err(local_err);
1427     }
1428 
1429     return spapr->htab_fd;
1430 }
1431 
1432 void close_htab_fd(SpaprMachineState *spapr)
1433 {
1434     if (spapr->htab_fd >= 0) {
1435         close(spapr->htab_fd);
1436     }
1437     spapr->htab_fd = -1;
1438 }
1439 
1440 static hwaddr spapr_hpt_mask(PPCVirtualHypervisor *vhyp)
1441 {
1442     SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1443 
1444     return HTAB_SIZE(spapr) / HASH_PTEG_SIZE_64 - 1;
1445 }
1446 
1447 static target_ulong spapr_encode_hpt_for_kvm_pr(PPCVirtualHypervisor *vhyp)
1448 {
1449     SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1450 
1451     assert(kvm_enabled());
1452 
1453     if (!spapr->htab) {
1454         return 0;
1455     }
1456 
1457     return (target_ulong)(uintptr_t)spapr->htab | (spapr->htab_shift - 18);
1458 }
1459 
1460 static const ppc_hash_pte64_t *spapr_map_hptes(PPCVirtualHypervisor *vhyp,
1461                                                 hwaddr ptex, int n)
1462 {
1463     SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1464     hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
1465 
1466     if (!spapr->htab) {
1467         /*
1468          * HTAB is controlled by KVM. Fetch into temporary buffer
1469          */
1470         ppc_hash_pte64_t *hptes = g_malloc(n * HASH_PTE_SIZE_64);
1471         kvmppc_read_hptes(hptes, ptex, n);
1472         return hptes;
1473     }
1474 
1475     /*
1476      * HTAB is controlled by QEMU. Just point to the internally
1477      * accessible PTEG.
1478      */
1479     return (const ppc_hash_pte64_t *)(spapr->htab + pte_offset);
1480 }
1481 
1482 static void spapr_unmap_hptes(PPCVirtualHypervisor *vhyp,
1483                               const ppc_hash_pte64_t *hptes,
1484                               hwaddr ptex, int n)
1485 {
1486     SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1487 
1488     if (!spapr->htab) {
1489         g_free((void *)hptes);
1490     }
1491 
1492     /* Nothing to do for qemu managed HPT */
1493 }
1494 
1495 void spapr_store_hpte(PowerPCCPU *cpu, hwaddr ptex,
1496                       uint64_t pte0, uint64_t pte1)
1497 {
1498     SpaprMachineState *spapr = SPAPR_MACHINE(cpu->vhyp);
1499     hwaddr offset = ptex * HASH_PTE_SIZE_64;
1500 
1501     if (!spapr->htab) {
1502         kvmppc_write_hpte(ptex, pte0, pte1);
1503     } else {
1504         if (pte0 & HPTE64_V_VALID) {
1505             stq_p(spapr->htab + offset + HPTE64_DW1, pte1);
1506             /*
1507              * When setting valid, we write PTE1 first. This ensures
1508              * proper synchronization with the reading code in
1509              * ppc_hash64_pteg_search()
1510              */
1511             smp_wmb();
1512             stq_p(spapr->htab + offset, pte0);
1513         } else {
1514             stq_p(spapr->htab + offset, pte0);
1515             /*
1516              * When clearing it we set PTE0 first. This ensures proper
1517              * synchronization with the reading code in
1518              * ppc_hash64_pteg_search()
1519              */
1520             smp_wmb();
1521             stq_p(spapr->htab + offset + HPTE64_DW1, pte1);
1522         }
1523     }
1524 }
1525 
1526 static void spapr_hpte_set_c(PPCVirtualHypervisor *vhyp, hwaddr ptex,
1527                              uint64_t pte1)
1528 {
1529     hwaddr offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_C;
1530     SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1531 
1532     if (!spapr->htab) {
1533         /* There should always be a hash table when this is called */
1534         error_report("spapr_hpte_set_c called with no hash table!");
1535         return;
1536     }
1537 
1538     /* The HW performs a non-atomic byte update */
1539     stb_p(spapr->htab + offset, (pte1 & 0xff) | 0x80);
1540 }
1541 
1542 static void spapr_hpte_set_r(PPCVirtualHypervisor *vhyp, hwaddr ptex,
1543                              uint64_t pte1)
1544 {
1545     hwaddr offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_R;
1546     SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1547 
1548     if (!spapr->htab) {
1549         /* There should always be a hash table when this is called */
1550         error_report("spapr_hpte_set_r called with no hash table!");
1551         return;
1552     }
1553 
1554     /* The HW performs a non-atomic byte update */
1555     stb_p(spapr->htab + offset, ((pte1 >> 8) & 0xff) | 0x01);
1556 }
1557 
1558 int spapr_hpt_shift_for_ramsize(uint64_t ramsize)
1559 {
1560     int shift;
1561 
1562     /* We aim for a hash table of size 1/128 the size of RAM (rounded
1563      * up).  The PAPR recommendation is actually 1/64 of RAM size, but
1564      * that's much more than is needed for Linux guests */
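    /*
     * For example, a 16 GiB guest gives ctz64(2^34) - 7 = 27, i.e. a
     * 128 MiB HPT; the clamps below keep the result within the
     * architected range of 2^18 to 2^46 bytes.
     */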
1565     shift = ctz64(pow2ceil(ramsize)) - 7;
1566     shift = MAX(shift, 18); /* Minimum architected size */
1567     shift = MIN(shift, 46); /* Maximum architected size */
1568     return shift;
1569 }
1570 
1571 void spapr_free_hpt(SpaprMachineState *spapr)
1572 {
1573     qemu_vfree(spapr->htab);
1574     spapr->htab = NULL;
1575     spapr->htab_shift = 0;
1576     close_htab_fd(spapr);
1577 }
1578 
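/*
 * (Re)allocate the HPT for the given order.  kvmppc_reset_htab()
 * returns the allocated order if the kernel manages the table, 0 if
 * QEMU should allocate it in userspace, or a negative errno on
 * failure.
 */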
1579 int spapr_reallocate_hpt(SpaprMachineState *spapr, int shift, Error **errp)
1580 {
1581     ERRP_GUARD();
1582     long rc;
1583 
1584     /* Clean up any HPT info from a previous boot */
1585     spapr_free_hpt(spapr);
1586 
1587     rc = kvmppc_reset_htab(shift);
1588 
1589     if (rc == -EOPNOTSUPP) {
1590         error_setg(errp, "HPT not supported in nested guests");
1591         return -EOPNOTSUPP;
1592     }
1593 
1594     if (rc < 0) {
1595         /* kernel-side HPT needed, but couldn't allocate one */
1596         error_setg_errno(errp, errno, "Failed to allocate KVM HPT of order %d",
1597                          shift);
1598         error_append_hint(errp, "Try smaller maxmem?\n");
1599         return -errno;
1600     } else if (rc > 0) {
1601         /* kernel-side HPT allocated */
1602         if (rc != shift) {
1603             error_setg(errp,
1604                        "Requested order %d HPT, but kernel allocated order %ld",
1605                        shift, rc);
1606             error_append_hint(errp, "Try smaller maxmem?\n");
1607             return -ENOSPC;
1608         }
1609 
1610         spapr->htab_shift = shift;
1611         spapr->htab = NULL;
1612     } else {
1613         /* kernel-side HPT not needed, allocate in userspace instead */
1614         size_t size = 1ULL << shift;
1615         int i;
1616 
1617         spapr->htab = qemu_memalign(size, size);
1618         memset(spapr->htab, 0, size);
1619         spapr->htab_shift = shift;
1620 
1621         for (i = 0; i < size / HASH_PTE_SIZE_64; i++) {
1622             DIRTY_HPTE(HPTE(spapr->htab, i));
1623         }
1624     }
1625     /* We're setting up a hash table, so that means we're not radix */
1626     spapr->patb_entry = 0;
1627     spapr_init_all_lpcrs(0, LPCR_HR | LPCR_UPRT);
1628     return 0;
1629 }
1630 
1631 void spapr_setup_hpt(SpaprMachineState *spapr)
1632 {
1633     int hpt_shift;
1634 
1635     if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) {
1636         hpt_shift = spapr_hpt_shift_for_ramsize(MACHINE(spapr)->maxram_size);
1637     } else {
1638         uint64_t current_ram_size;
1639 
1640         current_ram_size = MACHINE(spapr)->ram_size + get_plugged_memory_size();
1641         hpt_shift = spapr_hpt_shift_for_ramsize(current_ram_size);
1642     }
1643     spapr_reallocate_hpt(spapr, hpt_shift, &error_fatal);
1644 
1645     if (kvm_enabled()) {
1646         hwaddr vrma_limit = kvmppc_vrma_limit(spapr->htab_shift);
1647 
1648         /* Check our RMA fits in the possible VRMA */
1649         if (vrma_limit < spapr->rma_size) {
1650             error_report("Unable to create %" HWADDR_PRIu
1651                          "MiB RMA (VRMA only allows %" HWADDR_PRIu "MiB)",
1652                          spapr->rma_size / MiB, vrma_limit / MiB);
1653             exit(EXIT_FAILURE);
1654         }
1655     }
1656 }
1657 
1658 void spapr_check_mmu_mode(bool guest_radix)
1659 {
1660     if (guest_radix) {
1661         if (kvm_enabled() && !kvmppc_has_cap_mmu_radix()) {
1662             error_report("Guest requested unavailable MMU mode (radix).");
1663             exit(EXIT_FAILURE);
1664         }
1665     } else {
1666         if (kvm_enabled() && kvmppc_has_cap_mmu_radix()
1667             && !kvmppc_has_cap_mmu_hash_v3()) {
1668             error_report("Guest requested unavailable MMU mode (hash).");
1669             exit(EXIT_FAILURE);
1670         }
1671     }
1672 }
1673 
1674 static void spapr_machine_reset(MachineState *machine, ShutdownCause reason)
1675 {
1676     SpaprMachineState *spapr = SPAPR_MACHINE(machine);
1677     PowerPCCPU *first_ppc_cpu;
1678     hwaddr fdt_addr;
1679     void *fdt;
1680     int rc;
1681 
1682     if (reason != SHUTDOWN_CAUSE_SNAPSHOT_LOAD) {
1683         /*
1684          * Record-replay snapshot load must not consume random numbers;
1685          * they were already replayed from the initial machine reset.
1686          */
1687         qemu_guest_getrandom_nofail(spapr->fdt_rng_seed, 32);
1688     }
1689 
1690     pef_kvm_reset(machine->cgs, &error_fatal);
1691     spapr_caps_apply(spapr);
1692 
1693     first_ppc_cpu = POWERPC_CPU(first_cpu);
1694     if (kvm_enabled() && kvmppc_has_cap_mmu_radix() &&
1695         ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, 0,
1696                               spapr->max_compat_pvr)) {
1697         /*
1698          * If using KVM with radix mode available, VCPUs can be started
1699          * without a HPT because KVM will start them in radix mode.
1700          * Set the GR bit in PATE so that we know there is no HPT.
1701          */
1702         spapr->patb_entry = PATE1_GR;
1703         spapr_set_all_lpcrs(LPCR_HR | LPCR_UPRT, LPCR_HR | LPCR_UPRT);
1704     } else {
1705         spapr_setup_hpt(spapr);
1706     }
1707 
1708     qemu_devices_reset(reason);
1709 
1710     spapr_ovec_cleanup(spapr->ov5_cas);
1711     spapr->ov5_cas = spapr_ovec_new();
1712 
1713     ppc_init_compat_all(spapr->max_compat_pvr, &error_fatal);
1714 
1715     /*
1716      * Fix up some of the default configuration of the XIVE devices.
1717      * This must be called after the machine devices have been reset.
1718      */
1719     spapr_irq_reset(spapr, &error_fatal);
1720 
1721     /*
1722      * There is no CAS under qtest. Simulate one to please the code that
1723      * depends on spapr->ov5_cas. This is especially needed to test device
1724      * unplug, so we do that before resetting the DRCs.
1725      */
1726     if (qtest_enabled()) {
1727         spapr_ovec_cleanup(spapr->ov5_cas);
1728         spapr->ov5_cas = spapr_ovec_clone(spapr->ov5);
1729     }
1730 
1731     spapr_nvdimm_finish_flushes();
1732 
1733     /* DRC reset may cause a device to be unplugged. This causes trouble
1734      * if that device is used by another device (e.g., a running vhost backend
1735      * will crash QEMU if the DIMM holding the vring goes away). To avoid such
1736      * situations, we reset DRCs after all devices have been reset.
1737      */
1738     spapr_drc_reset_all(spapr);
1739 
1740     spapr_clear_pending_events(spapr);
1741 
1742     /*
1743      * We place the device tree just below either the top of the RMA,
1744      * or just below 2GB, whichever is lower, so that it can be
1745      * processed with 32-bit real mode code if necessary
1746      */
1747     fdt_addr = MIN(spapr->rma_size, FDT_MAX_ADDR) - FDT_MAX_SIZE;
1748 
1749     fdt = spapr_build_fdt(spapr, true, FDT_MAX_SIZE);
1750     if (spapr->vof) {
1751         spapr_vof_reset(spapr, fdt, &error_fatal);
1752         /*
1753          * Do not pack the FDT as the client may change properties.
1754          * A VOF client does not expect the FDT, so we do not load it into the VM.
1755          */
1756     } else {
1757         rc = fdt_pack(fdt);
1758         /* Should only fail if we've built a corrupted tree */
1759         assert(rc == 0);
1760 
1761         spapr_cpu_set_entry_state(first_ppc_cpu, SPAPR_ENTRY_POINT,
1762                                   0, fdt_addr, 0);
1763         cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt));
1764     }
1765     qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt));
1766 
1767     g_free(spapr->fdt_blob);
1768     spapr->fdt_size = fdt_totalsize(fdt);
1769     spapr->fdt_initial_size = spapr->fdt_size;
1770     spapr->fdt_blob = fdt;
1771 
1772     /* Set machine->fdt for 'dumpdtb' QMP/HMP command */
1773     machine->fdt = fdt;
1774 
1775     /* Set up the entry state */
1776     first_ppc_cpu->env.gpr[5] = 0;
1777 
1778     spapr->fwnmi_system_reset_addr = -1;
1779     spapr->fwnmi_machine_check_addr = -1;
1780     spapr->fwnmi_machine_check_interlock = -1;
1781 
1782     /* Signal all vCPUs waiting on this condition */
1783     qemu_cond_broadcast(&spapr->fwnmi_machine_check_interlock_cond);
1784 
1785     migrate_del_blocker(&spapr->fwnmi_migration_blocker);
1786 }
1787 
1788 static void spapr_create_nvram(SpaprMachineState *spapr)
1789 {
1790     DeviceState *dev = qdev_new("spapr-nvram");
1791     DriveInfo *dinfo = drive_get(IF_PFLASH, 0, 0);
1792 
1793     if (dinfo) {
1794         qdev_prop_set_drive_err(dev, "drive", blk_by_legacy_dinfo(dinfo),
1795                                 &error_fatal);
1796     }
1797 
1798     qdev_realize_and_unref(dev, &spapr->vio_bus->bus, &error_fatal);
1799 
1800     spapr->nvram = (struct SpaprNvram *)dev;
1801 }
1802 
1803 static void spapr_rtc_create(SpaprMachineState *spapr)
1804 {
1805     object_initialize_child_with_props(OBJECT(spapr), "rtc", &spapr->rtc,
1806                                        sizeof(spapr->rtc), TYPE_SPAPR_RTC,
1807                                        &error_fatal, NULL);
1808     qdev_realize(DEVICE(&spapr->rtc), NULL, &error_fatal);
1809     object_property_add_alias(OBJECT(spapr), "rtc-time", OBJECT(&spapr->rtc),
1810                               "date");
1811 }
1812 
1813 /* Returns whether we want to use VGA or not */
1814 static bool spapr_vga_init(PCIBus *pci_bus, Error **errp)
1815 {
1816     vga_interface_created = true;
1817     switch (vga_interface_type) {
1818     case VGA_NONE:
1819         return false;
1820     case VGA_DEVICE:
1821         return true;
1822     case VGA_STD:
1823     case VGA_VIRTIO:
1824     case VGA_CIRRUS:
1825         return pci_vga_init(pci_bus) != NULL;
1826     default:
1827         error_setg(errp,
1828                    "Unsupported VGA mode, only -vga std or -vga virtio is supported");
1829         return false;
1830     }
1831 }
1832 
1833 static int spapr_pre_load(void *opaque)
1834 {
1835     int rc;
1836 
1837     rc = spapr_caps_pre_load(opaque);
1838     if (rc) {
1839         return rc;
1840     }
1841 
1842     return 0;
1843 }
1844 
1845 static int spapr_post_load(void *opaque, int version_id)
1846 {
1847     SpaprMachineState *spapr = (SpaprMachineState *)opaque;
1848     int err = 0;
1849 
1850     err = spapr_caps_post_migration(spapr);
1851     if (err) {
1852         return err;
1853     }
1854 
1855     /*
1856      * In earlier versions, there was no separate qdev for the PAPR
1857      * RTC, so the RTC offset was stored directly in sPAPREnvironment.
1858      * When migrating from those versions, poke the incoming offset
1859      * value into the RTC device.
1860      */
1861     if (version_id < 3) {
1862         err = spapr_rtc_import_offset(&spapr->rtc, spapr->rtc_offset);
1863         if (err) {
1864             return err;
1865         }
1866     }
1867 
1868     if (kvm_enabled() && spapr->patb_entry) {
1869         PowerPCCPU *cpu = POWERPC_CPU(first_cpu);
1870         bool radix = !!(spapr->patb_entry & PATE1_GR);
1871         bool gtse = !!(cpu->env.spr[SPR_LPCR] & LPCR_GTSE);
1872 
1873         /*
1874          * Update LPCR:HR and UPRT as they may not be set properly in
1875          * the stream
1876          */
1877         spapr_set_all_lpcrs(radix ? (LPCR_HR | LPCR_UPRT) : 0,
1878                             LPCR_HR | LPCR_UPRT);
1879 
1880         err = kvmppc_configure_v3_mmu(cpu, radix, gtse, spapr->patb_entry);
1881         if (err) {
1882             error_report("Process table config unsupported by the host");
1883             return -EINVAL;
1884         }
1885     }
1886 
1887     err = spapr_irq_post_load(spapr, version_id);
1888     if (err) {
1889         return err;
1890     }
1891 
1892     return err;
1893 }
1894 
1895 static int spapr_pre_save(void *opaque)
1896 {
1897     int rc;
1898 
1899     rc = spapr_caps_pre_save(opaque);
1900     if (rc) {
1901         return rc;
1902     }
1903 
1904     return 0;
1905 }
1906 
1907 static bool version_before_3(void *opaque, int version_id)
1908 {
1909     return version_id < 3;
1910 }
1911 
1912 static bool spapr_pending_events_needed(void *opaque)
1913 {
1914     SpaprMachineState *spapr = (SpaprMachineState *)opaque;
1915     return !QTAILQ_EMPTY(&spapr->pending_events);
1916 }
1917 
1918 static const VMStateDescription vmstate_spapr_event_entry = {
1919     .name = "spapr_event_log_entry",
1920     .version_id = 1,
1921     .minimum_version_id = 1,
1922     .fields = (const VMStateField[]) {
1923         VMSTATE_UINT32(summary, SpaprEventLogEntry),
1924         VMSTATE_UINT32(extended_length, SpaprEventLogEntry),
1925         VMSTATE_VBUFFER_ALLOC_UINT32(extended_log, SpaprEventLogEntry, 0,
1926                                      NULL, extended_length),
1927         VMSTATE_END_OF_LIST()
1928     },
1929 };
1930 
1931 static const VMStateDescription vmstate_spapr_pending_events = {
1932     .name = "spapr_pending_events",
1933     .version_id = 1,
1934     .minimum_version_id = 1,
1935     .needed = spapr_pending_events_needed,
1936     .fields = (const VMStateField[]) {
1937         VMSTATE_QTAILQ_V(pending_events, SpaprMachineState, 1,
1938                          vmstate_spapr_event_entry, SpaprEventLogEntry, next),
1939         VMSTATE_END_OF_LIST()
1940     },
1941 };
1942 
1943 static bool spapr_ov5_cas_needed(void *opaque)
1944 {
1945     SpaprMachineState *spapr = opaque;
1946     SpaprOptionVector *ov5_mask = spapr_ovec_new();
1947     bool cas_needed;
1948 
1949     /* Prior to the introduction of SpaprOptionVector, we had two option
1950      * vectors we dealt with: OV5_FORM1_AFFINITY, and OV5_DRCONF_MEMORY.
1951      * Both of these options encode machine topology into the device-tree
1952      * in such a way that the now-booted OS should still be able to interact
1953      * appropriately with QEMU regardless of what options were actually
1954      * negotiated on the source side.
1955      *
1956      * As such, we can avoid migrating the CAS-negotiated options if these
1957      * are the only options available on the current machine/platform.
1958      * Since these are the only options available for pseries-2.7 and
1959      * earlier, this allows us to maintain old->new/new->old migration
1960      * compatibility.
1961      *
1962      * For QEMU 2.8+, there are additional CAS-negotiable options available
1963      * via default pseries-2.8 machines and explicit command-line parameters.
1964      * Some of these options, like OV5_HP_EVT, *do* require QEMU to be aware
1965      * of the actual CAS-negotiated values to continue working properly. For
1966      * example, availability of memory unplug depends on knowing whether
1967      * OV5_HP_EVT was negotiated via CAS.
1968      *
1969      * Thus, for any cases where the set of available CAS-negotiable
1970      * options extends beyond OV5_FORM1_AFFINITY and OV5_DRCONF_MEMORY, we
1971      * include the CAS-negotiated options in the migration stream, unless
1972      * they only affect boot-time behaviour.
1973      */
1974     spapr_ovec_set(ov5_mask, OV5_FORM1_AFFINITY);
1975     spapr_ovec_set(ov5_mask, OV5_DRCONF_MEMORY);
1976     spapr_ovec_set(ov5_mask, OV5_DRMEM_V2);
1977 
1978     /* We need extra information if we have any bits outside the mask
1979      * defined above */
1980     cas_needed = !spapr_ovec_subset(spapr->ov5, ov5_mask);
1981 
1982     spapr_ovec_cleanup(ov5_mask);
1983 
1984     return cas_needed;
1985 }
1986 
1987 static const VMStateDescription vmstate_spapr_ov5_cas = {
1988     .name = "spapr_option_vector_ov5_cas",
1989     .version_id = 1,
1990     .minimum_version_id = 1,
1991     .needed = spapr_ov5_cas_needed,
1992     .fields = (const VMStateField[]) {
1993         VMSTATE_STRUCT_POINTER_V(ov5_cas, SpaprMachineState, 1,
1994                                  vmstate_spapr_ovec, SpaprOptionVector),
1995         VMSTATE_END_OF_LIST()
1996     },
1997 };
1998 
1999 static bool spapr_patb_entry_needed(void *opaque)
2000 {
2001     SpaprMachineState *spapr = opaque;
2002 
2003     return !!spapr->patb_entry;
2004 }
2005 
2006 static const VMStateDescription vmstate_spapr_patb_entry = {
2007     .name = "spapr_patb_entry",
2008     .version_id = 1,
2009     .minimum_version_id = 1,
2010     .needed = spapr_patb_entry_needed,
2011     .fields = (const VMStateField[]) {
2012         VMSTATE_UINT64(patb_entry, SpaprMachineState),
2013         VMSTATE_END_OF_LIST()
2014     },
2015 };
2016 
2017 static bool spapr_irq_map_needed(void *opaque)
2018 {
2019     SpaprMachineState *spapr = opaque;
2020 
2021     return spapr->irq_map && !bitmap_empty(spapr->irq_map, spapr->irq_map_nr);
2022 }
2023 
2024 static const VMStateDescription vmstate_spapr_irq_map = {
2025     .name = "spapr_irq_map",
2026     .version_id = 1,
2027     .minimum_version_id = 1,
2028     .needed = spapr_irq_map_needed,
2029     .fields = (const VMStateField[]) {
2030         VMSTATE_BITMAP(irq_map, SpaprMachineState, 0, irq_map_nr),
2031         VMSTATE_END_OF_LIST()
2032     },
2033 };
2034 
2035 static bool spapr_dtb_needed(void *opaque)
2036 {
2037     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(opaque);
2038 
2039     return smc->update_dt_enabled;
2040 }
2041 
2042 static int spapr_dtb_pre_load(void *opaque)
2043 {
2044     SpaprMachineState *spapr = (SpaprMachineState *)opaque;
2045 
2046     g_free(spapr->fdt_blob);
2047     spapr->fdt_blob = NULL;
2048     spapr->fdt_size = 0;
2049 
2050     return 0;
2051 }
2052 
2053 static const VMStateDescription vmstate_spapr_dtb = {
2054     .name = "spapr_dtb",
2055     .version_id = 1,
2056     .minimum_version_id = 1,
2057     .needed = spapr_dtb_needed,
2058     .pre_load = spapr_dtb_pre_load,
2059     .fields = (const VMStateField[]) {
2060         VMSTATE_UINT32(fdt_initial_size, SpaprMachineState),
2061         VMSTATE_UINT32(fdt_size, SpaprMachineState),
2062         VMSTATE_VBUFFER_ALLOC_UINT32(fdt_blob, SpaprMachineState, 0, NULL,
2063                                      fdt_size),
2064         VMSTATE_END_OF_LIST()
2065     },
2066 };
2067 
2068 static bool spapr_fwnmi_needed(void *opaque)
2069 {
2070     SpaprMachineState *spapr = (SpaprMachineState *)opaque;
2071 
2072     return spapr->fwnmi_machine_check_addr != -1;
2073 }
2074 
2075 static int spapr_fwnmi_pre_save(void *opaque)
2076 {
2077     SpaprMachineState *spapr = (SpaprMachineState *)opaque;
2078 
2079     /*
2080      * Check if machine check handling is in progress and print a
2081      * warning message.
2082      */
2083     if (spapr->fwnmi_machine_check_interlock != -1) {
2084         warn_report("A machine check is being handled during migration. The"
2085                 " handler may run and log a hardware error on the destination");
2086     }
2087 
2088     return 0;
2089 }
2090 
2091 static const VMStateDescription vmstate_spapr_fwnmi = {
2092     .name = "spapr_fwnmi",
2093     .version_id = 1,
2094     .minimum_version_id = 1,
2095     .needed = spapr_fwnmi_needed,
2096     .pre_save = spapr_fwnmi_pre_save,
2097     .fields = (const VMStateField[]) {
2098         VMSTATE_UINT64(fwnmi_system_reset_addr, SpaprMachineState),
2099         VMSTATE_UINT64(fwnmi_machine_check_addr, SpaprMachineState),
2100         VMSTATE_INT32(fwnmi_machine_check_interlock, SpaprMachineState),
2101         VMSTATE_END_OF_LIST()
2102     },
2103 };
2104 
2105 static const VMStateDescription vmstate_spapr = {
2106     .name = "spapr",
2107     .version_id = 3,
2108     .minimum_version_id = 1,
2109     .pre_load = spapr_pre_load,
2110     .post_load = spapr_post_load,
2111     .pre_save = spapr_pre_save,
2112     .fields = (const VMStateField[]) {
2113         /* used to be @next_irq */
2114         VMSTATE_UNUSED_BUFFER(version_before_3, 0, 4),
2115 
2116         /* RTC offset */
2117         VMSTATE_UINT64_TEST(rtc_offset, SpaprMachineState, version_before_3),
2118 
2119         VMSTATE_PPC_TIMEBASE_V(tb, SpaprMachineState, 2),
2120         VMSTATE_END_OF_LIST()
2121     },
2122     .subsections = (const VMStateDescription * const []) {
2123         &vmstate_spapr_ov5_cas,
2124         &vmstate_spapr_patb_entry,
2125         &vmstate_spapr_pending_events,
2126         &vmstate_spapr_cap_htm,
2127         &vmstate_spapr_cap_vsx,
2128         &vmstate_spapr_cap_dfp,
2129         &vmstate_spapr_cap_cfpc,
2130         &vmstate_spapr_cap_sbbc,
2131         &vmstate_spapr_cap_ibs,
2132         &vmstate_spapr_cap_hpt_maxpagesize,
2133         &vmstate_spapr_irq_map,
2134         &vmstate_spapr_cap_nested_kvm_hv,
2135         &vmstate_spapr_dtb,
2136         &vmstate_spapr_cap_large_decr,
2137         &vmstate_spapr_cap_ccf_assist,
2138         &vmstate_spapr_cap_fwnmi,
2139         &vmstate_spapr_fwnmi,
2140         &vmstate_spapr_cap_rpt_invalidate,
2141         NULL
2142     }
2143 };
2144 
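/*
 * HPT migration stream layout, as produced by the handlers below.
 * Every section starts with a be32 header: the htab order in the
 * setup section, 0 in later sections, or -1 whenever there is no HPT.
 * Iteration sections then carry a sequence of chunks, each encoded as
 * be32 index, be16 n_valid, be16 n_invalid, followed by n_valid raw
 * HPTEs, and end with an all-zeroes chunk header.
 */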
2145 static int htab_save_setup(QEMUFile *f, void *opaque)
2146 {
2147     SpaprMachineState *spapr = opaque;
2148 
2149     /* "Iteration" header */
2150     if (!spapr->htab_shift) {
2151         qemu_put_be32(f, -1);
2152     } else {
2153         qemu_put_be32(f, spapr->htab_shift);
2154     }
2155 
2156     if (spapr->htab) {
2157         spapr->htab_save_index = 0;
2158         spapr->htab_first_pass = true;
2159     } else if (spapr->htab_shift) {
2160         assert(kvm_enabled());
2161     }
2162 
2166     return 0;
2167 }
2168 
2169 static void htab_save_chunk(QEMUFile *f, SpaprMachineState *spapr,
2170                             int chunkstart, int n_valid, int n_invalid)
2171 {
2172     qemu_put_be32(f, chunkstart);
2173     qemu_put_be16(f, n_valid);
2174     qemu_put_be16(f, n_invalid);
2175     qemu_put_buffer(f, HPTE(spapr->htab, chunkstart),
2176                     HASH_PTE_SIZE_64 * n_valid);
2177 }
2178 
2179 static void htab_save_end_marker(QEMUFile *f)
2180 {
2181     qemu_put_be32(f, 0);
2182     qemu_put_be16(f, 0);
2183     qemu_put_be16(f, 0);
2184 }
2185 
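/*
 * First pass: scan the whole table in order, sending every valid entry
 * and clearing the dirty bit as we go, until either the table is
 * exhausted or the time/bandwidth budget is.
 */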
2186 static void htab_save_first_pass(QEMUFile *f, SpaprMachineState *spapr,
2187                                  int64_t max_ns)
2188 {
2189     bool has_timeout = max_ns != -1;
2190     int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
2191     int index = spapr->htab_save_index;
2192     int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2193 
2194     assert(spapr->htab_first_pass);
2195 
2196     do {
2197         int chunkstart;
2198 
2199         /* Consume invalid HPTEs */
2200         while ((index < htabslots)
2201                && !HPTE_VALID(HPTE(spapr->htab, index))) {
2202             CLEAN_HPTE(HPTE(spapr->htab, index));
2203             index++;
2204         }
2205 
2206         /* Consume valid HPTEs */
2207         chunkstart = index;
2208         while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
2209                && HPTE_VALID(HPTE(spapr->htab, index))) {
2210             CLEAN_HPTE(HPTE(spapr->htab, index));
2211             index++;
2212         }
2213 
2214         if (index > chunkstart) {
2215             int n_valid = index - chunkstart;
2216 
2217             htab_save_chunk(f, spapr, chunkstart, n_valid, 0);
2218 
2219             if (has_timeout &&
2220                 (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
2221                 break;
2222             }
2223         }
2224     } while ((index < htabslots) && !migration_rate_exceeded(f));
2225 
2226     if (index >= htabslots) {
2227         assert(index == htabslots);
2228         index = 0;
2229         spapr->htab_first_pass = false;
2230     }
2231     spapr->htab_save_index = index;
2232 }
2233 
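/*
 * Later passes: only entries whose dirty bit has been set again since
 * they were last sent are transmitted, as a run of valid entries
 * followed by a run of invalid ones.  Returns 1 once a complete scan
 * finds nothing left to send.
 */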
2234 static int htab_save_later_pass(QEMUFile *f, SpaprMachineState *spapr,
2235                                 int64_t max_ns)
2236 {
2237     bool final = max_ns < 0;
2238     int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
2239     int examined = 0, sent = 0;
2240     int index = spapr->htab_save_index;
2241     int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2242 
2243     assert(!spapr->htab_first_pass);
2244 
2245     do {
2246         int chunkstart, invalidstart;
2247 
2248         /* Consume non-dirty HPTEs */
2249         while ((index < htabslots)
2250                && !HPTE_DIRTY(HPTE(spapr->htab, index))) {
2251             index++;
2252             examined++;
2253         }
2254 
2255         chunkstart = index;
2256         /* Consume valid dirty HPTEs */
2257         while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
2258                && HPTE_DIRTY(HPTE(spapr->htab, index))
2259                && HPTE_VALID(HPTE(spapr->htab, index))) {
2260             CLEAN_HPTE(HPTE(spapr->htab, index));
2261             index++;
2262             examined++;
2263         }
2264 
2265         invalidstart = index;
2266         /* Consume invalid dirty HPTEs */
2267         while ((index < htabslots) && (index - invalidstart < USHRT_MAX)
2268                && HPTE_DIRTY(HPTE(spapr->htab, index))
2269                && !HPTE_VALID(HPTE(spapr->htab, index))) {
2270             CLEAN_HPTE(HPTE(spapr->htab, index));
2271             index++;
2272             examined++;
2273         }
2274 
2275         if (index > chunkstart) {
2276             int n_valid = invalidstart - chunkstart;
2277             int n_invalid = index - invalidstart;
2278 
2279             htab_save_chunk(f, spapr, chunkstart, n_valid, n_invalid);
2280             sent += index - chunkstart;
2281 
2282             if (!final && (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
2283                 break;
2284             }
2285         }
2286 
2287         if (examined >= htabslots) {
2288             break;
2289         }
2290 
2291         if (index >= htabslots) {
2292             assert(index == htabslots);
2293             index = 0;
2294         }
2295     } while ((examined < htabslots) && (!migration_rate_exceeded(f) || final));
2296 
2297     if (index >= htabslots) {
2298         assert(index == htabslots);
2299         index = 0;
2300     }
2301 
2302     spapr->htab_save_index = index;
2303 
2304     return (examined >= htabslots) && (sent == 0) ? 1 : 0;
2305 }
2306 
2307 #define MAX_ITERATION_NS    5000000 /* 5 ms */
2308 #define MAX_KVM_BUF_SIZE    2048
2309 
2310 static int htab_save_iterate(QEMUFile *f, void *opaque)
2311 {
2312     SpaprMachineState *spapr = opaque;
2313     int fd;
2314     int rc = 0;
2315 
2316     /* Iteration header */
2317     if (!spapr->htab_shift) {
2318         qemu_put_be32(f, -1);
2319         return 1;
2320     } else {
2321         qemu_put_be32(f, 0);
2322     }
2323 
2324     if (!spapr->htab) {
2325         assert(kvm_enabled());
2326 
2327         fd = get_htab_fd(spapr);
2328         if (fd < 0) {
2329             return fd;
2330         }
2331 
2332         rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, MAX_ITERATION_NS);
2333         if (rc < 0) {
2334             return rc;
2335         }
2336     } else if (spapr->htab_first_pass) {
2337         htab_save_first_pass(f, spapr, MAX_ITERATION_NS);
2338     } else {
2339         rc = htab_save_later_pass(f, spapr, MAX_ITERATION_NS);
2340     }
2341 
2342     htab_save_end_marker(f);
2343 
2344     return rc;
2345 }
2346 
2347 static int htab_save_complete(QEMUFile *f, void *opaque)
2348 {
2349     SpaprMachineState *spapr = opaque;
2350     int fd;
2351 
2352     /* Iteration header */
2353     if (!spapr->htab_shift) {
2354         qemu_put_be32(f, -1);
2355         return 0;
2356     } else {
2357         qemu_put_be32(f, 0);
2358     }
2359 
2360     if (!spapr->htab) {
2361         int rc;
2362 
2363         assert(kvm_enabled());
2364 
2365         fd = get_htab_fd(spapr);
2366         if (fd < 0) {
2367             return fd;
2368         }
2369 
2370         rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, -1);
2371         if (rc < 0) {
2372             return rc;
2373         }
2374     } else {
2375         if (spapr->htab_first_pass) {
2376             htab_save_first_pass(f, spapr, -1);
2377         }
2378         htab_save_later_pass(f, spapr, -1);
2379     }
2380 
2381     /* End marker */
2382     htab_save_end_marker(f);
2383 
2384     return 0;
2385 }
2386 
2387 static int htab_load(QEMUFile *f, void *opaque, int version_id)
2388 {
2389     SpaprMachineState *spapr = opaque;
2390     uint32_t section_hdr;
2391     int fd = -1;
2392     Error *local_err = NULL;
2393 
2394     if (version_id != 1) {
2395         error_report("htab_load() bad version");
2396         return -EINVAL;
2397     }
2398 
2399     section_hdr = qemu_get_be32(f);
2400 
2401     if (section_hdr == -1) {
2402         spapr_free_hpt(spapr);
2403         return 0;
2404     }
2405 
2406     if (section_hdr) {
2407         int ret;
2408 
2409         /* First section gives the htab size */
2410         ret = spapr_reallocate_hpt(spapr, section_hdr, &local_err);
2411         if (ret < 0) {
2412             error_report_err(local_err);
2413             return ret;
2414         }
2415         return 0;
2416     }
2417 
2418     if (!spapr->htab) {
2419         assert(kvm_enabled());
2420 
2421         fd = kvmppc_get_htab_fd(true, 0, &local_err);
2422         if (fd < 0) {
2423             error_report_err(local_err);
2424             return fd;
2425         }
2426     }
2427 
2428     while (true) {
2429         uint32_t index;
2430         uint16_t n_valid, n_invalid;
2431 
2432         index = qemu_get_be32(f);
2433         n_valid = qemu_get_be16(f);
2434         n_invalid = qemu_get_be16(f);
2435 
2436         if ((index == 0) && (n_valid == 0) && (n_invalid == 0)) {
2437             /* End of Stream */
2438             break;
2439         }
2440 
2441         if ((index + n_valid + n_invalid) >
2442             (HTAB_SIZE(spapr) / HASH_PTE_SIZE_64)) {
2443             /* Bad index in stream */
2444             error_report(
2445                 "htab_load() bad index %u (%hu+%hu entries) in htab stream (htab_shift=%d)",
2446                 index, n_valid, n_invalid, spapr->htab_shift);
2447             return -EINVAL;
2448         }
2449 
2450         if (spapr->htab) {
2451             if (n_valid) {
2452                 qemu_get_buffer(f, HPTE(spapr->htab, index),
2453                                 HASH_PTE_SIZE_64 * n_valid);
2454             }
2455             if (n_invalid) {
2456                 memset(HPTE(spapr->htab, index + n_valid), 0,
2457                        HASH_PTE_SIZE_64 * n_invalid);
2458             }
2459         } else {
2460             int rc;
2461 
2462             assert(fd >= 0);
2463 
2464             rc = kvmppc_load_htab_chunk(f, fd, index, n_valid, n_invalid,
2465                                         &local_err);
2466             if (rc < 0) {
2467                 error_report_err(local_err);
2468                 return rc;
2469             }
2470         }
2471     }
2472 
2473     if (!spapr->htab) {
2474         assert(fd >= 0);
2475         close(fd);
2476     }
2477 
2478     return 0;
2479 }
2480 
2481 static void htab_save_cleanup(void *opaque)
2482 {
2483     SpaprMachineState *spapr = opaque;
2484 
2485     close_htab_fd(spapr);
2486 }
2487 
2488 static SaveVMHandlers savevm_htab_handlers = {
2489     .save_setup = htab_save_setup,
2490     .save_live_iterate = htab_save_iterate,
2491     .save_live_complete_precopy = htab_save_complete,
2492     .save_cleanup = htab_save_cleanup,
2493     .load_state = htab_load,
2494 };
2495 
2496 static void spapr_boot_set(void *opaque, const char *boot_device,
2497                            Error **errp)
2498 {
2499     SpaprMachineState *spapr = SPAPR_MACHINE(opaque);
2500 
2501     g_free(spapr->boot_device);
2502     spapr->boot_device = g_strdup(boot_device);
2503 }
2504 
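/*
 * Create one DR connector per potential hotplug LMB; the DRC index is
 * the LMB's base address in units of SPAPR_MEMORY_BLOCK_SIZE.
 */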
2505 static void spapr_create_lmb_dr_connectors(SpaprMachineState *spapr)
2506 {
2507     MachineState *machine = MACHINE(spapr);
2508     uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
2509     uint32_t nr_lmbs = (machine->maxram_size - machine->ram_size) / lmb_size;
2510     int i;
2511 
2512     g_assert(!nr_lmbs || machine->device_memory);
2513     for (i = 0; i < nr_lmbs; i++) {
2514         uint64_t addr;
2515 
2516         addr = i * lmb_size + machine->device_memory->base;
2517         spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_LMB,
2518                                addr / lmb_size);
2519     }
2520 }
2521 
2522 /*
2523  * If RAM size, maxmem size and individual node mem sizes aren't aligned
2524  * to SPAPR_MEMORY_BLOCK_SIZE (256 MiB), then refuse to start the guest
2525  * since we can't support such unaligned sizes with DRCONF_MEMORY.
2526  */
2527 static void spapr_validate_node_memory(MachineState *machine, Error **errp)
2528 {
2529     int i;
2530 
2531     if (machine->ram_size % SPAPR_MEMORY_BLOCK_SIZE) {
2532         error_setg(errp, "Memory size 0x" RAM_ADDR_FMT
2533                    " is not aligned to %" PRIu64 " MiB",
2534                    machine->ram_size,
2535                    SPAPR_MEMORY_BLOCK_SIZE / MiB);
2536         return;
2537     }
2538 
2539     if (machine->maxram_size % SPAPR_MEMORY_BLOCK_SIZE) {
2540         error_setg(errp, "Maximum memory size 0x" RAM_ADDR_FMT
2541                    " is not aligned to %" PRIu64 " MiB",
2542                    machine->maxram_size,
2543                    SPAPR_MEMORY_BLOCK_SIZE / MiB);
2544         return;
2545     }
2546 
2547     for (i = 0; i < machine->numa_state->num_nodes; i++) {
2548         if (machine->numa_state->nodes[i].node_mem % SPAPR_MEMORY_BLOCK_SIZE) {
2549             error_setg(errp,
2550                        "Node %d memory size 0x%" PRIx64
2551                        " is not aligned to %" PRIu64 " MiB",
2552                        i, machine->numa_state->nodes[i].node_mem,
2553                        SPAPR_MEMORY_BLOCK_SIZE / MiB);
2554             return;
2555         }
2556     }
2557 }
2558 
2559 /* find cpu slot in machine->possible_cpus by core_id */
2560 static CPUArchId *spapr_find_cpu_slot(MachineState *ms, uint32_t id, int *idx)
2561 {
2562     int index = id / ms->smp.threads;
2563 
2564     if (index >= ms->possible_cpus->len) {
2565         return NULL;
2566     }
2567     if (idx) {
2568         *idx = index;
2569     }
2570     return &ms->possible_cpus->cpus[index];
2571 }
2572 
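/*
 * Pick the VSMT (virtual SMT) mode, i.e. the threads/core spacing used
 * when computing VCPU ids (see spapr_vcpu_id()).  It must be at least
 * the configured threads/core and, with KVM, acceptable to the host.
 */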
2573 static void spapr_set_vsmt_mode(SpaprMachineState *spapr, Error **errp)
2574 {
2575     MachineState *ms = MACHINE(spapr);
2576     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
2577     Error *local_err = NULL;
2578     bool vsmt_user = !!spapr->vsmt;
2579     int kvm_smt = kvmppc_smt_threads();
2580     int ret;
2581     unsigned int smp_threads = ms->smp.threads;
2582 
2583     if (tcg_enabled()) {
2584         if (smp_threads > 1 &&
2585             !ppc_type_check_compat(ms->cpu_type, CPU_POWERPC_LOGICAL_2_07, 0,
2586                                    spapr->max_compat_pvr)) {
2587             error_setg(errp, "TCG only supports SMT on POWER8 or newer CPUs");
2588             return;
2589         }
2590 
2591         if (smp_threads > 8) {
2592             error_setg(errp, "TCG cannot support more than 8 threads/core "
2593                        "on a pseries machine");
2594             return;
2595         }
2596     }
2597     if (!is_power_of_2(smp_threads)) {
2598         error_setg(errp, "Cannot support %d threads/core on a pseries "
2599                    "machine because it must be a power of 2", smp_threads);
2600         return;
2601     }
2602 
2603     /* Determine the VSMT mode to use: */
2604     if (vsmt_user) {
2605         if (spapr->vsmt < smp_threads) {
2606             error_setg(errp, "Cannot support VSMT mode %d"
2607                        " because it must be >= threads/core (%d)",
2608                        spapr->vsmt, smp_threads);
2609             return;
2610         }
2611         /* In this case, spapr->vsmt has been set by the command line */
2612     } else if (!smc->smp_threads_vsmt) {
2613         /*
2614          * Default VSMT value is tricky, because we need it to be as
2615          * consistent as possible (for migration), but this requires
2616          * changing it for at least some existing cases.  We pick 8 as
2617          * the value that we'd get with KVM on POWER8, the
2618          * overwhelmingly common case in production systems.
2619          */
2620         spapr->vsmt = MAX(8, smp_threads);
2621     } else {
2622         spapr->vsmt = smp_threads;
2623     }
2624 
2625     /* KVM: If necessary, set the SMT mode: */
2626     if (kvm_enabled() && (spapr->vsmt != kvm_smt)) {
2627         ret = kvmppc_set_smt_threads(spapr->vsmt);
2628         if (ret) {
2629             /* Looks like KVM isn't able to change VSMT mode */
2630             error_setg(&local_err,
2631                        "Failed to set KVM's VSMT mode to %d (errno %d)",
2632                        spapr->vsmt, ret);
2633             /* We can live with that if the default one is big enough
2634              * for the number of threads, and a submultiple of the one
2635              * we want.  In this case we'll waste some vcpu ids, but
2636              * behaviour will be correct */
2637             if ((kvm_smt >= smp_threads) && ((spapr->vsmt % kvm_smt) == 0)) {
2638                 warn_report_err(local_err);
2639             } else {
2640                 if (!vsmt_user) {
2641                     error_append_hint(&local_err,
2642                                       "On PPC, a VM with %d threads/core"
2643                                       " on a host with %d threads/core"
2644                                       " requires the use of VSMT mode %d.\n",
2645                                       smp_threads, kvm_smt, spapr->vsmt);
2646                 }
2647                 kvmppc_error_append_smt_possible_hint(&local_err);
2648                 error_propagate(errp, local_err);
2649             }
2650         }
2651     }
2652     /* else TCG: nothing to do currently */
2653 }
2654 
2655 static void spapr_init_cpus(SpaprMachineState *spapr)
2656 {
2657     MachineState *machine = MACHINE(spapr);
2658     MachineClass *mc = MACHINE_GET_CLASS(machine);
2659     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
2660     const char *type = spapr_get_cpu_core_type(machine->cpu_type);
2661     const CPUArchIdList *possible_cpus;
2662     unsigned int smp_cpus = machine->smp.cpus;
2663     unsigned int smp_threads = machine->smp.threads;
2664     unsigned int max_cpus = machine->smp.max_cpus;
2665     int boot_cores_nr = smp_cpus / smp_threads;
2666     int i;
2667 
2668     possible_cpus = mc->possible_cpu_arch_ids(machine);
2669     if (mc->has_hotpluggable_cpus) {
2670         if (smp_cpus % smp_threads) {
2671             error_report("smp_cpus (%u) must be multiple of threads (%u)",
2672                          smp_cpus, smp_threads);
2673             exit(1);
2674         }
2675         if (max_cpus % smp_threads) {
2676             error_report("max_cpus (%u) must be multiple of threads (%u)",
2677                          max_cpus, smp_threads);
2678             exit(1);
2679         }
2680     } else {
2681         if (max_cpus != smp_cpus) {
2682             error_report("This machine version does not support CPU hotplug");
2683             exit(1);
2684         }
2685         boot_cores_nr = possible_cpus->len;
2686     }
2687 
2688     if (smc->pre_2_10_has_unused_icps) {
2689         for (i = 0; i < spapr_max_server_number(spapr); i++) {
2690             /* Dummy entries get deregistered when real ICPState objects
2691              * are registered during CPU core hotplug.
2692              */
2693             pre_2_10_vmstate_register_dummy_icp(i);
2694         }
2695     }
2696 
2697     for (i = 0; i < possible_cpus->len; i++) {
2698         int core_id = i * smp_threads;
2699 
2700         if (mc->has_hotpluggable_cpus) {
2701             spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_CPU,
2702                                    spapr_vcpu_id(spapr, core_id));
2703         }
2704 
2705         if (i < boot_cores_nr) {
2706             Object *core = object_new(type);
2707             int nr_threads = smp_threads;
2708 
2709             /* Handle the partially filled core for older machine types */
2710             if ((i + 1) * smp_threads >= smp_cpus) {
2711                 nr_threads = smp_cpus - i * smp_threads;
2712             }
2713 
2714             object_property_set_int(core, "nr-threads", nr_threads,
2715                                     &error_fatal);
2716             object_property_set_int(core, CPU_CORE_PROP_CORE_ID, core_id,
2717                                     &error_fatal);
2718             qdev_realize(DEVICE(core), NULL, &error_fatal);
2719 
2720             object_unref(core);
2721         }
2722     }
2723 }
2724 
2725 static PCIHostState *spapr_create_default_phb(void)
2726 {
2727     DeviceState *dev;
2728 
2729     dev = qdev_new(TYPE_SPAPR_PCI_HOST_BRIDGE);
2730     qdev_prop_set_uint32(dev, "index", 0);
2731     sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
2732 
2733     return PCI_HOST_BRIDGE(dev);
2734 }
2735 
2736 static hwaddr spapr_rma_size(SpaprMachineState *spapr, Error **errp)
2737 {
2738     MachineState *machine = MACHINE(spapr);
2739     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
2740     hwaddr rma_size = machine->ram_size;
2741     hwaddr node0_size = spapr_node0_size(machine);
2742 
2743     /* RMA has to fit in the first NUMA node */
2744     rma_size = MIN(rma_size, node0_size);
2745 
2746     /*
2747      * VRMA access is via a special 1TiB SLB mapping, so the RMA can
2748      * never exceed that
2749      */
2750     rma_size = MIN(rma_size, 1 * TiB);
2751 
2752     /*
2753      * Clamp the RMA size based on machine type.  This is for
2754      * migration compatibility with older qemu versions, which limited
2755      * the RMA size for complicated and mostly bad reasons.
2756      */
2757     if (smc->rma_limit) {
2758         rma_size = MIN(rma_size, smc->rma_limit);
2759     }
2760 
2761     if (rma_size < MIN_RMA_SLOF) {
2762         error_setg(errp,
2763                    "pSeries SLOF firmware requires >= %" HWADDR_PRIu
2764                    " MiB guest RMA (Real Mode Area memory)",
2765                    MIN_RMA_SLOF / MiB);
2766         return 0;
2767     }
2768 
2769     return rma_size;
2770 }
2771 
2772 static void spapr_create_nvdimm_dr_connectors(SpaprMachineState *spapr)
2773 {
2774     MachineState *machine = MACHINE(spapr);
2775     int i;
2776 
2777     for (i = 0; i < machine->ram_slots; i++) {
2778         spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_PMEM, i);
2779     }
2780 }
2781 
2782 /* pSeries LPAR / sPAPR hardware init */
2783 static void spapr_machine_init(MachineState *machine)
2784 {
2785     SpaprMachineState *spapr = SPAPR_MACHINE(machine);
2786     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
2787     MachineClass *mc = MACHINE_GET_CLASS(machine);
2788     const char *bios_default = spapr->vof ? FW_FILE_NAME_VOF : FW_FILE_NAME;
2789     const char *bios_name = machine->firmware ?: bios_default;
2790     g_autofree char *filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
2791     const char *kernel_filename = machine->kernel_filename;
2792     const char *initrd_filename = machine->initrd_filename;
2793     PCIHostState *phb;
2794     bool has_vga;
2795     int i;
2796     MemoryRegion *sysmem = get_system_memory();
2797     long load_limit, fw_size;
2798     Error *resize_hpt_err = NULL;
2799     NICInfo *nd;
2800 
2801     if (!filename) {
2802         error_report("Could not find LPAR firmware '%s'", bios_name);
2803         exit(1);
2804     }
2805     fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE);
2806     if (fw_size <= 0) {
2807         error_report("Could not load LPAR firmware '%s'", filename);
2808         exit(1);
2809     }
2810 
2811     /*
2812      * If Secure VM (PEF) support is configured, initialize it
2813      */
2814     pef_kvm_init(machine->cgs, &error_fatal);
2815 
2816     msi_nonbroken = true;
2817 
2818     QLIST_INIT(&spapr->phbs);
2819     QTAILQ_INIT(&spapr->pending_dimm_unplugs);
2820 
2821     /* Determine capabilities to run with */
2822     spapr_caps_init(spapr);
2823 
2824     kvmppc_check_papr_resize_hpt(&resize_hpt_err);
2825     if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DEFAULT) {
2826         /*
2827          * If the user explicitly requested a mode we should either
2828          * supply it, or fail completely (which we do below).  But if
2829          * it's not set explicitly, we reset our mode to something
2830          * that works
2831          */
2832         if (resize_hpt_err) {
2833             spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED;
2834             error_free(resize_hpt_err);
2835             resize_hpt_err = NULL;
2836         } else {
2837             spapr->resize_hpt = smc->resize_hpt_default;
2838         }
2839     }
2840 
2841     assert(spapr->resize_hpt != SPAPR_RESIZE_HPT_DEFAULT);
2842 
2843     if ((spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) && resize_hpt_err) {
2844         /*
2845          * User requested HPT resize, but this host can't supply it.  Bail out
2846          */
2847         error_report_err(resize_hpt_err);
2848         exit(1);
2849     }
2850     error_free(resize_hpt_err);
2851 
2852     spapr->rma_size = spapr_rma_size(spapr, &error_fatal);
2853 
2854     /* Set up a load limit for the ramdisk, leaving room for SLOF and the FDT */
2855     load_limit = MIN(spapr->rma_size, FDT_MAX_ADDR) - FW_OVERHEAD;
2856 
2857     /*
2858      * VSMT must be set in order to be able to compute VCPU ids, ie to
2859      * call spapr_max_server_number() or spapr_vcpu_id().
2860      */
2861     spapr_set_vsmt_mode(spapr, &error_fatal);
2862 
2863     /* Set up Interrupt Controller before we create the VCPUs */
2864     spapr_irq_init(spapr, &error_fatal);
2865 
2866     /* Set up containers for ibm,client-architecture-support negotiated options
2867      */
2868     spapr->ov5 = spapr_ovec_new();
2869     spapr->ov5_cas = spapr_ovec_new();
2870 
2871     if (smc->dr_lmb_enabled) {
2872         spapr_ovec_set(spapr->ov5, OV5_DRCONF_MEMORY);
2873         spapr_validate_node_memory(machine, &error_fatal);
2874     }
2875 
2876     spapr_ovec_set(spapr->ov5, OV5_FORM1_AFFINITY);
2877 
2878     /* Do not advertise FORM2 NUMA support for pseries-6.1 and older */
2879     if (!smc->pre_6_2_numa_affinity) {
2880         spapr_ovec_set(spapr->ov5, OV5_FORM2_AFFINITY);
2881     }
2882 
2883     /* advertise support for dedicated HP event source to guests */
2884     if (spapr->use_hotplug_event_source) {
2885         spapr_ovec_set(spapr->ov5, OV5_HP_EVT);
2886     }
2887 
2888     /* advertise support for HPT resizing */
2889     if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) {
2890         spapr_ovec_set(spapr->ov5, OV5_HPT_RESIZE);
2891     }
2892 
2893     /* advertise support for ibm,dynamic-memory-v2 */
2894     spapr_ovec_set(spapr->ov5, OV5_DRMEM_V2);
2895 
2896     /* advertise XIVE on POWER9 machines */
2897     if (spapr->irq->xive) {
2898         spapr_ovec_set(spapr->ov5, OV5_XIVE_EXPLOIT);
2899     }
2900 
2901     /* init CPUs */
2902     spapr_init_cpus(spapr);
2903 
2904     /* Init numa_assoc_array */
2905     spapr_numa_associativity_init(spapr, machine);
2906 
2907     if ((!kvm_enabled() || kvmppc_has_cap_mmu_radix()) &&
2908         ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, 0,
2909                               spapr->max_compat_pvr)) {
2910         spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_300);
2911         /* KVM and TCG always allow GTSE with radix... */
2912         spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_GTSE);
2913     }
2914     /* ... but not with hash (currently). */
2915 
2916     if (kvm_enabled()) {
2917         /* Enable H_LOGICAL_CI_* so SLOF can talk to in-kernel devices */
2918         kvmppc_enable_logical_ci_hcalls();
2919         kvmppc_enable_set_mode_hcall();
2920 
2921         /* H_CLEAR_MOD/_REF are mandatory in PAPR, but off by default */
2922         kvmppc_enable_clear_ref_mod_hcalls();
2923 
2924         /* Enable H_PAGE_INIT */
2925         kvmppc_enable_h_page_init();
2926     }
2927 
2928     /* map RAM */
2929     memory_region_add_subregion(sysmem, 0, machine->ram);
2930 
2931     /* initialize hotplug memory address space */
2932     if (machine->ram_size < machine->maxram_size) {
2933         ram_addr_t device_mem_size = machine->maxram_size - machine->ram_size;
2934         hwaddr device_mem_base;
2935 
2936         /*
2937          * Limit the number of hotpluggable memory slots to half the number
2938          * slots that KVM supports, leaving the other half for PCI and other
2939          * devices. However ensure that number of slots doesn't drop below 32.
2940          */
2941         int max_memslots = kvm_enabled() ? kvm_get_max_memslots() / 2 :
2942                            SPAPR_MAX_RAM_SLOTS;
2943 
2944         if (max_memslots < SPAPR_MAX_RAM_SLOTS) {
2945             max_memslots = SPAPR_MAX_RAM_SLOTS;
2946         }
2947         if (machine->ram_slots > max_memslots) {
2948             error_report("Specified number of memory slots %"
2949                          PRIu64" exceeds max supported %d",
2950                          machine->ram_slots, max_memslots);
2951             exit(1);
2952         }
2953 
2954         device_mem_base = ROUND_UP(machine->ram_size, SPAPR_DEVICE_MEM_ALIGN);
2955         machine_memory_devices_init(machine, device_mem_base, device_mem_size);
2956     }
2957 
2958     if (smc->dr_lmb_enabled) {
2959         spapr_create_lmb_dr_connectors(spapr);
2960     }
2961 
2962     if (mc->nvdimm_supported) {
2963         spapr_create_nvdimm_dr_connectors(spapr);
2964     }
2965 
2966     /* Set up RTAS event infrastructure */
2967     spapr_events_init(spapr);
2968 
2969     /* Set up the RTC RTAS interfaces */
2970     spapr_rtc_create(spapr);
2971 
2972     /* Set up VIO bus */
2973     spapr->vio_bus = spapr_vio_bus_init();
2974 
2975     for (i = 0; serial_hd(i); i++) {
2976         spapr_vty_create(spapr->vio_bus, serial_hd(i));
2977     }
2978 
2979     /* We always have at least the nvram device on VIO */
2980     spapr_create_nvram(spapr);
2981 
2982     /*
2983      * Set up hotplug / dynamic-reconfiguration connectors. Top-level
2984      * connectors (described in the root DT node's "ibm,drc-types" property)
2985      * are pre-initialized here. Additional child connectors (such as
2986      * connectors for a PHB's PCI slots) are added as needed during their
2987      * parent's realization.
2988      */
2989     if (smc->dr_phb_enabled) {
2990         for (i = 0; i < SPAPR_MAX_PHBS; i++) {
2991             spapr_dr_connector_new(OBJECT(machine), TYPE_SPAPR_DRC_PHB, i);
2992         }
2993     }
2994 
2995     /* Set up PCI */
2996     spapr_pci_rtas_init();
2997 
2998     phb = spapr_create_default_phb();
2999 
3000     while ((nd = qemu_find_nic_info("spapr-vlan", true, "ibmveth"))) {
3001         spapr_vlan_create(spapr->vio_bus, nd);
3002     }
3003 
3004     pci_init_nic_devices(phb->bus, NULL);
3005 
3006     for (i = 0; i <= drive_get_max_bus(IF_SCSI); i++) {
3007         spapr_vscsi_create(spapr->vio_bus);
3008     }
3009 
3010     /* Graphics */
3011     has_vga = spapr_vga_init(phb->bus, &error_fatal);
3012     if (has_vga) {
3013         spapr->want_stdout_path = !machine->enable_graphics;
3014         machine->usb |= defaults_enabled() && !machine->usb_disabled;
3015     } else {
3016         spapr->want_stdout_path = true;
3017     }
3018 
3019     if (machine->usb) {
3020         if (smc->use_ohci_by_default) {
3021             pci_create_simple(phb->bus, -1, "pci-ohci");
3022         } else {
3023             pci_create_simple(phb->bus, -1, "nec-usb-xhci");
3024         }
3025 
3026         if (has_vga) {
3027             USBBus *usb_bus;
3028 
3029             usb_bus = USB_BUS(object_resolve_type_unambiguous(TYPE_USB_BUS,
3030                                                               &error_abort));
3031             usb_create_simple(usb_bus, "usb-kbd");
3032             usb_create_simple(usb_bus, "usb-mouse");
3033         }
3034     }
3035 
3036     if (kernel_filename) {
3037         uint64_t loaded_addr = 0;
3038 
3039         spapr->kernel_size = load_elf(kernel_filename, NULL,
3040                                       translate_kernel_address, spapr,
3041                                       NULL, &loaded_addr, NULL, NULL, 1,
3042                                       PPC_ELF_MACHINE, 0, 0);
3043         if (spapr->kernel_size == ELF_LOAD_WRONG_ENDIAN) {
3044             spapr->kernel_size = load_elf(kernel_filename, NULL,
3045                                           translate_kernel_address, spapr,
3046                                           NULL, &loaded_addr, NULL, NULL, 0,
3047                                           PPC_ELF_MACHINE, 0, 0);
3048             spapr->kernel_le = spapr->kernel_size > 0;
3049         }
3050         if (spapr->kernel_size < 0) {
3051             error_report("error loading %s: %s", kernel_filename,
3052                          load_elf_strerror(spapr->kernel_size));
3053             exit(1);
3054         }
3055 
3056         if (spapr->kernel_addr != loaded_addr) {
3057             warn_report("spapr: kernel_addr changed from 0x%"PRIx64
3058                         " to 0x%"PRIx64,
3059                         spapr->kernel_addr, loaded_addr);
3060             spapr->kernel_addr = loaded_addr;
3061         }
3062 
3063         /* load initrd */
3064         if (initrd_filename) {
3065             /* Try to locate the initrd in the gap between the kernel
3066              * and the firmware. Add a bit of space just in case
3067              */
3068             spapr->initrd_base = (spapr->kernel_addr + spapr->kernel_size
3069                                   + 0x1ffff) & ~0xffff;
3070             spapr->initrd_size = load_image_targphys(initrd_filename,
3071                                                      spapr->initrd_base,
3072                                                      load_limit
3073                                                      - spapr->initrd_base);
3074             if (spapr->initrd_size < 0) {
3075                 error_report("could not load initial ram disk '%s'",
3076                              initrd_filename);
3077                 exit(1);
3078             }
3079         }
3080     }
3081 
3082     /* FIXME: Should register things through the MachineState's qdev
3083      * interface; this is a legacy from the sPAPREnvironment structure,
3084      * which predated MachineState but had a similar function. */
3085     vmstate_register(NULL, 0, &vmstate_spapr, spapr);
3086     register_savevm_live("spapr/htab", VMSTATE_INSTANCE_ID_ANY, 1,
3087                          &savevm_htab_handlers, spapr);
3088 
3089     qbus_set_hotplug_handler(sysbus_get_default(), OBJECT(machine));
3090 
3091     qemu_register_boot_set(spapr_boot_set, spapr);
3092 
3093     /*
3094      * Nothing needs to be done to resume a suspended guest because
3095      * suspending does not change the machine state, so no need for
3096      * a ->wakeup method.
3097      */
3098     qemu_register_wakeup_support();
3099 
3100     if (kvm_enabled()) {
3101         /* to stop and start vmclock */
3102         qemu_add_vm_change_state_handler(cpu_ppc_clock_vm_state_change,
3103                                          &spapr->tb);
3104 
3105         kvmppc_spapr_enable_inkernel_multitce();
3106     }
3107 
3108     qemu_cond_init(&spapr->fwnmi_machine_check_interlock_cond);
3109     if (spapr->vof) {
3110         spapr->vof->fw_size = fw_size; /* for claim() on itself */
3111         spapr_register_hypercall(KVMPPC_H_VOF_CLIENT, spapr_h_vof_client);
3112     }
3113 
3114     spapr_watchdog_init(spapr);
3115 }
3116 
3117 #define DEFAULT_KVM_TYPE "auto"
3118 static int spapr_kvm_type(MachineState *machine, const char *vm_type)
3119 {
3120     /*
3121      * The use of g_ascii_strcasecmp() for 'hv' and 'pr' is to
3122      * accommodate the 'HV' and 'PR' formats that exist in the
3123      * wild. The 'auto' mode was introduced as lower-case only,
3124      * so there is no need to also check for
3125      * "AUTO".
3126      */
3127     if (!vm_type || !strcmp(vm_type, DEFAULT_KVM_TYPE)) {
3128         return 0;
3129     }
3130 
3131     if (!g_ascii_strcasecmp(vm_type, "hv")) {
3132         return 1;
3133     }
3134 
3135     if (!g_ascii_strcasecmp(vm_type, "pr")) {
3136         return 2;
3137     }
3138 
3139     error_report("Unknown kvm-type specified '%s'", vm_type);
3140     return -1;
3141 }
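     /*
      * The value returned above is passed through to KVM when the VM is
      * created: 0 lets the kernel pick a default, 1 selects KVM-HV and
      * 2 selects KVM-PR. A hypothetical invocation:
      *   qemu-system-ppc64 -machine pseries,accel=kvm,kvm-type=hv ...
      */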
3142 
3143 /*
3144  * Implementation of an interface to adjust firmware path
3145  * for the bootindex property handling.
3146  */
3147 static char *spapr_get_fw_dev_path(FWPathProvider *p, BusState *bus,
3148                                    DeviceState *dev)
3149 {
3150 #define CAST(type, obj, name) \
3151     ((type *)object_dynamic_cast(OBJECT(obj), (name)))
3152     SCSIDevice *d = CAST(SCSIDevice,  dev, TYPE_SCSI_DEVICE);
3153     SpaprPhbState *phb = CAST(SpaprPhbState, dev, TYPE_SPAPR_PCI_HOST_BRIDGE);
3154     VHostSCSICommon *vsc = CAST(VHostSCSICommon, dev, TYPE_VHOST_SCSI_COMMON);
3155     PCIDevice *pcidev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE);
3156 
3157     if (d && bus) {
3158         void *spapr = CAST(void, bus->parent, "spapr-vscsi");
3159         VirtIOSCSI *virtio = CAST(VirtIOSCSI, bus->parent, TYPE_VIRTIO_SCSI);
3160         USBDevice *usb = CAST(USBDevice, bus->parent, TYPE_USB_DEVICE);
3161 
3162         if (spapr) {
3163             /*
3164              * Replace "channel@0/disk@0,0" with "disk@8000000000000000":
3165              * In the top 16 bits of the 64-bit LUN, we use SRP luns of the form
3166              * 0x8000 | (target << 8) | (bus << 5) | lun
3167              * (see the "Logical unit addressing format" table in SAM5)
3168              */
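                 /* E.g. id 1, channel 0, lun 0 gives 0x8100 and hence a
                  * path of "disk@8100000000000000". */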
3169             unsigned id = 0x8000 | (d->id << 8) | (d->channel << 5) | d->lun;
3170             return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
3171                                    (uint64_t)id << 48);
3172         } else if (virtio) {
3173             /*
3174              * We use SRP luns of the form 01000000 | (target << 8) | lun
3175              * in the top 32 bits of the 64-bit LUN
3176              * Note: the quote above is from SLOF and it is wrong;
3177              * the actual binding is:
3178              * swap 0100 or 10 << or 20 << ( target lun-id -- srplun )
3179              */
3180             unsigned id = 0x1000000 | (d->id << 16) | d->lun;
3181             if (d->lun >= 256) {
3182                 /* Use the LUN "flat space addressing method" */
3183                 id |= 0x4000;
3184             }
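                 /* E.g. target 2, LUN 0 gives id 0x1020000, i.e. a path
                  * ending in "@102000000000000" after the 32-bit shift. */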
3185             return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
3186                                    (uint64_t)id << 32);
3187         } else if (usb) {
3188             /*
3189              * We use SRP luns of the form 01000000 | (usb-port << 16) | lun
3190              * in the top 32 bits of the 64-bit LUN
3191              */
3192             unsigned usb_port = atoi(usb->port->path);
3193             unsigned id = 0x1000000 | (usb_port << 16) | d->lun;
3194             return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
3195                                    (uint64_t)id << 32);
3196         }
3197     }
3198 
3199     /*
3200      * SLOF probes the USB devices, and if it recognizes that the device is a
3201      * storage device, it changes its name to "storage" instead of "usb-host",
3202      * and additionally adds a child node for the SCSI LUN, so the correct
3203      * boot path in SLOF is something like ".../storage@1/disk@xxx" instead.
3204      */
3205     if (strcmp("usb-host", qdev_fw_name(dev)) == 0) {
3206         USBDevice *usbdev = CAST(USBDevice, dev, TYPE_USB_DEVICE);
3207         if (usb_device_is_scsi_storage(usbdev)) {
3208             return g_strdup_printf("storage@%s/disk", usbdev->port->path);
3209         }
3210     }
3211 
3212     if (phb) {
3213         /* Replace "pci" with "pci@800000020000000" */
3214         return g_strdup_printf("pci@%"PRIX64, phb->buid);
3215     }
3216 
3217     if (vsc) {
3218         /* Same logic as virtio above */
3219         unsigned id = 0x1000000 | (vsc->target << 16) | vsc->lun;
3220         return g_strdup_printf("disk@%"PRIX64, (uint64_t)id << 32);
3221     }
3222 
3223     if (g_str_equal("pci-bridge", qdev_fw_name(dev))) {
3224         /* SLOF uses "pci" instead of "pci-bridge" for PCI bridges */
3225         PCIDevice *pdev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE);
3226         return g_strdup_printf("pci@%x", PCI_SLOT(pdev->devfn));
3227     }
3228 
3229     if (pcidev) {
3230         return spapr_pci_fw_dev_name(pcidev);
3231     }
3232 
3233     return NULL;
3234 }
3235 
3236 static char *spapr_get_kvm_type(Object *obj, Error **errp)
3237 {
3238     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3239 
3240     return g_strdup(spapr->kvm_type);
3241 }
3242 
3243 static void spapr_set_kvm_type(Object *obj, const char *value, Error **errp)
3244 {
3245     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3246 
3247     g_free(spapr->kvm_type);
3248     spapr->kvm_type = g_strdup(value);
3249 }
3250 
3251 static bool spapr_get_modern_hotplug_events(Object *obj, Error **errp)
3252 {
3253     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3254 
3255     return spapr->use_hotplug_event_source;
3256 }
3257 
3258 static void spapr_set_modern_hotplug_events(Object *obj, bool value,
3259                                             Error **errp)
3260 {
3261     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3262 
3263     spapr->use_hotplug_event_source = value;
3264 }
3265 
3266 static bool spapr_get_msix_emulation(Object *obj, Error **errp)
3267 {
3268     return true;
3269 }
3270 
3271 static char *spapr_get_resize_hpt(Object *obj, Error **errp)
3272 {
3273     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3274 
3275     switch (spapr->resize_hpt) {
3276     case SPAPR_RESIZE_HPT_DEFAULT:
3277         return g_strdup("default");
3278     case SPAPR_RESIZE_HPT_DISABLED:
3279         return g_strdup("disabled");
3280     case SPAPR_RESIZE_HPT_ENABLED:
3281         return g_strdup("enabled");
3282     case SPAPR_RESIZE_HPT_REQUIRED:
3283         return g_strdup("required");
3284     }
3285     g_assert_not_reached();
3286 }
3287 
3288 static void spapr_set_resize_hpt(Object *obj, const char *value, Error **errp)
3289 {
3290     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3291 
3292     if (strcmp(value, "default") == 0) {
3293         spapr->resize_hpt = SPAPR_RESIZE_HPT_DEFAULT;
3294     } else if (strcmp(value, "disabled") == 0) {
3295         spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED;
3296     } else if (strcmp(value, "enabled") == 0) {
3297         spapr->resize_hpt = SPAPR_RESIZE_HPT_ENABLED;
3298     } else if (strcmp(value, "required") == 0) {
3299         spapr->resize_hpt = SPAPR_RESIZE_HPT_REQUIRED;
3300     } else {
3301         error_setg(errp, "Bad value for \"resize-hpt\" property");
3302     }
3303 }
3304 
3305 static bool spapr_get_vof(Object *obj, Error **errp)
3306 {
3307     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3308 
3309     return spapr->vof != NULL;
3310 }
3311 
3312 static void spapr_set_vof(Object *obj, bool value, Error **errp)
3313 {
3314     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3315 
3316     if (spapr->vof) {
3317         vof_cleanup(spapr->vof);
3318         g_free(spapr->vof);
3319         spapr->vof = NULL;
3320     }
3321     if (!value) {
3322         return;
3323     }
3324     spapr->vof = g_malloc0(sizeof(*spapr->vof));
3325 }
3326 
3327 static char *spapr_get_ic_mode(Object *obj, Error **errp)
3328 {
3329     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3330 
3331     if (spapr->irq == &spapr_irq_xics_legacy) {
3332         return g_strdup("legacy");
3333     } else if (spapr->irq == &spapr_irq_xics) {
3334         return g_strdup("xics");
3335     } else if (spapr->irq == &spapr_irq_xive) {
3336         return g_strdup("xive");
3337     } else if (spapr->irq == &spapr_irq_dual) {
3338         return g_strdup("dual");
3339     }
3340     g_assert_not_reached();
3341 }
3342 
3343 static void spapr_set_ic_mode(Object *obj, const char *value, Error **errp)
3344 {
3345     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3346 
3347     if (SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
3348         error_setg(errp, "This machine only uses the legacy XICS backend, don't pass ic-mode");
3349         return;
3350     }
3351 
3352     /* The legacy IRQ backend cannot be set */
3353     if (strcmp(value, "xics") == 0) {
3354         spapr->irq = &spapr_irq_xics;
3355     } else if (strcmp(value, "xive") == 0) {
3356         spapr->irq = &spapr_irq_xive;
3357     } else if (strcmp(value, "dual") == 0) {
3358         spapr->irq = &spapr_irq_dual;
3359     } else {
3360         error_setg(errp, "Bad value for \"ic-mode\" property");
3361     }
3362 }
3363 
3364 static char *spapr_get_host_model(Object *obj, Error **errp)
3365 {
3366     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3367 
3368     return g_strdup(spapr->host_model);
3369 }
3370 
3371 static void spapr_set_host_model(Object *obj, const char *value, Error **errp)
3372 {
3373     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3374 
3375     g_free(spapr->host_model);
3376     spapr->host_model = g_strdup(value);
3377 }
3378 
3379 static char *spapr_get_host_serial(Object *obj, Error **errp)
3380 {
3381     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3382 
3383     return g_strdup(spapr->host_serial);
3384 }
3385 
3386 static void spapr_set_host_serial(Object *obj, const char *value, Error **errp)
3387 {
3388     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3389 
3390     g_free(spapr->host_serial);
3391     spapr->host_serial = g_strdup(value);
3392 }
3393 
3394 static void spapr_instance_init(Object *obj)
3395 {
3396     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3397     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
3398     MachineState *ms = MACHINE(spapr);
3399     MachineClass *mc = MACHINE_GET_CLASS(ms);
3400 
3401     /*
3402      * NVDIMM support went live in 5.1 without considering that, in
3403      * other archs, the user needs to enable NVDIMM support with the
3404      * 'nvdimm' machine option and the default behavior is NVDIMM
3405      * support disabled. It is too late to roll back to the standard
3406      * behavior without breaking 5.1 guests.
3407      */
3408     if (mc->nvdimm_supported) {
3409         ms->nvdimms_state->is_enabled = true;
3410     }
3411 
3412     spapr->htab_fd = -1;
3413     spapr->use_hotplug_event_source = true;
3414     spapr->kvm_type = g_strdup(DEFAULT_KVM_TYPE);
3415     object_property_add_str(obj, "kvm-type",
3416                             spapr_get_kvm_type, spapr_set_kvm_type);
3417     object_property_set_description(obj, "kvm-type",
3418                                     "Specifies the KVM virtualization mode (auto,"
3419                                     " hv, pr). Defaults to 'auto'. This mode will use"
3420                                     " any available KVM module loaded in the host,"
3421                                     " where kvm_hv takes precedence if both kvm_hv and"
3422                                     " kvm_pr are loaded.");
3423     object_property_add_bool(obj, "modern-hotplug-events",
3424                             spapr_get_modern_hotplug_events,
3425                             spapr_set_modern_hotplug_events);
3426     object_property_set_description(obj, "modern-hotplug-events",
3427                                     "Use dedicated hotplug event mechanism in"
3428                                     " place of standard EPOW events when possible"
3429                                     " (required for memory hot-unplug support)");
3430     ppc_compat_add_property(obj, "max-cpu-compat", &spapr->max_compat_pvr,
3431                             "Maximum permitted CPU compatibility mode");
3432 
3433     object_property_add_str(obj, "resize-hpt",
3434                             spapr_get_resize_hpt, spapr_set_resize_hpt);
3435     object_property_set_description(obj, "resize-hpt",
3436                                     "Resizing of the Hash Page Table (enabled, disabled, required)");
3437     object_property_add_uint32_ptr(obj, "vsmt",
3438                                    &spapr->vsmt, OBJ_PROP_FLAG_READWRITE);
3439     object_property_set_description(obj, "vsmt",
3440                                     "Virtual SMT: KVM behaves as if this were"
3441                                     " the host's SMT mode");
3442 
3443     object_property_add_bool(obj, "vfio-no-msix-emulation",
3444                              spapr_get_msix_emulation, NULL);
3445 
3446     object_property_add_uint64_ptr(obj, "kernel-addr",
3447                                    &spapr->kernel_addr, OBJ_PROP_FLAG_READWRITE);
3448     object_property_set_description(obj, "kernel-addr",
3449                                     stringify(KERNEL_LOAD_ADDR)
3450                                     " for -kernel is the default");
3451     spapr->kernel_addr = KERNEL_LOAD_ADDR;
3452 
3453     object_property_add_bool(obj, "x-vof", spapr_get_vof, spapr_set_vof);
3454     object_property_set_description(obj, "x-vof",
3455                                     "Enable Virtual Open Firmware (experimental)");
3456 
3457     /* The machine class defines the default interrupt controller mode */
3458     spapr->irq = smc->irq;
3459     object_property_add_str(obj, "ic-mode", spapr_get_ic_mode,
3460                             spapr_set_ic_mode);
3461     object_property_set_description(obj, "ic-mode",
3462                  "Specifies the interrupt controller mode (xics, xive, dual)");
3463 
3464     object_property_add_str(obj, "host-model",
3465         spapr_get_host_model, spapr_set_host_model);
3466     object_property_set_description(obj, "host-model",
3467         "Host model to advertise in guest device tree");
3468     object_property_add_str(obj, "host-serial",
3469         spapr_get_host_serial, spapr_set_host_serial);
3470     object_property_set_description(obj, "host-serial",
3471         "Host serial number to advertise in guest device tree");
3472 }
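     /*
      * The properties above are all set via -machine; a hypothetical
      * example:
      *   qemu-system-ppc64 -machine pseries,ic-mode=xive,\
      *                     resize-hpt=enabled,modern-hotplug-events=on
      */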
3473 
3474 static void spapr_machine_finalizefn(Object *obj)
3475 {
3476     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3477 
3478     g_free(spapr->kvm_type);
3479 }
3480 
3481 void spapr_do_system_reset_on_cpu(CPUState *cs, run_on_cpu_data arg)
3482 {
3483     SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
3484     PowerPCCPU *cpu = POWERPC_CPU(cs);
3485     CPUPPCState *env = &cpu->env;
3486 
3487     cpu_synchronize_state(cs);
3488     /* If FWNMI is inactive, addr will be -1, which will deliver to 0x100 */
3489     if (spapr->fwnmi_system_reset_addr != -1) {
3490         uint64_t rtas_addr, addr;
3491 
3492         /* get rtas addr from fdt */
3493         rtas_addr = spapr_get_rtas_addr();
3494         if (!rtas_addr) {
3495             qemu_system_guest_panicked(NULL);
3496             return;
3497         }
3498 
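             /*
              * Each CPU gets a 16-byte slot after the RTAS error log:
              * the original r3 followed by a zero word. r3 is then
              * pointed at this slot before entering the FWNMI handler.
              */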
3499         addr = rtas_addr + RTAS_ERROR_LOG_MAX + cs->cpu_index * sizeof(uint64_t) * 2;
3500         stq_be_phys(&address_space_memory, addr, env->gpr[3]);
3501         stq_be_phys(&address_space_memory, addr + sizeof(uint64_t), 0);
3502         env->gpr[3] = addr;
3503     }
3504     ppc_cpu_do_system_reset(cs);
3505     if (spapr->fwnmi_system_reset_addr != -1) {
3506         env->nip = spapr->fwnmi_system_reset_addr;
3507     }
3508 }
3509 
3510 static void spapr_nmi(NMIState *n, int cpu_index, Error **errp)
3511 {
3512     CPUState *cs;
3513 
3514     CPU_FOREACH(cs) {
3515         async_run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL);
3516     }
3517 }
3518 
3519 int spapr_lmb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
3520                           void *fdt, int *fdt_start_offset, Error **errp)
3521 {
3522     uint64_t addr;
3523     uint32_t node;
3524 
3525     addr = spapr_drc_index(drc) * SPAPR_MEMORY_BLOCK_SIZE;
3526     node = object_property_get_uint(OBJECT(drc->dev), PC_DIMM_NODE_PROP,
3527                                     &error_abort);
3528     *fdt_start_offset = spapr_dt_memory_node(spapr, fdt, node, addr,
3529                                              SPAPR_MEMORY_BLOCK_SIZE);
3530     return 0;
3531 }
3532 
3533 static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
3534                            bool dedicated_hp_event_source)
3535 {
3536     SpaprDrc *drc;
3537     uint32_t nr_lmbs = size/SPAPR_MEMORY_BLOCK_SIZE;
3538     int i;
3539     uint64_t addr = addr_start;
3540     bool hotplugged = spapr_drc_hotplugged(dev);
3541 
3542     for (i = 0; i < nr_lmbs; i++) {
3543         drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3544                               addr / SPAPR_MEMORY_BLOCK_SIZE);
3545         g_assert(drc);
3546 
3547         /*
3548          * memory_device_get_free_addr() provided a range of free addresses
3549          * that doesn't overlap with any existing mapping at pre-plug. The
3550          * corresponding LMB DRCs are thus assumed to be all attachable.
3551          */
3552         spapr_drc_attach(drc, dev);
3553         if (!hotplugged) {
3554             spapr_drc_reset(drc);
3555         }
3556         addr += SPAPR_MEMORY_BLOCK_SIZE;
3557     }
3558     /* Send a hotplug notification to the guest only in the case
3559      * of hotplugged memory.
3560      */
3561     if (hotplugged) {
3562         if (dedicated_hp_event_source) {
3563             drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3564                                   addr_start / SPAPR_MEMORY_BLOCK_SIZE);
3565             g_assert(drc);
3566             spapr_hotplug_req_add_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
3567                                                    nr_lmbs,
3568                                                    spapr_drc_index(drc));
3569         } else {
3570             spapr_hotplug_req_add_by_count(SPAPR_DR_CONNECTOR_TYPE_LMB,
3571                                            nr_lmbs);
3572         }
3573     }
3574 }
3575 
3576 static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
3577 {
3578     SpaprMachineState *ms = SPAPR_MACHINE(hotplug_dev);
3579     PCDIMMDevice *dimm = PC_DIMM(dev);
3580     uint64_t size, addr;
3581     int64_t slot;
3582     bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM);
3583 
3584     size = memory_device_get_region_size(MEMORY_DEVICE(dev), &error_abort);
3585 
3586     pc_dimm_plug(dimm, MACHINE(ms));
3587 
3588     if (!is_nvdimm) {
3589         addr = object_property_get_uint(OBJECT(dimm),
3590                                         PC_DIMM_ADDR_PROP, &error_abort);
3591         spapr_add_lmbs(dev, addr, size,
3592                        spapr_ovec_test(ms->ov5_cas, OV5_HP_EVT));
3593     } else {
3594         slot = object_property_get_int(OBJECT(dimm),
3595                                        PC_DIMM_SLOT_PROP, &error_abort);
3596         /* We should have a valid slot number at this point */
3597         g_assert(slot >= 0);
3598         spapr_add_nvdimm(dev, slot);
3599     }
3600 }
3601 
3602 static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
3603                                   Error **errp)
3604 {
3605     const SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(hotplug_dev);
3606     SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
3607     bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM);
3608     PCDIMMDevice *dimm = PC_DIMM(dev);
3609     Error *local_err = NULL;
3610     uint64_t size;
3611     Object *memdev;
3612     hwaddr pagesize;
3613 
3614     if (!smc->dr_lmb_enabled) {
3615         error_setg(errp, "Memory hotplug not supported for this machine");
3616         return;
3617     }
3618 
3619     size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &local_err);
3620     if (local_err) {
3621         error_propagate(errp, local_err);
3622         return;
3623     }
3624 
3625     if (is_nvdimm) {
3626         if (!spapr_nvdimm_validate(hotplug_dev, NVDIMM(dev), size, errp)) {
3627             return;
3628         }
3629     } else if (size % SPAPR_MEMORY_BLOCK_SIZE) {
3630         error_setg(errp, "Hotplugged memory size must be a multiple of "
3631                    "%" PRIu64 " MB", SPAPR_MEMORY_BLOCK_SIZE / MiB);
3632         return;
3633     }
3634 
3635     memdev = object_property_get_link(OBJECT(dimm), PC_DIMM_MEMDEV_PROP,
3636                                       &error_abort);
3637     pagesize = host_memory_backend_pagesize(MEMORY_BACKEND(memdev));
3638     if (!spapr_check_pagesize(spapr, pagesize, errp)) {
3639         return;
3640     }
3641 
3642     pc_dimm_pre_plug(dimm, MACHINE(hotplug_dev), NULL, errp);
3643 }
3644 
3645 struct SpaprDimmState {
3646     PCDIMMDevice *dimm;
3647     uint32_t nr_lmbs;
3648     QTAILQ_ENTRY(SpaprDimmState) next;
3649 };
3650 
3651 static SpaprDimmState *spapr_pending_dimm_unplugs_find(SpaprMachineState *s,
3652                                                        PCDIMMDevice *dimm)
3653 {
3654     SpaprDimmState *dimm_state = NULL;
3655 
3656     QTAILQ_FOREACH(dimm_state, &s->pending_dimm_unplugs, next) {
3657         if (dimm_state->dimm == dimm) {
3658             break;
3659         }
3660     }
3661     return dimm_state;
3662 }
3663 
3664 static SpaprDimmState *spapr_pending_dimm_unplugs_add(SpaprMachineState *spapr,
3665                                                       uint32_t nr_lmbs,
3666                                                       PCDIMMDevice *dimm)
3667 {
3668     SpaprDimmState *ds = NULL;
3669 
3670     /*
3671      * If this request is for a DIMM whose removal had failed earlier
3672      * (due to the guest's refusal to remove the LMBs), the DIMM would
3673      * already be in the pending_dimm_unplugs list. In that case, don't
3674      * add it again.
3675      */
3676     ds = spapr_pending_dimm_unplugs_find(spapr, dimm);
3677     if (!ds) {
3678         ds = g_new0(SpaprDimmState, 1);
3679         ds->nr_lmbs = nr_lmbs;
3680         ds->dimm = dimm;
3681         QTAILQ_INSERT_HEAD(&spapr->pending_dimm_unplugs, ds, next);
3682     }
3683     return ds;
3684 }
3685 
3686 static void spapr_pending_dimm_unplugs_remove(SpaprMachineState *spapr,
3687                                               SpaprDimmState *dimm_state)
3688 {
3689     QTAILQ_REMOVE(&spapr->pending_dimm_unplugs, dimm_state, next);
3690     g_free(dimm_state);
3691 }
3692 
3693 static SpaprDimmState *spapr_recover_pending_dimm_state(SpaprMachineState *ms,
3694                                                         PCDIMMDevice *dimm)
3695 {
3696     SpaprDrc *drc;
3697     uint64_t size = memory_device_get_region_size(MEMORY_DEVICE(dimm),
3698                                                   &error_abort);
3699     uint32_t nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
3700     uint32_t avail_lmbs = 0;
3701     uint64_t addr_start, addr;
3702     int i;
3703 
3704     addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP,
3705                                           &error_abort);
3706 
3707     addr = addr_start;
3708     for (i = 0; i < nr_lmbs; i++) {
3709         drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3710                               addr / SPAPR_MEMORY_BLOCK_SIZE);
3711         g_assert(drc);
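             /* LMBs already released by the guest were detached from
              * their DRC (drc->dev cleared); only LMBs still attached
              * count as pending. */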
3712         if (drc->dev) {
3713             avail_lmbs++;
3714         }
3715         addr += SPAPR_MEMORY_BLOCK_SIZE;
3716     }
3717 
3718     return spapr_pending_dimm_unplugs_add(ms, avail_lmbs, dimm);
3719 }
3720 
3721 void spapr_memory_unplug_rollback(SpaprMachineState *spapr, DeviceState *dev)
3722 {
3723     SpaprDimmState *ds;
3724     PCDIMMDevice *dimm;
3725     SpaprDrc *drc;
3726     uint32_t nr_lmbs;
3727     uint64_t size, addr_start, addr;
3728     g_autofree char *qapi_error = NULL;
3729     int i;
3730 
3731     if (!dev) {
3732         return;
3733     }
3734 
3735     dimm = PC_DIMM(dev);
3736     ds = spapr_pending_dimm_unplugs_find(spapr, dimm);
3737 
3738     /*
3739      * 'ds == NULL' would mean that the DIMM doesn't have a pending
3740      * unplug state, but one of its DRCs is marked as unplug_requested.
3741      * This is bad and weird enough to g_assert() out.
3742      */
3743     g_assert(ds);
3744 
3745     spapr_pending_dimm_unplugs_remove(spapr, ds);
3746 
3747     size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &error_abort);
3748     nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
3749 
3750     addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP,
3751                                           &error_abort);
3752 
3753     addr = addr_start;
3754     for (i = 0; i < nr_lmbs; i++) {
3755         drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3756                               addr / SPAPR_MEMORY_BLOCK_SIZE);
3757         g_assert(drc);
3758 
3759         drc->unplug_requested = false;
3760         addr += SPAPR_MEMORY_BLOCK_SIZE;
3761     }
3762 
3763     /*
3764      * Tell QAPI that something happened and the memory
3765      * hotunplug wasn't successful. Keep sending
3766      * MEM_UNPLUG_ERROR even while sending
3767      * DEVICE_UNPLUG_GUEST_ERROR until the deprecation of
3768      * MEM_UNPLUG_ERROR is due.
3769      */
3770     qapi_error = g_strdup_printf("Memory hotunplug rejected by the guest "
3771                                  "for device %s", dev->id);
3772 
3773     qapi_event_send_mem_unplug_error(dev->id ? : "", qapi_error);
3774 
3775     qapi_event_send_device_unplug_guest_error(dev->id,
3776                                               dev->canonical_path);
3777 }
3778 
3779 /* Callback to be called during DRC release. */
3780 void spapr_lmb_release(DeviceState *dev)
3781 {
3782     HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev);
3783     SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_ctrl);
3784     SpaprDimmState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev));
3785 
3786     /* This information will get lost if a migration occurs
3787      * during the unplug process. In this case recover it. */
3788     if (ds == NULL) {
3789         ds = spapr_recover_pending_dimm_state(spapr, PC_DIMM(dev));
3790         g_assert(ds);
3791         /* The DRC being examined by the caller at least must be counted */
3792         g_assert(ds->nr_lmbs);
3793     }
3794 
3795     if (--ds->nr_lmbs) {
3796         return;
3797     }
3798 
3799     /*
3800      * Now that all the LMBs have been removed by the guest, call the
3801      * unplug handler chain. This can never fail.
3802      */
3803     hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
3804     object_unparent(OBJECT(dev));
3805 }
3806 
3807 static void spapr_memory_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
3808 {
3809     SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
3810     SpaprDimmState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev));
3811 
3812     /* We really shouldn't get this far without anything to unplug */
3813     g_assert(ds);
3814 
3815     pc_dimm_unplug(PC_DIMM(dev), MACHINE(hotplug_dev));
3816     qdev_unrealize(dev);
3817     spapr_pending_dimm_unplugs_remove(spapr, ds);
3818 }
3819 
3820 static void spapr_memory_unplug_request(HotplugHandler *hotplug_dev,
3821                                         DeviceState *dev, Error **errp)
3822 {
3823     SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
3824     PCDIMMDevice *dimm = PC_DIMM(dev);
3825     uint32_t nr_lmbs;
3826     uint64_t size, addr_start, addr;
3827     int i;
3828     SpaprDrc *drc;
3829 
3830     if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) {
3831         error_setg(errp, "nvdimm device hot unplug is not supported yet");
3832         return;
3833     }
3834 
3835     size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &error_abort);
3836     nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
3837 
3838     addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP,
3839                                           &error_abort);
3840 
3841     /*
3842      * An existing pending dimm state for this DIMM means that there is an
3843      * unplug operation in progress, waiting for the spapr_lmb_release
3844      * callback to complete the job (BQL can't cover that far). In this case,
3845      * bail out to avoid detaching DRCs that were already released.
3846      */
3847     if (spapr_pending_dimm_unplugs_find(spapr, dimm)) {
3848         error_setg(errp, "Memory unplug already in progress for device %s",
3849                    dev->id);
3850         return;
3851     }
3852 
3853     spapr_pending_dimm_unplugs_add(spapr, nr_lmbs, dimm);
3854 
3855     addr = addr_start;
3856     for (i = 0; i < nr_lmbs; i++) {
3857         drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3858                               addr / SPAPR_MEMORY_BLOCK_SIZE);
3859         g_assert(drc);
3860 
3861         spapr_drc_unplug_request(drc);
3862         addr += SPAPR_MEMORY_BLOCK_SIZE;
3863     }
3864 
3865     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3866                           addr_start / SPAPR_MEMORY_BLOCK_SIZE);
3867     spapr_hotplug_req_remove_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
3868                                               nr_lmbs, spapr_drc_index(drc));
3869 }
3870 
3871 /* Callback to be called during DRC release. */
3872 void spapr_core_release(DeviceState *dev)
3873 {
3874     HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev);
3875 
3876     /* Call the unplug handler chain. This can never fail. */
3877     hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
3878     object_unparent(OBJECT(dev));
3879 }
3880 
3881 static void spapr_core_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
3882 {
3883     MachineState *ms = MACHINE(hotplug_dev);
3884     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(ms);
3885     CPUCore *cc = CPU_CORE(dev);
3886     CPUArchId *core_slot = spapr_find_cpu_slot(ms, cc->core_id, NULL);
3887 
3888     if (smc->pre_2_10_has_unused_icps) {
3889         SpaprCpuCore *sc = SPAPR_CPU_CORE(OBJECT(dev));
3890         int i;
3891 
3892         for (i = 0; i < cc->nr_threads; i++) {
3893             CPUState *cs = CPU(sc->threads[i]);
3894 
3895             pre_2_10_vmstate_register_dummy_icp(cs->cpu_index);
3896         }
3897     }
3898 
3899     assert(core_slot);
3900     core_slot->cpu = NULL;
3901     qdev_unrealize(dev);
3902 }
3903 
3904 static
3905 void spapr_core_unplug_request(HotplugHandler *hotplug_dev, DeviceState *dev,
3906                                Error **errp)
3907 {
3908     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
3909     int index;
3910     SpaprDrc *drc;
3911     CPUCore *cc = CPU_CORE(dev);
3912 
3913     if (!spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index)) {
3914         error_setg(errp, "Unable to find CPU core with core-id: %d",
3915                    cc->core_id);
3916         return;
3917     }
3918     if (index == 0) {
3919         error_setg(errp, "Boot CPU core may not be unplugged");
3920         return;
3921     }
3922 
3923     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU,
3924                           spapr_vcpu_id(spapr, cc->core_id));
3925     g_assert(drc);
3926 
3927     if (!spapr_drc_unplug_requested(drc)) {
3928         spapr_drc_unplug_request(drc);
3929     }
3930 
3931     /*
3932      * spapr_hotplug_req_remove_by_index is left unguarded, out of the
3933      * "!spapr_drc_unplug_requested" check, to allow for multiple IRQ
3934      * pulses removing the same CPU. Otherwise, in a failed hotunplug
3935      * attempt (e.g. the kernel will refuse to remove the last online
3936      * CPU), we will never attempt it again because unplug_requested
3937      * will still be 'true' in that case.
3938      */
3939     spapr_hotplug_req_remove_by_index(drc);
3940 }
3941 
3942 int spapr_core_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
3943                            void *fdt, int *fdt_start_offset, Error **errp)
3944 {
3945     SpaprCpuCore *core = SPAPR_CPU_CORE(drc->dev);
3946     CPUState *cs = CPU(core->threads[0]);
3947     PowerPCCPU *cpu = POWERPC_CPU(cs);
3948     DeviceClass *dc = DEVICE_GET_CLASS(cs);
3949     int id = spapr_get_vcpu_id(cpu);
3950     g_autofree char *nodename = NULL;
3951     int offset;
3952 
3953     nodename = g_strdup_printf("%s@%x", dc->fw_name, id);
3954     offset = fdt_add_subnode(fdt, 0, nodename);
3955 
3956     spapr_dt_cpu(cs, fdt, offset, spapr);
3957 
3958     /*
3959      * spapr_dt_cpu() does not fill the 'name' property in the
3960      * CPU node. The function is called during the boot process, before
3961      * and after CAS, and overwriting the 'name' property written
3962      * by SLOF is not allowed.
3963      *
3964      * Write it manually after spapr_dt_cpu(). This makes hotplugged
3965      * CPUs more consistent with coldplugged ones, which do have
3966      * the 'name' property. The Linux kernel also relies on this
3967      * property to identify CPU nodes.
3968      */
3969     _FDT((fdt_setprop_string(fdt, offset, "name", nodename)));
3970 
3971     *fdt_start_offset = offset;
3972     return 0;
3973 }
3974 
3975 static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
3976 {
3977     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
3978     MachineClass *mc = MACHINE_GET_CLASS(spapr);
3979     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
3980     SpaprCpuCore *core = SPAPR_CPU_CORE(OBJECT(dev));
3981     CPUCore *cc = CPU_CORE(dev);
3982     CPUState *cs;
3983     SpaprDrc *drc;
3984     CPUArchId *core_slot;
3985     int index;
3986     bool hotplugged = spapr_drc_hotplugged(dev);
3987     int i;
3988 
3989     core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
3990     g_assert(core_slot); /* Already checked in spapr_core_pre_plug() */
3991 
3992     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU,
3993                           spapr_vcpu_id(spapr, cc->core_id));
3994 
3995     g_assert(drc || !mc->has_hotpluggable_cpus);
3996 
3997     if (drc) {
3998         /*
3999          * spapr_core_pre_plug() already guarantees that this is a brand
4000          * new core being plugged into a free slot. Nothing should already
4001          * be attached to the corresponding DRC.
4002          */
4003         spapr_drc_attach(drc, dev);
4004 
4005         if (hotplugged) {
4006             /*
4007              * Send hotplug notification interrupt to the guest only
4008              * in case of hotplugged CPUs.
4009              */
4010             spapr_hotplug_req_add_by_index(drc);
4011         } else {
4012             spapr_drc_reset(drc);
4013         }
4014     }
4015 
4016     core_slot->cpu = OBJECT(dev);
4017 
4018     /*
4019      * Set compatibility mode to match the boot CPU, which was either set
4020      * by the machine reset code or by CAS. This really shouldn't fail at
4021      * this point.
4022      */
4023     if (hotplugged) {
4024         for (i = 0; i < cc->nr_threads; i++) {
4025             ppc_set_compat(core->threads[i], POWERPC_CPU(first_cpu)->compat_pvr,
4026                            &error_abort);
4027         }
4028     }
4029 
4030     if (smc->pre_2_10_has_unused_icps) {
4031         for (i = 0; i < cc->nr_threads; i++) {
4032             cs = CPU(core->threads[i]);
4033             pre_2_10_vmstate_unregister_dummy_icp(cs->cpu_index);
4034         }
4035     }
4036 }
4037 
4038 static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
4039                                 Error **errp)
4040 {
4041     MachineState *machine = MACHINE(OBJECT(hotplug_dev));
4042     MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev);
4043     CPUCore *cc = CPU_CORE(dev);
4044     const char *base_core_type = spapr_get_cpu_core_type(machine->cpu_type);
4045     const char *type = object_get_typename(OBJECT(dev));
4046     CPUArchId *core_slot;
4047     int index;
4048     unsigned int smp_threads = machine->smp.threads;
4049 
4050     if (dev->hotplugged && !mc->has_hotpluggable_cpus) {
4051         error_setg(errp, "CPU hotplug not supported for this machine");
4052         return;
4053     }
4054 
4055     if (strcmp(base_core_type, type)) {
4056         error_setg(errp, "CPU core type should be %s", base_core_type);
4057         return;
4058     }
4059 
4060     if (cc->core_id % smp_threads) {
4061         error_setg(errp, "invalid core id %d", cc->core_id);
4062         return;
4063     }
4064 
4065     /*
4066      * In general we should have homogeneous threads-per-core, but old
4067      * (pre hotplug support) machine types allow the last core to have
4068      * reduced threads as a compatibility hack for when we allowed
4069      * total vcpus not a multiple of threads-per-core.
4070      */
4071     if (mc->has_hotpluggable_cpus && (cc->nr_threads != smp_threads)) {
4072         error_setg(errp, "invalid nr-threads %d, must be %d", cc->nr_threads,
4073                    smp_threads);
4074         return;
4075     }
4076 
4077     core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
4078     if (!core_slot) {
4079         error_setg(errp, "core id %d out of range", cc->core_id);
4080         return;
4081     }
4082 
4083     if (core_slot->cpu) {
4084         error_setg(errp, "core %d already populated", cc->core_id);
4085         return;
4086     }
4087 
4088     numa_cpu_pre_plug(core_slot, dev, errp);
4089 }
4090 
4091 int spapr_phb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
4092                           void *fdt, int *fdt_start_offset, Error **errp)
4093 {
4094     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(drc->dev);
4095     int intc_phandle;
4096 
4097     intc_phandle = spapr_irq_get_phandle(spapr, spapr->fdt_blob, errp);
4098     if (intc_phandle <= 0) {
4099         return -1;
4100     }
4101 
4102     if (spapr_dt_phb(spapr, sphb, intc_phandle, fdt, fdt_start_offset)) {
4103         error_setg(errp, "unable to create FDT node for PHB %d", sphb->index);
4104         return -1;
4105     }
4106 
4107     /* Generally SLOF creates these; for hotplug it's up to QEMU */
4108     _FDT(fdt_setprop_string(fdt, *fdt_start_offset, "name", "pci"));
4109 
4110     return 0;
4111 }
4112 
4113 static bool spapr_phb_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
4114                                Error **errp)
4115 {
4116     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
4117     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev);
4118     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
4119     const unsigned windows_supported = spapr_phb_windows_supported(sphb);
4120     SpaprDrc *drc;
4121 
4122     if (dev->hotplugged && !smc->dr_phb_enabled) {
4123         error_setg(errp, "PHB hotplug not supported for this machine");
4124         return false;
4125     }
4126 
4127     if (sphb->index == (uint32_t)-1) {
4128         error_setg(errp, "\"index\" for PAPR PHB is mandatory");
4129         return false;
4130     }
4131 
4132     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index);
4133     if (drc && drc->dev) {
4134         error_setg(errp, "PHB %d already attached", sphb->index);
4135         return false;
4136     }
4137 
4138     /*
4139      * This will check that sphb->index doesn't exceed the maximum number of
4140      * PHBs for the current machine type.
4141      */
4142     return
4143         smc->phb_placement(spapr, sphb->index,
4144                            &sphb->buid, &sphb->io_win_addr,
4145                            &sphb->mem_win_addr, &sphb->mem64_win_addr,
4146                            windows_supported, sphb->dma_liobn,
4147                            errp);
4148 }
4149 
4150 static void spapr_phb_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
4151 {
4152     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
4153     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
4154     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev);
4155     SpaprDrc *drc;
4156     bool hotplugged = spapr_drc_hotplugged(dev);
4157 
4158     if (!smc->dr_phb_enabled) {
4159         return;
4160     }
4161 
4162     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index);
4163     /* hotplug hooks should check it's enabled before getting this far */
4164     assert(drc);
4165 
4166     /* spapr_phb_pre_plug() already checked the DRC is attachable */
4167     spapr_drc_attach(drc, dev);
4168 
4169     if (hotplugged) {
4170         spapr_hotplug_req_add_by_index(drc);
4171     } else {
4172         spapr_drc_reset(drc);
4173     }
4174 }
4175 
4176 void spapr_phb_release(DeviceState *dev)
4177 {
4178     HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev);
4179 
4180     hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
4181     object_unparent(OBJECT(dev));
4182 }
4183 
4184 static void spapr_phb_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
4185 {
4186     qdev_unrealize(dev);
4187 }
4188 
4189 static void spapr_phb_unplug_request(HotplugHandler *hotplug_dev,
4190                                      DeviceState *dev, Error **errp)
4191 {
4192     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev);
4193     SpaprDrc *drc;
4194 
4195     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index);
4196     assert(drc);
4197 
4198     if (!spapr_drc_unplug_requested(drc)) {
4199         spapr_drc_unplug_request(drc);
4200         spapr_hotplug_req_remove_by_index(drc);
4201     } else {
4202         error_setg(errp,
4203                    "PCI Host Bridge unplug already in progress for device %s",
4204                    dev->id);
4205     }
4206 }
4207 
4208 static
4209 bool spapr_tpm_proxy_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
4210                               Error **errp)
4211 {
4212     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
4213 
4214     if (spapr->tpm_proxy != NULL) {
4215         error_setg(errp, "Only one TPM proxy can be specified for this machine");
4216         return false;
4217     }
4218 
4219     return true;
4220 }
4221 
4222 static void spapr_tpm_proxy_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
4223 {
4224     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
4225     SpaprTpmProxy *tpm_proxy = SPAPR_TPM_PROXY(dev);
4226 
4227     /* Already checked in spapr_tpm_proxy_pre_plug() */
4228     g_assert(spapr->tpm_proxy == NULL);
4229 
4230     spapr->tpm_proxy = tpm_proxy;
4231 }
4232 
4233 static void spapr_tpm_proxy_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
4234 {
4235     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
4236 
4237     qdev_unrealize(dev);
4238     object_unparent(OBJECT(dev));
4239     spapr->tpm_proxy = NULL;
4240 }
4241 
4242 static void spapr_machine_device_plug(HotplugHandler *hotplug_dev,
4243                                       DeviceState *dev, Error **errp)
4244 {
4245     if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
4246         spapr_memory_plug(hotplug_dev, dev);
4247     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
4248         spapr_core_plug(hotplug_dev, dev);
4249     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
4250         spapr_phb_plug(hotplug_dev, dev);
4251     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
4252         spapr_tpm_proxy_plug(hotplug_dev, dev);
4253     }
4254 }
4255 
4256 static void spapr_machine_device_unplug(HotplugHandler *hotplug_dev,
4257                                         DeviceState *dev, Error **errp)
4258 {
4259     if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
4260         spapr_memory_unplug(hotplug_dev, dev);
4261     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
4262         spapr_core_unplug(hotplug_dev, dev);
4263     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
4264         spapr_phb_unplug(hotplug_dev, dev);
4265     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
4266         spapr_tpm_proxy_unplug(hotplug_dev, dev);
4267     }
4268 }
4269 
4270 bool spapr_memory_hot_unplug_supported(SpaprMachineState *spapr)
4271 {
4272     return spapr_ovec_test(spapr->ov5_cas, OV5_HP_EVT) ||
4273         /*
4274          * CAS will process all pending unplug requests.
4275          *
4276          * HACK: a guest could theoretically have cleared all bits in OV5,
4277          * but none of the guests we care for do.
4278          */
4279         spapr_ovec_empty(spapr->ov5_cas);
4280 }
4281 
4282 static void spapr_machine_device_unplug_request(HotplugHandler *hotplug_dev,
4283                                                 DeviceState *dev, Error **errp)
4284 {
4285     SpaprMachineState *sms = SPAPR_MACHINE(OBJECT(hotplug_dev));
4286     MachineClass *mc = MACHINE_GET_CLASS(sms);
4287     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4288 
4289     if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
4290         if (spapr_memory_hot_unplug_supported(sms)) {
4291             spapr_memory_unplug_request(hotplug_dev, dev, errp);
4292         } else {
4293             error_setg(errp, "Memory hot unplug not supported for this guest");
4294         }
4295     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
4296         if (!mc->has_hotpluggable_cpus) {
4297             error_setg(errp, "CPU hot unplug not supported on this machine");
4298             return;
4299         }
4300         spapr_core_unplug_request(hotplug_dev, dev, errp);
4301     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
4302         if (!smc->dr_phb_enabled) {
4303             error_setg(errp, "PHB hot unplug not supported on this machine");
4304             return;
4305         }
4306         spapr_phb_unplug_request(hotplug_dev, dev, errp);
4307     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
4308         spapr_tpm_proxy_unplug(hotplug_dev, dev);
4309     }
4310 }
4311 
4312 static void spapr_machine_device_pre_plug(HotplugHandler *hotplug_dev,
4313                                           DeviceState *dev, Error **errp)
4314 {
4315     if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
4316         spapr_memory_pre_plug(hotplug_dev, dev, errp);
4317     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
4318         spapr_core_pre_plug(hotplug_dev, dev, errp);
4319     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
4320         spapr_phb_pre_plug(hotplug_dev, dev, errp);
4321     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
4322         spapr_tpm_proxy_pre_plug(hotplug_dev, dev, errp);
4323     }
4324 }
4325 
4326 static HotplugHandler *spapr_get_hotplug_handler(MachineState *machine,
4327                                                  DeviceState *dev)
4328 {
4329     if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) ||
4330         object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE) ||
4331         object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE) ||
4332         object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
4333         return HOTPLUG_HANDLER(machine);
4334     }
4335     if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
4336         PCIDevice *pcidev = PCI_DEVICE(dev);
4337         PCIBus *root = pci_device_root_bus(pcidev);
4338         SpaprPhbState *phb =
4339             (SpaprPhbState *)object_dynamic_cast(OBJECT(BUS(root)->parent),
4340                                                  TYPE_SPAPR_PCI_HOST_BRIDGE);
4341 
4342         if (phb) {
4343             return HOTPLUG_HANDLER(phb);
4344         }
4345     }
4346     return NULL;
4347 }
4348 
4349 static CpuInstanceProperties
4350 spapr_cpu_index_to_props(MachineState *machine, unsigned cpu_index)
4351 {
4352     CPUArchId *core_slot;
4353     MachineClass *mc = MACHINE_GET_CLASS(machine);
4354 
4355     /* make sure possible_cpu are initialized */
4356     mc->possible_cpu_arch_ids(machine);
4357     /* get CPU core slot containing thread that matches cpu_index */
4358     core_slot = spapr_find_cpu_slot(machine, cpu_index, NULL);
4359     assert(core_slot);
4360     return core_slot->props;
4361 }
4362 
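     /*
      * Cores are spread across NUMA nodes in contiguous blocks; e.g.
      * with smp.cores=2 and two nodes, core index 3 maps to
      * 3 / 2 % 2 = node 1.
      */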
4363 static int64_t spapr_get_default_cpu_node_id(const MachineState *ms, int idx)
4364 {
4365     return idx / ms->smp.cores % ms->numa_state->num_nodes;
4366 }
4367 
4368 static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine)
4369 {
4370     int i;
4371     unsigned int smp_threads = machine->smp.threads;
4372     unsigned int smp_cpus = machine->smp.cpus;
4373     const char *core_type;
4374     int spapr_max_cores = machine->smp.max_cpus / smp_threads;
4375     MachineClass *mc = MACHINE_GET_CLASS(machine);
4376 
4377     if (!mc->has_hotpluggable_cpus) {
4378         spapr_max_cores = QEMU_ALIGN_UP(smp_cpus, smp_threads) / smp_threads;
4379     }
4380     if (machine->possible_cpus) {
4381         assert(machine->possible_cpus->len == spapr_max_cores);
4382         return machine->possible_cpus;
4383     }
4384 
4385     core_type = spapr_get_cpu_core_type(machine->cpu_type);
4386     if (!core_type) {
4387         error_report("Unable to find sPAPR CPU Core definition");
4388         exit(1);
4389     }
4390 
4391     machine->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
4392                              sizeof(CPUArchId) * spapr_max_cores);
4393     machine->possible_cpus->len = spapr_max_cores;
4394     for (i = 0; i < machine->possible_cpus->len; i++) {
4395         int core_id = i * smp_threads;
4396 
4397         machine->possible_cpus->cpus[i].type = core_type;
4398         machine->possible_cpus->cpus[i].vcpus_count = smp_threads;
4399         machine->possible_cpus->cpus[i].arch_id = core_id;
4400         machine->possible_cpus->cpus[i].props.has_core_id = true;
4401         machine->possible_cpus->cpus[i].props.core_id = core_id;
4402     }
4403     return machine->possible_cpus;
4404 }
4405 
4406 static bool spapr_phb_placement(SpaprMachineState *spapr, uint32_t index,
4407                                 uint64_t *buid, hwaddr *pio,
4408                                 hwaddr *mmio32, hwaddr *mmio64,
4409                                 unsigned n_dma, uint32_t *liobns, Error **errp)
4410 {
4411     /*
4412      * New-style PHB window placement.
4413      *
4414      * Goals: Give each PHB a large (1TiB), naturally aligned 64-bit
4415      * MMIO window, in addition to a 2GiB 32-bit MMIO and a 64kiB PIO
4416      * window.
4417      *
4418      * Some guest kernels can't work with MMIO windows above 1<<46
4419      * (64TiB), so we place up to 31 PHBs in the area 32TiB..64TiB
4420      *
4421      * 32TiB..(32TiB+1984kiB) contains the 64kiB PIO windows for each
4422      * PHB stacked together.  (32TiB+2GiB)..(32TiB+64GiB) contains the
4423      * 2GiB 32-bit MMIO windows for each PHB.  Then 33..64TiB has the
4424      * 1TiB 64-bit MMIO windows for each PHB.
4425      */
4426     const uint64_t base_buid = 0x800000020000000ULL;
4427     int i;
4428 
4429     /* Sanity check natural alignments */
4430     QEMU_BUILD_BUG_ON((SPAPR_PCI_BASE % SPAPR_PCI_MEM64_WIN_SIZE) != 0);
4431     QEMU_BUILD_BUG_ON((SPAPR_PCI_LIMIT % SPAPR_PCI_MEM64_WIN_SIZE) != 0);
4432     QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM64_WIN_SIZE % SPAPR_PCI_MEM32_WIN_SIZE) != 0);
4433     QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM32_WIN_SIZE % SPAPR_PCI_IO_WIN_SIZE) != 0);
4434     /* Sanity check bounds */
4435     QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_IO_WIN_SIZE) >
4436                       SPAPR_PCI_MEM32_WIN_SIZE);
4437     QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_MEM32_WIN_SIZE) >
4438                       SPAPR_PCI_MEM64_WIN_SIZE);
4439 
4440     if (index >= SPAPR_MAX_PHBS) {
4441         error_setg(errp, "\"index\" for PAPR PHB is too large (max %llu)",
4442                    SPAPR_MAX_PHBS - 1);
4443         return false;
4444     }
4445 
4446     *buid = base_buid + index;
4447     for (i = 0; i < n_dma; ++i) {
4448         liobns[i] = SPAPR_PCI_LIOBN(index, i);
4449     }
4450 
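         /*
          * Worked example for index 0, given the 32TiB base described
          * above: BUID 0x800000020000000, PIO at 32TiB, 32-bit MMIO at
          * 32TiB + 2GiB, 64-bit MMIO at 33TiB.
          */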
4451     *pio = SPAPR_PCI_BASE + index * SPAPR_PCI_IO_WIN_SIZE;
4452     *mmio32 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM32_WIN_SIZE;
4453     *mmio64 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM64_WIN_SIZE;
4454     return true;
4455 }
4456 
4457 static ICSState *spapr_ics_get(XICSFabric *dev, int irq)
4458 {
4459     SpaprMachineState *spapr = SPAPR_MACHINE(dev);
4460 
4461     return ics_valid_irq(spapr->ics, irq) ? spapr->ics : NULL;
4462 }
4463 
4464 static void spapr_ics_resend(XICSFabric *dev)
4465 {
4466     SpaprMachineState *spapr = SPAPR_MACHINE(dev);
4467 
4468     ics_resend(spapr->ics);
4469 }
4470 
4471 static ICPState *spapr_icp_get(XICSFabric *xi, int vcpu_id)
4472 {
4473     PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);
4474 
4475     return cpu ? spapr_cpu_state(cpu)->icp : NULL;
4476 }
4477 
4478 static void spapr_pic_print_info(InterruptStatsProvider *obj,
4479                                  Monitor *mon)
4480 {
4481     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
4482 
4483     spapr_irq_print_info(spapr, mon);
4484     monitor_printf(mon, "irqchip: %s\n",
4485                    kvm_irqchip_in_kernel() ? "in-kernel" : "emulated");
4486 }
4487 
4488 /*
4489  * This is a XIVE-only operation
4490  */
4491 static int spapr_match_nvt(XiveFabric *xfb, uint8_t format,
4492                            uint8_t nvt_blk, uint32_t nvt_idx,
4493                            bool cam_ignore, uint8_t priority,
4494                            uint32_t logic_serv, XiveTCTXMatch *match)
4495 {
4496     SpaprMachineState *spapr = SPAPR_MACHINE(xfb);
4497     XivePresenter *xptr = XIVE_PRESENTER(spapr->active_intc);
4498     XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr);
4499     int count;
4500 
4501     count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore,
4502                            priority, logic_serv, match);
4503     if (count < 0) {
4504         return count;
4505     }
4506 
4507     /*
4508      * When we implement the save and restore of the thread interrupt
4509      * contexts in the enter/exit CPU handlers of the machine and the
4510      * escalations in QEMU, we should be able to handle non dispatched
4511      * vCPUs.
4512      *
4513      * Until this is done, the sPAPR machine should always find at
4514      * least one matching context.
4515      */
4516     if (count == 0) {
4517         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is not dispatched\n",
4518                       nvt_blk, nvt_idx);
4519     }
4520 
4521     return count;
4522 }
4523 
4524 int spapr_get_vcpu_id(PowerPCCPU *cpu)
4525 {
4526     return cpu->vcpu_id;
4527 }
4528 
4529 bool spapr_set_vcpu_id(PowerPCCPU *cpu, int cpu_index, Error **errp)
4530 {
4531     SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
4532     MachineState *ms = MACHINE(spapr);
4533     int vcpu_id;
4534 
4535     vcpu_id = spapr_vcpu_id(spapr, cpu_index);
4536 
4537     if (kvm_enabled() && !kvm_vcpu_id_is_valid(vcpu_id)) {
4538         error_setg(errp, "Can't create CPU with id %d in KVM", vcpu_id);
4539         error_append_hint(errp, "Adjust the number of cpus to %d "
4540                           "or try to raise the number of threads per core\n",
4541                           vcpu_id * ms->smp.threads / spapr->vsmt);
4542         return false;
4543     }
4544 
4545     cpu->vcpu_id = vcpu_id;
4546     return true;
4547 }
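
/*
 * Editor's sketch of the id spacing the hint above inverts.  The exact
 * mapping lives in spapr_vcpu_id(); the helper below is an assumption
 * for illustration only: cores are spaced spapr->vsmt vcpu_ids apart,
 * with smp.threads of them in use per core, so cpu counts and vcpu_ids
 * convert with a factor of threads / vsmt.
 */
static G_GNUC_UNUSED int spapr_example_vcpu_id(int cpu_index,
                                               int smp_threads, int vsmt)
{
    /* Core number times the VSMT stride, plus the thread within the core */
    return (cpu_index / smp_threads) * vsmt + cpu_index % smp_threads;
}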
4548 
4549 PowerPCCPU *spapr_find_cpu(int vcpu_id)
4550 {
4551     CPUState *cs;
4552 
4553     CPU_FOREACH(cs) {
4554         PowerPCCPU *cpu = POWERPC_CPU(cs);
4555 
4556         if (spapr_get_vcpu_id(cpu) == vcpu_id) {
4557             return cpu;
4558         }
4559     }
4560 
4561     return NULL;
4562 }
4563 
4564 static bool spapr_cpu_in_nested(PowerPCCPU *cpu)
4565 {
4566     SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
4567 
4568     return spapr_cpu->in_nested;
4569 }
4570 
4571 static void spapr_cpu_exec_enter(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
4572 {
4573     SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
4574 
4575     /* These are only called by TCG, KVM maintains dispatch state */
4576 
4577     spapr_cpu->prod = false;
4578     if (spapr_cpu->vpa_addr) {
4579         CPUState *cs = CPU(cpu);
4580         uint32_t dispatch;
4581 
4582         dispatch = ldl_be_phys(cs->as,
4583                                spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER);
4584         dispatch++;
4585         if ((dispatch & 1) != 0) {
4586             qemu_log_mask(LOG_GUEST_ERROR,
4587                           "VPA: incorrect dispatch counter value for "
4588                           "dispatched partition %u, correcting.\n", dispatch);
4589             dispatch++;
4590         }
4591         stl_be_phys(cs->as,
4592                     spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, dispatch);
4593     }
4594 }
4595 
4596 static void spapr_cpu_exec_exit(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
4597 {
4598     SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
4599 
4600     if (spapr_cpu->vpa_addr) {
4601         CPUState *cs = CPU(cpu);
4602         uint32_t dispatch;
4603 
4604         dispatch = ldl_be_phys(cs->as,
4605                                spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER);
4606         dispatch++;
4607         if ((dispatch & 1) != 1) {
4608             qemu_log_mask(LOG_GUEST_ERROR,
4609                           "VPA: incorrect dispatch counter value for "
4610                           "preempted partition %u, correcting.\n", dispatch);
4611             dispatch++;
4612         }
4613         stl_be_phys(cs->as,
4614                     spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, dispatch);
4615     }
4616 }
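
/*
 * Editor's illustration, not a QEMU API: the two handlers above keep the
 * VPA dispatch counter even while a vCPU is dispatched and odd while it
 * is preempted.  A guest-side check of that parity would look like:
 */
static G_GNUC_UNUSED bool vpa_example_vcpu_dispatched(uint32_t dispatch)
{
    /* Even counter: dispatched; odd counter: preempted */
    return (dispatch & 1) == 0;
}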
4617 
4618 static void spapr_machine_class_init(ObjectClass *oc, void *data)
4619 {
4620     MachineClass *mc = MACHINE_CLASS(oc);
4621     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(oc);
4622     FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc);
4623     NMIClass *nc = NMI_CLASS(oc);
4624     HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
4625     PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_CLASS(oc);
4626     XICSFabricClass *xic = XICS_FABRIC_CLASS(oc);
4627     InterruptStatsProviderClass *ispc = INTERRUPT_STATS_PROVIDER_CLASS(oc);
4628     XiveFabricClass *xfc = XIVE_FABRIC_CLASS(oc);
4629     VofMachineIfClass *vmc = VOF_MACHINE_CLASS(oc);
4630 
4631     mc->desc = "pSeries Logical Partition (PAPR compliant)";
4632     mc->ignore_boot_device_suffixes = true;
4633 
4634     /*
4635      * We set up the default / latest behaviour here.  The class_init
4636      * functions for the specific versioned machine types can override
4637      * these details for backwards compatibility.
4638      */
4639     mc->init = spapr_machine_init;
4640     mc->reset = spapr_machine_reset;
4641     mc->block_default_type = IF_SCSI;
4642 
4643     /*
4644      * While KVM determines max cpus in kvm_init() using kvm_max_vcpus(),
4645      * in TCG the limit is restricted by the range of CPU IPIs available.
4646      */
4647     mc->max_cpus = SPAPR_IRQ_NR_IPIS;
4648 
4649     mc->no_parallel = 1;
4650     mc->default_boot_order = "";
4651     mc->default_ram_size = 512 * MiB;
4652     mc->default_ram_id = "ppc_spapr.ram";
4653     mc->default_display = "std";
4654     mc->kvm_type = spapr_kvm_type;
4655     machine_class_allow_dynamic_sysbus_dev(mc, TYPE_SPAPR_PCI_HOST_BRIDGE);
4656     mc->pci_allow_0_address = true;
4657     assert(!mc->get_hotplug_handler);
4658     mc->get_hotplug_handler = spapr_get_hotplug_handler;
4659     hc->pre_plug = spapr_machine_device_pre_plug;
4660     hc->plug = spapr_machine_device_plug;
4661     mc->cpu_index_to_instance_props = spapr_cpu_index_to_props;
4662     mc->get_default_cpu_node_id = spapr_get_default_cpu_node_id;
4663     mc->possible_cpu_arch_ids = spapr_possible_cpu_arch_ids;
4664     hc->unplug_request = spapr_machine_device_unplug_request;
4665     hc->unplug = spapr_machine_device_unplug;
4666 
4667     smc->dr_lmb_enabled = true;
4668     smc->update_dt_enabled = true;
4669     mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power10_v2.0");
4670     mc->has_hotpluggable_cpus = true;
4671     mc->nvdimm_supported = true;
4672     smc->resize_hpt_default = SPAPR_RESIZE_HPT_ENABLED;
4673     fwc->get_dev_path = spapr_get_fw_dev_path;
4674     nc->nmi_monitor_handler = spapr_nmi;
4675     smc->phb_placement = spapr_phb_placement;
4676     vhc->cpu_in_nested = spapr_cpu_in_nested;
4677     vhc->deliver_hv_excp = spapr_exit_nested;
4678     vhc->hypercall = emulate_spapr_hypercall;
4679     vhc->hpt_mask = spapr_hpt_mask;
4680     vhc->map_hptes = spapr_map_hptes;
4681     vhc->unmap_hptes = spapr_unmap_hptes;
4682     vhc->hpte_set_c = spapr_hpte_set_c;
4683     vhc->hpte_set_r = spapr_hpte_set_r;
4684     vhc->get_pate = spapr_get_pate;
4685     vhc->encode_hpt_for_kvm_pr = spapr_encode_hpt_for_kvm_pr;
4686     vhc->cpu_exec_enter = spapr_cpu_exec_enter;
4687     vhc->cpu_exec_exit = spapr_cpu_exec_exit;
4688     xic->ics_get = spapr_ics_get;
4689     xic->ics_resend = spapr_ics_resend;
4690     xic->icp_get = spapr_icp_get;
4691     ispc->print_info = spapr_pic_print_info;
4692     /* Force NUMA node memory size to be a multiple of
4693      * SPAPR_MEMORY_BLOCK_SIZE (256 MiB), since that's the granularity
4694      * at which LMBs are represented and hot-added
4695      */
4696     mc->numa_mem_align_shift = 28;
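    /* Editor's note: 1 << 28 bytes == 256 MiB == SPAPR_MEMORY_BLOCK_SIZE */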
4697     mc->auto_enable_numa = true;
4698 
4699     smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_OFF;
4700     smc->default_caps.caps[SPAPR_CAP_VSX] = SPAPR_CAP_ON;
4701     smc->default_caps.caps[SPAPR_CAP_DFP] = SPAPR_CAP_ON;
4702     smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_WORKAROUND;
4703     smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_WORKAROUND;
4704     smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_WORKAROUND;
4705     smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 16; /* 64 KiB */
4706     smc->default_caps.caps[SPAPR_CAP_NESTED_KVM_HV] = SPAPR_CAP_OFF;
4707     smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_ON;
4708     smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_ON;
4709     smc->default_caps.caps[SPAPR_CAP_FWNMI] = SPAPR_CAP_ON;
4710     smc->default_caps.caps[SPAPR_CAP_RPT_INVALIDATE] = SPAPR_CAP_OFF;
4711 
4712     /*
4713      * This cap specifies whether AIL mode 3 (set via the H_SET_MODE
4714      * hypercall) is supported. The default is modified
4715      * by default_caps_with_cpu().
4716      */
4717     smc->default_caps.caps[SPAPR_CAP_AIL_MODE_3] = SPAPR_CAP_ON;
4718     spapr_caps_add_properties(smc);
4719     smc->irq = &spapr_irq_dual;
4720     smc->dr_phb_enabled = true;
4721     smc->linux_pci_probe = true;
4722     smc->smp_threads_vsmt = true;
4723     smc->nr_xirqs = SPAPR_NR_XIRQS;
4724     xfc->match_nvt = spapr_match_nvt;
4725     vmc->client_architecture_support = spapr_vof_client_architecture_support;
4726     vmc->quiesce = spapr_vof_quiesce;
4727     vmc->setprop = spapr_vof_setprop;
4728 }
4729 
4730 static const TypeInfo spapr_machine_info = {
4731     .name          = TYPE_SPAPR_MACHINE,
4732     .parent        = TYPE_MACHINE,
4733     .abstract      = true,
4734     .instance_size = sizeof(SpaprMachineState),
4735     .instance_init = spapr_instance_init,
4736     .instance_finalize = spapr_machine_finalizefn,
4737     .class_size    = sizeof(SpaprMachineClass),
4738     .class_init    = spapr_machine_class_init,
4739     .interfaces = (InterfaceInfo[]) {
4740         { TYPE_FW_PATH_PROVIDER },
4741         { TYPE_NMI },
4742         { TYPE_HOTPLUG_HANDLER },
4743         { TYPE_PPC_VIRTUAL_HYPERVISOR },
4744         { TYPE_XICS_FABRIC },
4745         { TYPE_INTERRUPT_STATS_PROVIDER },
4746         { TYPE_XIVE_FABRIC },
4747         { TYPE_VOF_MACHINE_IF },
4748         { }
4749     },
4750 };
4751 
4752 static void spapr_machine_latest_class_options(MachineClass *mc)
4753 {
4754     mc->alias = "pseries";
4755     mc->is_default = true;
4756 }
4757 
4758 #define DEFINE_SPAPR_MACHINE(suffix, verstr, latest)                 \
4759     static void spapr_machine_##suffix##_class_init(ObjectClass *oc, \
4760                                                     void *data)      \
4761     {                                                                \
4762         MachineClass *mc = MACHINE_CLASS(oc);                        \
4763         spapr_machine_##suffix##_class_options(mc);                  \
4764         if (latest) {                                                \
4765             spapr_machine_latest_class_options(mc);                  \
4766         }                                                            \
4767     }                                                                \
4768     static const TypeInfo spapr_machine_##suffix##_info = {          \
4769         .name = MACHINE_TYPE_NAME("pseries-" verstr),                \
4770         .parent = TYPE_SPAPR_MACHINE,                                \
4771         .class_init = spapr_machine_##suffix##_class_init,           \
4772     };                                                               \
4773     static void spapr_machine_register_##suffix(void)                \
4774     {                                                                \
4775         type_register(&spapr_machine_##suffix##_info);               \
4776     }                                                                \
4777     type_init(spapr_machine_register_##suffix)
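
/*
 * Editor's illustration of what the macro above expands to, using a
 * hypothetical pseries-9.1 type (no such version exists here, hence
 * the #if 0):
 */
#if 0
static void spapr_machine_9_1_class_init(ObjectClass *oc, void *data)
{
    MachineClass *mc = MACHINE_CLASS(oc);
    spapr_machine_9_1_class_options(mc);
    /* latest == false, so spapr_machine_latest_class_options() is skipped */
}
static const TypeInfo spapr_machine_9_1_info = {
    .name = MACHINE_TYPE_NAME("pseries-9.1"),  /* "pseries-9.1-machine" */
    .parent = TYPE_SPAPR_MACHINE,
    .class_init = spapr_machine_9_1_class_init,
};
static void spapr_machine_register_9_1(void)
{
    type_register(&spapr_machine_9_1_info);
}
type_init(spapr_machine_register_9_1)
#endif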
4778 
4779 /*
4780  * pseries-9.0
4781  */
4782 static void spapr_machine_9_0_class_options(MachineClass *mc)
4783 {
4784     /* Defaults for the latest behaviour are inherited from the base class */
4785 }
4786 
4787 DEFINE_SPAPR_MACHINE(9_0, "9.0", true);
4788 
4789 /*
4790  * pseries-8.2
4791  */
4792 static void spapr_machine_8_2_class_options(MachineClass *mc)
4793 {
4794     spapr_machine_9_0_class_options(mc);
4795     compat_props_add(mc->compat_props, hw_compat_8_2, hw_compat_8_2_len);
4796 }
4797 
4798 DEFINE_SPAPR_MACHINE(8_2, "8.2", false);
4799 
4800 /*
4801  * pseries-8.1
4802  */
4803 static void spapr_machine_8_1_class_options(MachineClass *mc)
4804 {
4805     spapr_machine_8_2_class_options(mc);
4806     compat_props_add(mc->compat_props, hw_compat_8_1, hw_compat_8_1_len);
4807 }
4808 
4809 DEFINE_SPAPR_MACHINE(8_1, "8.1", false);
4810 
4811 /*
4812  * pseries-8.0
4813  */
4814 static void spapr_machine_8_0_class_options(MachineClass *mc)
4815 {
4816     spapr_machine_8_1_class_options(mc);
4817     compat_props_add(mc->compat_props, hw_compat_8_0, hw_compat_8_0_len);
4818 }
4819 
4820 DEFINE_SPAPR_MACHINE(8_0, "8.0", false);
4821 
4822 /*
4823  * pseries-7.2
4824  */
4825 static void spapr_machine_7_2_class_options(MachineClass *mc)
4826 {
4827     spapr_machine_8_0_class_options(mc);
4828     compat_props_add(mc->compat_props, hw_compat_7_2, hw_compat_7_2_len);
4829 }
4830 
4831 DEFINE_SPAPR_MACHINE(7_2, "7.2", false);
4832 
4833 /*
4834  * pseries-7.1
4835  */
4836 static void spapr_machine_7_1_class_options(MachineClass *mc)
4837 {
4838     spapr_machine_7_2_class_options(mc);
4839     compat_props_add(mc->compat_props, hw_compat_7_1, hw_compat_7_1_len);
4840 }
4841 
4842 DEFINE_SPAPR_MACHINE(7_1, "7.1", false);
4843 
4844 /*
4845  * pseries-7.0
4846  */
4847 static void spapr_machine_7_0_class_options(MachineClass *mc)
4848 {
4849     spapr_machine_7_1_class_options(mc);
4850     compat_props_add(mc->compat_props, hw_compat_7_0, hw_compat_7_0_len);
4851 }
4852 
4853 DEFINE_SPAPR_MACHINE(7_0, "7.0", false);
4854 
4855 /*
4856  * pseries-6.2
4857  */
4858 static void spapr_machine_6_2_class_options(MachineClass *mc)
4859 {
4860     spapr_machine_7_0_class_options(mc);
4861     compat_props_add(mc->compat_props, hw_compat_6_2, hw_compat_6_2_len);
4862 }
4863 
4864 DEFINE_SPAPR_MACHINE(6_2, "6.2", false);
4865 
4866 /*
4867  * pseries-6.1
4868  */
4869 static void spapr_machine_6_1_class_options(MachineClass *mc)
4870 {
4871     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4872 
4873     spapr_machine_6_2_class_options(mc);
4874     compat_props_add(mc->compat_props, hw_compat_6_1, hw_compat_6_1_len);
4875     smc->pre_6_2_numa_affinity = true;
4876     mc->smp_props.prefer_sockets = true;
4877 }
4878 
4879 DEFINE_SPAPR_MACHINE(6_1, "6.1", false);
4880 
4881 /*
4882  * pseries-6.0
4883  */
4884 static void spapr_machine_6_0_class_options(MachineClass *mc)
4885 {
4886     spapr_machine_6_1_class_options(mc);
4887     compat_props_add(mc->compat_props, hw_compat_6_0, hw_compat_6_0_len);
4888 }
4889 
4890 DEFINE_SPAPR_MACHINE(6_0, "6.0", false);
4891 
4892 /*
4893  * pseries-5.2
4894  */
4895 static void spapr_machine_5_2_class_options(MachineClass *mc)
4896 {
4897     spapr_machine_6_0_class_options(mc);
4898     compat_props_add(mc->compat_props, hw_compat_5_2, hw_compat_5_2_len);
4899 }
4900 
4901 DEFINE_SPAPR_MACHINE(5_2, "5.2", false);
4902 
4903 /*
4904  * pseries-5.1
4905  */
4906 static void spapr_machine_5_1_class_options(MachineClass *mc)
4907 {
4908     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4909 
4910     spapr_machine_5_2_class_options(mc);
4911     compat_props_add(mc->compat_props, hw_compat_5_1, hw_compat_5_1_len);
4912     smc->pre_5_2_numa_associativity = true;
4913 }
4914 
4915 DEFINE_SPAPR_MACHINE(5_1, "5.1", false);
4916 
4917 /*
4918  * pseries-5.0
4919  */
4920 static void spapr_machine_5_0_class_options(MachineClass *mc)
4921 {
4922     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4923     static GlobalProperty compat[] = {
4924         { TYPE_SPAPR_PCI_HOST_BRIDGE, "pre-5.1-associativity", "on" },
4925     };
4926 
4927     spapr_machine_5_1_class_options(mc);
4928     compat_props_add(mc->compat_props, hw_compat_5_0, hw_compat_5_0_len);
4929     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
4930     mc->numa_mem_supported = true;
4931     smc->pre_5_1_assoc_refpoints = true;
4932 }
4933 
4934 DEFINE_SPAPR_MACHINE(5_0, "5.0", false);
4935 
4936 /*
4937  * pseries-4.2
4938  */
4939 static void spapr_machine_4_2_class_options(MachineClass *mc)
4940 {
4941     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4942 
4943     spapr_machine_5_0_class_options(mc);
4944     compat_props_add(mc->compat_props, hw_compat_4_2, hw_compat_4_2_len);
4945     smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_OFF;
4946     smc->default_caps.caps[SPAPR_CAP_FWNMI] = SPAPR_CAP_OFF;
4947     smc->rma_limit = 16 * GiB;
4948     mc->nvdimm_supported = false;
4949 }
4950 
4951 DEFINE_SPAPR_MACHINE(4_2, "4.2", false);
4952 
4953 /*
4954  * pseries-4.1
4955  */
4956 static void spapr_machine_4_1_class_options(MachineClass *mc)
4957 {
4958     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4959     static GlobalProperty compat[] = {
4960         /* Only allow 4kiB and 64kiB IOMMU pagesizes */
4961         { TYPE_SPAPR_PCI_HOST_BRIDGE, "pgsz", "0x11000" },
4962     };
4963 
4964     spapr_machine_4_2_class_options(mc);
4965     smc->linux_pci_probe = false;
4966     smc->smp_threads_vsmt = false;
4967     compat_props_add(mc->compat_props, hw_compat_4_1, hw_compat_4_1_len);
4968     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
4969 }
4970 
4971 DEFINE_SPAPR_MACHINE(4_1, "4.1", false);
4972 
4973 /*
4974  * pseries-4.0
4975  */
4976 static bool phb_placement_4_0(SpaprMachineState *spapr, uint32_t index,
4977                               uint64_t *buid, hwaddr *pio,
4978                               hwaddr *mmio32, hwaddr *mmio64,
4979                               unsigned n_dma, uint32_t *liobns, Error **errp)
4980 {
4981     return spapr_phb_placement(spapr, index, buid, pio, mmio32, mmio64,
4982                                n_dma, liobns, errp);
4983 }

4987 static void spapr_machine_4_0_class_options(MachineClass *mc)
4988 {
4989     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4990 
4991     spapr_machine_4_1_class_options(mc);
4992     compat_props_add(mc->compat_props, hw_compat_4_0, hw_compat_4_0_len);
4993     smc->phb_placement = phb_placement_4_0;
4994     smc->irq = &spapr_irq_xics;
4995     smc->pre_4_1_migration = true;
4996 }
4997 
4998 DEFINE_SPAPR_MACHINE(4_0, "4.0", false);
4999 
5000 /*
5001  * pseries-3.1
5002  */
5003 static void spapr_machine_3_1_class_options(MachineClass *mc)
5004 {
5005     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
5006 
5007     spapr_machine_4_0_class_options(mc);
5008     compat_props_add(mc->compat_props, hw_compat_3_1, hw_compat_3_1_len);
5009 
5010     mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power8_v2.0");
5011     smc->update_dt_enabled = false;
5012     smc->dr_phb_enabled = false;
5013     smc->broken_host_serial_model = true;
5014     smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_BROKEN;
5015     smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_BROKEN;
5016     smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_BROKEN;
5017     smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_OFF;
5018 }
5019 
5020 DEFINE_SPAPR_MACHINE(3_1, "3.1", false);
5021 
5022 /*
5023  * pseries-3.0
5024  */
5025 
5026 static void spapr_machine_3_0_class_options(MachineClass *mc)
5027 {
5028     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
5029 
5030     spapr_machine_3_1_class_options(mc);
5031     compat_props_add(mc->compat_props, hw_compat_3_0, hw_compat_3_0_len);
5032 
5033     smc->legacy_irq_allocation = true;
5034     smc->nr_xirqs = 0x400;
5035     smc->irq = &spapr_irq_xics_legacy;
5036 }
5037 
5038 DEFINE_SPAPR_MACHINE(3_0, "3.0", false);
5039 
5040 /*
5041  * pseries-2.12
5042  */
5043 static void spapr_machine_2_12_class_options(MachineClass *mc)
5044 {
5045     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
5046     static GlobalProperty compat[] = {
5047         { TYPE_POWERPC_CPU, "pre-3.0-migration", "on" },
5048         { TYPE_SPAPR_CPU_CORE, "pre-3.0-migration", "on" },
5049     };
5050 
5051     spapr_machine_3_0_class_options(mc);
5052     compat_props_add(mc->compat_props, hw_compat_2_12, hw_compat_2_12_len);
5053     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
5054 
5055     /* We depend on kvm_enabled() to choose a default value for the
5056      * hpt-max-page-size capability. Of course we can't do it here
5057      * because this is too early and the HW accelerator isn't initialized
5058      * yet. Postpone this to machine init (see default_caps_with_cpu()).
5059      */
5060     smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 0;
5061 }
5062 
5063 DEFINE_SPAPR_MACHINE(2_12, "2.12", false);
5064 
5065 static void spapr_machine_2_12_sxxm_class_options(MachineClass *mc)
5066 {
5067     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
5068 
5069     spapr_machine_2_12_class_options(mc);
5070     smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_WORKAROUND;
5071     smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_WORKAROUND;
5072     smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_FIXED_CCD;
5073 }
5074 
5075 DEFINE_SPAPR_MACHINE(2_12_sxxm, "2.12-sxxm", false);
5076 
5077 /*
5078  * pseries-2.11
5079  */
5080 
5081 static void spapr_machine_2_11_class_options(MachineClass *mc)
5082 {
5083     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
5084 
5085     spapr_machine_2_12_class_options(mc);
5086     smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_ON;
5087     compat_props_add(mc->compat_props, hw_compat_2_11, hw_compat_2_11_len);
5088     mc->deprecation_reason = "old and not maintained - use a 2.12+ version";
5089 }
5090 
5091 DEFINE_SPAPR_MACHINE(2_11, "2.11", false);
5092 
5093 /*
5094  * pseries-2.10
5095  */
5096 
5097 static void spapr_machine_2_10_class_options(MachineClass *mc)
5098 {
5099     spapr_machine_2_11_class_options(mc);
5100     compat_props_add(mc->compat_props, hw_compat_2_10, hw_compat_2_10_len);
5101 }
5102 
5103 DEFINE_SPAPR_MACHINE(2_10, "2.10", false);
5104 
5105 /*
5106  * pseries-2.9
5107  */
5108 
5109 static void spapr_machine_2_9_class_options(MachineClass *mc)
5110 {
5111     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
5112     static GlobalProperty compat[] = {
5113         { TYPE_POWERPC_CPU, "pre-2.10-migration", "on" },
5114     };
5115 
5116     spapr_machine_2_10_class_options(mc);
5117     compat_props_add(mc->compat_props, hw_compat_2_9, hw_compat_2_9_len);
5118     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
5119     smc->pre_2_10_has_unused_icps = true;
5120     smc->resize_hpt_default = SPAPR_RESIZE_HPT_DISABLED;
5121 }
5122 
5123 DEFINE_SPAPR_MACHINE(2_9, "2.9", false);
5124 
5125 /*
5126  * pseries-2.8
5127  */
5128 
5129 static void spapr_machine_2_8_class_options(MachineClass *mc)
5130 {
5131     static GlobalProperty compat[] = {
5132         { TYPE_SPAPR_PCI_HOST_BRIDGE, "pcie-extended-configuration-space", "off" },
5133     };
5134 
5135     spapr_machine_2_9_class_options(mc);
5136     compat_props_add(mc->compat_props, hw_compat_2_8, hw_compat_2_8_len);
5137     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
5138     mc->numa_mem_align_shift = 23;
5139 }
5140 
5141 DEFINE_SPAPR_MACHINE(2_8, "2.8", false);
5142 
5143 /*
5144  * pseries-2.7
5145  */
5146 
5147 static bool phb_placement_2_7(SpaprMachineState *spapr, uint32_t index,
5148                               uint64_t *buid, hwaddr *pio,
5149                               hwaddr *mmio32, hwaddr *mmio64,
5150                               unsigned n_dma, uint32_t *liobns, Error **errp)
5151 {
5152     /* Legacy PHB placement for pseries-2.7 and earlier machine types */
5153     const uint64_t base_buid = 0x800000020000000ULL;
5154     const hwaddr phb_spacing = 0x1000000000ULL; /* 64 GiB */
5155     const hwaddr mmio_offset = 0xa0000000; /* 2 GiB + 512 MiB */
5156     const hwaddr pio_offset = 0x80000000; /* 2 GiB */
5157     const uint32_t max_index = 255;
5158     const hwaddr phb0_alignment = 0x10000000000ULL; /* 1 TiB */
5159 
5160     uint64_t ram_top = MACHINE(spapr)->ram_size;
5161     hwaddr phb0_base, phb_base;
5162     int i;
5163 
5164     /* Do we have device memory? */
5165     if (MACHINE(spapr)->device_memory) {
5166         /* Can't just use maxram_size, because there may be an
5167          * alignment gap between normal and device memory regions
5168          */
5169         ram_top = MACHINE(spapr)->device_memory->base +
5170             memory_region_size(&MACHINE(spapr)->device_memory->mr);
5171     }
5172 
5173     phb0_base = QEMU_ALIGN_UP(ram_top, phb0_alignment);
5174 
5175     if (index > max_index) {
5176         error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)",
5177                    max_index);
5178         return false;
5179     }
5180 
5181     *buid = base_buid + index;
5182     for (i = 0; i < n_dma; ++i) {
5183         liobns[i] = SPAPR_PCI_LIOBN(index, i);
5184     }
5185 
5186     phb_base = phb0_base + index * phb_spacing;
5187     *pio = phb_base + pio_offset;
5188     *mmio32 = phb_base + mmio_offset;
5189     /*
5190      * We don't set the 64-bit MMIO window, relying on the PHB's
5191      * fallback behaviour of automatically splitting a large "32-bit"
5192      * window into contiguous 32-bit and 64-bit windows
5193      */
5194 
5195     return true;
5196 }
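
/*
 * Editor's worked example for the legacy placement above, under an
 * assumed configuration of 4 GiB of RAM, no device memory, and PHB
 * index 1.  ram_top = 4 GiB rounds up to a phb0_base of 1 TiB, and
 * index 1 adds one 64 GiB spacing on top of that.
 */
static G_GNUC_UNUSED void phb_placement_2_7_example(void)
{
    const hwaddr ram_top = 4 * GiB;
    const hwaddr phb0_base = QEMU_ALIGN_UP(ram_top, 0x10000000000ULL);
    const hwaddr phb_base = phb0_base + 1 * 0x1000000000ULL;

    g_assert(phb0_base == 1 * TiB);
    /* PIO at phb_base + 2 GiB, 32-bit MMIO at phb_base + 2.5 GiB */
    g_assert(phb_base + 0x80000000 == 1 * TiB + 66 * GiB);
}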
5197 
5198 static void spapr_machine_2_7_class_options(MachineClass *mc)
5199 {
5200     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
5201     static GlobalProperty compat[] = {
5202         { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem_win_size", "0xf80000000", },
5203         { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem64_win_size", "0", },
5204         { TYPE_POWERPC_CPU, "pre-2.8-migration", "on", },
5205         { TYPE_SPAPR_PCI_HOST_BRIDGE, "pre-2.8-migration", "on", },
5206     };
5207 
5208     spapr_machine_2_8_class_options(mc);
5209     mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power7_v2.3");
5210     mc->default_machine_opts = "modern-hotplug-events=off";
5211     compat_props_add(mc->compat_props, hw_compat_2_7, hw_compat_2_7_len);
5212     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
5213     smc->phb_placement = phb_placement_2_7;
5214 }
5215 
5216 DEFINE_SPAPR_MACHINE(2_7, "2.7", false);
5217 
5218 /*
5219  * pseries-2.6
5220  */
5221 
5222 static void spapr_machine_2_6_class_options(MachineClass *mc)
5223 {
5224     static GlobalProperty compat[] = {
5225         { TYPE_SPAPR_PCI_HOST_BRIDGE, "ddw", "off" },
5226     };
5227 
5228     spapr_machine_2_7_class_options(mc);
5229     mc->has_hotpluggable_cpus = false;
5230     compat_props_add(mc->compat_props, hw_compat_2_6, hw_compat_2_6_len);
5231     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
5232 }
5233 
5234 DEFINE_SPAPR_MACHINE(2_6, "2.6", false);
5235 
5236 /*
5237  * pseries-2.5
5238  */
5239 
5240 static void spapr_machine_2_5_class_options(MachineClass *mc)
5241 {
5242     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
5243     static GlobalProperty compat[] = {
5244         { "spapr-vlan", "use-rx-buffer-pools", "off" },
5245     };
5246 
5247     spapr_machine_2_6_class_options(mc);
5248     smc->use_ohci_by_default = true;
5249     compat_props_add(mc->compat_props, hw_compat_2_5, hw_compat_2_5_len);
5250     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
5251 }
5252 
5253 DEFINE_SPAPR_MACHINE(2_5, "2.5", false);
5254 
5255 /*
5256  * pseries-2.4
5257  */
5258 
5259 static void spapr_machine_2_4_class_options(MachineClass *mc)
5260 {
5261     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
5262 
5263     spapr_machine_2_5_class_options(mc);
5264     smc->dr_lmb_enabled = false;
5265     compat_props_add(mc->compat_props, hw_compat_2_4, hw_compat_2_4_len);
5266 }
5267 
5268 DEFINE_SPAPR_MACHINE(2_4, "2.4", false);
5269 
5270 /*
5271  * pseries-2.3
5272  */
5273 
5274 static void spapr_machine_2_3_class_options(MachineClass *mc)
5275 {
5276     static GlobalProperty compat[] = {
5277         { "spapr-pci-host-bridge", "dynamic-reconfiguration", "off" },
5278     };

5279     spapr_machine_2_4_class_options(mc);
5280     compat_props_add(mc->compat_props, hw_compat_2_3, hw_compat_2_3_len);
5281     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
5282 }

5283 DEFINE_SPAPR_MACHINE(2_3, "2.3", false);
5284 
5285 /*
5286  * pseries-2.2
5287  */
5288 
5289 static void spapr_machine_2_2_class_options(MachineClass *mc)
5290 {
5291     static GlobalProperty compat[] = {
5292         { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem_win_size", "0x20000000" },
5293     };
5294 
5295     spapr_machine_2_3_class_options(mc);
5296     compat_props_add(mc->compat_props, hw_compat_2_2, hw_compat_2_2_len);
5297     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
5298     mc->default_machine_opts = "modern-hotplug-events=off,suppress-vmdesc=on";
5299 }

5300 DEFINE_SPAPR_MACHINE(2_2, "2.2", false);
5301 
5302 /*
5303  * pseries-2.1
5304  */
5305 
5306 static void spapr_machine_2_1_class_options(MachineClass *mc)
5307 {
5308     spapr_machine_2_2_class_options(mc);
5309     compat_props_add(mc->compat_props, hw_compat_2_1, hw_compat_2_1_len);
5310 }

5311 DEFINE_SPAPR_MACHINE(2_1, "2.1", false);
5312 
5313 static void spapr_machine_register_types(void)
5314 {
5315     type_register_static(&spapr_machine_info);
5316 }
5317 
5318 type_init(spapr_machine_register_types)
5319