xref: /qemu/hw/ppc/spapr.c (revision f917eed3)
/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * Copyright (c) 2004-2007 Fabrice Bellard
 * Copyright (c) 2007 Jocelyn Mayer
 * Copyright (c) 2010 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/datadir.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "sysemu/sysemu.h"
#include "sysemu/hostmem.h"
#include "sysemu/numa.h"
#include "sysemu/qtest.h"
#include "sysemu/reset.h"
#include "sysemu/runstate.h"
#include "qemu/log.h"
#include "hw/fw-path-provider.h"
#include "elf.h"
#include "net/net.h"
#include "sysemu/device_tree.h"
#include "sysemu/cpus.h"
#include "sysemu/hw_accel.h"
#include "kvm_ppc.h"
#include "migration/misc.h"
#include "migration/qemu-file-types.h"
#include "migration/global_state.h"
#include "migration/register.h"
#include "migration/blocker.h"
#include "mmu-hash64.h"
#include "mmu-book3s-v3.h"
#include "cpu-models.h"
#include "hw/core/cpu.h"

#include "hw/boards.h"
#include "hw/ppc/ppc.h"
#include "hw/loader.h"

#include "hw/ppc/fdt.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "hw/qdev-properties.h"
#include "hw/pci-host/spapr.h"
#include "hw/pci/msi.h"

#include "hw/pci/pci.h"
#include "hw/scsi/scsi.h"
#include "hw/virtio/virtio-scsi.h"
#include "hw/virtio/vhost-scsi-common.h"

#include "exec/address-spaces.h"
#include "exec/ram_addr.h"
#include "hw/usb.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "hw/nmi.h"
#include "hw/intc/intc.h"

#include "hw/ppc/spapr_cpu_core.h"
#include "hw/mem/memory-device.h"
#include "hw/ppc/spapr_tpm_proxy.h"
#include "hw/ppc/spapr_nvdimm.h"
#include "hw/ppc/spapr_numa.h"

#include "monitor/monitor.h"

#include <libfdt.h>

/* SLOF memory layout:
 *
 * SLOF raw image loaded at 0, copies its romfs right below the flat
 * device-tree, then positions SLOF itself 31M below that
 *
 * So we set FW_OVERHEAD to 40MB, which should account for all of that
 * and more
 *
 * We load our kernel at 4M, leaving space for the SLOF initial image
 */
#define RTAS_MAX_ADDR           0x80000000 /* RTAS must stay below that */
#define FW_MAX_SIZE             0x400000
#define FW_FILE_NAME            "slof.bin"
#define FW_OVERHEAD             0x2800000
#define KERNEL_LOAD_ADDR        FW_MAX_SIZE
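
/*
 * Illustrative sketch of the resulting low-memory layout (not part of
 * the original source): the raw SLOF image occupies [0, FW_MAX_SIZE),
 * the kernel is loaded at KERNEL_LOAD_ADDR = 4M right after it, and
 * everything SLOF relocates (romfs, flat device-tree, SLOF itself) is
 * assumed to fit within the first FW_OVERHEAD = 40M of RAM.
 */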

#define MIN_RMA_SLOF            (128 * MiB)

#define PHANDLE_INTC            0x00001111

/* These two functions implement the VCPU id numbering: one to compute them
 * all and one to identify thread 0 of a VCORE. Any change to the first one
 * is likely to have an impact on the second one, so let's keep them close.
 */
static int spapr_vcpu_id(SpaprMachineState *spapr, int cpu_index)
{
    MachineState *ms = MACHINE(spapr);
    unsigned int smp_threads = ms->smp.threads;

    assert(spapr->vsmt);
    return
        (cpu_index / smp_threads) * spapr->vsmt + cpu_index % smp_threads;
}
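
/*
 * Worked example (illustrative, not in the original source): with
 * ms->smp.threads = 4 and spapr->vsmt = 8, spapr_vcpu_id() maps
 * cpu_index 5 to (5 / 4) * 8 + 5 % 4 = 9; spapr_is_thread0_in_vcore()
 * then reports false for that VCPU, since thread 0 of its vcore has
 * VCPU id 8.
 */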
static bool spapr_is_thread0_in_vcore(SpaprMachineState *spapr,
                                      PowerPCCPU *cpu)
{
    assert(spapr->vsmt);
    return spapr_get_vcpu_id(cpu) % spapr->vsmt == 0;
}

static bool pre_2_10_vmstate_dummy_icp_needed(void *opaque)
{
    /* Dummy entries correspond to unused ICPState objects in older QEMUs,
     * and newer QEMUs don't even have them. In both cases, we don't want
     * to send anything on the wire.
     */
    return false;
}

static const VMStateDescription pre_2_10_vmstate_dummy_icp = {
    .name = "icp/server",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pre_2_10_vmstate_dummy_icp_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UNUSED(4), /* uint32_t xirr */
        VMSTATE_UNUSED(1), /* uint8_t pending_priority */
        VMSTATE_UNUSED(1), /* uint8_t mfrr */
        VMSTATE_END_OF_LIST()
    },
};

static void pre_2_10_vmstate_register_dummy_icp(int i)
{
    vmstate_register(NULL, i, &pre_2_10_vmstate_dummy_icp,
                     (void *)(uintptr_t) i);
}

static void pre_2_10_vmstate_unregister_dummy_icp(int i)
{
    vmstate_unregister(NULL, &pre_2_10_vmstate_dummy_icp,
                       (void *)(uintptr_t) i);
}

int spapr_max_server_number(SpaprMachineState *spapr)
{
    MachineState *ms = MACHINE(spapr);

    assert(spapr->vsmt);
    return DIV_ROUND_UP(ms->smp.max_cpus * spapr->vsmt, ms->smp.threads);
}
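
/*
 * Worked example (illustrative): with max_cpus = 16, threads = 4 and
 * vsmt = 8, this returns DIV_ROUND_UP(16 * 8, 4) = 32, since VCPU ids
 * are spaced vsmt apart per core while only 'threads' ids per core are
 * actually used.
 */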

static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu,
                                  int smt_threads)
{
    int i, ret = 0;
    uint32_t servers_prop[smt_threads];
    uint32_t gservers_prop[smt_threads * 2];
    int index = spapr_get_vcpu_id(cpu);

    if (cpu->compat_pvr) {
        ret = fdt_setprop_cell(fdt, offset, "cpu-version", cpu->compat_pvr);
        if (ret < 0) {
            return ret;
        }
    }

    /* Build interrupt servers and gservers properties */
    for (i = 0; i < smt_threads; i++) {
        servers_prop[i] = cpu_to_be32(index + i);
        /* Hack, direct the group queues back to cpu 0 */
        gservers_prop[i*2] = cpu_to_be32(index + i);
        gservers_prop[i*2 + 1] = 0;
    }
    ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s",
                      servers_prop, sizeof(servers_prop));
    if (ret < 0) {
        return ret;
    }
    ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-gserver#s",
                      gservers_prop, sizeof(gservers_prop));

    return ret;
}
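
/*
 * Illustrative result (not from the original source): for a core whose
 * thread 0 has VCPU id 8 and smt_threads = 4, the properties built
 * above come out as
 *   ibm,ppc-interrupt-server#s  = <8 9 10 11>;
 *   ibm,ppc-interrupt-gserver#s = <8 0 9 0 10 0 11 0>;
 * i.e. each thread is followed by the 0 terminator of its group list.
 */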

static void spapr_dt_pa_features(SpaprMachineState *spapr,
                                 PowerPCCPU *cpu,
                                 void *fdt, int offset)
{
    uint8_t pa_features_206[] = { 6, 0,
        0xf6, 0x1f, 0xc7, 0x00, 0x80, 0xc0 };
    uint8_t pa_features_207[] = { 24, 0,
        0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0,
        0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x80, 0x00,
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00 };
    uint8_t pa_features_300[] = { 66, 0,
        /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */
        /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, SSO, 5: LE|CFAR|EB|LSQ */
        0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0, /* 0 - 5 */
        /* 6: DS207 */
        0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */
        /* 16: Vector */
        0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */
        /* 18: Vec. Scalar, 20: Vec. XOR, 22: HTM */
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */
        /* 24: Ext. Dec, 26: 64 bit ftrs, 28: PM ftrs */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */
        /* 30: MMR, 32: LE atomic, 34: EBB + ext EBB */
        0x80, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */
        /* 36: SPR SO, 38: Copy/Paste, 40: Radix MMU */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 36 - 41 */
        /* 42: PM, 44: PC RA, 46: SC vec'd */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */
        /* 48: SIMD, 50: QP BFP, 52: String */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */
        /* 54: DecFP, 56: DecI, 58: SHA */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */
        /* 60: NM atomic, 62: RNG */
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */
    };
    uint8_t *pa_features = NULL;
    size_t pa_size;

    if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_06, 0, cpu->compat_pvr)) {
        pa_features = pa_features_206;
        pa_size = sizeof(pa_features_206);
    }
    if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_07, 0, cpu->compat_pvr)) {
        pa_features = pa_features_207;
        pa_size = sizeof(pa_features_207);
    }
    if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, cpu->compat_pvr)) {
        pa_features = pa_features_300;
        pa_size = sizeof(pa_features_300);
    }
    if (!pa_features) {
        return;
    }

    if (ppc_hash64_has(cpu, PPC_HASH64_CI_LARGEPAGE)) {
        /*
         * Note: we keep CI large pages off by default because a 64K capable
         * guest provisioned with large pages might otherwise try to map a qemu
         * framebuffer (or other kind of memory mapped PCI BAR) using 64K pages
         * even if that qemu runs on a 4k host.
         * We add this bit back here if we are confident this is not an issue.
         */
        pa_features[3] |= 0x20;
    }
    if ((spapr_get_cap(spapr, SPAPR_CAP_HTM) != 0) && pa_size > 24) {
        pa_features[24] |= 0x80;    /* Transactional memory support */
    }
    if (spapr->cas_pre_isa3_guest && pa_size > 40) {
        /* Workaround for broken kernels that attempt (guest) radix
         * mode when they can't handle it but see the radix bit set in
         * pa-features, by hiding that bit from them. */
        pa_features[40 + 2] &= ~0x80; /* Radix MMU */
    }

    _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", pa_features, pa_size)));
}

static hwaddr spapr_node0_size(MachineState *machine)
{
    if (machine->numa_state->num_nodes) {
        int i;
        for (i = 0; i < machine->numa_state->num_nodes; ++i) {
            if (machine->numa_state->nodes[i].node_mem) {
                return MIN(pow2floor(machine->numa_state->nodes[i].node_mem),
                           machine->ram_size);
            }
        }
    }
    return machine->ram_size;
}

bool spapr_machine_using_legacy_numa(SpaprMachineState *spapr)
{
    MachineState *machine = MACHINE(spapr);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);

    return smc->pre_5_2_numa_associativity ||
           machine->numa_state->num_nodes <= 1;
}

static void add_str(GString *s, const gchar *s1)
{
    g_string_append_len(s, s1, strlen(s1) + 1);
}

static int spapr_dt_memory_node(SpaprMachineState *spapr, void *fdt, int nodeid,
                                hwaddr start, hwaddr size)
{
    char mem_name[32];
    uint64_t mem_reg_property[2];
    int off;

    mem_reg_property[0] = cpu_to_be64(start);
    mem_reg_property[1] = cpu_to_be64(size);

    sprintf(mem_name, "memory@%" HWADDR_PRIx, start);
    off = fdt_add_subnode(fdt, 0, mem_name);
    _FDT(off);
    _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
    _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property,
                      sizeof(mem_reg_property))));
    spapr_numa_write_associativity_dt(spapr, fdt, off, nodeid);
    return off;
}

static uint32_t spapr_pc_dimm_node(MemoryDeviceInfoList *list, ram_addr_t addr)
{
    MemoryDeviceInfoList *info;

    for (info = list; info; info = info->next) {
        MemoryDeviceInfo *value = info->value;

        if (value && value->type == MEMORY_DEVICE_INFO_KIND_DIMM) {
            PCDIMMDeviceInfo *pcdimm_info = value->u.dimm.data;

            if (addr >= pcdimm_info->addr &&
                addr < (pcdimm_info->addr + pcdimm_info->size)) {
                return pcdimm_info->node;
            }
        }
    }

    return -1;
}

struct sPAPRDrconfCellV2 {
    uint32_t seq_lmbs;
    uint64_t base_addr;
    uint32_t drc_index;
    uint32_t aa_index;
    uint32_t flags;
} QEMU_PACKED;

typedef struct DrconfCellQueue {
    struct sPAPRDrconfCellV2 cell;
    QSIMPLEQ_ENTRY(DrconfCellQueue) entry;
} DrconfCellQueue;

static DrconfCellQueue *
spapr_get_drconf_cell(uint32_t seq_lmbs, uint64_t base_addr,
                      uint32_t drc_index, uint32_t aa_index,
                      uint32_t flags)
{
    DrconfCellQueue *elem;

    elem = g_malloc0(sizeof(*elem));
    elem->cell.seq_lmbs = cpu_to_be32(seq_lmbs);
    elem->cell.base_addr = cpu_to_be64(base_addr);
    elem->cell.drc_index = cpu_to_be32(drc_index);
    elem->cell.aa_index = cpu_to_be32(aa_index);
    elem->cell.flags = cpu_to_be32(flags);

    return elem;
}

static int spapr_dt_dynamic_memory_v2(SpaprMachineState *spapr, void *fdt,
                                      int offset, MemoryDeviceInfoList *dimms)
{
    MachineState *machine = MACHINE(spapr);
    uint8_t *int_buf, *cur_index;
    int ret;
    uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
    uint64_t addr, cur_addr, size;
    uint32_t nr_boot_lmbs = (machine->device_memory->base / lmb_size);
    uint64_t mem_end = machine->device_memory->base +
                       memory_region_size(&machine->device_memory->mr);
    uint32_t node, buf_len, nr_entries = 0;
    SpaprDrc *drc;
    DrconfCellQueue *elem, *next;
    MemoryDeviceInfoList *info;
    QSIMPLEQ_HEAD(, DrconfCellQueue) drconf_queue
        = QSIMPLEQ_HEAD_INITIALIZER(drconf_queue);

    /* Entry to cover RAM and the gap area */
    elem = spapr_get_drconf_cell(nr_boot_lmbs, 0, 0, -1,
                                 SPAPR_LMB_FLAGS_RESERVED |
                                 SPAPR_LMB_FLAGS_DRC_INVALID);
    QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
    nr_entries++;

    cur_addr = machine->device_memory->base;
    for (info = dimms; info; info = info->next) {
        PCDIMMDeviceInfo *di = info->value->u.dimm.data;

        addr = di->addr;
        size = di->size;
        node = di->node;

        /*
         * Skip NVDIMMs here: the area an NVDIMM occupies only becomes
         * hot-pluggable once the NVDIMM is unplugged, and that area is
         * covered by the bigger hot-pluggable chunk emitted in a later
         * iteration (or by the final entry below).
         */
        if (info->value->type == MEMORY_DEVICE_INFO_KIND_NVDIMM) {
            continue;
        }

        /* Entry for hot-pluggable area */
        if (cur_addr < addr) {
            drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, cur_addr / lmb_size);
            g_assert(drc);
            elem = spapr_get_drconf_cell((addr - cur_addr) / lmb_size,
                                         cur_addr, spapr_drc_index(drc), -1, 0);
            QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
            nr_entries++;
        }

        /* Entry for DIMM */
        drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, addr / lmb_size);
        g_assert(drc);
        elem = spapr_get_drconf_cell(size / lmb_size, addr,
                                     spapr_drc_index(drc), node,
                                     (SPAPR_LMB_FLAGS_ASSIGNED |
                                      SPAPR_LMB_FLAGS_HOTREMOVABLE));
        QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
        nr_entries++;
        cur_addr = addr + size;
    }

    /* Entry for remaining hotpluggable area */
    if (cur_addr < mem_end) {
        drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, cur_addr / lmb_size);
        g_assert(drc);
        elem = spapr_get_drconf_cell((mem_end - cur_addr) / lmb_size,
                                     cur_addr, spapr_drc_index(drc), -1, 0);
        QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
        nr_entries++;
    }

    buf_len = nr_entries * sizeof(struct sPAPRDrconfCellV2) + sizeof(uint32_t);
    int_buf = cur_index = g_malloc0(buf_len);
    *(uint32_t *)int_buf = cpu_to_be32(nr_entries);
    cur_index += sizeof(nr_entries);

    QSIMPLEQ_FOREACH_SAFE(elem, &drconf_queue, entry, next) {
        memcpy(cur_index, &elem->cell, sizeof(elem->cell));
        cur_index += sizeof(elem->cell);
        QSIMPLEQ_REMOVE(&drconf_queue, elem, DrconfCellQueue, entry);
        g_free(elem);
    }

    ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory-v2", int_buf, buf_len);
    g_free(int_buf);
    if (ret < 0) {
        return -1;
    }
    return 0;
}
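
/*
 * Sketch of the ibm,dynamic-memory-v2 property built above
 * (illustrative): a 32-bit entry count followed by nr_entries packed
 * sPAPRDrconfCellV2 cells, each describing a run of LMBs:
 *
 *   <nr_entries>
 *   { seq_lmbs, base_addr, drc_index, aa_index, flags } x nr_entries
 *
 * The first cell always covers boot RAM plus the gap below device
 * memory and is marked RESERVED | DRC_INVALID.
 */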

static int spapr_dt_dynamic_memory(SpaprMachineState *spapr, void *fdt,
                                   int offset, MemoryDeviceInfoList *dimms)
{
    MachineState *machine = MACHINE(spapr);
    int i, ret;
    uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
    uint32_t device_lmb_start = machine->device_memory->base / lmb_size;
    uint32_t nr_lmbs = (machine->device_memory->base +
                       memory_region_size(&machine->device_memory->mr)) /
                       lmb_size;
    uint32_t *int_buf, *cur_index, buf_len;

    /*
     * Allocate a buffer large enough to hold the whole
     * ibm,dynamic-memory property
     */
    buf_len = (nr_lmbs * SPAPR_DR_LMB_LIST_ENTRY_SIZE + 1) * sizeof(uint32_t);
    cur_index = int_buf = g_malloc0(buf_len);
    int_buf[0] = cpu_to_be32(nr_lmbs);
    cur_index++;
    for (i = 0; i < nr_lmbs; i++) {
        uint64_t addr = i * lmb_size;
        uint32_t *dynamic_memory = cur_index;

        if (i >= device_lmb_start) {
            SpaprDrc *drc;

            drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, i);
            g_assert(drc);

            dynamic_memory[0] = cpu_to_be32(addr >> 32);
            dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
            dynamic_memory[2] = cpu_to_be32(spapr_drc_index(drc));
            dynamic_memory[3] = cpu_to_be32(0); /* reserved */
            dynamic_memory[4] = cpu_to_be32(spapr_pc_dimm_node(dimms, addr));
            if (memory_region_present(get_system_memory(), addr)) {
                dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_ASSIGNED);
            } else {
                dynamic_memory[5] = cpu_to_be32(0);
            }
        } else {
            /*
             * LMB information for RMA, boot time RAM and the gap between
             * RAM and the device memory region -- all these are marked as
             * reserved and as having no valid DRC.
             */
            dynamic_memory[0] = cpu_to_be32(addr >> 32);
            dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
            dynamic_memory[2] = cpu_to_be32(0);
            dynamic_memory[3] = cpu_to_be32(0); /* reserved */
            dynamic_memory[4] = cpu_to_be32(-1);
            dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_RESERVED |
                                            SPAPR_LMB_FLAGS_DRC_INVALID);
        }

        cur_index += SPAPR_DR_LMB_LIST_ENTRY_SIZE;
    }
    ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory", int_buf, buf_len);
    g_free(int_buf);
    if (ret < 0) {
        return -1;
    }
    return 0;
}
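
/*
 * Sketch of one v1 ibm,dynamic-memory entry (illustrative): each LMB is
 * described by SPAPR_DR_LMB_LIST_ENTRY_SIZE = 6 cells, matching the
 * dynamic_memory[0..5] assignments above:
 *
 *   <addr_hi addr_lo drc_index reserved numa_node flags>
 */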

/*
 * Adds ibm,dynamic-reconfiguration-memory node.
 * Refer to docs/specs/ppc-spapr-hotplug.txt for the documentation
 * of this device tree node.
 */
static int spapr_dt_dynamic_reconfiguration_memory(SpaprMachineState *spapr,
                                                   void *fdt)
{
    MachineState *machine = MACHINE(spapr);
    int ret, offset;
    uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
    uint32_t prop_lmb_size[] = {cpu_to_be32(lmb_size >> 32),
                                cpu_to_be32(lmb_size & 0xffffffff)};
    MemoryDeviceInfoList *dimms = NULL;

    /*
     * Don't create the node if there is no device memory
     */
    if (machine->ram_size == machine->maxram_size) {
        return 0;
    }

    offset = fdt_add_subnode(fdt, 0, "ibm,dynamic-reconfiguration-memory");

    ret = fdt_setprop(fdt, offset, "ibm,lmb-size", prop_lmb_size,
                      sizeof(prop_lmb_size));
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop_cell(fdt, offset, "ibm,memory-flags-mask", 0xff);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop_cell(fdt, offset, "ibm,memory-preservation-time", 0x0);
    if (ret < 0) {
        return ret;
    }

    /* ibm,dynamic-memory or ibm,dynamic-memory-v2 */
    dimms = qmp_memory_device_list();
    if (spapr_ovec_test(spapr->ov5_cas, OV5_DRMEM_V2)) {
        ret = spapr_dt_dynamic_memory_v2(spapr, fdt, offset, dimms);
    } else {
        ret = spapr_dt_dynamic_memory(spapr, fdt, offset, dimms);
    }
    qapi_free_MemoryDeviceInfoList(dimms);

    if (ret < 0) {
        return ret;
    }

    ret = spapr_numa_write_assoc_lookup_arrays(spapr, fdt, offset);

    return ret;
}

static int spapr_dt_memory(SpaprMachineState *spapr, void *fdt)
{
    MachineState *machine = MACHINE(spapr);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    hwaddr mem_start, node_size;
    int i, nb_nodes = machine->numa_state->num_nodes;
    NodeInfo *nodes = machine->numa_state->nodes;

    for (i = 0, mem_start = 0; i < nb_nodes; ++i) {
        if (!nodes[i].node_mem) {
            continue;
        }
        if (mem_start >= machine->ram_size) {
            node_size = 0;
        } else {
            node_size = nodes[i].node_mem;
            if (node_size > machine->ram_size - mem_start) {
                node_size = machine->ram_size - mem_start;
            }
        }
        if (!mem_start) {
            /* spapr_machine_init() checks for rma_size <= node0_size
             * already */
            spapr_dt_memory_node(spapr, fdt, i, 0, spapr->rma_size);
            mem_start += spapr->rma_size;
            node_size -= spapr->rma_size;
        }
        for ( ; node_size; ) {
            hwaddr sizetmp = pow2floor(node_size);

            /* mem_start != 0 here */
            if (ctzl(mem_start) < ctzl(sizetmp)) {
                sizetmp = 1ULL << ctzl(mem_start);
            }

            spapr_dt_memory_node(spapr, fdt, i, mem_start, sizetmp);
            node_size -= sizetmp;
            mem_start += sizetmp;
        }
    }

    /* Generate ibm,dynamic-reconfiguration-memory node if required */
    if (spapr_ovec_test(spapr->ov5_cas, OV5_DRCONF_MEMORY)) {
        int ret;

        g_assert(smc->dr_lmb_enabled);
        ret = spapr_dt_dynamic_reconfiguration_memory(spapr, fdt);
        if (ret) {
            return ret;
        }
    }

    return 0;
}
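
/*
 * Worked example of the chunking loop above (illustrative): a node of
 * 3 GiB starting at mem_start = 1 GiB is emitted as two nodes, 1 GiB at
 * 1 GiB followed by 2 GiB at 2 GiB, because each chunk is clamped first
 * to pow2floor(node_size) and then to the alignment of mem_start.
 */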

static void spapr_dt_cpu(CPUState *cs, void *fdt, int offset,
                         SpaprMachineState *spapr)
{
    MachineState *ms = MACHINE(spapr);
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
    int index = spapr_get_vcpu_id(cpu);
    uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40),
                       0xffffffff, 0xffffffff};
    uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq()
        : SPAPR_TIMEBASE_FREQ;
    uint32_t cpufreq = kvm_enabled() ? kvmppc_get_clockfreq() : 1000000000;
    uint32_t page_sizes_prop[64];
    size_t page_sizes_prop_size;
    unsigned int smp_threads = ms->smp.threads;
    uint32_t vcpus_per_socket = smp_threads * ms->smp.cores;
    uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};
    int compat_smt = MIN(smp_threads, ppc_compat_max_vthreads(cpu));
    SpaprDrc *drc;
    int drc_index;
    uint32_t radix_AP_encodings[PPC_PAGE_SIZES_MAX_SZ];
    int i;

    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, index);
    if (drc) {
        drc_index = spapr_drc_index(drc);
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index)));
    }

    _FDT((fdt_setprop_cell(fdt, offset, "reg", index)));
    _FDT((fdt_setprop_string(fdt, offset, "device_type", "cpu")));

    _FDT((fdt_setprop_cell(fdt, offset, "cpu-version", env->spr[SPR_PVR])));
    _FDT((fdt_setprop_cell(fdt, offset, "d-cache-block-size",
                           env->dcache_line_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "d-cache-line-size",
                           env->dcache_line_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "i-cache-block-size",
                           env->icache_line_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "i-cache-line-size",
                           env->icache_line_size)));

    if (pcc->l1_dcache_size) {
        _FDT((fdt_setprop_cell(fdt, offset, "d-cache-size",
                               pcc->l1_dcache_size)));
    } else {
        warn_report("Unknown L1 dcache size for cpu");
    }
    if (pcc->l1_icache_size) {
        _FDT((fdt_setprop_cell(fdt, offset, "i-cache-size",
                               pcc->l1_icache_size)));
    } else {
        warn_report("Unknown L1 icache size for cpu");
    }

    _FDT((fdt_setprop_cell(fdt, offset, "timebase-frequency", tbfreq)));
    _FDT((fdt_setprop_cell(fdt, offset, "clock-frequency", cpufreq)));
    _FDT((fdt_setprop_cell(fdt, offset, "slb-size", cpu->hash64_opts->slb_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "ibm,slb-size", cpu->hash64_opts->slb_size)));
    _FDT((fdt_setprop_string(fdt, offset, "status", "okay")));
    _FDT((fdt_setprop(fdt, offset, "64-bit", NULL, 0)));

    if (env->spr_cb[SPR_PURR].oea_read) {
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,purr", 1)));
    }
    if (env->spr_cb[SPR_SPURR].oea_read) {
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,spurr", 1)));
    }

    if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)) {
        _FDT((fdt_setprop(fdt, offset, "ibm,processor-segment-sizes",
                          segs, sizeof(segs))));
    }

    /* Advertise VSX (vector extensions) if available
     *   1               == VMX / Altivec available
     *   2               == VSX available
     *
     * Only CPUs for which we create core types in spapr_cpu_core.c
     * are possible, and all of those have VMX */
    if (spapr_get_cap(spapr, SPAPR_CAP_VSX) != 0) {
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 2)));
    } else {
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 1)));
    }

    /* Advertise DFP (Decimal Floating Point) if available
     *   0 / no property == no DFP
     *   1               == DFP available */
    if (spapr_get_cap(spapr, SPAPR_CAP_DFP) != 0) {
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,dfp", 1)));
    }

    page_sizes_prop_size = ppc_create_page_sizes_prop(cpu, page_sizes_prop,
                                                      sizeof(page_sizes_prop));
    if (page_sizes_prop_size) {
        _FDT((fdt_setprop(fdt, offset, "ibm,segment-page-sizes",
                          page_sizes_prop, page_sizes_prop_size)));
    }

    spapr_dt_pa_features(spapr, cpu, fdt, offset);

    _FDT((fdt_setprop_cell(fdt, offset, "ibm,chip-id",
                           cs->cpu_index / vcpus_per_socket)));

    _FDT((fdt_setprop(fdt, offset, "ibm,pft-size",
                      pft_size_prop, sizeof(pft_size_prop))));

    if (ms->numa_state->num_nodes > 1) {
        _FDT(spapr_numa_fixup_cpu_dt(spapr, fdt, offset, cpu));
    }

    _FDT(spapr_fixup_cpu_smt_dt(fdt, offset, cpu, compat_smt));

    if (pcc->radix_page_info) {
        for (i = 0; i < pcc->radix_page_info->count; i++) {
            radix_AP_encodings[i] =
                cpu_to_be32(pcc->radix_page_info->entries[i]);
        }
        _FDT((fdt_setprop(fdt, offset, "ibm,processor-radix-AP-encodings",
                          radix_AP_encodings,
                          pcc->radix_page_info->count *
                          sizeof(radix_AP_encodings[0]))));
    }

    /*
     * We set this property to let the guest know that it can use the large
     * decrementer and its width in bits.
     */
    if (spapr_get_cap(spapr, SPAPR_CAP_LARGE_DECREMENTER) != SPAPR_CAP_OFF) {
        _FDT((fdt_setprop_u32(fdt, offset, "ibm,dec-bits",
                              pcc->lrg_decr_bits)));
    }
}

static void spapr_dt_cpus(void *fdt, SpaprMachineState *spapr)
{
    CPUState **rev;
    CPUState *cs;
    int n_cpus;
    int cpus_offset;
    char *nodename;
    int i;

    cpus_offset = fdt_add_subnode(fdt, 0, "cpus");
    _FDT(cpus_offset);
    _FDT((fdt_setprop_cell(fdt, cpus_offset, "#address-cells", 0x1)));
    _FDT((fdt_setprop_cell(fdt, cpus_offset, "#size-cells", 0x0)));

    /*
     * We walk the CPUs in reverse order to ensure that CPU DT nodes
     * created by fdt_add_subnode() end up in the right order in FDT
     * so that the guest kernel enumerates the CPUs correctly.
     *
     * The CPU list cannot be traversed in reverse order, so we need
     * to do extra work.
     */
    n_cpus = 0;
    rev = NULL;
    CPU_FOREACH(cs) {
        rev = g_renew(CPUState *, rev, n_cpus + 1);
        rev[n_cpus++] = cs;
    }

    for (i = n_cpus - 1; i >= 0; i--) {
        CPUState *cs = rev[i];
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        int index = spapr_get_vcpu_id(cpu);
        DeviceClass *dc = DEVICE_GET_CLASS(cs);
        int offset;

        if (!spapr_is_thread0_in_vcore(spapr, cpu)) {
            continue;
        }

        nodename = g_strdup_printf("%s@%x", dc->fw_name, index);
        offset = fdt_add_subnode(fdt, cpus_offset, nodename);
        g_free(nodename);
        _FDT(offset);
        spapr_dt_cpu(cs, fdt, offset, spapr);
    }

    g_free(rev);
}

static int spapr_dt_rng(void *fdt)
{
    int node;
    int ret;

    node = qemu_fdt_add_subnode(fdt, "/ibm,platform-facilities");
    if (node <= 0) {
        return -1;
    }
    ret = fdt_setprop_string(fdt, node, "device_type",
                             "ibm,platform-facilities");
    ret |= fdt_setprop_cell(fdt, node, "#address-cells", 0x1);
    ret |= fdt_setprop_cell(fdt, node, "#size-cells", 0x0);

    node = fdt_add_subnode(fdt, node, "ibm,random-v1");
    if (node <= 0) {
        return -1;
    }
    ret |= fdt_setprop_string(fdt, node, "compatible", "ibm,random");

    return ret ? -1 : 0;
}

static void spapr_dt_rtas(SpaprMachineState *spapr, void *fdt)
{
    MachineState *ms = MACHINE(spapr);
    int rtas;
    GString *hypertas = g_string_sized_new(256);
    GString *qemu_hypertas = g_string_sized_new(256);
    uint64_t max_device_addr = MACHINE(spapr)->device_memory->base +
        memory_region_size(&MACHINE(spapr)->device_memory->mr);
    uint32_t lrdr_capacity[] = {
        cpu_to_be32(max_device_addr >> 32),
        cpu_to_be32(max_device_addr & 0xffffffff),
        cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE >> 32),
        cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE & 0xffffffff),
        cpu_to_be32(ms->smp.max_cpus / ms->smp.threads),
    };

    _FDT(rtas = fdt_add_subnode(fdt, 0, "rtas"));

    /* hypertas */
    add_str(hypertas, "hcall-pft");
    add_str(hypertas, "hcall-term");
    add_str(hypertas, "hcall-dabr");
    add_str(hypertas, "hcall-interrupt");
    add_str(hypertas, "hcall-tce");
    add_str(hypertas, "hcall-vio");
    add_str(hypertas, "hcall-splpar");
    add_str(hypertas, "hcall-join");
    add_str(hypertas, "hcall-bulk");
    add_str(hypertas, "hcall-set-mode");
    add_str(hypertas, "hcall-sprg0");
    add_str(hypertas, "hcall-copy");
    add_str(hypertas, "hcall-debug");
    add_str(hypertas, "hcall-vphn");
    add_str(qemu_hypertas, "hcall-memop1");

    if (!kvm_enabled() || kvmppc_spapr_use_multitce()) {
        add_str(hypertas, "hcall-multi-tce");
    }

    if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) {
        add_str(hypertas, "hcall-hpt-resize");
    }

    _FDT(fdt_setprop(fdt, rtas, "ibm,hypertas-functions",
                     hypertas->str, hypertas->len));
    g_string_free(hypertas, TRUE);
    _FDT(fdt_setprop(fdt, rtas, "qemu,hypertas-functions",
                     qemu_hypertas->str, qemu_hypertas->len));
    g_string_free(qemu_hypertas, TRUE);

    spapr_numa_write_rtas_dt(spapr, fdt, rtas);

    /*
     * FWNMI reserves RTAS_ERROR_LOG_MAX for the machine check error log,
     * and 16 bytes per CPU for system reset error log plus an extra 8 bytes.
     *
     * The system reset requirements are driven by existing Linux and PowerVM
     * implementation which (contrary to PAPR) saves r3 in the error log
     * structure like machine check, so Linux expects to find the saved r3
     * value at the address in r3 upon FWNMI-enabled sreset interrupt (and
     * does not look at the error value).
     *
     * System reset interrupts are not subject to interlock like machine
     * check, so this memory area could be corrupted if the sreset is
     * interrupted by a machine check (or vice versa) if it was shared. To
     * prevent this, system reset uses per-CPU areas for the sreset save
     * area. A system reset that interrupts a system reset handler could
     * still overwrite this area, but Linux doesn't try to recover in that
     * case anyway.
     *
     * The extra 8 bytes is required because Linux's FWNMI error log check
     * is off-by-one.
     */
    _FDT(fdt_setprop_cell(fdt, rtas, "rtas-size", RTAS_ERROR_LOG_MAX +
                          ms->smp.max_cpus * sizeof(uint64_t) * 2 +
                          sizeof(uint64_t)));
    _FDT(fdt_setprop_cell(fdt, rtas, "rtas-error-log-max",
                          RTAS_ERROR_LOG_MAX));
    _FDT(fdt_setprop_cell(fdt, rtas, "rtas-event-scan-rate",
                          RTAS_EVENT_SCAN_RATE));

    g_assert(msi_nonbroken);
    _FDT(fdt_setprop(fdt, rtas, "ibm,change-msix-capable", NULL, 0));

    /*
     * According to PAPR, the RTAS ibm,os-term call does not guarantee
     * a return to the guest CPU.
     *
     * The additional ibm,extended-os-term property, however, indicates
     * that the RTAS call will always return. Set this property.
     */
    _FDT(fdt_setprop(fdt, rtas, "ibm,extended-os-term", NULL, 0));

    _FDT(fdt_setprop(fdt, rtas, "ibm,lrdr-capacity",
                     lrdr_capacity, sizeof(lrdr_capacity)));

    spapr_dt_rtas_tokens(fdt, rtas);
}
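
/*
 * Worked example of the rtas-size computation above (illustrative,
 * assuming RTAS_ERROR_LOG_MAX is 2048): with max_cpus = 8 the property
 * is 2048 + 8 * 16 + 8 = 2184 bytes, i.e. the shared machine check
 * log, one 16-byte sreset save area per CPU, and the extra 8 bytes for
 * Linux's off-by-one FWNMI log check.
 */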

/*
 * Prepare ibm,arch-vec-5-platform-support, which indicates the MMU
 * and the XIVE features that the guest may request and thus the valid
 * values for bytes 23..26 of option vector 5:
 */
static void spapr_dt_ov5_platform_support(SpaprMachineState *spapr, void *fdt,
                                          int chosen)
{
    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);

    char val[2 * 4] = {
        23, 0x00, /* XICS / XIVE mode */
        24, 0x00, /* Hash/Radix, filled in below. */
        25, 0x00, /* Hash options: Segment Tables == no, GTSE == no. */
        26, 0x40, /* Radix options: GTSE == yes. */
    };

    if (spapr->irq->xics && spapr->irq->xive) {
        val[1] = SPAPR_OV5_XIVE_BOTH;
    } else if (spapr->irq->xive) {
        val[1] = SPAPR_OV5_XIVE_EXPLOIT;
    } else {
        assert(spapr->irq->xics);
        val[1] = SPAPR_OV5_XIVE_LEGACY;
    }

    if (!ppc_check_compat(first_ppc_cpu, CPU_POWERPC_LOGICAL_3_00, 0,
                          first_ppc_cpu->compat_pvr)) {
        /*
         * If we're in a pre POWER9 compat mode then the guest should
         * do hash and use the legacy interrupt mode
         */
        val[1] = SPAPR_OV5_XIVE_LEGACY; /* XICS */
        val[3] = 0x00; /* Hash */
    } else if (kvm_enabled()) {
        if (kvmppc_has_cap_mmu_radix() && kvmppc_has_cap_mmu_hash_v3()) {
            val[3] = 0x80; /* OV5_MMU_BOTH */
        } else if (kvmppc_has_cap_mmu_radix()) {
            val[3] = 0x40; /* OV5_MMU_RADIX_300 */
        } else {
            val[3] = 0x00; /* Hash */
        }
    } else {
        /* V3 MMU supports both hash and radix in tcg (with dynamic
         * switching) */
        val[3] = 0xC0;
    }
    _FDT(fdt_setprop(fdt, chosen, "ibm,arch-vec-5-platform-support",
                     val, sizeof(val)));
}

static void spapr_dt_chosen(SpaprMachineState *spapr, void *fdt, bool reset)
{
    MachineState *machine = MACHINE(spapr);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
    int chosen;

    _FDT(chosen = fdt_add_subnode(fdt, 0, "chosen"));

    if (reset) {
        const char *boot_device = machine->boot_order;
        char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus);
        size_t cb = 0;
        char *bootlist = get_boot_devices_list(&cb);

        if (machine->kernel_cmdline && machine->kernel_cmdline[0]) {
            _FDT(fdt_setprop_string(fdt, chosen, "bootargs",
                                    machine->kernel_cmdline));
        }

        if (spapr->initrd_size) {
            _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-start",
                                  spapr->initrd_base));
            _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-end",
                                  spapr->initrd_base + spapr->initrd_size));
        }

        if (spapr->kernel_size) {
            uint64_t kprop[2] = { cpu_to_be64(spapr->kernel_addr),
                                  cpu_to_be64(spapr->kernel_size) };

            _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel",
                             &kprop, sizeof(kprop)));
            if (spapr->kernel_le) {
                _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel-le", NULL, 0));
            }
        }
        if (boot_menu) {
            _FDT((fdt_setprop_cell(fdt, chosen, "qemu,boot-menu", boot_menu)));
        }
        _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-width", graphic_width));
        _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-height", graphic_height));
        _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-depth", graphic_depth));

        if (cb && bootlist) {
            int i;

            for (i = 0; i < cb; i++) {
                if (bootlist[i] == '\n') {
                    bootlist[i] = ' ';
                }
            }
            _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-list", bootlist));
        }

        if (boot_device && strlen(boot_device)) {
            _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-device", boot_device));
        }

        if (!spapr->has_graphics && stdout_path) {
            /*
             * "linux,stdout-path" and "stdout" properties are
             * deprecated by linux kernel. New platforms should only
             * use the "stdout-path" property. Set the new property
             * and continue using older property to remain compatible
             * with the existing firmware.
             */
            _FDT(fdt_setprop_string(fdt, chosen, "linux,stdout-path", stdout_path));
            _FDT(fdt_setprop_string(fdt, chosen, "stdout-path", stdout_path));
        }

        /*
         * We can deal with BAR reallocation just fine, advertise it
         * to the guest
         */
        if (smc->linux_pci_probe) {
            _FDT(fdt_setprop_cell(fdt, chosen, "linux,pci-probe-only", 0));
        }

        spapr_dt_ov5_platform_support(spapr, fdt, chosen);

        g_free(stdout_path);
        g_free(bootlist);
    }

    _FDT(spapr_dt_ovec(fdt, chosen, spapr->ov5_cas, "ibm,architecture-vec-5"));
}

static void spapr_dt_hypervisor(SpaprMachineState *spapr, void *fdt)
{
    /* The /hypervisor node isn't in PAPR - this is a hack to allow PR
     * KVM to work under pHyp with some guest co-operation */
    int hypervisor;
    uint8_t hypercall[16];

    _FDT(hypervisor = fdt_add_subnode(fdt, 0, "hypervisor"));
    /* indicate KVM hypercall interface */
    _FDT(fdt_setprop_string(fdt, hypervisor, "compatible", "linux,kvm"));
    if (kvmppc_has_cap_fixup_hcalls()) {
        /*
         * Older KVM versions with older guest kernels were broken
         * with the magic page, don't allow the guest to map it.
         */
        if (!kvmppc_get_hypercall(first_cpu->env_ptr, hypercall,
                                  sizeof(hypercall))) {
            _FDT(fdt_setprop(fdt, hypervisor, "hcall-instructions",
                             hypercall, sizeof(hypercall)));
        }
    }
}

void *spapr_build_fdt(SpaprMachineState *spapr, bool reset, size_t space)
{
    MachineState *machine = MACHINE(spapr);
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
    int ret;
    void *fdt;
    SpaprPhbState *phb;
    char *buf;

    fdt = g_malloc0(space);
    _FDT((fdt_create_empty_tree(fdt, space)));

    /* Root node */
    _FDT(fdt_setprop_string(fdt, 0, "device_type", "chrp"));
    _FDT(fdt_setprop_string(fdt, 0, "model", "IBM pSeries (emulated by qemu)"));
    _FDT(fdt_setprop_string(fdt, 0, "compatible", "qemu,pseries"));

    /* Guest UUID & Name */
    buf = qemu_uuid_unparse_strdup(&qemu_uuid);
    _FDT(fdt_setprop_string(fdt, 0, "vm,uuid", buf));
    if (qemu_uuid_set) {
        _FDT(fdt_setprop_string(fdt, 0, "system-id", buf));
    }
    g_free(buf);

    if (qemu_get_vm_name()) {
        _FDT(fdt_setprop_string(fdt, 0, "ibm,partition-name",
                                qemu_get_vm_name()));
    }

    /* Host Model & Serial Number */
    if (spapr->host_model) {
        _FDT(fdt_setprop_string(fdt, 0, "host-model", spapr->host_model));
    } else if (smc->broken_host_serial_model && kvmppc_get_host_model(&buf)) {
        _FDT(fdt_setprop_string(fdt, 0, "host-model", buf));
        g_free(buf);
    }

    if (spapr->host_serial) {
        _FDT(fdt_setprop_string(fdt, 0, "host-serial", spapr->host_serial));
    } else if (smc->broken_host_serial_model && kvmppc_get_host_serial(&buf)) {
        _FDT(fdt_setprop_string(fdt, 0, "host-serial", buf));
        g_free(buf);
    }

    _FDT(fdt_setprop_cell(fdt, 0, "#address-cells", 2));
    _FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2));

    /* /interrupt controller */
    spapr_irq_dt(spapr, spapr_max_server_number(spapr), fdt, PHANDLE_INTC);

    ret = spapr_dt_memory(spapr, fdt);
    if (ret < 0) {
        error_report("couldn't setup memory nodes in fdt");
        exit(1);
    }

    /* /vdevice */
    spapr_dt_vdevice(spapr->vio_bus, fdt);

    if (object_resolve_path_type("", TYPE_SPAPR_RNG, NULL)) {
        ret = spapr_dt_rng(fdt);
        if (ret < 0) {
            error_report("could not set up rng device in the fdt");
            exit(1);
        }
    }

    QLIST_FOREACH(phb, &spapr->phbs, list) {
        ret = spapr_dt_phb(spapr, phb, PHANDLE_INTC, fdt, NULL);
        if (ret < 0) {
            error_report("couldn't setup PCI devices in fdt");
            exit(1);
        }
    }

    spapr_dt_cpus(fdt, spapr);

    if (smc->dr_lmb_enabled) {
        _FDT(spapr_dt_drc(fdt, 0, NULL, SPAPR_DR_CONNECTOR_TYPE_LMB));
    }

    if (mc->has_hotpluggable_cpus) {
        int offset = fdt_path_offset(fdt, "/cpus");
        ret = spapr_dt_drc(fdt, offset, NULL, SPAPR_DR_CONNECTOR_TYPE_CPU);
        if (ret < 0) {
            error_report("Couldn't set up CPU DR device tree properties");
            exit(1);
        }
    }

    /* /event-sources */
    spapr_dt_events(spapr, fdt);

    /* /rtas */
    spapr_dt_rtas(spapr, fdt);

    /* /chosen */
    spapr_dt_chosen(spapr, fdt, reset);

    /* /hypervisor */
    if (kvm_enabled()) {
        spapr_dt_hypervisor(spapr, fdt);
    }

    /* Build memory reserve map */
    if (reset) {
        if (spapr->kernel_size) {
            _FDT((fdt_add_mem_rsv(fdt, spapr->kernel_addr,
                                  spapr->kernel_size)));
        }
        if (spapr->initrd_size) {
            _FDT((fdt_add_mem_rsv(fdt, spapr->initrd_base,
                                  spapr->initrd_size)));
        }
    }

    if (smc->dr_phb_enabled) {
        ret = spapr_dt_drc(fdt, 0, NULL, SPAPR_DR_CONNECTOR_TYPE_PHB);
        if (ret < 0) {
            error_report("Couldn't set up PHB DR device tree properties");
            exit(1);
        }
    }

    /* NVDIMM devices */
    if (mc->nvdimm_supported) {
        spapr_dt_persistent_memory(spapr, fdt);
    }

    return fdt;
}

static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
{
    SpaprMachineState *spapr = opaque;

    return (addr & 0x0fffffff) + spapr->kernel_addr;
}

static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp,
                                    PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    /* The TCG path should also be holding the BQL at this point */
    g_assert(qemu_mutex_iothread_locked());

    if (msr_pr) {
        hcall_dprintf("Hypercall made with MSR[PR]=1\n");
        env->gpr[3] = H_PRIVILEGE;
    } else {
        env->gpr[3] = spapr_hypercall(cpu, env->gpr[3], &env->gpr[4]);
    }
}

struct LPCRSyncState {
    target_ulong value;
    target_ulong mask;
};

static void do_lpcr_sync(CPUState *cs, run_on_cpu_data arg)
{
    struct LPCRSyncState *s = arg.host_ptr;
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    target_ulong lpcr;

    cpu_synchronize_state(cs);
    lpcr = env->spr[SPR_LPCR];
    lpcr &= ~s->mask;
    lpcr |= s->value;
    ppc_store_lpcr(cpu, lpcr);
}

void spapr_set_all_lpcrs(target_ulong value, target_ulong mask)
{
    CPUState *cs;
    struct LPCRSyncState s = {
        .value = value,
        .mask = mask
    };
    CPU_FOREACH(cs) {
        run_on_cpu(cs, do_lpcr_sync, RUN_ON_CPU_HOST_PTR(&s));
    }
}

static void spapr_get_pate(PPCVirtualHypervisor *vhyp, ppc_v3_pate_t *entry)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);

    /* Copy PATE1:GR into PATE0:HR */
    entry->dw0 = spapr->patb_entry & PATE0_HR;
    entry->dw1 = spapr->patb_entry;
}

#define HPTE(_table, _i)   (void *)(((uint64_t *)(_table)) + ((_i) * 2))
#define HPTE_VALID(_hpte)  (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_VALID)
#define HPTE_DIRTY(_hpte)  (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_HPTE_DIRTY)
#define CLEAN_HPTE(_hpte)  ((*(uint64_t *)(_hpte)) &= tswap64(~HPTE64_V_HPTE_DIRTY))
#define DIRTY_HPTE(_hpte)  ((*(uint64_t *)(_hpte)) |= tswap64(HPTE64_V_HPTE_DIRTY))
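
/*
 * The macros above treat each HPTE as a pair of guest-order (tswap64)
 * doublewords: HPTE() returns a pointer to entry _i, and the VALID and
 * DIRTY bits live in the first doubleword (pte0). HPTE64_V_HPTE_DIRTY
 * is a software-defined bit used when saving and restoring the hash
 * table, not an architected one.
 */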

/*
 * Get the fd to access the kernel htab, re-opening it if necessary
 */
static int get_htab_fd(SpaprMachineState *spapr)
{
    Error *local_err = NULL;

    if (spapr->htab_fd >= 0) {
        return spapr->htab_fd;
    }

    spapr->htab_fd = kvmppc_get_htab_fd(false, 0, &local_err);
    if (spapr->htab_fd < 0) {
        error_report_err(local_err);
    }

    return spapr->htab_fd;
}

void close_htab_fd(SpaprMachineState *spapr)
{
    if (spapr->htab_fd >= 0) {
        close(spapr->htab_fd);
    }
    spapr->htab_fd = -1;
}

static hwaddr spapr_hpt_mask(PPCVirtualHypervisor *vhyp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);

    return HTAB_SIZE(spapr) / HASH_PTEG_SIZE_64 - 1;
}

static target_ulong spapr_encode_hpt_for_kvm_pr(PPCVirtualHypervisor *vhyp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);

    assert(kvm_enabled());

    if (!spapr->htab) {
        return 0;
    }

    return (target_ulong)(uintptr_t)spapr->htab | (spapr->htab_shift - 18);
}

static const ppc_hash_pte64_t *spapr_map_hptes(PPCVirtualHypervisor *vhyp,
                                               hwaddr ptex, int n)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
    hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;

    if (!spapr->htab) {
        /*
         * HTAB is controlled by KVM. Fetch into temporary buffer
         */
        ppc_hash_pte64_t *hptes = g_malloc(n * HASH_PTE_SIZE_64);
        kvmppc_read_hptes(hptes, ptex, n);
        return hptes;
    }

    /*
     * HTAB is controlled by QEMU. Just point to the internally
     * accessible PTEG.
     */
    return (const ppc_hash_pte64_t *)(spapr->htab + pte_offset);
}

static void spapr_unmap_hptes(PPCVirtualHypervisor *vhyp,
                              const ppc_hash_pte64_t *hptes,
                              hwaddr ptex, int n)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);

    if (!spapr->htab) {
        g_free((void *)hptes);
    }

    /* Nothing to do for qemu managed HPT */
}

void spapr_store_hpte(PowerPCCPU *cpu, hwaddr ptex,
                      uint64_t pte0, uint64_t pte1)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(cpu->vhyp);
    hwaddr offset = ptex * HASH_PTE_SIZE_64;

    if (!spapr->htab) {
        kvmppc_write_hpte(ptex, pte0, pte1);
    } else {
        if (pte0 & HPTE64_V_VALID) {
            stq_p(spapr->htab + offset + HASH_PTE_SIZE_64 / 2, pte1);
            /*
             * When setting valid, we write PTE1 first. This ensures
             * proper synchronization with the reading code in
             * ppc_hash64_pteg_search()
             */
            smp_wmb();
            stq_p(spapr->htab + offset, pte0);
        } else {
            stq_p(spapr->htab + offset, pte0);
            /*
             * When clearing it we set PTE0 first. This ensures proper
             * synchronization with the reading code in
             * ppc_hash64_pteg_search()
             */
            smp_wmb();
            stq_p(spapr->htab + offset + HASH_PTE_SIZE_64 / 2, pte1);
        }
    }
}

static void spapr_hpte_set_c(PPCVirtualHypervisor *vhyp, hwaddr ptex,
                             uint64_t pte1)
{
    hwaddr offset = ptex * HASH_PTE_SIZE_64 + 15;
    SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);

    if (!spapr->htab) {
        /* There should always be a hash table when this is called */
        error_report("spapr_hpte_set_c called with no hash table!");
        return;
    }

    /* The HW performs a non-atomic byte update */
    stb_p(spapr->htab + offset, (pte1 & 0xff) | 0x80);
}

static void spapr_hpte_set_r(PPCVirtualHypervisor *vhyp, hwaddr ptex,
                             uint64_t pte1)
{
    hwaddr offset = ptex * HASH_PTE_SIZE_64 + 14;
    SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);

    if (!spapr->htab) {
        /* There should always be a hash table when this is called */
        error_report("spapr_hpte_set_r called with no hash table!");
        return;
    }

    /* The HW performs a non-atomic byte update */
    stb_p(spapr->htab + offset, ((pte1 >> 8) & 0xff) | 0x01);
}

int spapr_hpt_shift_for_ramsize(uint64_t ramsize)
{
    int shift;

    /* We aim for a hash table of size 1/128 the size of RAM (rounded
     * up). The PAPR recommendation is actually 1/64 of RAM size, but
     * that's much more than is needed for Linux guests */
    shift = ctz64(pow2ceil(ramsize)) - 7;
    shift = MAX(shift, 18); /* Minimum architected size */
    shift = MIN(shift, 46); /* Maximum architected size */
    return shift;
}
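
/*
 * Worked example (illustrative): for ramsize = 4 GiB,
 * ctz64(pow2ceil(4 GiB)) = 32, so shift = 32 - 7 = 25, i.e. a 32 MiB
 * hash table, exactly 1/128 of RAM and within the architected
 * [2^18, 2^46] bounds enforced by the clamps above.
 */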

void spapr_free_hpt(SpaprMachineState *spapr)
{
    g_free(spapr->htab);
    spapr->htab = NULL;
    spapr->htab_shift = 0;
    close_htab_fd(spapr);
}

int spapr_reallocate_hpt(SpaprMachineState *spapr, int shift, Error **errp)
{
    ERRP_GUARD();
    long rc;

    /* Clean up any HPT info from a previous boot */
    spapr_free_hpt(spapr);

    rc = kvmppc_reset_htab(shift);

    if (rc == -EOPNOTSUPP) {
        error_setg(errp, "HPT not supported in nested guests");
        return -EOPNOTSUPP;
    }

    if (rc < 0) {
        /* kernel-side HPT needed, but couldn't allocate one */
        error_setg_errno(errp, errno, "Failed to allocate KVM HPT of order %d",
                         shift);
        error_append_hint(errp, "Try smaller maxmem?\n");
        return -errno;
    } else if (rc > 0) {
        /* kernel-side HPT allocated */
        if (rc != shift) {
            error_setg(errp,
                       "Requested order %d HPT, but kernel allocated order %ld",
                       shift, rc);
            error_append_hint(errp, "Try smaller maxmem?\n");
            return -ENOSPC;
        }

        spapr->htab_shift = shift;
        spapr->htab = NULL;
    } else {
        /* kernel-side HPT not needed, allocate in userspace instead */
        size_t size = 1ULL << shift;
        int i;

        spapr->htab = qemu_memalign(size, size);
        memset(spapr->htab, 0, size);
        spapr->htab_shift = shift;

        for (i = 0; i < size / HASH_PTE_SIZE_64; i++) {
            DIRTY_HPTE(HPTE(spapr->htab, i));
        }
    }
    /* We're setting up a hash table, so that means we're not radix */
    spapr->patb_entry = 0;
    spapr_set_all_lpcrs(0, LPCR_HR | LPCR_UPRT);
    return 0;
}

void spapr_setup_hpt(SpaprMachineState *spapr)
{
    int hpt_shift;

    if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) {
        hpt_shift = spapr_hpt_shift_for_ramsize(MACHINE(spapr)->maxram_size);
    } else {
        uint64_t current_ram_size;

        current_ram_size = MACHINE(spapr)->ram_size + get_plugged_memory_size();
        hpt_shift = spapr_hpt_shift_for_ramsize(current_ram_size);
    }
    spapr_reallocate_hpt(spapr, hpt_shift, &error_fatal);

    if (kvm_enabled()) {
        hwaddr vrma_limit = kvmppc_vrma_limit(spapr->htab_shift);

1556         /* Check our RMA fits in the possible VRMA */
1557         if (vrma_limit < spapr->rma_size) {
1558             error_report("Unable to create %" HWADDR_PRIu
1559                          "MiB RMA (VRMA only allows %" HWADDR_PRIu "MiB)",
1560                          spapr->rma_size / MiB, vrma_limit / MiB);
1561             exit(EXIT_FAILURE);
1562         }
1563     }
1564 }
1565 
1566 static int spapr_reset_drcs(Object *child, void *opaque)
1567 {
1568     SpaprDrc *drc =
1569         (SpaprDrc *) object_dynamic_cast(child,
1570                                          TYPE_SPAPR_DR_CONNECTOR);
1571 
1572     if (drc) {
1573         spapr_drc_reset(drc);
1574     }
1575 
1576     return 0;
1577 }
1578 
1579 static void spapr_machine_reset(MachineState *machine)
1580 {
1581     SpaprMachineState *spapr = SPAPR_MACHINE(machine);
1582     PowerPCCPU *first_ppc_cpu;
1583     hwaddr fdt_addr;
1584     void *fdt;
1585     int rc;
1586 
1587     kvmppc_svm_off(&error_fatal);
1588     spapr_caps_apply(spapr);
1589 
1590     first_ppc_cpu = POWERPC_CPU(first_cpu);
1591     if (kvm_enabled() && kvmppc_has_cap_mmu_radix() &&
1592         ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, 0,
1593                               spapr->max_compat_pvr)) {
1594         /*
1595          * If using KVM with radix mode available, VCPUs can be started
1596          * without a HPT because KVM will start them in radix mode.
1597          * Set the GR bit in PATE so that we know there is no HPT.
1598          */
1599         spapr->patb_entry = PATE1_GR;
1600         spapr_set_all_lpcrs(LPCR_HR | LPCR_UPRT, LPCR_HR | LPCR_UPRT);
1601     } else {
1602         spapr_setup_hpt(spapr);
1603     }
1604 
1605     qemu_devices_reset();
1606 
1607     spapr_ovec_cleanup(spapr->ov5_cas);
1608     spapr->ov5_cas = spapr_ovec_new();
1609 
1610     ppc_set_compat_all(spapr->max_compat_pvr, &error_fatal);
1611 
1612     /*
1613      * Fix up some of the default configuration of the XIVE devices.
1614      * This must be called after the machine devices have been reset.
1615      */
1616     spapr_irq_reset(spapr, &error_fatal);
1617 
1618     /*
1619      * There is no CAS under qtest. Simulate one to please the code that
1620      * depends on spapr->ov5_cas. This is especially needed to test device
1621      * unplug, so we do that before resetting the DRCs.
1622      */
1623     if (qtest_enabled()) {
1624         spapr_ovec_cleanup(spapr->ov5_cas);
1625         spapr->ov5_cas = spapr_ovec_clone(spapr->ov5);
1626     }
1627 
1628     /* DRC reset may cause a device to be unplugged. This will cause trouble
1629      * if that device is used by another device (e.g., a running vhost backend
1630      * will crash QEMU if the DIMM holding the vring goes away). To avoid such
1631      * situations, we reset DRCs after all devices have been reset.
1632      */
1633     object_child_foreach_recursive(object_get_root(), spapr_reset_drcs, NULL);
1634 
1635     spapr_clear_pending_events(spapr);
1636 
1637     /*
1638      * We place the device tree and RTAS just below either the top of the RMA,
1639      * or just below 2GB, whichever is lower, so that it can be
1640      * processed with 32-bit real mode code if necessary
1641      */
1642     fdt_addr = MIN(spapr->rma_size, RTAS_MAX_ADDR) - FDT_MAX_SIZE;
1643 
1644     fdt = spapr_build_fdt(spapr, true, FDT_MAX_SIZE);
1645 
1646     rc = fdt_pack(fdt);
1647 
1648     /* Should only fail if we've built a corrupted tree */
1649     assert(rc == 0);
1650 
1651     /* Load the fdt */
1652     qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt));
1653     cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt));
1654     g_free(spapr->fdt_blob);
1655     spapr->fdt_size = fdt_totalsize(fdt);
1656     spapr->fdt_initial_size = spapr->fdt_size;
1657     spapr->fdt_blob = fdt;
1658 
1659     /* Set up the entry state */
1660     spapr_cpu_set_entry_state(first_ppc_cpu, SPAPR_ENTRY_POINT, 0, fdt_addr, 0);
1661     first_ppc_cpu->env.gpr[5] = 0;
1662 
1663     spapr->fwnmi_system_reset_addr = -1;
1664     spapr->fwnmi_machine_check_addr = -1;
1665     spapr->fwnmi_machine_check_interlock = -1;
1666 
1667     /* Signal all vCPUs waiting on this condition */
1668     qemu_cond_broadcast(&spapr->fwnmi_machine_check_interlock_cond);
1669 
1670     migrate_del_blocker(spapr->fwnmi_migration_blocker);
1671 }
1672 
1673 static void spapr_create_nvram(SpaprMachineState *spapr)
1674 {
1675     DeviceState *dev = qdev_new("spapr-nvram");
1676     DriveInfo *dinfo = drive_get(IF_PFLASH, 0, 0);
1677 
1678     if (dinfo) {
1679         qdev_prop_set_drive_err(dev, "drive", blk_by_legacy_dinfo(dinfo),
1680                                 &error_fatal);
1681     }
1682 
1683     qdev_realize_and_unref(dev, &spapr->vio_bus->bus, &error_fatal);
1684 
1685     spapr->nvram = (struct SpaprNvram *)dev;
1686 }
1687 
1688 static void spapr_rtc_create(SpaprMachineState *spapr)
1689 {
1690     object_initialize_child_with_props(OBJECT(spapr), "rtc", &spapr->rtc,
1691                                        sizeof(spapr->rtc), TYPE_SPAPR_RTC,
1692                                        &error_fatal, NULL);
1693     qdev_realize(DEVICE(&spapr->rtc), NULL, &error_fatal);
1694     object_property_add_alias(OBJECT(spapr), "rtc-time", OBJECT(&spapr->rtc),
1695                               "date");
1696 }
1697 
1698 /* Returns whether we want to use VGA or not */
1699 static bool spapr_vga_init(PCIBus *pci_bus, Error **errp)
1700 {
1701     switch (vga_interface_type) {
1702     case VGA_NONE:
1703         return false;
1704     case VGA_DEVICE:
1705         return true;
1706     case VGA_STD:
1707     case VGA_VIRTIO:
1708     case VGA_CIRRUS:
1709         return pci_vga_init(pci_bus) != NULL;
1710     default:
1711         error_setg(errp,
1712                    "Unsupported VGA mode, only -vga std or -vga virtio is supported");
1713         return false;
1714     }
1715 }
1716 
1717 static int spapr_pre_load(void *opaque)
1718 {
1719     int rc;
1720 
1721     rc = spapr_caps_pre_load(opaque);
1722     if (rc) {
1723         return rc;
1724     }
1725 
1726     return 0;
1727 }
1728 
1729 static int spapr_post_load(void *opaque, int version_id)
1730 {
1731     SpaprMachineState *spapr = (SpaprMachineState *)opaque;
1732     int err = 0;
1733 
1734     err = spapr_caps_post_migration(spapr);
1735     if (err) {
1736         return err;
1737     }
1738 
1739     /*
1740      * In earlier versions, there was no separate qdev for the PAPR
1741      * RTC, so the RTC offset was stored directly in sPAPREnvironment.
1742      * So when migrating from those versions, poke the incoming offset
1743      * value into the RTC device
1744      */
1745     if (version_id < 3) {
1746         err = spapr_rtc_import_offset(&spapr->rtc, spapr->rtc_offset);
1747         if (err) {
1748             return err;
1749         }
1750     }
1751 
1752     if (kvm_enabled() && spapr->patb_entry) {
1753         PowerPCCPU *cpu = POWERPC_CPU(first_cpu);
1754         bool radix = !!(spapr->patb_entry & PATE1_GR);
1755         bool gtse = !!(cpu->env.spr[SPR_LPCR] & LPCR_GTSE);
1756 
1757         /*
1758          * Update LPCR:HR and UPRT as they may not be set properly in
1759          * the stream
1760          */
1761         spapr_set_all_lpcrs(radix ? (LPCR_HR | LPCR_UPRT) : 0,
1762                             LPCR_HR | LPCR_UPRT);
1763 
1764         err = kvmppc_configure_v3_mmu(cpu, radix, gtse, spapr->patb_entry);
1765         if (err) {
1766             error_report("Process table config unsupported by the host");
1767             return -EINVAL;
1768         }
1769     }
1770 
1771     err = spapr_irq_post_load(spapr, version_id);
1772     if (err) {
1773         return err;
1774     }
1775 
1776     return err;
1777 }
1778 
1779 static int spapr_pre_save(void *opaque)
1780 {
1781     int rc;
1782 
1783     rc = spapr_caps_pre_save(opaque);
1784     if (rc) {
1785         return rc;
1786     }
1787 
1788     return 0;
1789 }
1790 
1791 static bool version_before_3(void *opaque, int version_id)
1792 {
1793     return version_id < 3;
1794 }
1795 
1796 static bool spapr_pending_events_needed(void *opaque)
1797 {
1798     SpaprMachineState *spapr = (SpaprMachineState *)opaque;
1799     return !QTAILQ_EMPTY(&spapr->pending_events);
1800 }
1801 
1802 static const VMStateDescription vmstate_spapr_event_entry = {
1803     .name = "spapr_event_log_entry",
1804     .version_id = 1,
1805     .minimum_version_id = 1,
1806     .fields = (VMStateField[]) {
1807         VMSTATE_UINT32(summary, SpaprEventLogEntry),
1808         VMSTATE_UINT32(extended_length, SpaprEventLogEntry),
1809         VMSTATE_VBUFFER_ALLOC_UINT32(extended_log, SpaprEventLogEntry, 0,
1810                                      NULL, extended_length),
1811         VMSTATE_END_OF_LIST()
1812     },
1813 };
1814 
1815 static const VMStateDescription vmstate_spapr_pending_events = {
1816     .name = "spapr_pending_events",
1817     .version_id = 1,
1818     .minimum_version_id = 1,
1819     .needed = spapr_pending_events_needed,
1820     .fields = (VMStateField[]) {
1821         VMSTATE_QTAILQ_V(pending_events, SpaprMachineState, 1,
1822                          vmstate_spapr_event_entry, SpaprEventLogEntry, next),
1823         VMSTATE_END_OF_LIST()
1824     },
1825 };
1826 
1827 static bool spapr_ov5_cas_needed(void *opaque)
1828 {
1829     SpaprMachineState *spapr = opaque;
1830     SpaprOptionVector *ov5_mask = spapr_ovec_new();
1831     bool cas_needed;
1832 
1833     /* Prior to the introduction of SpaprOptionVector, we had two option
1834      * vectors we dealt with: OV5_FORM1_AFFINITY, and OV5_DRCONF_MEMORY.
1835      * Both of these options encode machine topology into the device-tree
1836      * in such a way that the now-booted OS should still be able to interact
1837      * appropriately with QEMU regardless of what options were actually
1838      * negotiated on the source side.
1839      *
1840      * As such, we can avoid migrating the CAS-negotiated options if these
1841      * are the only options available on the current machine/platform.
1842      * Since these are the only options available for pseries-2.7 and
1843      * earlier, this allows us to maintain old->new/new->old migration
1844      * compatibility.
1845      *
1846      * For QEMU 2.8+, there are additional CAS-negotiable options available
1847      * via default pseries-2.8 machines and explicit command-line parameters.
1848      * Some of these options, like OV5_HP_EVT, *do* require QEMU to be aware
1849      * of the actual CAS-negotiated values to continue working properly. For
1850      * example, availability of memory unplug depends on knowing whether
1851      * OV5_HP_EVT was negotiated via CAS.
1852      *
1853      * Thus, for any cases where the set of available CAS-negotiable
1854      * options extends beyond OV5_FORM1_AFFINITY and OV5_DRCONF_MEMORY, we
1855      * include the CAS-negotiated options in the migration stream, unless
1856      * they only affect boot-time behaviour.
1857      */
1858     spapr_ovec_set(ov5_mask, OV5_FORM1_AFFINITY);
1859     spapr_ovec_set(ov5_mask, OV5_DRCONF_MEMORY);
1860     spapr_ovec_set(ov5_mask, OV5_DRMEM_V2);
1861 
1862     /* We need extra information if we have any bits outside the mask
1863      * defined above */
1864     cas_needed = !spapr_ovec_subset(spapr->ov5, ov5_mask);
1865 
1866     spapr_ovec_cleanup(ov5_mask);
1867 
1868     return cas_needed;
1869 }
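
/*
 * Concretely: a guest that negotiated nothing beyond FORM1_AFFINITY,
 * DRCONF_MEMORY and DRMEM_V2 is a subset of the mask above, so
 * cas_needed comes back false and the ov5_cas subsection is omitted
 * from the migration stream entirely.
 */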
1870 
1871 static const VMStateDescription vmstate_spapr_ov5_cas = {
1872     .name = "spapr_option_vector_ov5_cas",
1873     .version_id = 1,
1874     .minimum_version_id = 1,
1875     .needed = spapr_ov5_cas_needed,
1876     .fields = (VMStateField[]) {
1877         VMSTATE_STRUCT_POINTER_V(ov5_cas, SpaprMachineState, 1,
1878                                  vmstate_spapr_ovec, SpaprOptionVector),
1879         VMSTATE_END_OF_LIST()
1880     },
1881 };
1882 
1883 static bool spapr_patb_entry_needed(void *opaque)
1884 {
1885     SpaprMachineState *spapr = opaque;
1886 
1887     return !!spapr->patb_entry;
1888 }
1889 
1890 static const VMStateDescription vmstate_spapr_patb_entry = {
1891     .name = "spapr_patb_entry",
1892     .version_id = 1,
1893     .minimum_version_id = 1,
1894     .needed = spapr_patb_entry_needed,
1895     .fields = (VMStateField[]) {
1896         VMSTATE_UINT64(patb_entry, SpaprMachineState),
1897         VMSTATE_END_OF_LIST()
1898     },
1899 };
1900 
1901 static bool spapr_irq_map_needed(void *opaque)
1902 {
1903     SpaprMachineState *spapr = opaque;
1904 
1905     return spapr->irq_map && !bitmap_empty(spapr->irq_map, spapr->irq_map_nr);
1906 }
1907 
1908 static const VMStateDescription vmstate_spapr_irq_map = {
1909     .name = "spapr_irq_map",
1910     .version_id = 1,
1911     .minimum_version_id = 1,
1912     .needed = spapr_irq_map_needed,
1913     .fields = (VMStateField[]) {
1914         VMSTATE_BITMAP(irq_map, SpaprMachineState, 0, irq_map_nr),
1915         VMSTATE_END_OF_LIST()
1916     },
1917 };
1918 
1919 static bool spapr_dtb_needed(void *opaque)
1920 {
1921     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(opaque);
1922 
1923     return smc->update_dt_enabled;
1924 }
1925 
1926 static int spapr_dtb_pre_load(void *opaque)
1927 {
1928     SpaprMachineState *spapr = (SpaprMachineState *)opaque;
1929 
1930     g_free(spapr->fdt_blob);
1931     spapr->fdt_blob = NULL;
1932     spapr->fdt_size = 0;
1933 
1934     return 0;
1935 }
1936 
1937 static const VMStateDescription vmstate_spapr_dtb = {
1938     .name = "spapr_dtb",
1939     .version_id = 1,
1940     .minimum_version_id = 1,
1941     .needed = spapr_dtb_needed,
1942     .pre_load = spapr_dtb_pre_load,
1943     .fields = (VMStateField[]) {
1944         VMSTATE_UINT32(fdt_initial_size, SpaprMachineState),
1945         VMSTATE_UINT32(fdt_size, SpaprMachineState),
1946         VMSTATE_VBUFFER_ALLOC_UINT32(fdt_blob, SpaprMachineState, 0, NULL,
1947                                      fdt_size),
1948         VMSTATE_END_OF_LIST()
1949     },
1950 };
1951 
1952 static bool spapr_fwnmi_needed(void *opaque)
1953 {
1954     SpaprMachineState *spapr = (SpaprMachineState *)opaque;
1955 
1956     return spapr->fwnmi_machine_check_addr != -1;
1957 }
1958 
1959 static int spapr_fwnmi_pre_save(void *opaque)
1960 {
1961     SpaprMachineState *spapr = (SpaprMachineState *)opaque;
1962 
1963     /*
1964      * Check if machine check handling is in progress and print a
1965      * warning message.
1966      */
1967     if (spapr->fwnmi_machine_check_interlock != -1) {
1968         warn_report("A machine check is being handled during migration. The"
1969                 " handler may run and log a hardware error on the destination");
1970     }
1971 
1972     return 0;
1973 }
1974 
1975 static const VMStateDescription vmstate_spapr_fwnmi = {
1976     .name = "spapr_fwnmi",
1977     .version_id = 1,
1978     .minimum_version_id = 1,
1979     .needed = spapr_fwnmi_needed,
1980     .pre_save = spapr_fwnmi_pre_save,
1981     .fields = (VMStateField[]) {
1982         VMSTATE_UINT64(fwnmi_system_reset_addr, SpaprMachineState),
1983         VMSTATE_UINT64(fwnmi_machine_check_addr, SpaprMachineState),
1984         VMSTATE_INT32(fwnmi_machine_check_interlock, SpaprMachineState),
1985         VMSTATE_END_OF_LIST()
1986     },
1987 };
1988 
1989 static const VMStateDescription vmstate_spapr = {
1990     .name = "spapr",
1991     .version_id = 3,
1992     .minimum_version_id = 1,
1993     .pre_load = spapr_pre_load,
1994     .post_load = spapr_post_load,
1995     .pre_save = spapr_pre_save,
1996     .fields = (VMStateField[]) {
1997         /* used to be @next_irq */
1998         VMSTATE_UNUSED_BUFFER(version_before_3, 0, 4),
1999 
2000         /* RTC offset */
2001         VMSTATE_UINT64_TEST(rtc_offset, SpaprMachineState, version_before_3),
2002 
2003         VMSTATE_PPC_TIMEBASE_V(tb, SpaprMachineState, 2),
2004         VMSTATE_END_OF_LIST()
2005     },
2006     .subsections = (const VMStateDescription*[]) {
2007         &vmstate_spapr_ov5_cas,
2008         &vmstate_spapr_patb_entry,
2009         &vmstate_spapr_pending_events,
2010         &vmstate_spapr_cap_htm,
2011         &vmstate_spapr_cap_vsx,
2012         &vmstate_spapr_cap_dfp,
2013         &vmstate_spapr_cap_cfpc,
2014         &vmstate_spapr_cap_sbbc,
2015         &vmstate_spapr_cap_ibs,
2016         &vmstate_spapr_cap_hpt_maxpagesize,
2017         &vmstate_spapr_irq_map,
2018         &vmstate_spapr_cap_nested_kvm_hv,
2019         &vmstate_spapr_dtb,
2020         &vmstate_spapr_cap_large_decr,
2021         &vmstate_spapr_cap_ccf_assist,
2022         &vmstate_spapr_cap_fwnmi,
2023         &vmstate_spapr_fwnmi,
2024         NULL
2025     }
2026 };
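
/*
 * Note on the subsections above: each is only emitted when its
 * .needed callback returns true (e.g. vmstate_spapr_patb_entry is
 * sent only while patb_entry is non-zero), which is what keeps
 * migration to older QEMU versions working whenever the corresponding
 * feature is unused.
 */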
2027 
2028 static int htab_save_setup(QEMUFile *f, void *opaque)
2029 {
2030     SpaprMachineState *spapr = opaque;
2031 
2032     /* "Iteration" header */
2033     if (!spapr->htab_shift) {
2034         qemu_put_be32(f, -1);
2035     } else {
2036         qemu_put_be32(f, spapr->htab_shift);
2037     }
2038 
2039     if (spapr->htab) {
2040         spapr->htab_save_index = 0;
2041         spapr->htab_first_pass = true;
2042     } else {
2043         if (spapr->htab_shift) {
2044             assert(kvm_enabled());
2045         }
2046     }
2047 
2048 
2049     return 0;
2050 }
2051 
2052 static void htab_save_chunk(QEMUFile *f, SpaprMachineState *spapr,
2053                             int chunkstart, int n_valid, int n_invalid)
2054 {
2055     qemu_put_be32(f, chunkstart);
2056     qemu_put_be16(f, n_valid);
2057     qemu_put_be16(f, n_invalid);
2058     qemu_put_buffer(f, HPTE(spapr->htab, chunkstart),
2059                     HASH_PTE_SIZE_64 * n_valid);
2060 }
2061 
2062 static void htab_save_end_marker(QEMUFile *f)
2063 {
2064     qemu_put_be32(f, 0);
2065     qemu_put_be16(f, 0);
2066     qemu_put_be16(f, 0);
2067 }
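
/*
 * Rough sketch of the htab stream layout produced by the save path
 * below and consumed by htab_load():
 *
 *   setup:         be32 htab_shift             (-1 if there is no HPT)
 *   per iteration: be32 0                      (-1 if there is no HPT)
 *                  zero or more chunks:
 *                      be32 index, be16 n_valid, be16 n_invalid,
 *                      then n_valid * HASH_PTE_SIZE_64 bytes of HPTEs
 *                  end marker: be32 0, be16 0, be16 0
 */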
2068 
2069 static void htab_save_first_pass(QEMUFile *f, SpaprMachineState *spapr,
2070                                  int64_t max_ns)
2071 {
2072     bool has_timeout = max_ns != -1;
2073     int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
2074     int index = spapr->htab_save_index;
2075     int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2076 
2077     assert(spapr->htab_first_pass);
2078 
2079     do {
2080         int chunkstart;
2081 
2082         /* Consume invalid HPTEs */
2083         while ((index < htabslots)
2084                && !HPTE_VALID(HPTE(spapr->htab, index))) {
2085             CLEAN_HPTE(HPTE(spapr->htab, index));
2086             index++;
2087         }
2088 
2089         /* Consume valid HPTEs */
2090         chunkstart = index;
2091         while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
2092                && HPTE_VALID(HPTE(spapr->htab, index))) {
2093             CLEAN_HPTE(HPTE(spapr->htab, index));
2094             index++;
2095         }
2096 
2097         if (index > chunkstart) {
2098             int n_valid = index - chunkstart;
2099 
2100             htab_save_chunk(f, spapr, chunkstart, n_valid, 0);
2101 
2102             if (has_timeout &&
2103                 (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
2104                 break;
2105             }
2106         }
2107     } while ((index < htabslots) && !qemu_file_rate_limit(f));
2108 
2109     if (index >= htabslots) {
2110         assert(index == htabslots);
2111         index = 0;
2112         spapr->htab_first_pass = false;
2113     }
2114     spapr->htab_save_index = index;
2115 }
2116 
2117 static int htab_save_later_pass(QEMUFile *f, SpaprMachineState *spapr,
2118                                 int64_t max_ns)
2119 {
2120     bool final = max_ns < 0;
2121     int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
2122     int examined = 0, sent = 0;
2123     int index = spapr->htab_save_index;
2124     int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2125 
2126     assert(!spapr->htab_first_pass);
2127 
2128     do {
2129         int chunkstart, invalidstart;
2130 
2131         /* Consume non-dirty HPTEs */
2132         while ((index < htabslots)
2133                && !HPTE_DIRTY(HPTE(spapr->htab, index))) {
2134             index++;
2135             examined++;
2136         }
2137 
2138         chunkstart = index;
2139         /* Consume valid dirty HPTEs */
2140         while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
2141                && HPTE_DIRTY(HPTE(spapr->htab, index))
2142                && HPTE_VALID(HPTE(spapr->htab, index))) {
2143             CLEAN_HPTE(HPTE(spapr->htab, index));
2144             index++;
2145             examined++;
2146         }
2147 
2148         invalidstart = index;
2149         /* Consume invalid dirty HPTEs */
2150         while ((index < htabslots) && (index - invalidstart < USHRT_MAX)
2151                && HPTE_DIRTY(HPTE(spapr->htab, index))
2152                && !HPTE_VALID(HPTE(spapr->htab, index))) {
2153             CLEAN_HPTE(HPTE(spapr->htab, index));
2154             index++;
2155             examined++;
2156         }
2157 
2158         if (index > chunkstart) {
2159             int n_valid = invalidstart - chunkstart;
2160             int n_invalid = index - invalidstart;
2161 
2162             htab_save_chunk(f, spapr, chunkstart, n_valid, n_invalid);
2163             sent += index - chunkstart;
2164 
2165             if (!final && (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
2166                 break;
2167             }
2168         }
2169 
2170         if (examined >= htabslots) {
2171             break;
2172         }
2173 
2174         if (index >= htabslots) {
2175             assert(index == htabslots);
2176             index = 0;
2177         }
2178     } while ((examined < htabslots) && (!qemu_file_rate_limit(f) || final));
2179 
2180     if (index >= htabslots) {
2181         assert(index == htabslots);
2182         index = 0;
2183     }
2184 
2185     spapr->htab_save_index = index;
2186 
2187     return (examined >= htabslots) && (sent == 0) ? 1 : 0;
2188 }
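
/*
 * The first pass above streams every valid HPTE once; later passes
 * only stream HPTEs dirtied since they were last sent. Returning 1
 * (all slots examined, nothing sent) propagates up through
 * htab_save_iterate() to tell the migration core that the HPT stream
 * has converged.
 */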
2189 
2190 #define MAX_ITERATION_NS    5000000 /* 5 ms */
2191 #define MAX_KVM_BUF_SIZE    2048
2192 
2193 static int htab_save_iterate(QEMUFile *f, void *opaque)
2194 {
2195     SpaprMachineState *spapr = opaque;
2196     int fd;
2197     int rc = 0;
2198 
2199     /* Iteration header */
2200     if (!spapr->htab_shift) {
2201         qemu_put_be32(f, -1);
2202         return 1;
2203     } else {
2204         qemu_put_be32(f, 0);
2205     }
2206 
2207     if (!spapr->htab) {
2208         assert(kvm_enabled());
2209 
2210         fd = get_htab_fd(spapr);
2211         if (fd < 0) {
2212             return fd;
2213         }
2214 
2215         rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, MAX_ITERATION_NS);
2216         if (rc < 0) {
2217             return rc;
2218         }
2219     } else if (spapr->htab_first_pass) {
2220         htab_save_first_pass(f, spapr, MAX_ITERATION_NS);
2221     } else {
2222         rc = htab_save_later_pass(f, spapr, MAX_ITERATION_NS);
2223     }
2224 
2225     htab_save_end_marker(f);
2226 
2227     return rc;
2228 }
2229 
2230 static int htab_save_complete(QEMUFile *f, void *opaque)
2231 {
2232     SpaprMachineState *spapr = opaque;
2233     int fd;
2234 
2235     /* Iteration header */
2236     if (!spapr->htab_shift) {
2237         qemu_put_be32(f, -1);
2238         return 0;
2239     } else {
2240         qemu_put_be32(f, 0);
2241     }
2242 
2243     if (!spapr->htab) {
2244         int rc;
2245 
2246         assert(kvm_enabled());
2247 
2248         fd = get_htab_fd(spapr);
2249         if (fd < 0) {
2250             return fd;
2251         }
2252 
2253         rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, -1);
2254         if (rc < 0) {
2255             return rc;
2256         }
2257     } else {
2258         if (spapr->htab_first_pass) {
2259             htab_save_first_pass(f, spapr, -1);
2260         }
2261         htab_save_later_pass(f, spapr, -1);
2262     }
2263 
2264     /* End marker */
2265     htab_save_end_marker(f);
2266 
2267     return 0;
2268 }
2269 
2270 static int htab_load(QEMUFile *f, void *opaque, int version_id)
2271 {
2272     SpaprMachineState *spapr = opaque;
2273     uint32_t section_hdr;
2274     int fd = -1;
2275     Error *local_err = NULL;
2276 
2277     if (version_id != 1) {
2278         error_report("htab_load() bad version");
2279         return -EINVAL;
2280     }
2281 
2282     section_hdr = qemu_get_be32(f);
2283 
2284     if (section_hdr == -1) {
2285         spapr_free_hpt(spapr);
2286         return 0;
2287     }
2288 
2289     if (section_hdr) {
2290         int ret;
2291 
2292         /* First section gives the htab size */
2293         ret = spapr_reallocate_hpt(spapr, section_hdr, &local_err);
2294         if (ret < 0) {
2295             error_report_err(local_err);
2296             return ret;
2297         }
2298         return 0;
2299     }
2300 
2301     if (!spapr->htab) {
2302         assert(kvm_enabled());
2303 
2304         fd = kvmppc_get_htab_fd(true, 0, &local_err);
2305         if (fd < 0) {
2306             error_report_err(local_err);
2307             return fd;
2308         }
2309     }
2310 
2311     while (true) {
2312         uint32_t index;
2313         uint16_t n_valid, n_invalid;
2314 
2315         index = qemu_get_be32(f);
2316         n_valid = qemu_get_be16(f);
2317         n_invalid = qemu_get_be16(f);
2318 
2319         if ((index == 0) && (n_valid == 0) && (n_invalid == 0)) {
2320             /* End of Stream */
2321             break;
2322         }
2323 
2324         if ((index + n_valid + n_invalid) >
2325             (HTAB_SIZE(spapr) / HASH_PTE_SIZE_64)) {
2326             /* Bad index in stream */
2327             error_report(
2328                 "htab_load() bad index %d (%hd+%hd entries) in htab stream (htab_shift=%d)",
2329                 index, n_valid, n_invalid, spapr->htab_shift);
2330             return -EINVAL;
2331         }
2332 
2333         if (spapr->htab) {
2334             if (n_valid) {
2335                 qemu_get_buffer(f, HPTE(spapr->htab, index),
2336                                 HASH_PTE_SIZE_64 * n_valid);
2337             }
2338             if (n_invalid) {
2339                 memset(HPTE(spapr->htab, index + n_valid), 0,
2340                        HASH_PTE_SIZE_64 * n_invalid);
2341             }
2342         } else {
2343             int rc;
2344 
2345             assert(fd >= 0);
2346 
2347             rc = kvmppc_load_htab_chunk(f, fd, index, n_valid, n_invalid,
2348                                         &local_err);
2349             if (rc < 0) {
2350                 error_report_err(local_err);
2351                 return rc;
2352             }
2353         }
2354     }
2355 
2356     if (!spapr->htab) {
2357         assert(fd >= 0);
2358         close(fd);
2359     }
2360 
2361     return 0;
2362 }
2363 
2364 static void htab_save_cleanup(void *opaque)
2365 {
2366     SpaprMachineState *spapr = opaque;
2367 
2368     close_htab_fd(spapr);
2369 }
2370 
2371 static SaveVMHandlers savevm_htab_handlers = {
2372     .save_setup = htab_save_setup,
2373     .save_live_iterate = htab_save_iterate,
2374     .save_live_complete_precopy = htab_save_complete,
2375     .save_cleanup = htab_save_cleanup,
2376     .load_state = htab_load,
2377 };
2378 
2379 static void spapr_boot_set(void *opaque, const char *boot_device,
2380                            Error **errp)
2381 {
2382     MachineState *machine = MACHINE(opaque);
2383     machine->boot_order = g_strdup(boot_device);
2384 }
2385 
2386 static void spapr_create_lmb_dr_connectors(SpaprMachineState *spapr)
2387 {
2388     MachineState *machine = MACHINE(spapr);
2389     uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
2390     uint32_t nr_lmbs = (machine->maxram_size - machine->ram_size)/lmb_size;
2391     int i;
2392 
2393     for (i = 0; i < nr_lmbs; i++) {
2394         uint64_t addr;
2395 
2396         addr = i * lmb_size + machine->device_memory->base;
2397         spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_LMB,
2398                                addr / lmb_size);
2399     }
2400 }
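
/*
 * Example of the index scheme above (base address illustrative): with
 * lmb_size == 256 MiB and device_memory->base at 0x80000000, the first
 * LMB gets DRC index 0x80000000 / 0x10000000 == 8, the next 9, and so
 * on -- the DRC index is simply the LMB's address in units of
 * lmb_size.
 */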
2401 
2402 /*
2403  * If the RAM size, maxmem size or an individual node's memory size isn't
2404  * aligned to SPAPR_MEMORY_BLOCK_SIZE (256 MiB), refuse to start the guest,
2405  * since we can't support such unaligned sizes with DRCONF_MEMORY.
2406  */
2407 static void spapr_validate_node_memory(MachineState *machine, Error **errp)
2408 {
2409     int i;
2410 
2411     if (machine->ram_size % SPAPR_MEMORY_BLOCK_SIZE) {
2412         error_setg(errp, "Memory size 0x" RAM_ADDR_FMT
2413                    " is not aligned to %" PRIu64 " MiB",
2414                    machine->ram_size,
2415                    SPAPR_MEMORY_BLOCK_SIZE / MiB);
2416         return;
2417     }
2418 
2419     if (machine->maxram_size % SPAPR_MEMORY_BLOCK_SIZE) {
2420         error_setg(errp, "Maximum memory size 0x" RAM_ADDR_FMT
2421                    " is not aligned to %" PRIu64 " MiB",
2422                    machine->maxram_size,
2423                    SPAPR_MEMORY_BLOCK_SIZE / MiB);
2424         return;
2425     }
2426 
2427     for (i = 0; i < machine->numa_state->num_nodes; i++) {
2428         if (machine->numa_state->nodes[i].node_mem % SPAPR_MEMORY_BLOCK_SIZE) {
2429             error_setg(errp,
2430                        "Node %d memory size 0x%" PRIx64
2431                        " is not aligned to %" PRIu64 " MiB",
2432                        i, machine->numa_state->nodes[i].node_mem,
2433                        SPAPR_MEMORY_BLOCK_SIZE / MiB);
2434             return;
2435         }
2436     }
2437 }
2438 
2439 /* find cpu slot in machine->possible_cpus by core_id */
2440 static CPUArchId *spapr_find_cpu_slot(MachineState *ms, uint32_t id, int *idx)
2441 {
2442     int index = id / ms->smp.threads;
2443 
2444     if (index >= ms->possible_cpus->len) {
2445         return NULL;
2446     }
2447     if (idx) {
2448         *idx = index;
2449     }
2450     return &ms->possible_cpus->cpus[index];
2451 }
2452 
2453 static void spapr_set_vsmt_mode(SpaprMachineState *spapr, Error **errp)
2454 {
2455     MachineState *ms = MACHINE(spapr);
2456     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
2457     Error *local_err = NULL;
2458     bool vsmt_user = !!spapr->vsmt;
2459     int kvm_smt = kvmppc_smt_threads();
2460     int ret;
2461     unsigned int smp_threads = ms->smp.threads;
2462 
2463     if (!kvm_enabled() && (smp_threads > 1)) {
2464         error_setg(errp, "TCG cannot support more than 1 thread/core "
2465                    "on a pseries machine");
2466         return;
2467     }
2468     if (!is_power_of_2(smp_threads)) {
2469         error_setg(errp, "Cannot support %d threads/core on a pseries "
2470                    "machine because it must be a power of 2", smp_threads);
2471         return;
2472     }
2473 
2474     /* Determine the VSMT mode to use: */
2475     if (vsmt_user) {
2476         if (spapr->vsmt < smp_threads) {
2477             error_setg(errp, "Cannot support VSMT mode %d"
2478                        " because it must be >= threads/core (%d)",
2479                        spapr->vsmt, smp_threads);
2480             return;
2481         }
2482         /* In this case, spapr->vsmt has been set by the command line */
2483     } else if (!smc->smp_threads_vsmt) {
2484         /*
2485          * Default VSMT value is tricky, because we need it to be as
2486          * consistent as possible (for migration), but this requires
2487          * changing it for at least some existing cases.  We pick 8 as
2488          * the value that we'd get with KVM on POWER8, the
2489          * overwhelmingly common case in production systems.
2490          */
2491         spapr->vsmt = MAX(8, smp_threads);
2492     } else {
2493         spapr->vsmt = smp_threads;
2494     }
2495 
2496     /* KVM: If necessary, set the SMT mode: */
2497     if (kvm_enabled() && (spapr->vsmt != kvm_smt)) {
2498         ret = kvmppc_set_smt_threads(spapr->vsmt);
2499         if (ret) {
2500             /* Looks like KVM isn't able to change VSMT mode */
2501             error_setg(&local_err,
2502                        "Failed to set KVM's VSMT mode to %d (errno %d)",
2503                        spapr->vsmt, ret);
2504             /* We can live with that if the default one is big enough
2505              * for the number of threads, and a submultiple of the one
2506              * we want.  In this case we'll waste some vcpu ids, but
2507              * behaviour will be correct */
2508             if ((kvm_smt >= smp_threads) && ((spapr->vsmt % kvm_smt) == 0)) {
2509                 warn_report_err(local_err);
2510             } else {
2511                 if (!vsmt_user) {
2512                     error_append_hint(&local_err,
2513                                       "On PPC, a VM with %d threads/core"
2514                                       " on a host with %d threads/core"
2515                                       " requires the use of VSMT mode %d.\n",
2516                                       smp_threads, kvm_smt, spapr->vsmt);
2517                 }
2518                 kvmppc_error_append_smt_possible_hint(&local_err);
2519                 error_propagate(errp, local_err);
2520             }
2521         }
2522     }
2523     /* else TCG: nothing to do currently */
2524 }
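
/*
 * Example of what VSMT buys us (numbers illustrative): with
 * smp.threads == 4 and spapr->vsmt == 8, the VCPU ids of successive
 * cores are spaced 8 apart (core 0 uses ids 0..3, core 1 starts at
 * id 8), so the id layout stays stable even when the host's own SMT
 * mode differs from the guest's threads/core.
 */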
2525 
2526 static void spapr_init_cpus(SpaprMachineState *spapr)
2527 {
2528     MachineState *machine = MACHINE(spapr);
2529     MachineClass *mc = MACHINE_GET_CLASS(machine);
2530     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
2531     const char *type = spapr_get_cpu_core_type(machine->cpu_type);
2532     const CPUArchIdList *possible_cpus;
2533     unsigned int smp_cpus = machine->smp.cpus;
2534     unsigned int smp_threads = machine->smp.threads;
2535     unsigned int max_cpus = machine->smp.max_cpus;
2536     int boot_cores_nr = smp_cpus / smp_threads;
2537     int i;
2538 
2539     possible_cpus = mc->possible_cpu_arch_ids(machine);
2540     if (mc->has_hotpluggable_cpus) {
2541         if (smp_cpus % smp_threads) {
2542             error_report("smp_cpus (%u) must be multiple of threads (%u)",
2543                          smp_cpus, smp_threads);
2544             exit(1);
2545         }
2546         if (max_cpus % smp_threads) {
2547             error_report("max_cpus (%u) must be multiple of threads (%u)",
2548                          max_cpus, smp_threads);
2549             exit(1);
2550         }
2551     } else {
2552         if (max_cpus != smp_cpus) {
2553             error_report("This machine version does not support CPU hotplug");
2554             exit(1);
2555         }
2556         boot_cores_nr = possible_cpus->len;
2557     }
2558 
2559     if (smc->pre_2_10_has_unused_icps) {
2560         int i;
2561 
2562         for (i = 0; i < spapr_max_server_number(spapr); i++) {
2563             /* Dummy entries get deregistered when real ICPState objects
2564              * are registered during CPU core hotplug.
2565              */
2566             pre_2_10_vmstate_register_dummy_icp(i);
2567         }
2568     }
2569 
2570     for (i = 0; i < possible_cpus->len; i++) {
2571         int core_id = i * smp_threads;
2572 
2573         if (mc->has_hotpluggable_cpus) {
2574             spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_CPU,
2575                                    spapr_vcpu_id(spapr, core_id));
2576         }
2577 
2578         if (i < boot_cores_nr) {
2579             Object *core  = object_new(type);
2580             int nr_threads = smp_threads;
2581 
2582             /* Handle the partially filled core for older machine types */
2583             if ((i + 1) * smp_threads >= smp_cpus) {
2584                 nr_threads = smp_cpus - i * smp_threads;
2585             }
2586 
2587             object_property_set_int(core, "nr-threads", nr_threads,
2588                                     &error_fatal);
2589             object_property_set_int(core, CPU_CORE_PROP_CORE_ID, core_id,
2590                                     &error_fatal);
2591             qdev_realize(DEVICE(core), NULL, &error_fatal);
2592 
2593             object_unref(core);
2594         }
2595     }
2596 }
2597 
2598 static PCIHostState *spapr_create_default_phb(void)
2599 {
2600     DeviceState *dev;
2601 
2602     dev = qdev_new(TYPE_SPAPR_PCI_HOST_BRIDGE);
2603     qdev_prop_set_uint32(dev, "index", 0);
2604     sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
2605 
2606     return PCI_HOST_BRIDGE(dev);
2607 }
2608 
2609 static hwaddr spapr_rma_size(SpaprMachineState *spapr, Error **errp)
2610 {
2611     MachineState *machine = MACHINE(spapr);
2612     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
2613     hwaddr rma_size = machine->ram_size;
2614     hwaddr node0_size = spapr_node0_size(machine);
2615 
2616     /* RMA has to fit in the first NUMA node */
2617     rma_size = MIN(rma_size, node0_size);
2618 
2619     /*
2620      * VRMA access is via a special 1TiB SLB mapping, so the RMA can
2621      * never exceed that
2622      */
2623     rma_size = MIN(rma_size, 1 * TiB);
2624 
2625     /*
2626      * Clamp the RMA size based on machine type.  This is for
2627      * migration compatibility with older qemu versions, which limited
2628      * the RMA size for complicated and mostly bad reasons.
2629      */
2630     if (smc->rma_limit) {
2631         rma_size = MIN(rma_size, smc->rma_limit);
2632     }
2633 
2634     if (rma_size < MIN_RMA_SLOF) {
2635         error_setg(errp,
2636                    "pSeries SLOF firmware requires >= %" HWADDR_PRIu
2637                    "MiB guest RMA (Real Mode Area memory)",
2638                    MIN_RMA_SLOF / MiB);
2639         return 0;
2640     }
2641 
2642     return rma_size;
2643 }
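
/*
 * Putting the clamps above together (numbers illustrative): a guest
 * with 16 GiB of RAM whose first NUMA node holds 4 GiB ends up with a
 * 4 GiB RMA -- node0_size wins, the 1 TiB VRMA ceiling is irrelevant,
 * and unless smc->rma_limit or the MIN_RMA_SLOF check intervenes that
 * is the final value.
 */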
2644 
2645 static void spapr_create_nvdimm_dr_connectors(SpaprMachineState *spapr)
2646 {
2647     MachineState *machine = MACHINE(spapr);
2648     int i;
2649 
2650     for (i = 0; i < machine->ram_slots; i++) {
2651         spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_PMEM, i);
2652     }
2653 }
2654 
2655 /* pSeries LPAR / sPAPR hardware init */
2656 static void spapr_machine_init(MachineState *machine)
2657 {
2658     SpaprMachineState *spapr = SPAPR_MACHINE(machine);
2659     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
2660     MachineClass *mc = MACHINE_GET_CLASS(machine);
2661     const char *bios_name = machine->firmware ?: FW_FILE_NAME;
2662     const char *kernel_filename = machine->kernel_filename;
2663     const char *initrd_filename = machine->initrd_filename;
2664     PCIHostState *phb;
2665     int i;
2666     MemoryRegion *sysmem = get_system_memory();
2667     long load_limit, fw_size;
2668     char *filename;
2669     Error *resize_hpt_err = NULL;
2670 
2671     msi_nonbroken = true;
2672 
2673     QLIST_INIT(&spapr->phbs);
2674     QTAILQ_INIT(&spapr->pending_dimm_unplugs);
2675 
2676     /* Determine capabilities to run with */
2677     spapr_caps_init(spapr);
2678 
2679     kvmppc_check_papr_resize_hpt(&resize_hpt_err);
2680     if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DEFAULT) {
2681         /*
2682          * If the user explicitly requested a mode we should either
2683          * supply it, or fail completely (which we do below).  But if
2684          * it's not set explicitly, we reset our mode to something
2685          * that works
2686          */
2687         if (resize_hpt_err) {
2688             spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED;
2689             error_free(resize_hpt_err);
2690             resize_hpt_err = NULL;
2691         } else {
2692             spapr->resize_hpt = smc->resize_hpt_default;
2693         }
2694     }
2695 
2696     assert(spapr->resize_hpt != SPAPR_RESIZE_HPT_DEFAULT);
2697 
2698     if ((spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) && resize_hpt_err) {
2699         /*
2700          * User requested HPT resize, but this host can't supply it.  Bail out
2701          */
2702         error_report_err(resize_hpt_err);
2703         exit(1);
2704     }
2705     error_free(resize_hpt_err);
2706 
2707     spapr->rma_size = spapr_rma_size(spapr, &error_fatal);
2708 
2709     /* Set up a load limit for the ramdisk, leaving room for SLOF and FDT */
2710     load_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR) - FW_OVERHEAD;
2711 
2712     /*
2713      * VSMT must be set in order to be able to compute VCPU ids, i.e. to
2714      * call spapr_max_server_number() or spapr_vcpu_id().
2715      */
2716     spapr_set_vsmt_mode(spapr, &error_fatal);
2717 
2718     /* Set up Interrupt Controller before we create the VCPUs */
2719     spapr_irq_init(spapr, &error_fatal);
2720 
2721     /* Set up containers for ibm,client-architecture-support negotiated options
2722      */
2723     spapr->ov5 = spapr_ovec_new();
2724     spapr->ov5_cas = spapr_ovec_new();
2725 
2726     if (smc->dr_lmb_enabled) {
2727         spapr_ovec_set(spapr->ov5, OV5_DRCONF_MEMORY);
2728         spapr_validate_node_memory(machine, &error_fatal);
2729     }
2730 
2731     spapr_ovec_set(spapr->ov5, OV5_FORM1_AFFINITY);
2732 
2733     /* advertise support for dedicated HP event source to guests */
2734     if (spapr->use_hotplug_event_source) {
2735         spapr_ovec_set(spapr->ov5, OV5_HP_EVT);
2736     }
2737 
2738     /* advertise support for HPT resizing */
2739     if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) {
2740         spapr_ovec_set(spapr->ov5, OV5_HPT_RESIZE);
2741     }
2742 
2743     /* advertise support for ibm,dynamic-memory-v2 */
2744     spapr_ovec_set(spapr->ov5, OV5_DRMEM_V2);
2745 
2746     /* advertise XIVE on POWER9 machines */
2747     if (spapr->irq->xive) {
2748         spapr_ovec_set(spapr->ov5, OV5_XIVE_EXPLOIT);
2749     }
2750 
2751     /* init CPUs */
2752     spapr_init_cpus(spapr);
2753 
2754     /*
2755      * Check that we don't have a memory-less/cpu-less NUMA node.
2756      * Firmware relies on the existing memory/cpu topology to provide the
2757      * NUMA topology to the kernel.
2758      * The Linux kernel also needs to know the NUMA topology at start
2759      * to be able to hotplug CPUs later.
2760      */
2761     if (machine->numa_state->num_nodes) {
2762         for (i = 0; i < machine->numa_state->num_nodes; ++i) {
2763             /* check for memory-less node */
2764             if (machine->numa_state->nodes[i].node_mem == 0) {
2765                 CPUState *cs;
2766                 int found = 0;
2767                 /* check for cpu-less node */
2768                 CPU_FOREACH(cs) {
2769                     PowerPCCPU *cpu = POWERPC_CPU(cs);
2770                     if (cpu->node_id == i) {
2771                         found = 1;
2772                         break;
2773                     }
2774                 }
2775                 /* memory-less and cpu-less node */
2776                 if (!found) {
2777                     error_report(
2778                        "Memory-less/cpu-less nodes are not supported (node %d)",
2779                                  i);
2780                     exit(1);
2781                 }
2782             }
2783         }
2784 
2785     }
2786 
2787     /*
2788      * NVLink2-connected GPU RAM needs to be placed on a separate NUMA node.
2789      * We assign a new numa ID per GPU in spapr_pci_collect_nvgpu() which is
2790      * called from vPHB reset handler so we initialize the counter here.
2791      * called from the vPHB reset handler, so we initialize the counter here.
2792      * must be equally distant from any other node.
2793      * The final value of spapr->gpu_numa_id is going to be written to
2794      * max-associativity-domains in spapr_build_fdt().
2795      */
2796     spapr->gpu_numa_id = MAX(1, machine->numa_state->num_nodes);
2797 
2798     /* Init numa_assoc_array */
2799     spapr_numa_associativity_init(spapr, machine);
2800 
2801     if ((!kvm_enabled() || kvmppc_has_cap_mmu_radix()) &&
2802         ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, 0,
2803                               spapr->max_compat_pvr)) {
2804         spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_300);
2805         /* KVM and TCG always allow GTSE with radix... */
2806         spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_GTSE);
2807     }
2808     /* ... but not with hash (currently). */
2809 
2810     if (kvm_enabled()) {
2811         /* Enable H_LOGICAL_CI_* so SLOF can talk to in-kernel devices */
2812         kvmppc_enable_logical_ci_hcalls();
2813         kvmppc_enable_set_mode_hcall();
2814 
2815         /* H_CLEAR_MOD/_REF are mandatory in PAPR, but off by default */
2816         kvmppc_enable_clear_ref_mod_hcalls();
2817 
2818         /* Enable H_PAGE_INIT */
2819         kvmppc_enable_h_page_init();
2820     }
2821 
2822     /* map RAM */
2823     memory_region_add_subregion(sysmem, 0, machine->ram);
2824 
2825     /* always allocate the device memory information */
2826     machine->device_memory = g_malloc0(sizeof(*machine->device_memory));
2827 
2828     /* initialize hotplug memory address space */
2829     if (machine->ram_size < machine->maxram_size) {
2830         ram_addr_t device_mem_size = machine->maxram_size - machine->ram_size;
2831         /*
2832          * Limit the number of hotpluggable memory slots to half the number of
2833          * slots that KVM supports, leaving the other half for PCI and other
2834          * devices. However ensure that number of slots doesn't drop below 32.
2835          */
2836         int max_memslots = kvm_enabled() ? kvm_get_max_memslots() / 2 :
2837                            SPAPR_MAX_RAM_SLOTS;
2838 
2839         if (max_memslots < SPAPR_MAX_RAM_SLOTS) {
2840             max_memslots = SPAPR_MAX_RAM_SLOTS;
2841         }
2842         if (machine->ram_slots > max_memslots) {
2843             error_report("Specified number of memory slots %"
2844                          PRIu64" exceeds max supported %d",
2845                          machine->ram_slots, max_memslots);
2846             exit(1);
2847         }
2848 
2849         machine->device_memory->base = ROUND_UP(machine->ram_size,
2850                                                 SPAPR_DEVICE_MEM_ALIGN);
2851         memory_region_init(&machine->device_memory->mr, OBJECT(spapr),
2852                            "device-memory", device_mem_size);
2853         memory_region_add_subregion(sysmem, machine->device_memory->base,
2854                                     &machine->device_memory->mr);
2855     }
2856 
2857     if (smc->dr_lmb_enabled) {
2858         spapr_create_lmb_dr_connectors(spapr);
2859     }
2860 
2861     if (spapr_get_cap(spapr, SPAPR_CAP_FWNMI) == SPAPR_CAP_ON) {
2862         /* Create the error string for live migration blocker */
2863         error_setg(&spapr->fwnmi_migration_blocker,
2864             "A machine check is being handled during migration. The handler"
2865             " may run and log a hardware error on the destination");
2866     }
2867 
2868     if (mc->nvdimm_supported) {
2869         spapr_create_nvdimm_dr_connectors(spapr);
2870     }
2871 
2872     /* Set up RTAS event infrastructure */
2873     spapr_events_init(spapr);
2874 
2875     /* Set up the RTC RTAS interfaces */
2876     spapr_rtc_create(spapr);
2877 
2878     /* Set up VIO bus */
2879     spapr->vio_bus = spapr_vio_bus_init();
2880 
2881     for (i = 0; serial_hd(i); i++) {
2882         spapr_vty_create(spapr->vio_bus, serial_hd(i));
2883     }
2884 
2885     /* We always have at least the nvram device on VIO */
2886     spapr_create_nvram(spapr);
2887 
2888     /*
2889      * Set up hotplug / dynamic-reconfiguration connectors. Top-level
2890      * connectors (described in the root DT node's "ibm,drc-types" property)
2891      * are pre-initialized here. Additional child connectors (such as the
2892      * connectors for a PHB's PCI slots) are added as needed during their
2893      * parent's realization.
2894      */
2895     if (smc->dr_phb_enabled) {
2896         for (i = 0; i < SPAPR_MAX_PHBS; i++) {
2897             spapr_dr_connector_new(OBJECT(machine), TYPE_SPAPR_DRC_PHB, i);
2898         }
2899     }
2900 
2901     /* Set up PCI */
2902     spapr_pci_rtas_init();
2903 
2904     phb = spapr_create_default_phb();
2905 
2906     for (i = 0; i < nb_nics; i++) {
2907         NICInfo *nd = &nd_table[i];
2908 
2909         if (!nd->model) {
2910             nd->model = g_strdup("spapr-vlan");
2911         }
2912 
2913         if (g_str_equal(nd->model, "spapr-vlan") ||
2914             g_str_equal(nd->model, "ibmveth")) {
2915             spapr_vlan_create(spapr->vio_bus, nd);
2916         } else {
2917             pci_nic_init_nofail(&nd_table[i], phb->bus, nd->model, NULL);
2918         }
2919     }
2920 
2921     for (i = 0; i <= drive_get_max_bus(IF_SCSI); i++) {
2922         spapr_vscsi_create(spapr->vio_bus);
2923     }
2924 
2925     /* Graphics */
2926     if (spapr_vga_init(phb->bus, &error_fatal)) {
2927         spapr->has_graphics = true;
2928         machine->usb |= defaults_enabled() && !machine->usb_disabled;
2929     }
2930 
2931     if (machine->usb) {
2932         if (smc->use_ohci_by_default) {
2933             pci_create_simple(phb->bus, -1, "pci-ohci");
2934         } else {
2935             pci_create_simple(phb->bus, -1, "nec-usb-xhci");
2936         }
2937 
2938         if (spapr->has_graphics) {
2939             USBBus *usb_bus = usb_bus_find(-1);
2940 
2941             usb_create_simple(usb_bus, "usb-kbd");
2942             usb_create_simple(usb_bus, "usb-mouse");
2943         }
2944     }
2945 
2946     if (kernel_filename) {
2947         spapr->kernel_size = load_elf(kernel_filename, NULL,
2948                                       translate_kernel_address, spapr,
2949                                       NULL, NULL, NULL, NULL, 1,
2950                                       PPC_ELF_MACHINE, 0, 0);
2951         if (spapr->kernel_size == ELF_LOAD_WRONG_ENDIAN) {
2952             spapr->kernel_size = load_elf(kernel_filename, NULL,
2953                                           translate_kernel_address, spapr,
2954                                           NULL, NULL, NULL, NULL, 0,
2955                                           PPC_ELF_MACHINE, 0, 0);
2956             spapr->kernel_le = spapr->kernel_size > 0;
2957         }
2958         if (spapr->kernel_size < 0) {
2959             error_report("error loading %s: %s", kernel_filename,
2960                          load_elf_strerror(spapr->kernel_size));
2961             exit(1);
2962         }
2963 
2964         /* load initrd */
2965         if (initrd_filename) {
2966             /* Try to locate the initrd in the gap between the kernel
2967              * and the firmware. Add a bit of space just in case
2968              */
2969             spapr->initrd_base = (spapr->kernel_addr + spapr->kernel_size
2970                                   + 0x1ffff) & ~0xffff;
2971             spapr->initrd_size = load_image_targphys(initrd_filename,
2972                                                      spapr->initrd_base,
2973                                                      load_limit
2974                                                      - spapr->initrd_base);
2975             if (spapr->initrd_size < 0) {
2976                 error_report("could not load initial ram disk '%s'",
2977                              initrd_filename);
2978                 exit(1);
2979             }
2980         }
2981     }
2982 
2983     filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
2984     if (!filename) {
2985         error_report("Could not find LPAR firmware '%s'", bios_name);
2986         exit(1);
2987     }
2988     fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE);
2989     if (fw_size <= 0) {
2990         error_report("Could not load LPAR firmware '%s'", filename);
2991         exit(1);
2992     }
2993     g_free(filename);
2994 
2995     /* FIXME: Should register things through the MachineState's qdev
2996      * interface, this is a legacy from the sPAPREnvironment structure
2997      * which predated MachineState but had a similar function */
2998     vmstate_register(NULL, 0, &vmstate_spapr, spapr);
2999     register_savevm_live("spapr/htab", VMSTATE_INSTANCE_ID_ANY, 1,
3000                          &savevm_htab_handlers, spapr);
3001 
3002     qbus_set_hotplug_handler(sysbus_get_default(), OBJECT(machine));
3003 
3004     qemu_register_boot_set(spapr_boot_set, spapr);
3005 
3006     /*
3007      * Nothing needs to be done to resume a suspended guest because
3008      * suspending does not change the machine state, so no need for
3009      * a ->wakeup method.
3010      */
3011     qemu_register_wakeup_support();
3012 
3013     if (kvm_enabled()) {
3014         /* to stop and start vmclock */
3015         qemu_add_vm_change_state_handler(cpu_ppc_clock_vm_state_change,
3016                                          &spapr->tb);
3017 
3018         kvmppc_spapr_enable_inkernel_multitce();
3019     }
3020 
3021     qemu_cond_init(&spapr->fwnmi_machine_check_interlock_cond);
3022 }
3023 
3024 #define DEFAULT_KVM_TYPE "auto"
3025 static int spapr_kvm_type(MachineState *machine, const char *vm_type)
3026 {
3027     /*
3028      * The use of g_ascii_strcasecmp() for 'hv' and 'pr' is to
3029      * accommodate the 'HV' and 'PR' formats that exist in the
3030      * wild. The 'auto' mode is being introduced already as
3031      * lower-case, thus we don't need to bother checking for
3032      * "AUTO".
3033      */
3034     if (!vm_type || !strcmp(vm_type, DEFAULT_KVM_TYPE)) {
3035         return 0;
3036     }
3037 
3038     if (!g_ascii_strcasecmp(vm_type, "hv")) {
3039         return 1;
3040     }
3041 
3042     if (!g_ascii_strcasecmp(vm_type, "pr")) {
3043         return 2;
3044     }
3045 
3046     error_report("Unknown kvm-type specified '%s'", vm_type);
3047     exit(1);
3048 }
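
/*
 * The value returned here becomes the type argument of KVM_CREATE_VM;
 * on PPC, 1 and 2 correspond to the kernel's KVM_VM_PPC_HV and
 * KVM_VM_PPC_PR VM types. Typical usage (illustrative command line):
 *
 *     qemu-system-ppc64 -machine pseries,kvm-type=hv ...
 */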
3049 
3050 /*
3051  * Implementation of an interface to adjust firmware path
3052  * for the bootindex property handling.
3053  */
3054 static char *spapr_get_fw_dev_path(FWPathProvider *p, BusState *bus,
3055                                    DeviceState *dev)
3056 {
3057 #define CAST(type, obj, name) \
3058     ((type *)object_dynamic_cast(OBJECT(obj), (name)))
3059     SCSIDevice *d = CAST(SCSIDevice,  dev, TYPE_SCSI_DEVICE);
3060     SpaprPhbState *phb = CAST(SpaprPhbState, dev, TYPE_SPAPR_PCI_HOST_BRIDGE);
3061     VHostSCSICommon *vsc = CAST(VHostSCSICommon, dev, TYPE_VHOST_SCSI_COMMON);
3062 
3063     if (d) {
3064         void *spapr = CAST(void, bus->parent, "spapr-vscsi");
3065         VirtIOSCSI *virtio = CAST(VirtIOSCSI, bus->parent, TYPE_VIRTIO_SCSI);
3066         USBDevice *usb = CAST(USBDevice, bus->parent, TYPE_USB_DEVICE);
3067 
3068         if (spapr) {
3069             /*
3070              * Replace "channel@0/disk@0,0" with "disk@8000000000000000":
3071              * In the top 16 bits of the 64-bit LUN, we use SRP luns of the form
3072              * 0x8000 | (target << 8) | (bus << 5) | lun
3073              * (see the "Logical unit addressing format" table in SAM5)
3074              */
3075             unsigned id = 0x8000 | (d->id << 8) | (d->channel << 5) | d->lun;
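            /*
             * For example, SCSI target id 2 on channel 0, LUN 0 yields
             * id 0x8200 and hence the path component
             * "disk@8200000000000000".
             */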
3076             return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
3077                                    (uint64_t)id << 48);
3078         } else if (virtio) {
3079             /*
3080              * We use SRP luns of the form 01000000 | (target << 8) | lun
3081              * in the top 32 bits of the 64-bit LUN
3082              * Note: the quote above is from SLOF and it is wrong,
3083              * the actual binding is:
3084              * swap 0100 or 10 << or 20 << ( target lun-id -- srplun )
3085              */
3086             unsigned id = 0x1000000 | (d->id << 16) | d->lun;
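            /*
             * For example, target id 1, LUN 0 yields id 0x1010000 and
             * hence the path component "disk@101000000000000"; LUNs of
             * 256 and above additionally get the 0x4000 flat-space bit
             * set below.
             */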
3087             if (d->lun >= 256) {
3088                 /* Use the LUN "flat space addressing method" */
3089                 id |= 0x4000;
3090             }
3091             return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
3092                                    (uint64_t)id << 32);
3093         } else if (usb) {
3094             /*
3095              * We use SRP luns of the form 01000000 | (usb-port << 16) | lun
3096              * in the top 32 bits of the 64-bit LUN
3097              */
3098             unsigned usb_port = atoi(usb->port->path);
3099             unsigned id = 0x1000000 | (usb_port << 16) | d->lun;
3100             return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
3101                                    (uint64_t)id << 32);
3102         }
3103     }
3104 
3105     /*
3106      * SLOF probes the USB devices, and if it recognizes that the device is a
3107      * storage device, it changes its name to "storage" instead of "usb-host",
3108      * and additionally adds a child node for the SCSI LUN, so the correct
3109      * boot path in SLOF is something like .../storage@1/disk@xxx" instead.
3110      */
3111     if (strcmp("usb-host", qdev_fw_name(dev)) == 0) {
3112         USBDevice *usbdev = CAST(USBDevice, dev, TYPE_USB_DEVICE);
3113         if (usb_host_dev_is_scsi_storage(usbdev)) {
3114             return g_strdup_printf("storage@%s/disk", usbdev->port->path);
3115         }
3116     }
3117 
3118     if (phb) {
3119         /* Replace "pci" with "pci@800000020000000" */
3120         return g_strdup_printf("pci@%"PRIX64, phb->buid);
3121     }
3122 
3123     if (vsc) {
3124         /* Same logic as virtio above */
3125         unsigned id = 0x1000000 | (vsc->target << 16) | vsc->lun;
3126         return g_strdup_printf("disk@%"PRIX64, (uint64_t)id << 32);
3127     }
3128 
3129     if (g_str_equal("pci-bridge", qdev_fw_name(dev))) {
3130         /* SLOF uses "pci" instead of "pci-bridge" for PCI bridges */
3131         PCIDevice *pcidev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE);
3132         return g_strdup_printf("pci@%x", PCI_SLOT(pcidev->devfn));
3133     }
3134 
3135     return NULL;
3136 }
3137 
3138 static char *spapr_get_kvm_type(Object *obj, Error **errp)
3139 {
3140     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3141 
3142     return g_strdup(spapr->kvm_type);
3143 }
3144 
3145 static void spapr_set_kvm_type(Object *obj, const char *value, Error **errp)
3146 {
3147     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3148 
3149     g_free(spapr->kvm_type);
3150     spapr->kvm_type = g_strdup(value);
3151 }
3152 
3153 static bool spapr_get_modern_hotplug_events(Object *obj, Error **errp)
3154 {
3155     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3156 
3157     return spapr->use_hotplug_event_source;
3158 }
3159 
3160 static void spapr_set_modern_hotplug_events(Object *obj, bool value,
3161                                             Error **errp)
3162 {
3163     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3164 
3165     spapr->use_hotplug_event_source = value;
3166 }
3167 
3168 static bool spapr_get_msix_emulation(Object *obj, Error **errp)
3169 {
3170     return true;
3171 }
3172 
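/*
 * Accessors for the "resize-hpt" machine property, set on the command line
 * as e.g. "-machine pseries,resize-hpt=disabled" to prevent the guest from
 * resizing its hash page table at runtime.
 */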
3173 static char *spapr_get_resize_hpt(Object *obj, Error **errp)
3174 {
3175     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3176 
3177     switch (spapr->resize_hpt) {
3178     case SPAPR_RESIZE_HPT_DEFAULT:
3179         return g_strdup("default");
3180     case SPAPR_RESIZE_HPT_DISABLED:
3181         return g_strdup("disabled");
3182     case SPAPR_RESIZE_HPT_ENABLED:
3183         return g_strdup("enabled");
3184     case SPAPR_RESIZE_HPT_REQUIRED:
3185         return g_strdup("required");
3186     }
3187     g_assert_not_reached();
3188 }
3189 
3190 static void spapr_set_resize_hpt(Object *obj, const char *value, Error **errp)
3191 {
3192     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3193 
3194     if (strcmp(value, "default") == 0) {
3195         spapr->resize_hpt = SPAPR_RESIZE_HPT_DEFAULT;
3196     } else if (strcmp(value, "disabled") == 0) {
3197         spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED;
3198     } else if (strcmp(value, "enabled") == 0) {
3199         spapr->resize_hpt = SPAPR_RESIZE_HPT_ENABLED;
3200     } else if (strcmp(value, "required") == 0) {
3201         spapr->resize_hpt = SPAPR_RESIZE_HPT_REQUIRED;
3202     } else {
3203         error_setg(errp, "Bad value for \"resize-hpt\" property");
3204     }
3205 }
3206 
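/*
 * Accessors for the "ic-mode" machine property, set on the command line as
 * e.g. "-machine pseries,ic-mode=xive"; the machine class default is
 * "dual", which supports both XICS and XIVE guests.
 */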
3207 static char *spapr_get_ic_mode(Object *obj, Error **errp)
3208 {
3209     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3210 
3211     if (spapr->irq == &spapr_irq_xics_legacy) {
3212         return g_strdup("legacy");
3213     } else if (spapr->irq == &spapr_irq_xics) {
3214         return g_strdup("xics");
3215     } else if (spapr->irq == &spapr_irq_xive) {
3216         return g_strdup("xive");
3217     } else if (spapr->irq == &spapr_irq_dual) {
3218         return g_strdup("dual");
3219     }
3220     g_assert_not_reached();
3221 }
3222 
3223 static void spapr_set_ic_mode(Object *obj, const char *value, Error **errp)
3224 {
3225     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3226 
3227     if (SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
3228         error_setg(errp, "This machine only uses the legacy XICS backend, don't pass ic-mode");
3229         return;
3230     }
3231 
3232     /* The legacy IRQ backend cannot be set */
3233     if (strcmp(value, "xics") == 0) {
3234         spapr->irq = &spapr_irq_xics;
3235     } else if (strcmp(value, "xive") == 0) {
3236         spapr->irq = &spapr_irq_xive;
3237     } else if (strcmp(value, "dual") == 0) {
3238         spapr->irq = &spapr_irq_dual;
3239     } else {
3240         error_setg(errp, "Bad value for \"ic-mode\" property");
3241     }
3242 }
3243 
3244 static char *spapr_get_host_model(Object *obj, Error **errp)
3245 {
3246     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3247 
3248     return g_strdup(spapr->host_model);
3249 }
3250 
3251 static void spapr_set_host_model(Object *obj, const char *value, Error **errp)
3252 {
3253     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3254 
3255     g_free(spapr->host_model);
3256     spapr->host_model = g_strdup(value);
3257 }
3258 
3259 static char *spapr_get_host_serial(Object *obj, Error **errp)
3260 {
3261     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3262 
3263     return g_strdup(spapr->host_serial);
3264 }
3265 
3266 static void spapr_set_host_serial(Object *obj, const char *value, Error **errp)
3267 {
3268     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3269 
3270     g_free(spapr->host_serial);
3271     spapr->host_serial = g_strdup(value);
3272 }
3273 
3274 static void spapr_instance_init(Object *obj)
3275 {
3276     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3277     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
3278     MachineState *ms = MACHINE(spapr);
3279     MachineClass *mc = MACHINE_GET_CLASS(ms);
3280 
3281     /*
3282      * NVDIMM support went live in 5.1 without considering that, in
3283      * other architectures, the user needs to enable NVDIMM support
3284      * with the 'nvdimm' machine option and that NVDIMM support is
3285      * disabled by default. It is too late to roll back to the
3286      * standard behavior without breaking 5.1 guests.
3287      */
3288     if (mc->nvdimm_supported) {
3289         ms->nvdimms_state->is_enabled = true;
3290     }
3291 
3292     spapr->htab_fd = -1;
3293     spapr->use_hotplug_event_source = true;
3294     spapr->kvm_type = g_strdup(DEFAULT_KVM_TYPE);
3295     object_property_add_str(obj, "kvm-type",
3296                             spapr_get_kvm_type, spapr_set_kvm_type);
3297     object_property_set_description(obj, "kvm-type",
3298                                     "Specifies the KVM virtualization mode (auto,"
3299                                     " hv, pr). Defaults to 'auto'. This mode will use"
3300                                     " any available KVM module loaded in the host,"
3301                                     " where kvm_hv takes precedence if both kvm_hv and"
3302                                     " kvm_pr are loaded.");
3303     object_property_add_bool(obj, "modern-hotplug-events",
3304                             spapr_get_modern_hotplug_events,
3305                             spapr_set_modern_hotplug_events);
3306     object_property_set_description(obj, "modern-hotplug-events",
3307                                     "Use dedicated hotplug event mechanism in"
3308                                     " place of standard EPOW events when possible"
3309                                     " (required for memory hot-unplug support)");
3310     ppc_compat_add_property(obj, "max-cpu-compat", &spapr->max_compat_pvr,
3311                             "Maximum permitted CPU compatibility mode");
3312 
3313     object_property_add_str(obj, "resize-hpt",
3314                             spapr_get_resize_hpt, spapr_set_resize_hpt);
3315     object_property_set_description(obj, "resize-hpt",
3316                                     "Resizing of the Hash Page Table (enabled, disabled, required)");
3317     object_property_add_uint32_ptr(obj, "vsmt",
3318                                    &spapr->vsmt, OBJ_PROP_FLAG_READWRITE);
3319     object_property_set_description(obj, "vsmt",
3320                                     "Virtual SMT: KVM behaves as if this were"
3321                                     " the host's SMT mode");
3322 
3323     object_property_add_bool(obj, "vfio-no-msix-emulation",
3324                              spapr_get_msix_emulation, NULL);
3325 
3326     object_property_add_uint64_ptr(obj, "kernel-addr",
3327                                    &spapr->kernel_addr, OBJ_PROP_FLAG_READWRITE);
3328     object_property_set_description(obj, "kernel-addr",
3329                                     stringify(KERNEL_LOAD_ADDR)
3330                                     " for -kernel is the default");
3331     spapr->kernel_addr = KERNEL_LOAD_ADDR;
3332     /* The machine class defines the default interrupt controller mode */
3333     spapr->irq = smc->irq;
3334     object_property_add_str(obj, "ic-mode", spapr_get_ic_mode,
3335                             spapr_set_ic_mode);
3336     object_property_set_description(obj, "ic-mode",
3337                  "Specifies the interrupt controller mode (xics, xive, dual)");
3338 
3339     object_property_add_str(obj, "host-model",
3340         spapr_get_host_model, spapr_set_host_model);
3341     object_property_set_description(obj, "host-model",
3342         "Host model to advertise in guest device tree");
3343     object_property_add_str(obj, "host-serial",
3344         spapr_get_host_serial, spapr_set_host_serial);
3345     object_property_set_description(obj, "host-serial",
3346         "Host serial number to advertise in guest device tree");
3347 }
3348 
3349 static void spapr_machine_finalizefn(Object *obj)
3350 {
3351     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3352 
3353     g_free(spapr->kvm_type);
3354 }
3355 
3356 void spapr_do_system_reset_on_cpu(CPUState *cs, run_on_cpu_data arg)
3357 {
3358     SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
3359     PowerPCCPU *cpu = POWERPC_CPU(cs);
3360     CPUPPCState *env = &cpu->env;
3361 
3362     cpu_synchronize_state(cs);
3363     /* If FWNMI is inactive, addr will be -1, which will deliver to 0x100 */
3364     if (spapr->fwnmi_system_reset_addr != -1) {
3365         uint64_t rtas_addr, addr;
3366 
3367         /* get rtas addr from fdt */
3368         rtas_addr = spapr_get_rtas_addr();
3369         if (!rtas_addr) {
3370             qemu_system_guest_panicked(NULL);
3371             return;
3372         }
3373 
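        /*
         * Each vCPU gets a 16-byte scratch area just past the RTAS error
         * log; the interrupted r3 is saved there and r3 is pointed at
         * that area before vectoring to the guest-registered handler.
         */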
3374         addr = rtas_addr + RTAS_ERROR_LOG_MAX + cs->cpu_index * sizeof(uint64_t)*2;
3375         stq_be_phys(&address_space_memory, addr, env->gpr[3]);
3376         stq_be_phys(&address_space_memory, addr + sizeof(uint64_t), 0);
3377         env->gpr[3] = addr;
3378     }
3379     ppc_cpu_do_system_reset(cs);
3380     if (spapr->fwnmi_system_reset_addr != -1) {
3381         env->nip = spapr->fwnmi_system_reset_addr;
3382     }
3383 }
3384 
3385 static void spapr_nmi(NMIState *n, int cpu_index, Error **errp)
3386 {
3387     CPUState *cs;
3388 
3389     CPU_FOREACH(cs) {
3390         async_run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL);
3391     }
3392 }
3393 
3394 int spapr_lmb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
3395                           void *fdt, int *fdt_start_offset, Error **errp)
3396 {
3397     uint64_t addr;
3398     uint32_t node;
3399 
3400     addr = spapr_drc_index(drc) * SPAPR_MEMORY_BLOCK_SIZE;
3401     node = object_property_get_uint(OBJECT(drc->dev), PC_DIMM_NODE_PROP,
3402                                     &error_abort);
3403     *fdt_start_offset = spapr_dt_memory_node(spapr, fdt, node, addr,
3404                                              SPAPR_MEMORY_BLOCK_SIZE);
3405     return 0;
3406 }
3407 
3408 static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
3409                            bool dedicated_hp_event_source)
3410 {
3411     SpaprDrc *drc;
3412     uint32_t nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
3413     int i;
3414     uint64_t addr = addr_start;
3415     bool hotplugged = spapr_drc_hotplugged(dev);
3416 
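    /*
     * LMBs are SPAPR_MEMORY_BLOCK_SIZE (256MiB) each, so e.g. a 1GiB
     * DIMM is carved into four LMBs, each backed by its own DRC.
     */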
3417     for (i = 0; i < nr_lmbs; i++) {
3418         drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3419                               addr / SPAPR_MEMORY_BLOCK_SIZE);
3420         g_assert(drc);
3421 
3422         /*
3423          * memory_device_get_free_addr() provided a range of free addresses
3424          * that doesn't overlap with any existing mapping at pre-plug. The
3425          * corresponding LMB DRCs are thus assumed to be all attachable.
3426          */
3427         spapr_drc_attach(drc, dev);
3428         if (!hotplugged) {
3429             spapr_drc_reset(drc);
3430         }
3431         addr += SPAPR_MEMORY_BLOCK_SIZE;
3432     }
3433     /* Send a hotplug notification to the guest only in the case
3434      * of hotplugged memory.
3435      */
3436     if (hotplugged) {
3437         if (dedicated_hp_event_source) {
3438             drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3439                                   addr_start / SPAPR_MEMORY_BLOCK_SIZE);
3440             spapr_hotplug_req_add_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
3441                                                    nr_lmbs,
3442                                                    spapr_drc_index(drc));
3443         } else {
3444             spapr_hotplug_req_add_by_count(SPAPR_DR_CONNECTOR_TYPE_LMB,
3445                                            nr_lmbs);
3446         }
3447     }
3448 }
3449 
3450 static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
3451 {
3452     SpaprMachineState *ms = SPAPR_MACHINE(hotplug_dev);
3453     PCDIMMDevice *dimm = PC_DIMM(dev);
3454     uint64_t size, addr;
3455     int64_t slot;
3456     bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM);
3457 
3458     size = memory_device_get_region_size(MEMORY_DEVICE(dev), &error_abort);
3459 
3460     pc_dimm_plug(dimm, MACHINE(ms));
3461 
3462     if (!is_nvdimm) {
3463         addr = object_property_get_uint(OBJECT(dimm),
3464                                         PC_DIMM_ADDR_PROP, &error_abort);
3465         spapr_add_lmbs(dev, addr, size,
3466                        spapr_ovec_test(ms->ov5_cas, OV5_HP_EVT));
3467     } else {
3468         slot = object_property_get_int(OBJECT(dimm),
3469                                        PC_DIMM_SLOT_PROP, &error_abort);
3470         /* We should have valid slot number at this point */
3471         g_assert(slot >= 0);
3472         spapr_add_nvdimm(dev, slot);
3473     }
3474 }
3475 
3476 static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
3477                                   Error **errp)
3478 {
3479     const SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(hotplug_dev);
3480     SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
3481     bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM);
3482     PCDIMMDevice *dimm = PC_DIMM(dev);
3483     Error *local_err = NULL;
3484     uint64_t size;
3485     Object *memdev;
3486     hwaddr pagesize;
3487 
3488     if (!smc->dr_lmb_enabled) {
3489         error_setg(errp, "Memory hotplug not supported for this machine");
3490         return;
3491     }
3492 
3493     size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &local_err);
3494     if (local_err) {
3495         error_propagate(errp, local_err);
3496         return;
3497     }
3498 
3499     if (is_nvdimm) {
3500         if (!spapr_nvdimm_validate(hotplug_dev, NVDIMM(dev), size, errp)) {
3501             return;
3502         }
3503     } else if (size % SPAPR_MEMORY_BLOCK_SIZE) {
3504         error_setg(errp, "Hotplugged memory size must be a multiple of "
3505                    "%" PRIu64 " MB", SPAPR_MEMORY_BLOCK_SIZE / MiB);
3506         return;
3507     }
3508 
3509     memdev = object_property_get_link(OBJECT(dimm), PC_DIMM_MEMDEV_PROP,
3510                                       &error_abort);
3511     pagesize = host_memory_backend_pagesize(MEMORY_BACKEND(memdev));
3512     if (!spapr_check_pagesize(spapr, pagesize, errp)) {
3513         return;
3514     }
3515 
3516     pc_dimm_pre_plug(dimm, MACHINE(hotplug_dev), NULL, errp);
3517 }
3518 
3519 struct SpaprDimmState {
3520     PCDIMMDevice *dimm;
3521     uint32_t nr_lmbs;
3522     QTAILQ_ENTRY(SpaprDimmState) next;
3523 };
3524 
3525 static SpaprDimmState *spapr_pending_dimm_unplugs_find(SpaprMachineState *s,
3526                                                        PCDIMMDevice *dimm)
3527 {
3528     SpaprDimmState *dimm_state = NULL;
3529 
3530     QTAILQ_FOREACH(dimm_state, &s->pending_dimm_unplugs, next) {
3531         if (dimm_state->dimm == dimm) {
3532             break;
3533         }
3534     }
3535     return dimm_state;
3536 }
3537 
3538 static SpaprDimmState *spapr_pending_dimm_unplugs_add(SpaprMachineState *spapr,
3539                                                       uint32_t nr_lmbs,
3540                                                       PCDIMMDevice *dimm)
3541 {
3542     SpaprDimmState *ds = NULL;
3543 
3544     /*
3545      * If this request is for a DIMM whose removal had failed earlier
3546      * (due to the guest's refusal to remove the LMBs), we would have this
3547      * dimm already in the pending_dimm_unplugs list. In that
3548      * case don't add again.
3549      */
3550     ds = spapr_pending_dimm_unplugs_find(spapr, dimm);
3551     if (!ds) {
3552         ds = g_malloc0(sizeof(SpaprDimmState));
3553         ds->nr_lmbs = nr_lmbs;
3554         ds->dimm = dimm;
3555         QTAILQ_INSERT_HEAD(&spapr->pending_dimm_unplugs, ds, next);
3556     }
3557     return ds;
3558 }
3559 
3560 static void spapr_pending_dimm_unplugs_remove(SpaprMachineState *spapr,
3561                                               SpaprDimmState *dimm_state)
3562 {
3563     QTAILQ_REMOVE(&spapr->pending_dimm_unplugs, dimm_state, next);
3564     g_free(dimm_state);
3565 }
3566 
3567 static SpaprDimmState *spapr_recover_pending_dimm_state(SpaprMachineState *ms,
3568                                                         PCDIMMDevice *dimm)
3569 {
3570     SpaprDrc *drc;
3571     uint64_t size = memory_device_get_region_size(MEMORY_DEVICE(dimm),
3572                                                   &error_abort);
3573     uint32_t nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
3574     uint32_t avail_lmbs = 0;
3575     uint64_t addr_start, addr;
3576     int i;
3577 
3578     addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP,
3579                                           &error_abort);
3580 
3581     addr = addr_start;
3582     for (i = 0; i < nr_lmbs; i++) {
3583         drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3584                               addr / SPAPR_MEMORY_BLOCK_SIZE);
3585         g_assert(drc);
3586         if (drc->dev) {
3587             avail_lmbs++;
3588         }
3589         addr += SPAPR_MEMORY_BLOCK_SIZE;
3590     }
3591 
3592     return spapr_pending_dimm_unplugs_add(ms, avail_lmbs, dimm);
3593 }
3594 
3595 /* Callback to be called during DRC release. */
3596 void spapr_lmb_release(DeviceState *dev)
3597 {
3598     HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev);
3599     SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_ctrl);
3600     SpaprDimmState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev));
3601 
3602     /* This information will get lost if a migration occurs
3603      * during the unplug process. In this case recover it. */
3604     if (ds == NULL) {
3605         ds = spapr_recover_pending_dimm_state(spapr, PC_DIMM(dev));
3606         g_assert(ds);
3607         /* At least the DRC being examined by the caller must be counted */
3608         g_assert(ds->nr_lmbs);
3609     }
3610 
3611     if (--ds->nr_lmbs) {
3612         return;
3613     }
3614 
3615     /*
3616      * Now that all the LMBs have been removed by the guest, call the
3617      * unplug handler chain. This can never fail.
3618      */
3619     hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
3620     object_unparent(OBJECT(dev));
3621 }
3622 
3623 static void spapr_memory_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
3624 {
3625     SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
3626     SpaprDimmState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev));
3627 
3628     pc_dimm_unplug(PC_DIMM(dev), MACHINE(hotplug_dev));
3629     qdev_unrealize(dev);
3630     spapr_pending_dimm_unplugs_remove(spapr, ds);
3631 }
3632 
3633 static void spapr_memory_unplug_request(HotplugHandler *hotplug_dev,
3634                                         DeviceState *dev, Error **errp)
3635 {
3636     SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
3637     PCDIMMDevice *dimm = PC_DIMM(dev);
3638     uint32_t nr_lmbs;
3639     uint64_t size, addr_start, addr;
3640     int i;
3641     SpaprDrc *drc;
3642 
3643     if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) {
3644         error_setg(errp, "nvdimm device hot unplug is not supported yet.");
3645         return;
3646     }
3647 
3648     size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &error_abort);
3649     nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
3650 
3651     addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP,
3652                                           &error_abort);
3653 
3654     /*
3655      * An existing pending dimm state for this DIMM means that there is an
3656      * unplug operation in progress, waiting for the spapr_lmb_release
3657      * callback to complete the job (BQL can't cover that far). In this case,
3658      * bail out to avoid detaching DRCs that were already released.
3659      */
3660     if (spapr_pending_dimm_unplugs_find(spapr, dimm)) {
3661         error_setg(errp, "Memory unplug already in progress for device %s",
3662                    dev->id);
3663         return;
3664     }
3665 
3666     spapr_pending_dimm_unplugs_add(spapr, nr_lmbs, dimm);
3667 
3668     addr = addr_start;
3669     for (i = 0; i < nr_lmbs; i++) {
3670         drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3671                               addr / SPAPR_MEMORY_BLOCK_SIZE);
3672         g_assert(drc);
3673 
3674         spapr_drc_detach(drc);
3675         addr += SPAPR_MEMORY_BLOCK_SIZE;
3676     }
3677 
3678     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3679                           addr_start / SPAPR_MEMORY_BLOCK_SIZE);
3680     spapr_hotplug_req_remove_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
3681                                               nr_lmbs, spapr_drc_index(drc));
3682 }
3683 
3684 /* Callback to be called during DRC release. */
3685 void spapr_core_release(DeviceState *dev)
3686 {
3687     HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev);
3688 
3689     /* Call the unplug handler chain. This can never fail. */
3690     hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
3691     object_unparent(OBJECT(dev));
3692 }
3693 
3694 static void spapr_core_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
3695 {
3696     MachineState *ms = MACHINE(hotplug_dev);
3697     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(ms);
3698     CPUCore *cc = CPU_CORE(dev);
3699     CPUArchId *core_slot = spapr_find_cpu_slot(ms, cc->core_id, NULL);
3700 
3701     if (smc->pre_2_10_has_unused_icps) {
3702         SpaprCpuCore *sc = SPAPR_CPU_CORE(OBJECT(dev));
3703         int i;
3704 
3705         for (i = 0; i < cc->nr_threads; i++) {
3706             CPUState *cs = CPU(sc->threads[i]);
3707 
3708             pre_2_10_vmstate_register_dummy_icp(cs->cpu_index);
3709         }
3710     }
3711 
3712     assert(core_slot);
3713     core_slot->cpu = NULL;
3714     qdev_unrealize(dev);
3715 }
3716 
3717 static
3718 void spapr_core_unplug_request(HotplugHandler *hotplug_dev, DeviceState *dev,
3719                                Error **errp)
3720 {
3721     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
3722     int index;
3723     SpaprDrc *drc;
3724     CPUCore *cc = CPU_CORE(dev);
3725 
3726     if (!spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index)) {
3727         error_setg(errp, "Unable to find CPU core with core-id: %d",
3728                    cc->core_id);
3729         return;
3730     }
3731     if (index == 0) {
3732         error_setg(errp, "Boot CPU core may not be unplugged");
3733         return;
3734     }
3735 
3736     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU,
3737                           spapr_vcpu_id(spapr, cc->core_id));
3738     g_assert(drc);
3739 
3740     if (!spapr_drc_unplug_requested(drc)) {
3741         spapr_drc_detach(drc);
3742         spapr_hotplug_req_remove_by_index(drc);
3743     }
3744 }
3745 
3746 int spapr_core_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
3747                            void *fdt, int *fdt_start_offset, Error **errp)
3748 {
3749     SpaprCpuCore *core = SPAPR_CPU_CORE(drc->dev);
3750     CPUState *cs = CPU(core->threads[0]);
3751     PowerPCCPU *cpu = POWERPC_CPU(cs);
3752     DeviceClass *dc = DEVICE_GET_CLASS(cs);
3753     int id = spapr_get_vcpu_id(cpu);
3754     char *nodename;
3755     int offset;
3756 
3757     nodename = g_strdup_printf("%s@%x", dc->fw_name, id);
3758     offset = fdt_add_subnode(fdt, 0, nodename);
3759     g_free(nodename);
3760 
3761     spapr_dt_cpu(cs, fdt, offset, spapr);
3762 
3763     *fdt_start_offset = offset;
3764     return 0;
3765 }
3766 
3767 static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
3768 {
3769     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
3770     MachineClass *mc = MACHINE_GET_CLASS(spapr);
3771     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
3772     SpaprCpuCore *core = SPAPR_CPU_CORE(OBJECT(dev));
3773     CPUCore *cc = CPU_CORE(dev);
3774     CPUState *cs;
3775     SpaprDrc *drc;
3776     CPUArchId *core_slot;
3777     int index;
3778     bool hotplugged = spapr_drc_hotplugged(dev);
3779     int i;
3780 
3781     core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
3782     g_assert(core_slot); /* Already checked in spapr_core_pre_plug() */
3783 
3784     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU,
3785                           spapr_vcpu_id(spapr, cc->core_id));
3786 
3787     g_assert(drc || !mc->has_hotpluggable_cpus);
3788 
3789     if (drc) {
3790         /*
3791          * spapr_core_pre_plug() already guarantees that this is a brand new
3792          * core being plugged into a free slot. Nothing should already
3793          * be attached to the corresponding DRC.
3794          */
3795         spapr_drc_attach(drc, dev);
3796 
3797         if (hotplugged) {
3798             /*
3799              * Send hotplug notification interrupt to the guest only
3800              * in case of hotplugged CPUs.
3801              */
3802             spapr_hotplug_req_add_by_index(drc);
3803         } else {
3804             spapr_drc_reset(drc);
3805         }
3806     }
3807 
3808     core_slot->cpu = OBJECT(dev);
3809 
3810     /*
3811      * Set compatibility mode to match the boot CPU, which was either set
3812      * by the machine reset code or by CAS. This really shouldn't fail at
3813      * this point.
3814      */
3815     if (hotplugged) {
3816         for (i = 0; i < cc->nr_threads; i++) {
3817             ppc_set_compat(core->threads[i], POWERPC_CPU(first_cpu)->compat_pvr,
3818                            &error_abort);
3819         }
3820     }
3821 
3822     if (smc->pre_2_10_has_unused_icps) {
3823         for (i = 0; i < cc->nr_threads; i++) {
3824             cs = CPU(core->threads[i]);
3825             pre_2_10_vmstate_unregister_dummy_icp(cs->cpu_index);
3826         }
3827     }
3828 }
3829 
3830 static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
3831                                 Error **errp)
3832 {
3833     MachineState *machine = MACHINE(OBJECT(hotplug_dev));
3834     MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev);
3835     CPUCore *cc = CPU_CORE(dev);
3836     const char *base_core_type = spapr_get_cpu_core_type(machine->cpu_type);
3837     const char *type = object_get_typename(OBJECT(dev));
3838     CPUArchId *core_slot;
3839     int index;
3840     unsigned int smp_threads = machine->smp.threads;
3841 
3842     if (dev->hotplugged && !mc->has_hotpluggable_cpus) {
3843         error_setg(errp, "CPU hotplug not supported for this machine");
3844         return;
3845     }
3846 
3847     if (strcmp(base_core_type, type)) {
3848         error_setg(errp, "CPU core type should be %s", base_core_type);
3849         return;
3850     }
3851 
3852     if (cc->core_id % smp_threads) {
3853         error_setg(errp, "invalid core id %d", cc->core_id);
3854         return;
3855     }
3856 
3857     /*
3858      * In general we should have homogeneous threads-per-core, but old
3859      * (pre hotplug support) machine types allow the last core to have
3860      * reduced threads as a compatibility hack for the time when we
3861      * allowed total vCPU counts that were not a multiple of threads-per-core.
3862      */
3863     if (mc->has_hotpluggable_cpus && (cc->nr_threads != smp_threads)) {
3864         error_setg(errp, "invalid nr-threads %d, must be %d", cc->nr_threads,
3865                    smp_threads);
3866         return;
3867     }
3868 
3869     core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
3870     if (!core_slot) {
3871         error_setg(errp, "core id %d out of range", cc->core_id);
3872         return;
3873     }
3874 
3875     if (core_slot->cpu) {
3876         error_setg(errp, "core %d already populated", cc->core_id);
3877         return;
3878     }
3879 
3880     numa_cpu_pre_plug(core_slot, dev, errp);
3881 }
3882 
3883 int spapr_phb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
3884                           void *fdt, int *fdt_start_offset, Error **errp)
3885 {
3886     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(drc->dev);
3887     int intc_phandle;
3888 
3889     intc_phandle = spapr_irq_get_phandle(spapr, spapr->fdt_blob, errp);
3890     if (intc_phandle <= 0) {
3891         return -1;
3892     }
3893 
3894     if (spapr_dt_phb(spapr, sphb, intc_phandle, fdt, fdt_start_offset)) {
3895         error_setg(errp, "unable to create FDT node for PHB %d", sphb->index);
3896         return -1;
3897     }
3898 
3899     /* generally SLOF creates these; for hotplug it's up to QEMU */
3900     _FDT(fdt_setprop_string(fdt, *fdt_start_offset, "name", "pci"));
3901 
3902     return 0;
3903 }
3904 
3905 static bool spapr_phb_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
3906                                Error **errp)
3907 {
3908     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
3909     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev);
3910     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
3911     const unsigned windows_supported = spapr_phb_windows_supported(sphb);
3912     SpaprDrc *drc;
3913 
3914     if (dev->hotplugged && !smc->dr_phb_enabled) {
3915         error_setg(errp, "PHB hotplug not supported for this machine");
3916         return false;
3917     }
3918 
3919     if (sphb->index == (uint32_t)-1) {
3920         error_setg(errp, "\"index\" for PAPR PHB is mandatory");
3921         return false;
3922     }
3923 
3924     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index);
3925     if (drc && drc->dev) {
3926         error_setg(errp, "PHB %d already attached", sphb->index);
3927         return false;
3928     }
3929 
3930     /*
3931      * This will check that sphb->index doesn't exceed the maximum number of
3932      * PHBs for the current machine type.
3933      */
3934     return
3935         smc->phb_placement(spapr, sphb->index,
3936                            &sphb->buid, &sphb->io_win_addr,
3937                            &sphb->mem_win_addr, &sphb->mem64_win_addr,
3938                            windows_supported, sphb->dma_liobn,
3939                            &sphb->nv2_gpa_win_addr, &sphb->nv2_atsd_win_addr,
3940                            errp);
3941 }
3942 
3943 static void spapr_phb_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
3944 {
3945     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
3946     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
3947     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev);
3948     SpaprDrc *drc;
3949     bool hotplugged = spapr_drc_hotplugged(dev);
3950 
3951     if (!smc->dr_phb_enabled) {
3952         return;
3953     }
3954 
3955     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index);
3956     /* hotplug hooks should check it's enabled before getting this far */
3957     assert(drc);
3958 
3959     /* spapr_phb_pre_plug() already checked the DRC is attachable */
3960     spapr_drc_attach(drc, dev);
3961 
3962     if (hotplugged) {
3963         spapr_hotplug_req_add_by_index(drc);
3964     } else {
3965         spapr_drc_reset(drc);
3966     }
3967 }
3968 
3969 void spapr_phb_release(DeviceState *dev)
3970 {
3971     HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev);
3972 
3973     hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
3974     object_unparent(OBJECT(dev));
3975 }
3976 
3977 static void spapr_phb_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
3978 {
3979     qdev_unrealize(dev);
3980 }
3981 
3982 static void spapr_phb_unplug_request(HotplugHandler *hotplug_dev,
3983                                      DeviceState *dev, Error **errp)
3984 {
3985     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev);
3986     SpaprDrc *drc;
3987 
3988     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index);
3989     assert(drc);
3990 
3991     if (!spapr_drc_unplug_requested(drc)) {
3992         spapr_drc_detach(drc);
3993         spapr_hotplug_req_remove_by_index(drc);
3994     }
3995 }
3996 
3997 static
3998 bool spapr_tpm_proxy_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
3999                               Error **errp)
4000 {
4001     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
4002 
4003     if (spapr->tpm_proxy != NULL) {
4004         error_setg(errp, "Only one TPM proxy can be specified for this machine");
4005         return false;
4006     }
4007 
4008     return true;
4009 }
4010 
4011 static void spapr_tpm_proxy_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
4012 {
4013     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
4014     SpaprTpmProxy *tpm_proxy = SPAPR_TPM_PROXY(dev);
4015 
4016     /* Already checked in spapr_tpm_proxy_pre_plug() */
4017     g_assert(spapr->tpm_proxy == NULL);
4018 
4019     spapr->tpm_proxy = tpm_proxy;
4020 }
4021 
4022 static void spapr_tpm_proxy_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
4023 {
4024     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
4025 
4026     qdev_unrealize(dev);
4027     object_unparent(OBJECT(dev));
4028     spapr->tpm_proxy = NULL;
4029 }
4030 
4031 static void spapr_machine_device_plug(HotplugHandler *hotplug_dev,
4032                                       DeviceState *dev, Error **errp)
4033 {
4034     if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
4035         spapr_memory_plug(hotplug_dev, dev);
4036     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
4037         spapr_core_plug(hotplug_dev, dev);
4038     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
4039         spapr_phb_plug(hotplug_dev, dev);
4040     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
4041         spapr_tpm_proxy_plug(hotplug_dev, dev);
4042     }
4043 }
4044 
4045 static void spapr_machine_device_unplug(HotplugHandler *hotplug_dev,
4046                                         DeviceState *dev, Error **errp)
4047 {
4048     if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
4049         spapr_memory_unplug(hotplug_dev, dev);
4050     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
4051         spapr_core_unplug(hotplug_dev, dev);
4052     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
4053         spapr_phb_unplug(hotplug_dev, dev);
4054     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
4055         spapr_tpm_proxy_unplug(hotplug_dev, dev);
4056     }
4057 }
4058 
4059 static void spapr_machine_device_unplug_request(HotplugHandler *hotplug_dev,
4060                                                 DeviceState *dev, Error **errp)
4061 {
4062     SpaprMachineState *sms = SPAPR_MACHINE(OBJECT(hotplug_dev));
4063     MachineClass *mc = MACHINE_GET_CLASS(sms);
4064     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4065 
4066     if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
4067         if (spapr_ovec_test(sms->ov5_cas, OV5_HP_EVT)) {
4068             spapr_memory_unplug_request(hotplug_dev, dev, errp);
4069         } else {
4070             /* NOTE: this means there is a window after guest reset, prior to
4071              * CAS negotiation, where unplug requests will fail due to the
4072              * capability not being detected yet. This is a bit different than
4073              * the case with PCI unplug, where the events will be queued and
4074              * eventually handled by the guest after boot
4075              */
4076             error_setg(errp, "Memory hot unplug not supported for this guest");
4077         }
4078     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
4079         if (!mc->has_hotpluggable_cpus) {
4080             error_setg(errp, "CPU hot unplug not supported on this machine");
4081             return;
4082         }
4083         spapr_core_unplug_request(hotplug_dev, dev, errp);
4084     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
4085         if (!smc->dr_phb_enabled) {
4086             error_setg(errp, "PHB hot unplug not supported on this machine");
4087             return;
4088         }
4089         spapr_phb_unplug_request(hotplug_dev, dev, errp);
4090     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
4091         spapr_tpm_proxy_unplug(hotplug_dev, dev);
4092     }
4093 }
4094 
4095 static void spapr_machine_device_pre_plug(HotplugHandler *hotplug_dev,
4096                                           DeviceState *dev, Error **errp)
4097 {
4098     if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
4099         spapr_memory_pre_plug(hotplug_dev, dev, errp);
4100     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
4101         spapr_core_pre_plug(hotplug_dev, dev, errp);
4102     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
4103         spapr_phb_pre_plug(hotplug_dev, dev, errp);
4104     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
4105         spapr_tpm_proxy_pre_plug(hotplug_dev, dev, errp);
4106     }
4107 }
4108 
4109 static HotplugHandler *spapr_get_hotplug_handler(MachineState *machine,
4110                                                  DeviceState *dev)
4111 {
4112     if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) ||
4113         object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE) ||
4114         object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE) ||
4115         object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
4116         return HOTPLUG_HANDLER(machine);
4117     }
4118     if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
4119         PCIDevice *pcidev = PCI_DEVICE(dev);
4120         PCIBus *root = pci_device_root_bus(pcidev);
4121         SpaprPhbState *phb =
4122             (SpaprPhbState *)object_dynamic_cast(OBJECT(BUS(root)->parent),
4123                                                  TYPE_SPAPR_PCI_HOST_BRIDGE);
4124 
4125         if (phb) {
4126             return HOTPLUG_HANDLER(phb);
4127         }
4128     }
4129     return NULL;
4130 }
4131 
4132 static CpuInstanceProperties
4133 spapr_cpu_index_to_props(MachineState *machine, unsigned cpu_index)
4134 {
4135     CPUArchId *core_slot;
4136     MachineClass *mc = MACHINE_GET_CLASS(machine);
4137 
4138     /* make sure possible_cpus is initialized */
4139     mc->possible_cpu_arch_ids(machine);
4140     /* get CPU core slot containing thread that matches cpu_index */
4141     core_slot = spapr_find_cpu_slot(machine, cpu_index, NULL);
4142     assert(core_slot);
4143     return core_slot->props;
4144 }
4145 
4146 static int64_t spapr_get_default_cpu_node_id(const MachineState *ms, int idx)
4147 {
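    /*
     * Cores are spread over the NUMA nodes in blocks of ms->smp.cores:
     * e.g. with smp.cores=4 and two nodes, core indexes 0..3 map to
     * node 0, 4..7 to node 1, 8..11 back to node 0, and so on.
     */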
4148     return idx / ms->smp.cores % ms->numa_state->num_nodes;
4149 }
4150 
4151 static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine)
4152 {
4153     int i;
4154     unsigned int smp_threads = machine->smp.threads;
4155     unsigned int smp_cpus = machine->smp.cpus;
4156     const char *core_type;
4157     int spapr_max_cores = machine->smp.max_cpus / smp_threads;
4158     MachineClass *mc = MACHINE_GET_CLASS(machine);
4159 
4160     if (!mc->has_hotpluggable_cpus) {
4161         spapr_max_cores = QEMU_ALIGN_UP(smp_cpus, smp_threads) / smp_threads;
4162     }
4163     if (machine->possible_cpus) {
4164         assert(machine->possible_cpus->len == spapr_max_cores);
4165         return machine->possible_cpus;
4166     }
4167 
4168     core_type = spapr_get_cpu_core_type(machine->cpu_type);
4169     if (!core_type) {
4170         error_report("Unable to find sPAPR CPU Core definition");
4171         exit(1);
4172     }
4173 
4174     machine->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
4175                              sizeof(CPUArchId) * spapr_max_cores);
4176     machine->possible_cpus->len = spapr_max_cores;
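    /*
     * Each possible slot describes a whole core; its arch_id/core_id is
     * the id of the core's first vCPU, e.g. 0, 8, 16, ... with 8 threads
     * per core, matching the core-id used when hot-plugging a core.
     */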
4177     for (i = 0; i < machine->possible_cpus->len; i++) {
4178         int core_id = i * smp_threads;
4179 
4180         machine->possible_cpus->cpus[i].type = core_type;
4181         machine->possible_cpus->cpus[i].vcpus_count = smp_threads;
4182         machine->possible_cpus->cpus[i].arch_id = core_id;
4183         machine->possible_cpus->cpus[i].props.has_core_id = true;
4184         machine->possible_cpus->cpus[i].props.core_id = core_id;
4185     }
4186     return machine->possible_cpus;
4187 }
4188 
4189 static bool spapr_phb_placement(SpaprMachineState *spapr, uint32_t index,
4190                                 uint64_t *buid, hwaddr *pio,
4191                                 hwaddr *mmio32, hwaddr *mmio64,
4192                                 unsigned n_dma, uint32_t *liobns,
4193                                 hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp)
4194 {
4195     /*
4196      * New-style PHB window placement.
4197      *
4198      * Goals: Give each PHB a large (1TiB), naturally aligned 64-bit
4199      * MMIO window, in addition to a 2GiB 32-bit MMIO window and a
4200      * 64kiB PIO window.
4201      *
4202      * Some guest kernels can't work with MMIO windows above 1<<46
4203      * (64TiB), so we place up to 31 PHBs in the area 32TiB..64TiB
4204      *
4205      * 32TiB..(33TiB+1984kiB) contains the 64kiB PIO windows for each
4206      * PHB stacked together.  (32TiB+2GiB)..(32TiB+64GiB) contains the
4207      * 2GiB 32-bit MMIO windows for each PHB.  Then 33..64TiB has the
4208      * 1TiB 64-bit MMIO windows for each PHB.
4209      */
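    /*
     * For example, index 0 gets BUID 0x800000020000000, its 64kiB PIO
     * window at 32TiB, its 2GiB 32-bit MMIO window at 32TiB+2GiB and its
     * 1TiB 64-bit MMIO window at 33TiB; index 1 is offset by one window
     * size in each of the three areas.
     */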
4210     const uint64_t base_buid = 0x800000020000000ULL;
4211     int i;
4212 
4213     /* Sanity check natural alignments */
4214     QEMU_BUILD_BUG_ON((SPAPR_PCI_BASE % SPAPR_PCI_MEM64_WIN_SIZE) != 0);
4215     QEMU_BUILD_BUG_ON((SPAPR_PCI_LIMIT % SPAPR_PCI_MEM64_WIN_SIZE) != 0);
4216     QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM64_WIN_SIZE % SPAPR_PCI_MEM32_WIN_SIZE) != 0);
4217     QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM32_WIN_SIZE % SPAPR_PCI_IO_WIN_SIZE) != 0);
4218     /* Sanity check bounds */
4219     QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_IO_WIN_SIZE) >
4220                       SPAPR_PCI_MEM32_WIN_SIZE);
4221     QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_MEM32_WIN_SIZE) >
4222                       SPAPR_PCI_MEM64_WIN_SIZE);
4223 
4224     if (index >= SPAPR_MAX_PHBS) {
4225         error_setg(errp, "\"index\" for PAPR PHB is too large (max %llu)",
4226                    SPAPR_MAX_PHBS - 1);
4227         return false;
4228     }
4229 
4230     *buid = base_buid + index;
4231     for (i = 0; i < n_dma; ++i) {
4232         liobns[i] = SPAPR_PCI_LIOBN(index, i);
4233     }
4234 
4235     *pio = SPAPR_PCI_BASE + index * SPAPR_PCI_IO_WIN_SIZE;
4236     *mmio32 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM32_WIN_SIZE;
4237     *mmio64 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM64_WIN_SIZE;
4238 
4239     *nv2gpa = SPAPR_PCI_NV2RAM64_WIN_BASE + index * SPAPR_PCI_NV2RAM64_WIN_SIZE;
4240     *nv2atsd = SPAPR_PCI_NV2ATSD_WIN_BASE + index * SPAPR_PCI_NV2ATSD_WIN_SIZE;
4241     return true;
4242 }
4243 
4244 static ICSState *spapr_ics_get(XICSFabric *dev, int irq)
4245 {
4246     SpaprMachineState *spapr = SPAPR_MACHINE(dev);
4247 
4248     return ics_valid_irq(spapr->ics, irq) ? spapr->ics : NULL;
4249 }
4250 
4251 static void spapr_ics_resend(XICSFabric *dev)
4252 {
4253     SpaprMachineState *spapr = SPAPR_MACHINE(dev);
4254 
4255     ics_resend(spapr->ics);
4256 }
4257 
4258 static ICPState *spapr_icp_get(XICSFabric *xi, int vcpu_id)
4259 {
4260     PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);
4261 
4262     return cpu ? spapr_cpu_state(cpu)->icp : NULL;
4263 }
4264 
4265 static void spapr_pic_print_info(InterruptStatsProvider *obj,
4266                                  Monitor *mon)
4267 {
4268     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
4269 
4270     spapr_irq_print_info(spapr, mon);
4271     monitor_printf(mon, "irqchip: %s\n",
4272                    kvm_irqchip_in_kernel() ? "in-kernel" : "emulated");
4273 }
4274 
4275 /*
4276  * This is a XIVE only operation
4277  */
4278 static int spapr_match_nvt(XiveFabric *xfb, uint8_t format,
4279                            uint8_t nvt_blk, uint32_t nvt_idx,
4280                            bool cam_ignore, uint8_t priority,
4281                            uint32_t logic_serv, XiveTCTXMatch *match)
4282 {
4283     SpaprMachineState *spapr = SPAPR_MACHINE(xfb);
4284     XivePresenter *xptr = XIVE_PRESENTER(spapr->active_intc);
4285     XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr);
4286     int count;
4287 
4288     count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore,
4289                            priority, logic_serv, match);
4290     if (count < 0) {
4291         return count;
4292     }
4293 
4294     /*
4295      * When we implement the save and restore of the thread interrupt
4296      * contexts in the enter/exit CPU handlers of the machine and the
4297      * escalations in QEMU, we should be able to handle non-dispatched
4298      * vCPUs.
4299      *
4300      * Until this is done, the sPAPR machine should always find at
4301      * least one matching context.
4302      */
4303     if (count == 0) {
4304         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is not dispatched\n",
4305                       nvt_blk, nvt_idx);
4306     }
4307 
4308     return count;
4309 }
4310 
4311 int spapr_get_vcpu_id(PowerPCCPU *cpu)
4312 {
4313     return cpu->vcpu_id;
4314 }
4315 
4316 bool spapr_set_vcpu_id(PowerPCCPU *cpu, int cpu_index, Error **errp)
4317 {
4318     SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
4319     MachineState *ms = MACHINE(spapr);
4320     int vcpu_id;
4321 
4322     vcpu_id = spapr_vcpu_id(spapr, cpu_index);
4323 
4324     if (kvm_enabled() && !kvm_vcpu_id_is_valid(vcpu_id)) {
4325         error_setg(errp, "Can't create CPU with id %d in KVM", vcpu_id);
4326         error_append_hint(errp, "Adjust the number of cpus to %d "
4327                           "or try to raise the number of threads per core\n",
4328                           vcpu_id * ms->smp.threads / spapr->vsmt);
4329         return false;
4330     }
4331 
4332     cpu->vcpu_id = vcpu_id;
4333     return true;
4334 }
4335 
4336 PowerPCCPU *spapr_find_cpu(int vcpu_id)
4337 {
4338     CPUState *cs;
4339 
4340     CPU_FOREACH(cs) {
4341         PowerPCCPU *cpu = POWERPC_CPU(cs);
4342 
4343         if (spapr_get_vcpu_id(cpu) == vcpu_id) {
4344             return cpu;
4345         }
4346     }
4347 
4348     return NULL;
4349 }
4350 
4351 static void spapr_cpu_exec_enter(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
4352 {
4353     SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
4354 
4355     /* These are only called by TCG; KVM maintains dispatch state itself */
4356 
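    /*
     * The VPA dispatch counter is kept even while the vCPU is dispatched
     * and odd while it is preempted; the parity checks here and in
     * spapr_cpu_exec_exit() correct a counter found in the wrong state.
     */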
4357     spapr_cpu->prod = false;
4358     if (spapr_cpu->vpa_addr) {
4359         CPUState *cs = CPU(cpu);
4360         uint32_t dispatch;
4361 
4362         dispatch = ldl_be_phys(cs->as,
4363                                spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER);
4364         dispatch++;
4365         if ((dispatch & 1) != 0) {
4366             qemu_log_mask(LOG_GUEST_ERROR,
4367                           "VPA: incorrect dispatch counter value for "
4368                           "dispatched partition %u, correcting.\n", dispatch);
4369             dispatch++;
4370         }
4371         stl_be_phys(cs->as,
4372                     spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, dispatch);
4373     }
4374 }
4375 
4376 static void spapr_cpu_exec_exit(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
4377 {
4378     SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
4379 
4380     if (spapr_cpu->vpa_addr) {
4381         CPUState *cs = CPU(cpu);
4382         uint32_t dispatch;
4383 
4384         dispatch = ldl_be_phys(cs->as,
4385                                spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER);
4386         dispatch++;
4387         if ((dispatch & 1) != 1) {
4388             qemu_log_mask(LOG_GUEST_ERROR,
4389                           "VPA: incorrect dispatch counter value for "
4390                           "preempted partition %u, correcting.\n", dispatch);
4391             dispatch++;
4392         }
4393         stl_be_phys(cs->as,
4394                     spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, dispatch);
4395     }
4396 }
4397 
4398 static void spapr_machine_class_init(ObjectClass *oc, void *data)
4399 {
4400     MachineClass *mc = MACHINE_CLASS(oc);
4401     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(oc);
4402     FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc);
4403     NMIClass *nc = NMI_CLASS(oc);
4404     HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
4405     PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_CLASS(oc);
4406     XICSFabricClass *xic = XICS_FABRIC_CLASS(oc);
4407     InterruptStatsProviderClass *ispc = INTERRUPT_STATS_PROVIDER_CLASS(oc);
4408     XiveFabricClass *xfc = XIVE_FABRIC_CLASS(oc);
4409 
4410     mc->desc = "pSeries Logical Partition (PAPR compliant)";
4411     mc->ignore_boot_device_suffixes = true;
4412 
4413     /*
4414      * We set up the default / latest behaviour here.  The class_init
4415      * functions for the specific versioned machine types can override
4416      * these details for backwards compatibility
4417      */
4418     mc->init = spapr_machine_init;
4419     mc->reset = spapr_machine_reset;
4420     mc->block_default_type = IF_SCSI;
4421     mc->max_cpus = 1024;
4422     mc->no_parallel = 1;
4423     mc->default_boot_order = "";
4424     mc->default_ram_size = 512 * MiB;
4425     mc->default_ram_id = "ppc_spapr.ram";
4426     mc->default_display = "std";
4427     mc->kvm_type = spapr_kvm_type;
4428     machine_class_allow_dynamic_sysbus_dev(mc, TYPE_SPAPR_PCI_HOST_BRIDGE);
4429     mc->pci_allow_0_address = true;
4430     assert(!mc->get_hotplug_handler);
4431     mc->get_hotplug_handler = spapr_get_hotplug_handler;
4432     hc->pre_plug = spapr_machine_device_pre_plug;
4433     hc->plug = spapr_machine_device_plug;
4434     mc->cpu_index_to_instance_props = spapr_cpu_index_to_props;
4435     mc->get_default_cpu_node_id = spapr_get_default_cpu_node_id;
4436     mc->possible_cpu_arch_ids = spapr_possible_cpu_arch_ids;
4437     hc->unplug_request = spapr_machine_device_unplug_request;
4438     hc->unplug = spapr_machine_device_unplug;
4439 
4440     smc->dr_lmb_enabled = true;
4441     smc->update_dt_enabled = true;
4442     mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power9_v2.0");
4443     mc->has_hotpluggable_cpus = true;
4444     mc->nvdimm_supported = true;
4445     smc->resize_hpt_default = SPAPR_RESIZE_HPT_ENABLED;
4446     fwc->get_dev_path = spapr_get_fw_dev_path;
4447     nc->nmi_monitor_handler = spapr_nmi;
4448     smc->phb_placement = spapr_phb_placement;
4449     vhc->hypercall = emulate_spapr_hypercall;
4450     vhc->hpt_mask = spapr_hpt_mask;
4451     vhc->map_hptes = spapr_map_hptes;
4452     vhc->unmap_hptes = spapr_unmap_hptes;
4453     vhc->hpte_set_c = spapr_hpte_set_c;
4454     vhc->hpte_set_r = spapr_hpte_set_r;
4455     vhc->get_pate = spapr_get_pate;
4456     vhc->encode_hpt_for_kvm_pr = spapr_encode_hpt_for_kvm_pr;
4457     vhc->cpu_exec_enter = spapr_cpu_exec_enter;
4458     vhc->cpu_exec_exit = spapr_cpu_exec_exit;
4459     xic->ics_get = spapr_ics_get;
4460     xic->ics_resend = spapr_ics_resend;
4461     xic->icp_get = spapr_icp_get;
4462     ispc->print_info = spapr_pic_print_info;
4463     /* Force NUMA node memory size to be a multiple of
4464      * SPAPR_MEMORY_BLOCK_SIZE (256M) since that's the granularity
4465      * at which LMBs are represented and hot-added
4466      */
4467     mc->numa_mem_align_shift = 28;
4468     mc->auto_enable_numa = true;
4469 
4470     smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_OFF;
4471     smc->default_caps.caps[SPAPR_CAP_VSX] = SPAPR_CAP_ON;
4472     smc->default_caps.caps[SPAPR_CAP_DFP] = SPAPR_CAP_ON;
4473     smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_WORKAROUND;
4474     smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_WORKAROUND;
4475     smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_WORKAROUND;
4476     smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 16; /* 64kiB */
4477     smc->default_caps.caps[SPAPR_CAP_NESTED_KVM_HV] = SPAPR_CAP_OFF;
4478     smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_ON;
4479     smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_ON;
4480     smc->default_caps.caps[SPAPR_CAP_FWNMI] = SPAPR_CAP_ON;
4481     spapr_caps_add_properties(smc);
4482     smc->irq = &spapr_irq_dual;
4483     smc->dr_phb_enabled = true;
4484     smc->linux_pci_probe = true;
4485     smc->smp_threads_vsmt = true;
4486     smc->nr_xirqs = SPAPR_NR_XIRQS;
4487     xfc->match_nvt = spapr_match_nvt;
4488 }
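
/*
 * A side note on the default_caps table above: spapr_caps_add_properties()
 * turns each SPAPR_CAP_* entry into a user-settable machine property, so
 * any of the defaults chosen here can be overridden on the command line.
 * As an illustrative sketch (the property names follow the capability
 * descriptions in spapr_caps.c):
 *
 *   qemu-system-ppc64 -machine pseries,cap-htm=on,cap-ccf-assist=off ...
 */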
4489 
4490 static const TypeInfo spapr_machine_info = {
4491     .name          = TYPE_SPAPR_MACHINE,
4492     .parent        = TYPE_MACHINE,
4493     .abstract      = true,
4494     .instance_size = sizeof(SpaprMachineState),
4495     .instance_init = spapr_instance_init,
4496     .instance_finalize = spapr_machine_finalizefn,
4497     .class_size    = sizeof(SpaprMachineClass),
4498     .class_init    = spapr_machine_class_init,
4499     .interfaces = (InterfaceInfo[]) {
4500         { TYPE_FW_PATH_PROVIDER },
4501         { TYPE_NMI },
4502         { TYPE_HOTPLUG_HANDLER },
4503         { TYPE_PPC_VIRTUAL_HYPERVISOR },
4504         { TYPE_XICS_FABRIC },
4505         { TYPE_INTERRUPT_STATS_PROVIDER },
4506         { TYPE_XIVE_FABRIC },
4507         { }
4508     },
4509 };
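
/*
 * The interface list above is how generic code reaches sPAPR behaviour
 * without sPAPR knowledge: for instance, the HMP "nmi" command looks up
 * TYPE_NMI on the machine and ends up in spapr_nmi(), and the hotplug
 * core finds TYPE_HOTPLUG_HANDLER and dispatches to the hc->plug/unplug
 * hooks installed in spapr_machine_class_init().
 */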
4510 
4511 static void spapr_machine_latest_class_options(MachineClass *mc)
4512 {
4513     mc->alias = "pseries";
4514     mc->is_default = true;
4515 }
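
/*
 * Only the most recent machine type gets the options above: the alias
 * makes plain "-machine pseries" resolve to it, and is_default makes it
 * the machine qemu-system-ppc64 boots when no -machine is given at all.
 */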
4516 
4517 #define DEFINE_SPAPR_MACHINE(suffix, verstr, latest)                 \
4518     static void spapr_machine_##suffix##_class_init(ObjectClass *oc, \
4519                                                     void *data)      \
4520     {                                                                \
4521         MachineClass *mc = MACHINE_CLASS(oc);                        \
4522         spapr_machine_##suffix##_class_options(mc);                  \
4523         if (latest) {                                                \
4524             spapr_machine_latest_class_options(mc);                  \
4525         }                                                            \
4526     }                                                                \
4527     static const TypeInfo spapr_machine_##suffix##_info = {          \
4528         .name = MACHINE_TYPE_NAME("pseries-" verstr),                \
4529         .parent = TYPE_SPAPR_MACHINE,                                \
4530         .class_init = spapr_machine_##suffix##_class_init,           \
4531     };                                                               \
4532     static void spapr_machine_register_##suffix(void)                \
4533     {                                                                \
4534         type_register(&spapr_machine_##suffix##_info);               \
4535     }                                                                \
4536     type_init(spapr_machine_register_##suffix)
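
/*
 * For orientation, DEFINE_SPAPR_MACHINE(6_0, "6.0", true) expands to
 * roughly the following (a sketch of the token pasting, not verbatim
 * preprocessor output):
 *
 *   static void spapr_machine_6_0_class_init(ObjectClass *oc, void *data)
 *   {
 *       MachineClass *mc = MACHINE_CLASS(oc);
 *       spapr_machine_6_0_class_options(mc);
 *       spapr_machine_latest_class_options(mc);  // latest == true
 *   }
 *   static const TypeInfo spapr_machine_6_0_info = {
 *       .name = MACHINE_TYPE_NAME("pseries-6.0"),
 *       .parent = TYPE_SPAPR_MACHINE,
 *       .class_init = spapr_machine_6_0_class_init,
 *   };
 *
 * plus a type_init() constructor that registers the type with QOM.
 */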
4537 
4538 /*
4539  * pseries-6.0
4540  */
4541 static void spapr_machine_6_0_class_options(MachineClass *mc)
4542 {
4543     /* Defaults for the latest behaviour inherited from the base class */
4544 }
4545 
4546 DEFINE_SPAPR_MACHINE(6_0, "6.0", true);
4547 
4548 /*
4549  * pseries-5.2
4550  */
4551 static void spapr_machine_5_2_class_options(MachineClass *mc)
4552 {
4553     spapr_machine_6_0_class_options(mc);
4554     compat_props_add(mc->compat_props, hw_compat_5_2, hw_compat_5_2_len);
4555 }
4556 
4557 DEFINE_SPAPR_MACHINE(5_2, "5.2", false);
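
/*
 * Every versioned machine type below follows the same pattern as
 * pseries-5.2: first apply the next newer version's class options, then
 * layer this version's compatibility tweaks on top, so the oldest
 * machine types accumulate the entire chain of compat properties.
 */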
4558 
4559 /*
4560  * pseries-5.1
4561  */
4562 static void spapr_machine_5_1_class_options(MachineClass *mc)
4563 {
4564     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4565 
4566     spapr_machine_5_2_class_options(mc);
4567     compat_props_add(mc->compat_props, hw_compat_5_1, hw_compat_5_1_len);
4568     smc->pre_5_2_numa_associativity = true;
4569 }
4570 
4571 DEFINE_SPAPR_MACHINE(5_1, "5.1", false);
4572 
4573 /*
4574  * pseries-5.0
4575  */
4576 static void spapr_machine_5_0_class_options(MachineClass *mc)
4577 {
4578     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4579     static GlobalProperty compat[] = {
4580         { TYPE_SPAPR_PCI_HOST_BRIDGE, "pre-5.1-associativity", "on" },
4581     };
4582 
4583     spapr_machine_5_1_class_options(mc);
4584     compat_props_add(mc->compat_props, hw_compat_5_0, hw_compat_5_0_len);
4585     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
4586     mc->numa_mem_supported = true;
4587     smc->pre_5_1_assoc_refpoints = true;
4588 }
4589 
4590 DEFINE_SPAPR_MACHINE(5_0, "5.0", false);
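
/*
 * The compat[] arrays used by these hooks are (driver, property, value)
 * GlobalProperty tuples; registering one via compat_props_add() has the
 * same effect as the user passing, e.g.,
 * "-global spapr-pci-host-bridge.pre-5.1-associativity=on", except that
 * it is applied automatically for this machine version and older.
 */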
4591 
4592 /*
4593  * pseries-4.2
4594  */
4595 static void spapr_machine_4_2_class_options(MachineClass *mc)
4596 {
4597     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4598 
4599     spapr_machine_5_0_class_options(mc);
4600     compat_props_add(mc->compat_props, hw_compat_4_2, hw_compat_4_2_len);
4601     smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_OFF;
4602     smc->default_caps.caps[SPAPR_CAP_FWNMI] = SPAPR_CAP_OFF;
4603     smc->rma_limit = 16 * GiB;
4604     mc->nvdimm_supported = false;
4605 }
4606 
4607 DEFINE_SPAPR_MACHINE(4_2, "4.2", false);
4608 
4609 /*
4610  * pseries-4.1
4611  */
4612 static void spapr_machine_4_1_class_options(MachineClass *mc)
4613 {
4614     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4615     static GlobalProperty compat[] = {
4616         /* Only allow 4 KiB and 64 KiB IOMMU pagesizes: (1 << 12) | (1 << 16) */
4617         { TYPE_SPAPR_PCI_HOST_BRIDGE, "pgsz", "0x11000" },
4618     };
4619 
4620     spapr_machine_4_2_class_options(mc);
4621     smc->linux_pci_probe = false;
4622     smc->smp_threads_vsmt = false;
4623     compat_props_add(mc->compat_props, hw_compat_4_1, hw_compat_4_1_len);
4624     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
4625 }
4626 
4627 DEFINE_SPAPR_MACHINE(4_1, "4.1", false);
4628 
4629 /*
4630  * pseries-4.0
4631  */
4632 static bool phb_placement_4_0(SpaprMachineState *spapr, uint32_t index,
4633                               uint64_t *buid, hwaddr *pio,
4634                               hwaddr *mmio32, hwaddr *mmio64,
4635                               unsigned n_dma, uint32_t *liobns,
4636                               hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp)
4637 {
4638     if (!spapr_phb_placement(spapr, index, buid, pio, mmio32, mmio64, n_dma,
4639                              liobns, nv2gpa, nv2atsd, errp)) {
4640         return false;
4641     }
4642 
4643     *nv2gpa = 0;
4644     *nv2atsd = 0;
4645     return true;
4646 }
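
/*
 * phb_placement_4_0() above delegates to the current placement code and
 * then zeroes the NVLink2 GPA/ATSD addresses, so pseries-4.0 and older
 * machine types never advertise NVLink2 windows to the guest.
 */
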
4647 static void spapr_machine_4_0_class_options(MachineClass *mc)
4648 {
4649     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4650 
4651     spapr_machine_4_1_class_options(mc);
4652     compat_props_add(mc->compat_props, hw_compat_4_0, hw_compat_4_0_len);
4653     smc->phb_placement = phb_placement_4_0;
4654     smc->irq = &spapr_irq_xics;
4655     smc->pre_4_1_migration = true;
4656 }
4657 
4658 DEFINE_SPAPR_MACHINE(4_0, "4.0", false);
4659 
4660 /*
4661  * pseries-3.1
4662  */
4663 static void spapr_machine_3_1_class_options(MachineClass *mc)
4664 {
4665     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4666 
4667     spapr_machine_4_0_class_options(mc);
4668     compat_props_add(mc->compat_props, hw_compat_3_1, hw_compat_3_1_len);
4669 
4670     mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power8_v2.0");
4671     smc->update_dt_enabled = false;
4672     smc->dr_phb_enabled = false;
4673     smc->broken_host_serial_model = true;
4674     smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_BROKEN;
4675     smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_BROKEN;
4676     smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_BROKEN;
4677     smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_OFF;
4678 }
4679 
4680 DEFINE_SPAPR_MACHINE(3_1, "3.1", false);
4681 
4682 /*
4683  * pseries-3.0
4684  */
4685 
4686 static void spapr_machine_3_0_class_options(MachineClass *mc)
4687 {
4688     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4689 
4690     spapr_machine_3_1_class_options(mc);
4691     compat_props_add(mc->compat_props, hw_compat_3_0, hw_compat_3_0_len);
4692 
4693     smc->legacy_irq_allocation = true;
4694     smc->nr_xirqs = 0x400;
4695     smc->irq = &spapr_irq_xics_legacy;
4696 }
4697 
4698 DEFINE_SPAPR_MACHINE(3_0, "3.0", false);
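
/*
 * pseries-3.0 and older use the legacy interrupt scheme: IRQ numbers are
 * allocated by the machine as devices are created, only the XICS backend
 * is available, and the pool of external IRQs is fixed at 0x400 (1024).
 */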
4699 
4700 /*
4701  * pseries-2.12
4702  */
4703 static void spapr_machine_2_12_class_options(MachineClass *mc)
4704 {
4705     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4706     static GlobalProperty compat[] = {
4707         { TYPE_POWERPC_CPU, "pre-3.0-migration", "on" },
4708         { TYPE_SPAPR_CPU_CORE, "pre-3.0-migration", "on" },
4709     };
4710 
4711     spapr_machine_3_0_class_options(mc);
4712     compat_props_add(mc->compat_props, hw_compat_2_12, hw_compat_2_12_len);
4713     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
4714 
4715     /* We depend on kvm_enabled() to choose a default value for the
4716      * hpt-max-page-size capability. We can't do that here because it
4717      * is too early: the HW accelerator isn't initialized yet. Postpone
4718      * this to machine init (see default_caps_with_cpu()).
4719      */
4720     smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 0;
4721 }
4722 
4723 DEFINE_SPAPR_MACHINE(2_12, "2.12", false);
4724 
4725 static void spapr_machine_2_12_sxxm_class_options(MachineClass *mc)
4726 {
4727     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4728 
4729     spapr_machine_2_12_class_options(mc);
4730     smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_WORKAROUND;
4731     smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_WORKAROUND;
4732     smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_FIXED_CCD;
4733 }
4734 
4735 DEFINE_SPAPR_MACHINE(2_12_sxxm, "2.12-sxxm", false);
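
/*
 * "sxxm" stands for "speculative execution exploit mitigations": this
 * variant of pseries-2.12 only changes the CFPC/SBBC/IBS capability
 * defaults to mitigated settings for Spectre/Meltdown class issues.
 */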
4736 
4737 /*
4738  * pseries-2.11
4739  */
4740 
4741 static void spapr_machine_2_11_class_options(MachineClass *mc)
4742 {
4743     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4744 
4745     spapr_machine_2_12_class_options(mc);
4746     smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_ON;
4747     compat_props_add(mc->compat_props, hw_compat_2_11, hw_compat_2_11_len);
4748 }
4749 
4750 DEFINE_SPAPR_MACHINE(2_11, "2.11", false);
4751 
4752 /*
4753  * pseries-2.10
4754  */
4755 
4756 static void spapr_machine_2_10_class_options(MachineClass *mc)
4757 {
4758     spapr_machine_2_11_class_options(mc);
4759     compat_props_add(mc->compat_props, hw_compat_2_10, hw_compat_2_10_len);
4760 }
4761 
4762 DEFINE_SPAPR_MACHINE(2_10, "2.10", false);
4763 
4764 /*
4765  * pseries-2.9
4766  */
4767 
4768 static void spapr_machine_2_9_class_options(MachineClass *mc)
4769 {
4770     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4771     static GlobalProperty compat[] = {
4772         { TYPE_POWERPC_CPU, "pre-2.10-migration", "on" },
4773     };
4774 
4775     spapr_machine_2_10_class_options(mc);
4776     compat_props_add(mc->compat_props, hw_compat_2_9, hw_compat_2_9_len);
4777     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
4778     smc->pre_2_10_has_unused_icps = true;
4779     smc->resize_hpt_default = SPAPR_RESIZE_HPT_DISABLED;
4780 }
4781 
4782 DEFINE_SPAPR_MACHINE(2_9, "2.9", false);
4783 
4784 /*
4785  * pseries-2.8
4786  */
4787 
4788 static void spapr_machine_2_8_class_options(MachineClass *mc)
4789 {
4790     static GlobalProperty compat[] = {
4791         { TYPE_SPAPR_PCI_HOST_BRIDGE, "pcie-extended-configuration-space", "off" },
4792     };
4793 
4794     spapr_machine_2_9_class_options(mc);
4795     compat_props_add(mc->compat_props, hw_compat_2_8, hw_compat_2_8_len);
4796     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
4797     mc->numa_mem_align_shift = 23; /* 1 << 23 == 8 MiB */
4798 }
4799 
4800 DEFINE_SPAPR_MACHINE(2_8, "2.8", false);
4801 
4802 /*
4803  * pseries-2.7
4804  */
4805 
4806 static bool phb_placement_2_7(SpaprMachineState *spapr, uint32_t index,
4807                               uint64_t *buid, hwaddr *pio,
4808                               hwaddr *mmio32, hwaddr *mmio64,
4809                               unsigned n_dma, uint32_t *liobns,
4810                               hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp)
4811 {
4812     /* Legacy PHB placement for pseries-2.7 and earlier machine types */
4813     const uint64_t base_buid = 0x800000020000000ULL;
4814     const hwaddr phb_spacing = 0x1000000000ULL; /* 64 GiB */
4815     const hwaddr mmio_offset = 0xa0000000; /* 2 GiB + 512 MiB */
4816     const hwaddr pio_offset = 0x80000000; /* 2 GiB */
4817     const uint32_t max_index = 255;
4818     const hwaddr phb0_alignment = 0x10000000000ULL; /* 1 TiB */
4819 
4820     uint64_t ram_top = MACHINE(spapr)->ram_size;
4821     hwaddr phb0_base, phb_base;
4822     int i;
4823 
4824     /* Do we have device memory? */
4825     if (MACHINE(spapr)->maxram_size > ram_top) {
4826         /* We can't just use maxram_size, because there may be an
4827          * alignment gap between the normal and device memory regions
4828          */
4829         ram_top = MACHINE(spapr)->device_memory->base +
4830             memory_region_size(&MACHINE(spapr)->device_memory->mr);
4831     }
4832 
4833     phb0_base = QEMU_ALIGN_UP(ram_top, phb0_alignment);
4834 
4835     if (index > max_index) {
4836         error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)",
4837                    max_index);
4838         return false;
4839     }
4840 
4841     *buid = base_buid + index;
4842     for (i = 0; i < n_dma; ++i) {
4843         liobns[i] = SPAPR_PCI_LIOBN(index, i);
4844     }
4845 
4846     phb_base = phb0_base + index * phb_spacing;
4847     *pio = phb_base + pio_offset;
4848     *mmio32 = phb_base + mmio_offset;
4849     /*
4850      * We don't set the 64-bit MMIO window, relying on the PHB's
4851      * fallback behaviour of automatically splitting a large "32-bit"
4852      * window into contiguous 32-bit and 64-bit windows
4853      */
4854 
4855     *nv2gpa = 0;
4856     *nv2atsd = 0;
4857     return true;
4858 }
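
/*
 * A worked example of the legacy placement, assuming 4 GiB of RAM and
 * no device memory (so ram_top aligns up to a phb0_base of 1 TiB):
 *
 *   index 0: BUID 0x800000020000000, PIO at 1 TiB + 2 GiB,
 *            32-bit MMIO at 1 TiB + 2.5 GiB
 *   index 1: the same offsets within a window starting 64 GiB higher
 *
 * i.e. each PHB owns a disjoint 64 GiB slice above the 1 TiB boundary.
 */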
4859 
4860 static void spapr_machine_2_7_class_options(MachineClass *mc)
4861 {
4862     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4863     static GlobalProperty compat[] = {
4864         { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem_win_size", "0xf80000000", },
4865         { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem64_win_size", "0", },
4866         { TYPE_POWERPC_CPU, "pre-2.8-migration", "on", },
4867         { TYPE_SPAPR_PCI_HOST_BRIDGE, "pre-2.8-migration", "on", },
4868     };
4869 
4870     spapr_machine_2_8_class_options(mc);
4871     mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power7_v2.3");
4872     mc->default_machine_opts = "modern-hotplug-events=off";
4873     compat_props_add(mc->compat_props, hw_compat_2_7, hw_compat_2_7_len);
4874     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
4875     smc->phb_placement = phb_placement_2_7;
4876 }
4877 
4878 DEFINE_SPAPR_MACHINE(2_7, "2.7", false);
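
/*
 * Note that the 2.7 compat properties set a 62 GiB (0xf80000000) "32-bit"
 * memory window and a zero-sized 64-bit window; this is what triggers the
 * window-splitting fallback described in phb_placement_2_7() above.
 */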
4879 
4880 /*
4881  * pseries-2.6
4882  */
4883 
4884 static void spapr_machine_2_6_class_options(MachineClass *mc)
4885 {
4886     static GlobalProperty compat[] = {
4887         { TYPE_SPAPR_PCI_HOST_BRIDGE, "ddw", "off" },
4888     };
4889 
4890     spapr_machine_2_7_class_options(mc);
4891     mc->has_hotpluggable_cpus = false;
4892     compat_props_add(mc->compat_props, hw_compat_2_6, hw_compat_2_6_len);
4893     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
4894 }
4895 
4896 DEFINE_SPAPR_MACHINE(2_6, "2.6", false);
4897 
4898 /*
4899  * pseries-2.5
4900  */
4901 
4902 static void spapr_machine_2_5_class_options(MachineClass *mc)
4903 {
4904     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4905     static GlobalProperty compat[] = {
4906         { "spapr-vlan", "use-rx-buffer-pools", "off" },
4907     };
4908 
4909     spapr_machine_2_6_class_options(mc);
4910     smc->use_ohci_by_default = true;
4911     compat_props_add(mc->compat_props, hw_compat_2_5, hw_compat_2_5_len);
4912     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
4913 }
4914 
4915 DEFINE_SPAPR_MACHINE(2_5, "2.5", false);
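
/*
 * use_ohci_by_default makes pseries-2.5 and older instantiate an OHCI
 * USB host controller where newer machine types would create an XHCI.
 */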
4916 
4917 /*
4918  * pseries-2.4
4919  */
4920 
4921 static void spapr_machine_2_4_class_options(MachineClass *mc)
4922 {
4923     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4924 
4925     spapr_machine_2_5_class_options(mc);
4926     smc->dr_lmb_enabled = false;
4927     compat_props_add(mc->compat_props, hw_compat_2_4, hw_compat_2_4_len);
4928 }
4929 
4930 DEFINE_SPAPR_MACHINE(2_4, "2.4", false);
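
/*
 * With dr_lmb_enabled off, no dynamic-reconfiguration LMBs are exposed
 * in the device tree, so pseries-2.4 and older cannot hotplug memory.
 */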
4931 
4932 /*
4933  * pseries-2.3
4934  */
4935 
4936 static void spapr_machine_2_3_class_options(MachineClass *mc)
4937 {
4938     static GlobalProperty compat[] = {
4939         { "spapr-pci-host-bridge", "dynamic-reconfiguration", "off" },
4940     };
4941     spapr_machine_2_4_class_options(mc);
4942     compat_props_add(mc->compat_props, hw_compat_2_3, hw_compat_2_3_len);
4943     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
4944 }
4945 DEFINE_SPAPR_MACHINE(2_3, "2.3", false);
4946 
4947 /*
4948  * pseries-2.2
4949  */
4950 
4951 static void spapr_machine_2_2_class_options(MachineClass *mc)
4952 {
4953     static GlobalProperty compat[] = {
4954         { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem_win_size", "0x20000000" },
4955     };
4956 
4957     spapr_machine_2_3_class_options(mc);
4958     compat_props_add(mc->compat_props, hw_compat_2_2, hw_compat_2_2_len);
4959     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
4960     mc->default_machine_opts = "modern-hotplug-events=off,suppress-vmdesc=on";
4961 }
4962 DEFINE_SPAPR_MACHINE(2_2, "2.2", false);
4963 
4964 /*
4965  * pseries-2.1
4966  */
4967 
4968 static void spapr_machine_2_1_class_options(MachineClass *mc)
4969 {
4970     spapr_machine_2_2_class_options(mc);
4971     compat_props_add(mc->compat_props, hw_compat_2_1, hw_compat_2_1_len);
4972 }
4973 DEFINE_SPAPR_MACHINE(2_1, "2.1", false);
4974 
4975 static void spapr_machine_register_types(void)
4976 {
4977     type_register_static(&spapr_machine_info);
4978 }
4979 
4980 type_init(spapr_machine_register_types)
4981