1 /*
2 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
3 *
4 * Copyright (c) 2004-2007 Fabrice Bellard
5 * Copyright (c) 2007 Jocelyn Mayer
6 * Copyright (c) 2010 David Gibson, IBM Corporation.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24 * THE SOFTWARE.
25 */
26
27 #include "qemu/osdep.h"
28 #include "qemu/datadir.h"
29 #include "qemu/memalign.h"
30 #include "qemu/guest-random.h"
31 #include "qapi/error.h"
32 #include "qapi/qapi-events-machine.h"
33 #include "qapi/qapi-events-qdev.h"
34 #include "qapi/visitor.h"
35 #include "sysemu/sysemu.h"
36 #include "sysemu/hostmem.h"
37 #include "sysemu/numa.h"
38 #include "sysemu/tcg.h"
39 #include "sysemu/qtest.h"
40 #include "sysemu/reset.h"
41 #include "sysemu/runstate.h"
42 #include "qemu/log.h"
43 #include "hw/fw-path-provider.h"
44 #include "elf.h"
45 #include "net/net.h"
46 #include "sysemu/device_tree.h"
47 #include "sysemu/cpus.h"
48 #include "sysemu/hw_accel.h"
49 #include "kvm_ppc.h"
50 #include "migration/misc.h"
51 #include "migration/qemu-file-types.h"
52 #include "migration/global_state.h"
53 #include "migration/register.h"
54 #include "migration/blocker.h"
55 #include "mmu-hash64.h"
56 #include "mmu-book3s-v3.h"
57 #include "cpu-models.h"
58 #include "hw/core/cpu.h"
59
60 #include "hw/ppc/ppc.h"
61 #include "hw/loader.h"
62
63 #include "hw/ppc/fdt.h"
64 #include "hw/ppc/spapr.h"
65 #include "hw/ppc/spapr_nested.h"
66 #include "hw/ppc/spapr_vio.h"
67 #include "hw/ppc/vof.h"
68 #include "hw/qdev-properties.h"
69 #include "hw/pci-host/spapr.h"
70 #include "hw/pci/msi.h"
71
72 #include "hw/pci/pci.h"
73 #include "hw/scsi/scsi.h"
74 #include "hw/virtio/virtio-scsi.h"
75 #include "hw/virtio/vhost-scsi-common.h"
76
77 #include "exec/ram_addr.h"
78 #include "exec/confidential-guest-support.h"
79 #include "hw/usb.h"
80 #include "qemu/config-file.h"
81 #include "qemu/error-report.h"
82 #include "trace.h"
83 #include "hw/nmi.h"
84 #include "hw/intc/intc.h"
85
86 #include "hw/ppc/spapr_cpu_core.h"
87 #include "hw/mem/memory-device.h"
88 #include "hw/ppc/spapr_tpm_proxy.h"
89 #include "hw/ppc/spapr_nvdimm.h"
90 #include "hw/ppc/spapr_numa.h"
91
92 #include <libfdt.h>
93
/* SLOF memory layout:
 *
 * SLOF raw image loaded at 0, copies its romfs right below the flat
 * device-tree, then positions SLOF itself 31M below that
 *
 * So we set FW_OVERHEAD to 40MB which should account for all of that
 * and more
 *
 * We load our kernel at 4M, leaving space for the SLOF initial image
 */
104 #define FDT_MAX_ADDR 0x80000000 /* FDT must stay below that */
105 #define FW_MAX_SIZE 0x400000
106 #define FW_FILE_NAME "slof.bin"
107 #define FW_FILE_NAME_VOF "vof.bin"
108 #define FW_OVERHEAD 0x2800000
109 #define KERNEL_LOAD_ADDR FW_MAX_SIZE
110
111 #define MIN_RMA_SLOF (128 * MiB)
112
113 #define PHANDLE_INTC 0x00001111
114
115 /* These two functions implement the VCPU id numbering: one to compute them
116 * all and one to identify thread 0 of a VCORE. Any change to the first one
117 * is likely to have an impact on the second one, so let's keep them close.
118 */
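/*
 * For example (illustrative values only): with smp.threads = 4 and
 * spapr->vsmt = 8, cpu_index 5 (thread 1 of core 1) maps to VCPU id
 * 8 * 1 + 1 = 9, while ids 4..7 (the unused SMT slots of core 0) are
 * skipped.
 */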
static int spapr_vcpu_id(SpaprMachineState *spapr, int cpu_index)
120 {
121 MachineState *ms = MACHINE(spapr);
122 unsigned int smp_threads = ms->smp.threads;
123
124 assert(spapr->vsmt);
125 return
126 (cpu_index / smp_threads) * spapr->vsmt + cpu_index % smp_threads;
127 }
static bool spapr_is_thread0_in_vcore(SpaprMachineState *spapr,
129 PowerPCCPU *cpu)
130 {
131 assert(spapr->vsmt);
132 return spapr_get_vcpu_id(cpu) % spapr->vsmt == 0;
133 }
134
int spapr_max_server_number(SpaprMachineState *spapr)
136 {
137 MachineState *ms = MACHINE(spapr);
138
139 assert(spapr->vsmt);
140 return DIV_ROUND_UP(ms->smp.max_cpus * spapr->vsmt, ms->smp.threads);
141 }
142
static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu,
144 int smt_threads)
145 {
146 int i, ret = 0;
147 g_autofree uint32_t *servers_prop = g_new(uint32_t, smt_threads);
148 g_autofree uint32_t *gservers_prop = g_new(uint32_t, smt_threads * 2);
149 int index = spapr_get_vcpu_id(cpu);
150
151 if (cpu->compat_pvr) {
152 ret = fdt_setprop_cell(fdt, offset, "cpu-version", cpu->compat_pvr);
153 if (ret < 0) {
154 return ret;
155 }
156 }
157
158 /* Build interrupt servers and gservers properties */
159 for (i = 0; i < smt_threads; i++) {
160 servers_prop[i] = cpu_to_be32(index + i);
161 /* Hack, direct the group queues back to cpu 0 */
162 gservers_prop[i*2] = cpu_to_be32(index + i);
163 gservers_prop[i*2 + 1] = 0;
164 }
165 ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s",
166 servers_prop, sizeof(*servers_prop) * smt_threads);
167 if (ret < 0) {
168 return ret;
169 }
170 ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-gserver#s",
171 gservers_prop, sizeof(*gservers_prop) * smt_threads * 2);
172
173 return ret;
174 }
175
static void spapr_dt_pa_features(SpaprMachineState *spapr,
177 PowerPCCPU *cpu,
178 void *fdt, int offset)
179 {
    /*
     * SSO (SAO) ordering is supported on KVM and thread=single hosts,
     * but not MTTCG, so disable it. To advertise it, a cap would have
     * to be added, or support implemented for MTTCG.
     *
     * Copy/paste is not supported by TCG, so it is not advertised. KVM
     * can execute the instructions but has no usable accelerator drivers,
     * so there isn't much need for it anyway.
     */
189
190 /* These should be kept in sync with pnv */
191 uint8_t pa_features_206[] = { 6, 0,
192 0xf6, 0x1f, 0xc7, 0x00, 0x00, 0xc0 };
193 uint8_t pa_features_207[] = { 24, 0,
194 0xf6, 0x1f, 0xc7, 0xc0, 0x00, 0xf0,
195 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
196 0x00, 0x00, 0x00, 0x00, 0x80, 0x00,
197 0x80, 0x00, 0x80, 0x00, 0x00, 0x00 };
198 uint8_t pa_features_300[] = { 66, 0,
199 /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */
200 /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, 5: LE|CFAR|EB|LSQ */
201 0xf6, 0x1f, 0xc7, 0xc0, 0x00, 0xf0, /* 0 - 5 */
202 /* 6: DS207 */
203 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */
204 /* 16: Vector */
205 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */
206 /* 18: Vec. Scalar, 20: Vec. XOR */
207 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */
208 /* 24: Ext. Dec, 26: 64 bit ftrs, 28: PM ftrs */
209 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */
210 /* 32: LE atomic, 34: EBB + ext EBB */
211 0x00, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */
212 /* 40: Radix MMU */
213 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 36 - 41 */
214 /* 42: PM, 44: PC RA, 46: SC vec'd */
215 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */
216 /* 48: SIMD, 50: QP BFP, 52: String */
217 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */
218 /* 54: DecFP, 56: DecI, 58: SHA */
219 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */
220 /* 60: NM atomic, 62: RNG */
221 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */
222 };
223 /* 3.1 removes SAO, HTM support */
224 uint8_t pa_features_31[] = { 74, 0,
225 /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */
226 /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, 5: LE|CFAR|EB|LSQ */
227 0xf6, 0x1f, 0xc7, 0xc0, 0x00, 0xf0, /* 0 - 5 */
228 /* 6: DS207 */
229 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */
230 /* 16: Vector */
231 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */
232 /* 18: Vec. Scalar, 20: Vec. XOR */
233 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */
234 /* 24: Ext. Dec, 26: 64 bit ftrs, 28: PM ftrs */
235 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */
236 /* 32: LE atomic, 34: EBB + ext EBB */
237 0x00, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */
238 /* 40: Radix MMU */
239 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 36 - 41 */
240 /* 42: PM, 44: PC RA, 46: SC vec'd */
241 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */
242 /* 48: SIMD, 50: QP BFP, 52: String */
243 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */
244 /* 54: DecFP, 56: DecI, 58: SHA */
245 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */
246 /* 60: NM atomic, 62: RNG */
247 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */
248 /* 68: DEXCR[SBHE|IBRTPDUS|SRAPD|NPHIE|PHIE] */
249 0x00, 0x00, 0xce, 0x00, 0x00, 0x00, /* 66 - 71 */
250 /* 72: [P]HASHST/[P]HASHCHK */
251 0x80, 0x00, /* 72 - 73 */
252 };
253 uint8_t *pa_features = NULL;
254 size_t pa_size;
255
256 if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_06, 0, cpu->compat_pvr)) {
257 pa_features = pa_features_206;
258 pa_size = sizeof(pa_features_206);
259 }
260 if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_07, 0, cpu->compat_pvr)) {
261 pa_features = pa_features_207;
262 pa_size = sizeof(pa_features_207);
263 }
264 if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, cpu->compat_pvr)) {
265 pa_features = pa_features_300;
266 pa_size = sizeof(pa_features_300);
267 }
268 if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0, cpu->compat_pvr)) {
269 pa_features = pa_features_31;
270 pa_size = sizeof(pa_features_31);
271 }
272 if (!pa_features) {
273 return;
274 }
275
276 if (ppc_hash64_has(cpu, PPC_HASH64_CI_LARGEPAGE)) {
        /*
         * Note: we keep CI large pages off by default because a 64K capable
         * guest provisioned with large pages might otherwise try to map a qemu
         * framebuffer (or other kind of memory mapped PCI BAR) using 64K pages
         * even if that qemu runs on a 4k host.
         * We add this bit back here if we are confident this is not an issue.
         */
284 pa_features[3] |= 0x20;
285 }
286 if ((spapr_get_cap(spapr, SPAPR_CAP_HTM) != 0) && pa_size > 24) {
287 pa_features[24] |= 0x80; /* Transactional memory support */
288 }
289 if (spapr->cas_pre_isa3_guest && pa_size > 40) {
290 /* Workaround for broken kernels that attempt (guest) radix
291 * mode when they can't handle it, if they see the radix bit set
292 * in pa-features. So hide it from them. */
293 pa_features[40 + 2] &= ~0x80; /* Radix MMU */
294 }
295
296 _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", pa_features, pa_size)));
297 }
298
static void spapr_dt_pi_features(SpaprMachineState *spapr,
300 PowerPCCPU *cpu,
301 void *fdt, int offset)
302 {
303 uint8_t pi_features[] = { 1, 0,
304 0x00 };
305
306 if (kvm_enabled() && ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00,
307 0, cpu->compat_pvr)) {
308 /*
309 * POWER9 and later CPUs with KVM run in LPAR-per-thread mode where
310 * all threads are essentially independent CPUs, and msgsndp does not
311 * work (because it is physically-addressed) and therefore is
312 * emulated by KVM, so disable it here to ensure XIVE will be used.
313 * This is both KVM and CPU implementation-specific behaviour so a KVM
314 * cap would be cleanest, but for now this works. If KVM ever permits
315 * native msgsndp execution by guests, a cap could be added at that
316 * time.
317 */
318 pi_features[2] |= 0x08; /* 4: No msgsndp */
319 }
320
321 _FDT((fdt_setprop(fdt, offset, "ibm,pi-features", pi_features,
322 sizeof(pi_features))));
323 }
324
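/*
 * Size of the first NUMA node that has memory, rounded down to a power of
 * two and capped at ram_size; all of RAM when no NUMA nodes are defined.
 */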
static hwaddr spapr_node0_size(MachineState *machine)
326 {
327 if (machine->numa_state->num_nodes) {
328 int i;
329 for (i = 0; i < machine->numa_state->num_nodes; ++i) {
330 if (machine->numa_state->nodes[i].node_mem) {
331 return MIN(pow2floor(machine->numa_state->nodes[i].node_mem),
332 machine->ram_size);
333 }
334 }
335 }
336 return machine->ram_size;
337 }
338
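/*
 * Append a string to a GString including its trailing NUL, as needed for
 * FDT string-list properties such as ibm,hypertas-functions.
 */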
static void add_str(GString *s, const gchar *s1)
340 {
341 g_string_append_len(s, s1, strlen(s1) + 1);
342 }
343
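/*
 * Add a memory@<start> node covering [start, start + size) along with its
 * NUMA associativity.
 */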
static int spapr_dt_memory_node(SpaprMachineState *spapr, void *fdt, int nodeid,
345 hwaddr start, hwaddr size)
346 {
347 char mem_name[32];
348 uint64_t mem_reg_property[2];
349 int off;
350
351 mem_reg_property[0] = cpu_to_be64(start);
352 mem_reg_property[1] = cpu_to_be64(size);
353
354 sprintf(mem_name, "memory@%" HWADDR_PRIx, start);
355 off = fdt_add_subnode(fdt, 0, mem_name);
356 _FDT(off);
357 _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
358 _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property,
359 sizeof(mem_reg_property))));
360 spapr_numa_write_associativity_dt(spapr, fdt, off, nodeid);
361 return off;
362 }
363
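/* Return the NUMA node of the plugged DIMM that covers @addr, or -1 if none */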
static uint32_t spapr_pc_dimm_node(MemoryDeviceInfoList *list, ram_addr_t addr)
365 {
366 MemoryDeviceInfoList *info;
367
368 for (info = list; info; info = info->next) {
369 MemoryDeviceInfo *value = info->value;
370
371 if (value && value->type == MEMORY_DEVICE_INFO_KIND_DIMM) {
372 PCDIMMDeviceInfo *pcdimm_info = value->u.dimm.data;
373
374 if (addr >= pcdimm_info->addr &&
375 addr < (pcdimm_info->addr + pcdimm_info->size)) {
376 return pcdimm_info->node;
377 }
378 }
379 }
380
381 return -1;
382 }
383
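/*
 * One entry of the ibm,dynamic-memory-v2 property: a run of seq_lmbs
 * contiguous LMBs starting at base_addr that share the same flags and
 * associativity-list index. All fields are stored big-endian.
 */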
384 struct sPAPRDrconfCellV2 {
385 uint32_t seq_lmbs;
386 uint64_t base_addr;
387 uint32_t drc_index;
388 uint32_t aa_index;
389 uint32_t flags;
390 } QEMU_PACKED;
391
392 typedef struct DrconfCellQueue {
393 struct sPAPRDrconfCellV2 cell;
394 QSIMPLEQ_ENTRY(DrconfCellQueue) entry;
395 } DrconfCellQueue;
396
397 static DrconfCellQueue *
spapr_get_drconf_cell(uint32_t seq_lmbs, uint64_t base_addr,
399 uint32_t drc_index, uint32_t aa_index,
400 uint32_t flags)
401 {
402 DrconfCellQueue *elem;
403
404 elem = g_malloc0(sizeof(*elem));
405 elem->cell.seq_lmbs = cpu_to_be32(seq_lmbs);
406 elem->cell.base_addr = cpu_to_be64(base_addr);
407 elem->cell.drc_index = cpu_to_be32(drc_index);
408 elem->cell.aa_index = cpu_to_be32(aa_index);
409 elem->cell.flags = cpu_to_be32(flags);
410
411 return elem;
412 }
413
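/*
 * Build the ibm,dynamic-memory-v2 property: an entry count followed by one
 * packed sPAPRDrconfCellV2 per run of LMBs with identical attributes.
 */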
static int spapr_dt_dynamic_memory_v2(SpaprMachineState *spapr, void *fdt,
415 int offset, MemoryDeviceInfoList *dimms)
416 {
417 MachineState *machine = MACHINE(spapr);
418 uint8_t *int_buf, *cur_index;
419 int ret;
420 uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
421 uint64_t addr, cur_addr, size;
422 uint32_t nr_boot_lmbs = (machine->device_memory->base / lmb_size);
423 uint64_t mem_end = machine->device_memory->base +
424 memory_region_size(&machine->device_memory->mr);
425 uint32_t node, buf_len, nr_entries = 0;
426 SpaprDrc *drc;
427 DrconfCellQueue *elem, *next;
428 MemoryDeviceInfoList *info;
429 QSIMPLEQ_HEAD(, DrconfCellQueue) drconf_queue
430 = QSIMPLEQ_HEAD_INITIALIZER(drconf_queue);
431
432 /* Entry to cover RAM and the gap area */
433 elem = spapr_get_drconf_cell(nr_boot_lmbs, 0, 0, -1,
434 SPAPR_LMB_FLAGS_RESERVED |
435 SPAPR_LMB_FLAGS_DRC_INVALID);
436 QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
437 nr_entries++;
438
439 cur_addr = machine->device_memory->base;
440 for (info = dimms; info; info = info->next) {
441 PCDIMMDeviceInfo *di = info->value->u.dimm.data;
442
443 addr = di->addr;
444 size = di->size;
445 node = di->node;
446
447 /*
448 * The NVDIMM area is hotpluggable after the NVDIMM is unplugged. The
449 * area is marked hotpluggable in the next iteration for the bigger
450 * chunk including the NVDIMM occupied area.
451 */
452 if (info->value->type == MEMORY_DEVICE_INFO_KIND_NVDIMM)
453 continue;
454
455 /* Entry for hot-pluggable area */
456 if (cur_addr < addr) {
457 drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, cur_addr / lmb_size);
458 g_assert(drc);
459 elem = spapr_get_drconf_cell((addr - cur_addr) / lmb_size,
460 cur_addr, spapr_drc_index(drc), -1, 0);
461 QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
462 nr_entries++;
463 }
464
465 /* Entry for DIMM */
466 drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, addr / lmb_size);
467 g_assert(drc);
468 elem = spapr_get_drconf_cell(size / lmb_size, addr,
469 spapr_drc_index(drc), node,
470 (SPAPR_LMB_FLAGS_ASSIGNED |
471 SPAPR_LMB_FLAGS_HOTREMOVABLE));
472 QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
473 nr_entries++;
474 cur_addr = addr + size;
475 }
476
477 /* Entry for remaining hotpluggable area */
478 if (cur_addr < mem_end) {
479 drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, cur_addr / lmb_size);
480 g_assert(drc);
481 elem = spapr_get_drconf_cell((mem_end - cur_addr) / lmb_size,
482 cur_addr, spapr_drc_index(drc), -1, 0);
483 QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
484 nr_entries++;
485 }
486
487 buf_len = nr_entries * sizeof(struct sPAPRDrconfCellV2) + sizeof(uint32_t);
488 int_buf = cur_index = g_malloc0(buf_len);
489 *(uint32_t *)int_buf = cpu_to_be32(nr_entries);
490 cur_index += sizeof(nr_entries);
491
492 QSIMPLEQ_FOREACH_SAFE(elem, &drconf_queue, entry, next) {
493 memcpy(cur_index, &elem->cell, sizeof(elem->cell));
494 cur_index += sizeof(elem->cell);
495 QSIMPLEQ_REMOVE(&drconf_queue, elem, DrconfCellQueue, entry);
496 g_free(elem);
497 }
498
499 ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory-v2", int_buf, buf_len);
500 g_free(int_buf);
501 if (ret < 0) {
502 return -1;
503 }
504 return 0;
505 }
506
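/*
 * Build the older ibm,dynamic-memory property: an LMB count followed by one
 * fixed-size entry per LMB.
 */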
static int spapr_dt_dynamic_memory(SpaprMachineState *spapr, void *fdt,
508 int offset, MemoryDeviceInfoList *dimms)
509 {
510 MachineState *machine = MACHINE(spapr);
511 int i, ret;
512 uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
513 uint32_t device_lmb_start = machine->device_memory->base / lmb_size;
514 uint32_t nr_lmbs = (machine->device_memory->base +
515 memory_region_size(&machine->device_memory->mr)) /
516 lmb_size;
517 uint32_t *int_buf, *cur_index, buf_len;
518
    /*
     * Allocate a buffer large enough to hold the ibm,dynamic-memory property
     */
522 buf_len = (nr_lmbs * SPAPR_DR_LMB_LIST_ENTRY_SIZE + 1) * sizeof(uint32_t);
523 cur_index = int_buf = g_malloc0(buf_len);
524 int_buf[0] = cpu_to_be32(nr_lmbs);
525 cur_index++;
526 for (i = 0; i < nr_lmbs; i++) {
527 uint64_t addr = i * lmb_size;
528 uint32_t *dynamic_memory = cur_index;
529
530 if (i >= device_lmb_start) {
531 SpaprDrc *drc;
532
533 drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, i);
534 g_assert(drc);
535
536 dynamic_memory[0] = cpu_to_be32(addr >> 32);
537 dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
538 dynamic_memory[2] = cpu_to_be32(spapr_drc_index(drc));
539 dynamic_memory[3] = cpu_to_be32(0); /* reserved */
540 dynamic_memory[4] = cpu_to_be32(spapr_pc_dimm_node(dimms, addr));
541 if (memory_region_present(get_system_memory(), addr)) {
542 dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_ASSIGNED);
543 } else {
544 dynamic_memory[5] = cpu_to_be32(0);
545 }
546 } else {
547 /*
548 * LMB information for RMA, boot time RAM and gap b/n RAM and
549 * device memory region -- all these are marked as reserved
550 * and as having no valid DRC.
551 */
552 dynamic_memory[0] = cpu_to_be32(addr >> 32);
553 dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
554 dynamic_memory[2] = cpu_to_be32(0);
555 dynamic_memory[3] = cpu_to_be32(0); /* reserved */
556 dynamic_memory[4] = cpu_to_be32(-1);
557 dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_RESERVED |
558 SPAPR_LMB_FLAGS_DRC_INVALID);
559 }
560
561 cur_index += SPAPR_DR_LMB_LIST_ENTRY_SIZE;
562 }
563 ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory", int_buf, buf_len);
564 g_free(int_buf);
565 if (ret < 0) {
566 return -1;
567 }
568 return 0;
569 }
570
571 /*
572 * Adds ibm,dynamic-reconfiguration-memory node.
573 * Refer to docs/specs/ppc-spapr-hotplug.txt for the documentation
574 * of this device tree node.
575 */
static int spapr_dt_dynamic_reconfiguration_memory(SpaprMachineState *spapr,
577 void *fdt)
578 {
579 MachineState *machine = MACHINE(spapr);
580 int ret, offset;
581 uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
582 uint32_t prop_lmb_size[] = {cpu_to_be32(lmb_size >> 32),
583 cpu_to_be32(lmb_size & 0xffffffff)};
584 MemoryDeviceInfoList *dimms = NULL;
585
586 /* Don't create the node if there is no device memory. */
587 if (!machine->device_memory) {
588 return 0;
589 }
590
591 offset = fdt_add_subnode(fdt, 0, "ibm,dynamic-reconfiguration-memory");
592
593 ret = fdt_setprop(fdt, offset, "ibm,lmb-size", prop_lmb_size,
594 sizeof(prop_lmb_size));
595 if (ret < 0) {
596 return ret;
597 }
598
599 ret = fdt_setprop_cell(fdt, offset, "ibm,memory-flags-mask", 0xff);
600 if (ret < 0) {
601 return ret;
602 }
603
604 ret = fdt_setprop_cell(fdt, offset, "ibm,memory-preservation-time", 0x0);
605 if (ret < 0) {
606 return ret;
607 }
608
609 /* ibm,dynamic-memory or ibm,dynamic-memory-v2 */
610 dimms = qmp_memory_device_list();
611 if (spapr_ovec_test(spapr->ov5_cas, OV5_DRMEM_V2)) {
612 ret = spapr_dt_dynamic_memory_v2(spapr, fdt, offset, dimms);
613 } else {
614 ret = spapr_dt_dynamic_memory(spapr, fdt, offset, dimms);
615 }
616 qapi_free_MemoryDeviceInfoList(dimms);
617
618 if (ret < 0) {
619 return ret;
620 }
621
622 ret = spapr_numa_write_assoc_lookup_arrays(spapr, fdt, offset);
623
624 return ret;
625 }
626
static int spapr_dt_memory(SpaprMachineState *spapr, void *fdt)
628 {
629 MachineState *machine = MACHINE(spapr);
630 hwaddr mem_start, node_size;
631 int i, nb_nodes = machine->numa_state->num_nodes;
632 NodeInfo *nodes = machine->numa_state->nodes;
633
634 for (i = 0, mem_start = 0; i < nb_nodes; ++i) {
635 if (!nodes[i].node_mem) {
636 continue;
637 }
638 if (mem_start >= machine->ram_size) {
639 node_size = 0;
640 } else {
641 node_size = nodes[i].node_mem;
642 if (node_size > machine->ram_size - mem_start) {
643 node_size = machine->ram_size - mem_start;
644 }
645 }
646 if (!mem_start) {
647 /* spapr_machine_init() checks for rma_size <= node0_size
648 * already */
649 spapr_dt_memory_node(spapr, fdt, i, 0, spapr->rma_size);
650 mem_start += spapr->rma_size;
651 node_size -= spapr->rma_size;
652 }
653 for ( ; node_size; ) {
654 hwaddr sizetmp = pow2floor(node_size);
655
656 /* mem_start != 0 here */
657 if (ctzl(mem_start) < ctzl(sizetmp)) {
658 sizetmp = 1ULL << ctzl(mem_start);
659 }
660
661 spapr_dt_memory_node(spapr, fdt, i, mem_start, sizetmp);
662 node_size -= sizetmp;
663 mem_start += sizetmp;
664 }
665 }
666
667 /* Generate ibm,dynamic-reconfiguration-memory node if required */
668 if (spapr_ovec_test(spapr->ov5_cas, OV5_DRCONF_MEMORY)) {
669 int ret;
670
671 ret = spapr_dt_dynamic_reconfiguration_memory(spapr, fdt);
672 if (ret) {
673 return ret;
674 }
675 }
676
677 return 0;
678 }
679
static void spapr_dt_cpu(CPUState *cs, void *fdt, int offset,
681 SpaprMachineState *spapr)
682 {
683 MachineState *ms = MACHINE(spapr);
684 PowerPCCPU *cpu = POWERPC_CPU(cs);
685 CPUPPCState *env = &cpu->env;
686 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
687 int index = spapr_get_vcpu_id(cpu);
688 uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40),
689 0xffffffff, 0xffffffff};
690 uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq()
691 : SPAPR_TIMEBASE_FREQ;
692 uint32_t cpufreq = kvm_enabled() ? kvmppc_get_clockfreq() : 1000000000;
693 uint32_t page_sizes_prop[64];
694 size_t page_sizes_prop_size;
695 unsigned int smp_threads = ms->smp.threads;
696 uint32_t vcpus_per_socket = smp_threads * ms->smp.cores;
697 uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};
698 int compat_smt = MIN(smp_threads, ppc_compat_max_vthreads(cpu));
699 SpaprDrc *drc;
700 int drc_index;
701 uint32_t radix_AP_encodings[PPC_PAGE_SIZES_MAX_SZ];
702 int i;
703
704 drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, index);
705 if (drc) {
706 drc_index = spapr_drc_index(drc);
707 _FDT((fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index)));
708 }
709
710 _FDT((fdt_setprop_cell(fdt, offset, "reg", index)));
711 _FDT((fdt_setprop_string(fdt, offset, "device_type", "cpu")));
712
713 _FDT((fdt_setprop_cell(fdt, offset, "cpu-version", env->spr[SPR_PVR])));
714 _FDT((fdt_setprop_cell(fdt, offset, "d-cache-block-size",
715 env->dcache_line_size)));
716 _FDT((fdt_setprop_cell(fdt, offset, "d-cache-line-size",
717 env->dcache_line_size)));
718 _FDT((fdt_setprop_cell(fdt, offset, "i-cache-block-size",
719 env->icache_line_size)));
720 _FDT((fdt_setprop_cell(fdt, offset, "i-cache-line-size",
721 env->icache_line_size)));
722
723 if (pcc->l1_dcache_size) {
724 _FDT((fdt_setprop_cell(fdt, offset, "d-cache-size",
725 pcc->l1_dcache_size)));
726 } else {
727 warn_report("Unknown L1 dcache size for cpu");
728 }
729 if (pcc->l1_icache_size) {
730 _FDT((fdt_setprop_cell(fdt, offset, "i-cache-size",
731 pcc->l1_icache_size)));
732 } else {
733 warn_report("Unknown L1 icache size for cpu");
734 }
735
736 _FDT((fdt_setprop_cell(fdt, offset, "timebase-frequency", tbfreq)));
737 _FDT((fdt_setprop_cell(fdt, offset, "clock-frequency", cpufreq)));
738 _FDT((fdt_setprop_cell(fdt, offset, "slb-size", cpu->hash64_opts->slb_size)));
739 _FDT((fdt_setprop_cell(fdt, offset, "ibm,slb-size", cpu->hash64_opts->slb_size)));
740 _FDT((fdt_setprop_string(fdt, offset, "status", "okay")));
741 _FDT((fdt_setprop(fdt, offset, "64-bit", NULL, 0)));
742
743 if (ppc_has_spr(cpu, SPR_PURR)) {
744 _FDT((fdt_setprop_cell(fdt, offset, "ibm,purr", 1)));
745 }
    if (ppc_has_spr(cpu, SPR_SPURR)) {
747 _FDT((fdt_setprop_cell(fdt, offset, "ibm,spurr", 1)));
748 }
749
750 if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)) {
751 _FDT((fdt_setprop(fdt, offset, "ibm,processor-segment-sizes",
752 segs, sizeof(segs))));
753 }
754
755 /* Advertise VSX (vector extensions) if available
756 * 1 == VMX / Altivec available
757 * 2 == VSX available
758 *
759 * Only CPUs for which we create core types in spapr_cpu_core.c
760 * are possible, and all of those have VMX */
761 if (env->insns_flags & PPC_ALTIVEC) {
762 if (spapr_get_cap(spapr, SPAPR_CAP_VSX) != 0) {
763 _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 2)));
764 } else {
765 _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 1)));
766 }
767 }
768
769 /* Advertise DFP (Decimal Floating Point) if available
770 * 0 / no property == no DFP
771 * 1 == DFP available */
772 if (spapr_get_cap(spapr, SPAPR_CAP_DFP) != 0) {
773 _FDT((fdt_setprop_cell(fdt, offset, "ibm,dfp", 1)));
774 }
775
776 page_sizes_prop_size = ppc_create_page_sizes_prop(cpu, page_sizes_prop,
777 sizeof(page_sizes_prop));
778 if (page_sizes_prop_size) {
779 _FDT((fdt_setprop(fdt, offset, "ibm,segment-page-sizes",
780 page_sizes_prop, page_sizes_prop_size)));
781 }
782
783 spapr_dt_pa_features(spapr, cpu, fdt, offset);
784
785 spapr_dt_pi_features(spapr, cpu, fdt, offset);
786
787 _FDT((fdt_setprop_cell(fdt, offset, "ibm,chip-id",
788 cs->cpu_index / vcpus_per_socket)));
789
790 _FDT((fdt_setprop(fdt, offset, "ibm,pft-size",
791 pft_size_prop, sizeof(pft_size_prop))));
792
793 if (ms->numa_state->num_nodes > 1) {
794 _FDT(spapr_numa_fixup_cpu_dt(spapr, fdt, offset, cpu));
795 }
796
797 _FDT(spapr_fixup_cpu_smt_dt(fdt, offset, cpu, compat_smt));
798
799 if (pcc->radix_page_info) {
800 for (i = 0; i < pcc->radix_page_info->count; i++) {
801 radix_AP_encodings[i] =
802 cpu_to_be32(pcc->radix_page_info->entries[i]);
803 }
804 _FDT((fdt_setprop(fdt, offset, "ibm,processor-radix-AP-encodings",
805 radix_AP_encodings,
806 pcc->radix_page_info->count *
807 sizeof(radix_AP_encodings[0]))));
808 }
809
    /*
     * We set this property to let the guest know that it can use the large
     * decrementer, and to tell it the decrementer width in bits.
     */
814 if (spapr_get_cap(spapr, SPAPR_CAP_LARGE_DECREMENTER) != SPAPR_CAP_OFF)
815 _FDT((fdt_setprop_u32(fdt, offset, "ibm,dec-bits",
816 pcc->lrg_decr_bits)));
817 }
818
static void spapr_dt_one_cpu(void *fdt, SpaprMachineState *spapr, CPUState *cs,
820 int cpus_offset)
821 {
822 PowerPCCPU *cpu = POWERPC_CPU(cs);
823 int index = spapr_get_vcpu_id(cpu);
824 DeviceClass *dc = DEVICE_GET_CLASS(cs);
825 g_autofree char *nodename = NULL;
826 int offset;
827
828 if (!spapr_is_thread0_in_vcore(spapr, cpu)) {
829 return;
830 }
831
832 nodename = g_strdup_printf("%s@%x", dc->fw_name, index);
833 offset = fdt_add_subnode(fdt, cpus_offset, nodename);
834 _FDT(offset);
835 spapr_dt_cpu(cs, fdt, offset, spapr);
836 }
837
838
static void spapr_dt_cpus(void *fdt, SpaprMachineState *spapr)
840 {
841 CPUState **rev;
842 CPUState *cs;
843 int n_cpus;
844 int cpus_offset;
845 int i;
846
847 cpus_offset = fdt_add_subnode(fdt, 0, "cpus");
848 _FDT(cpus_offset);
849 _FDT((fdt_setprop_cell(fdt, cpus_offset, "#address-cells", 0x1)));
850 _FDT((fdt_setprop_cell(fdt, cpus_offset, "#size-cells", 0x0)));
851
    /*
     * We walk the CPUs in reverse order to ensure that CPU DT nodes
     * created by fdt_add_subnode() end up in the right order in the FDT,
     * so that the guest kernel enumerates the CPUs correctly.
     *
     * The CPU list cannot be traversed in reverse order, so we need
     * to do extra work.
     */
860 n_cpus = 0;
861 rev = NULL;
862 CPU_FOREACH(cs) {
863 rev = g_renew(CPUState *, rev, n_cpus + 1);
864 rev[n_cpus++] = cs;
865 }
866
867 for (i = n_cpus - 1; i >= 0; i--) {
868 spapr_dt_one_cpu(fdt, spapr, rev[i], cpus_offset);
869 }
870
871 g_free(rev);
872 }
873
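/*
 * Advertise the spapr-rng device to the guest as an ibm,random-v1 node
 * under /ibm,platform-facilities.
 */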
static int spapr_dt_rng(void *fdt)
875 {
876 int node;
877 int ret;
878
879 node = qemu_fdt_add_subnode(fdt, "/ibm,platform-facilities");
880 if (node <= 0) {
881 return -1;
882 }
883 ret = fdt_setprop_string(fdt, node, "device_type",
884 "ibm,platform-facilities");
885 ret |= fdt_setprop_cell(fdt, node, "#address-cells", 0x1);
886 ret |= fdt_setprop_cell(fdt, node, "#size-cells", 0x0);
887
888 node = fdt_add_subnode(fdt, node, "ibm,random-v1");
889 if (node <= 0) {
890 return -1;
891 }
892 ret |= fdt_setprop_string(fdt, node, "compatible", "ibm,random");
893
894 return ret ? -1 : 0;
895 }
896
static void spapr_dt_rtas(SpaprMachineState *spapr, void *fdt)
898 {
899 MachineState *ms = MACHINE(spapr);
900 int rtas;
901 GString *hypertas = g_string_sized_new(256);
902 GString *qemu_hypertas = g_string_sized_new(256);
903 uint32_t lrdr_capacity[] = {
904 0,
905 0,
906 cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE >> 32),
907 cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE & 0xffffffff),
908 cpu_to_be32(ms->smp.max_cpus / ms->smp.threads),
909 };
910
911 /* Do we have device memory? */
912 if (MACHINE(spapr)->device_memory) {
913 uint64_t max_device_addr = MACHINE(spapr)->device_memory->base +
914 memory_region_size(&MACHINE(spapr)->device_memory->mr);
915
916 lrdr_capacity[0] = cpu_to_be32(max_device_addr >> 32);
917 lrdr_capacity[1] = cpu_to_be32(max_device_addr & 0xffffffff);
918 }
919
920 _FDT(rtas = fdt_add_subnode(fdt, 0, "rtas"));
921
922 /* hypertas */
923 add_str(hypertas, "hcall-pft");
924 add_str(hypertas, "hcall-term");
925 add_str(hypertas, "hcall-dabr");
926 add_str(hypertas, "hcall-interrupt");
927 add_str(hypertas, "hcall-tce");
928 add_str(hypertas, "hcall-vio");
929 add_str(hypertas, "hcall-splpar");
930 add_str(hypertas, "hcall-join");
931 add_str(hypertas, "hcall-bulk");
932 add_str(hypertas, "hcall-set-mode");
933 add_str(hypertas, "hcall-sprg0");
934 add_str(hypertas, "hcall-copy");
935 add_str(hypertas, "hcall-debug");
936 add_str(hypertas, "hcall-vphn");
937 if (spapr_get_cap(spapr, SPAPR_CAP_RPT_INVALIDATE) == SPAPR_CAP_ON) {
938 add_str(hypertas, "hcall-rpt-invalidate");
939 }
940
941 add_str(qemu_hypertas, "hcall-memop1");
942
943 if (!kvm_enabled() || kvmppc_spapr_use_multitce()) {
944 add_str(hypertas, "hcall-multi-tce");
945 }
946
947 if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) {
948 add_str(hypertas, "hcall-hpt-resize");
949 }
950
951 add_str(hypertas, "hcall-watchdog");
952
953 _FDT(fdt_setprop(fdt, rtas, "ibm,hypertas-functions",
954 hypertas->str, hypertas->len));
955 g_string_free(hypertas, TRUE);
956 _FDT(fdt_setprop(fdt, rtas, "qemu,hypertas-functions",
957 qemu_hypertas->str, qemu_hypertas->len));
958 g_string_free(qemu_hypertas, TRUE);
959
960 spapr_numa_write_rtas_dt(spapr, fdt, rtas);
961
962 /*
963 * FWNMI reserves RTAS_ERROR_LOG_MAX for the machine check error log,
964 * and 16 bytes per CPU for system reset error log plus an extra 8 bytes.
965 *
966 * The system reset requirements are driven by existing Linux and PowerVM
967 * implementation which (contrary to PAPR) saves r3 in the error log
968 * structure like machine check, so Linux expects to find the saved r3
969 * value at the address in r3 upon FWNMI-enabled sreset interrupt (and
970 * does not look at the error value).
971 *
972 * System reset interrupts are not subject to interlock like machine
973 * check, so this memory area could be corrupted if the sreset is
974 * interrupted by a machine check (or vice versa) if it was shared. To
975 * prevent this, system reset uses per-CPU areas for the sreset save
976 * area. A system reset that interrupts a system reset handler could
977 * still overwrite this area, but Linux doesn't try to recover in that
978 * case anyway.
979 *
980 * The extra 8 bytes is required because Linux's FWNMI error log check
981 * is off-by-one.
982 *
983 * RTAS_MIN_SIZE is required for the RTAS blob itself.
984 */
985 _FDT(fdt_setprop_cell(fdt, rtas, "rtas-size", RTAS_MIN_SIZE +
986 RTAS_ERROR_LOG_MAX +
987 ms->smp.max_cpus * sizeof(uint64_t) * 2 +
988 sizeof(uint64_t)));
989 _FDT(fdt_setprop_cell(fdt, rtas, "rtas-error-log-max",
990 RTAS_ERROR_LOG_MAX));
991 _FDT(fdt_setprop_cell(fdt, rtas, "rtas-event-scan-rate",
992 RTAS_EVENT_SCAN_RATE));
993
994 g_assert(msi_nonbroken);
995 _FDT(fdt_setprop(fdt, rtas, "ibm,change-msix-capable", NULL, 0));
996
    /*
     * According to PAPR, the ibm,os-term RTAS call does not guarantee a
     * return back to the guest cpu.
     *
     * The additional ibm,extended-os-term property indicates that the RTAS
     * call will always return to the guest. Set this property.
     */
1004 _FDT(fdt_setprop(fdt, rtas, "ibm,extended-os-term", NULL, 0));
1005
1006 _FDT(fdt_setprop(fdt, rtas, "ibm,lrdr-capacity",
1007 lrdr_capacity, sizeof(lrdr_capacity)));
1008
1009 spapr_dt_rtas_tokens(fdt, rtas);
1010 }
1011
1012 /*
1013 * Prepare ibm,arch-vec-5-platform-support, which indicates the MMU
1014 * and the XIVE features that the guest may request and thus the valid
1015 * values for bytes 23..26 of option vector 5:
1016 */
static void spapr_dt_ov5_platform_support(SpaprMachineState *spapr, void *fdt,
1018 int chosen)
1019 {
1020 PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
1021
1022 char val[2 * 4] = {
1023 23, 0x00, /* XICS / XIVE mode */
1024 24, 0x00, /* Hash/Radix, filled in below. */
1025 25, 0x00, /* Hash options: Segment Tables == no, GTSE == no. */
1026 26, 0x40, /* Radix options: GTSE == yes. */
1027 };
1028
1029 if (spapr->irq->xics && spapr->irq->xive) {
1030 val[1] = SPAPR_OV5_XIVE_BOTH;
1031 } else if (spapr->irq->xive) {
1032 val[1] = SPAPR_OV5_XIVE_EXPLOIT;
1033 } else {
1034 assert(spapr->irq->xics);
1035 val[1] = SPAPR_OV5_XIVE_LEGACY;
1036 }
1037
1038 if (!ppc_check_compat(first_ppc_cpu, CPU_POWERPC_LOGICAL_3_00, 0,
1039 first_ppc_cpu->compat_pvr)) {
1040 /*
1041 * If we're in a pre POWER9 compat mode then the guest should
1042 * do hash and use the legacy interrupt mode
1043 */
1044 val[1] = SPAPR_OV5_XIVE_LEGACY; /* XICS */
1045 val[3] = 0x00; /* Hash */
1046 spapr_check_mmu_mode(false);
1047 } else if (kvm_enabled()) {
1048 if (kvmppc_has_cap_mmu_radix() && kvmppc_has_cap_mmu_hash_v3()) {
1049 val[3] = 0x80; /* OV5_MMU_BOTH */
1050 } else if (kvmppc_has_cap_mmu_radix()) {
1051 val[3] = 0x40; /* OV5_MMU_RADIX_300 */
1052 } else {
1053 val[3] = 0x00; /* Hash */
1054 }
1055 } else {
1056 /* V3 MMU supports both hash and radix in tcg (with dynamic switching) */
1057 val[3] = 0xC0;
1058 }
1059 _FDT(fdt_setprop(fdt, chosen, "ibm,arch-vec-5-platform-support",
1060 val, sizeof(val)));
1061 }
1062
static void spapr_dt_chosen(SpaprMachineState *spapr, void *fdt, bool reset)
1064 {
1065 MachineState *machine = MACHINE(spapr);
1066 SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
1067 int chosen;
1068
1069 _FDT(chosen = fdt_add_subnode(fdt, 0, "chosen"));
1070
1071 if (reset) {
1072 const char *boot_device = spapr->boot_device;
1073 g_autofree char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus);
1074 size_t cb = 0;
1075 g_autofree char *bootlist = get_boot_devices_list(&cb);
1076
1077 if (machine->kernel_cmdline && machine->kernel_cmdline[0]) {
1078 _FDT(fdt_setprop_string(fdt, chosen, "bootargs",
1079 machine->kernel_cmdline));
1080 }
1081
1082 if (spapr->initrd_size) {
1083 _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-start",
1084 spapr->initrd_base));
1085 _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-end",
1086 spapr->initrd_base + spapr->initrd_size));
1087 }
1088
1089 if (spapr->kernel_size) {
1090 uint64_t kprop[2] = { cpu_to_be64(spapr->kernel_addr),
1091 cpu_to_be64(spapr->kernel_size) };
1092
1093 _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel",
1094 &kprop, sizeof(kprop)));
1095 if (spapr->kernel_le) {
1096 _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel-le", NULL, 0));
1097 }
1098 }
1099 if (machine->boot_config.has_menu && machine->boot_config.menu) {
1100 _FDT((fdt_setprop_cell(fdt, chosen, "qemu,boot-menu", true)));
1101 }
1102 _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-width", graphic_width));
1103 _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-height", graphic_height));
1104 _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-depth", graphic_depth));
1105
1106 if (cb && bootlist) {
1107 int i;
1108
1109 for (i = 0; i < cb; i++) {
1110 if (bootlist[i] == '\n') {
1111 bootlist[i] = ' ';
1112 }
1113 }
1114 _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-list", bootlist));
1115 }
1116
1117 if (boot_device && strlen(boot_device)) {
1118 _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-device", boot_device));
1119 }
1120
1121 if (spapr->want_stdout_path && stdout_path) {
1122 /*
1123 * "linux,stdout-path" and "stdout" properties are
1124 * deprecated by linux kernel. New platforms should only
1125 * use the "stdout-path" property. Set the new property
1126 * and continue using older property to remain compatible
1127 * with the existing firmware.
1128 */
1129 _FDT(fdt_setprop_string(fdt, chosen, "linux,stdout-path", stdout_path));
1130 _FDT(fdt_setprop_string(fdt, chosen, "stdout-path", stdout_path));
1131 }
1132
1133 /*
1134 * We can deal with BAR reallocation just fine, advertise it
1135 * to the guest
1136 */
1137 if (smc->linux_pci_probe) {
1138 _FDT(fdt_setprop_cell(fdt, chosen, "linux,pci-probe-only", 0));
1139 }
1140
1141 spapr_dt_ov5_platform_support(spapr, fdt, chosen);
1142 }
1143
1144 _FDT(fdt_setprop(fdt, chosen, "rng-seed", spapr->fdt_rng_seed, 32));
1145
1146 _FDT(spapr_dt_ovec(fdt, chosen, spapr->ov5_cas, "ibm,architecture-vec-5"));
1147 }
1148
static void spapr_dt_hypervisor(SpaprMachineState *spapr, void *fdt)
1150 {
1151 /* The /hypervisor node isn't in PAPR - this is a hack to allow PR
1152 * KVM to work under pHyp with some guest co-operation */
1153 int hypervisor;
1154 uint8_t hypercall[16];
1155
1156 _FDT(hypervisor = fdt_add_subnode(fdt, 0, "hypervisor"));
1157 /* indicate KVM hypercall interface */
1158 _FDT(fdt_setprop_string(fdt, hypervisor, "compatible", "linux,kvm"));
1159 if (kvmppc_has_cap_fixup_hcalls()) {
1160 /*
1161 * Older KVM versions with older guest kernels were broken
1162 * with the magic page, don't allow the guest to map it.
1163 */
1164 if (!kvmppc_get_hypercall(cpu_env(first_cpu), hypercall,
1165 sizeof(hypercall))) {
1166 _FDT(fdt_setprop(fdt, hypervisor, "hcall-instructions",
1167 hypercall, sizeof(hypercall)));
1168 }
1169 }
1170 }
1171
void *spapr_build_fdt(SpaprMachineState *spapr, bool reset, size_t space)
1173 {
1174 MachineState *machine = MACHINE(spapr);
1175 MachineClass *mc = MACHINE_GET_CLASS(machine);
1176 SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
1177 uint32_t root_drc_type_mask = 0;
1178 int ret;
1179 void *fdt;
1180 SpaprPhbState *phb;
1181 char *buf;
1182
1183 fdt = g_malloc0(space);
1184 _FDT((fdt_create_empty_tree(fdt, space)));
1185
1186 /* Root node */
1187 _FDT(fdt_setprop_string(fdt, 0, "device_type", "chrp"));
1188 _FDT(fdt_setprop_string(fdt, 0, "model", "IBM pSeries (emulated by qemu)"));
1189 _FDT(fdt_setprop_string(fdt, 0, "compatible", "qemu,pseries"));
1190
    /* Guest UUID & Name */
1192 buf = qemu_uuid_unparse_strdup(&qemu_uuid);
1193 _FDT(fdt_setprop_string(fdt, 0, "vm,uuid", buf));
1194 if (qemu_uuid_set) {
1195 _FDT(fdt_setprop_string(fdt, 0, "system-id", buf));
1196 }
1197 g_free(buf);
1198
1199 if (qemu_get_vm_name()) {
1200 _FDT(fdt_setprop_string(fdt, 0, "ibm,partition-name",
1201 qemu_get_vm_name()));
1202 }
1203
1204 /* Host Model & Serial Number */
1205 if (spapr->host_model) {
1206 _FDT(fdt_setprop_string(fdt, 0, "host-model", spapr->host_model));
1207 } else if (smc->broken_host_serial_model && kvmppc_get_host_model(&buf)) {
1208 _FDT(fdt_setprop_string(fdt, 0, "host-model", buf));
1209 g_free(buf);
1210 }
1211
1212 if (spapr->host_serial) {
1213 _FDT(fdt_setprop_string(fdt, 0, "host-serial", spapr->host_serial));
1214 } else if (smc->broken_host_serial_model && kvmppc_get_host_serial(&buf)) {
1215 _FDT(fdt_setprop_string(fdt, 0, "host-serial", buf));
1216 g_free(buf);
1217 }
1218
1219 _FDT(fdt_setprop_cell(fdt, 0, "#address-cells", 2));
1220 _FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2));
1221
1222 /* /interrupt controller */
1223 spapr_irq_dt(spapr, spapr_max_server_number(spapr), fdt, PHANDLE_INTC);
1224
1225 ret = spapr_dt_memory(spapr, fdt);
1226 if (ret < 0) {
1227 error_report("couldn't setup memory nodes in fdt");
1228 exit(1);
1229 }
1230
1231 /* /vdevice */
1232 spapr_dt_vdevice(spapr->vio_bus, fdt);
1233
1234 if (object_resolve_path_type("", TYPE_SPAPR_RNG, NULL)) {
1235 ret = spapr_dt_rng(fdt);
1236 if (ret < 0) {
1237 error_report("could not set up rng device in the fdt");
1238 exit(1);
1239 }
1240 }
1241
1242 QLIST_FOREACH(phb, &spapr->phbs, list) {
1243 ret = spapr_dt_phb(spapr, phb, PHANDLE_INTC, fdt, NULL);
1244 if (ret < 0) {
1245 error_report("couldn't setup PCI devices in fdt");
1246 exit(1);
1247 }
1248 }
1249
1250 spapr_dt_cpus(fdt, spapr);
1251
1252 /* ibm,drc-indexes and friends */
1253 root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_LMB;
1254 if (smc->dr_phb_enabled) {
1255 root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_PHB;
1256 }
1257 if (mc->nvdimm_supported) {
1258 root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_PMEM;
1259 }
1260 if (root_drc_type_mask) {
1261 _FDT(spapr_dt_drc(fdt, 0, NULL, root_drc_type_mask));
1262 }
1263
1264 if (mc->has_hotpluggable_cpus) {
1265 int offset = fdt_path_offset(fdt, "/cpus");
1266 ret = spapr_dt_drc(fdt, offset, NULL, SPAPR_DR_CONNECTOR_TYPE_CPU);
1267 if (ret < 0) {
1268 error_report("Couldn't set up CPU DR device tree properties");
1269 exit(1);
1270 }
1271 }
1272
1273 /* /event-sources */
1274 spapr_dt_events(spapr, fdt);
1275
1276 /* /rtas */
1277 spapr_dt_rtas(spapr, fdt);
1278
1279 /* /chosen */
1280 spapr_dt_chosen(spapr, fdt, reset);
1281
1282 /* /hypervisor */
1283 if (kvm_enabled()) {
1284 spapr_dt_hypervisor(spapr, fdt);
1285 }
1286
1287 /* Build memory reserve map */
1288 if (reset) {
1289 if (spapr->kernel_size) {
1290 _FDT((fdt_add_mem_rsv(fdt, spapr->kernel_addr,
1291 spapr->kernel_size)));
1292 }
1293 if (spapr->initrd_size) {
1294 _FDT((fdt_add_mem_rsv(fdt, spapr->initrd_base,
1295 spapr->initrd_size)));
1296 }
1297 }
1298
1299 /* NVDIMM devices */
1300 if (mc->nvdimm_supported) {
1301 spapr_dt_persistent_memory(spapr, fdt);
1302 }
1303
1304 return fdt;
1305 }
1306
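/*
 * Relocate an address from the kernel image (masked to 256 MiB) into the
 * kernel load area.
 */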
static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
1308 {
1309 SpaprMachineState *spapr = opaque;
1310
1311 return (addr & 0x0fffffff) + spapr->kernel_addr;
1312 }
1313
static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp,
1315 PowerPCCPU *cpu)
1316 {
1317 CPUPPCState *env = &cpu->env;
1318
1319 /* The TCG path should also be holding the BQL at this point */
1320 g_assert(bql_locked());
1321
1322 g_assert(!vhyp_cpu_in_nested(cpu));
1323
1324 if (FIELD_EX64(env->msr, MSR, PR)) {
1325 hcall_dprintf("Hypercall made with MSR[PR]=1\n");
1326 env->gpr[3] = H_PRIVILEGE;
1327 } else {
1328 env->gpr[3] = spapr_hypercall(cpu, env->gpr[3], &env->gpr[4]);
1329 }
1330 }
1331
1332 struct LPCRSyncState {
1333 target_ulong value;
1334 target_ulong mask;
1335 };
1336
static void do_lpcr_sync(CPUState *cs, run_on_cpu_data arg)
1338 {
1339 struct LPCRSyncState *s = arg.host_ptr;
1340 PowerPCCPU *cpu = POWERPC_CPU(cs);
1341 CPUPPCState *env = &cpu->env;
1342 target_ulong lpcr;
1343
1344 cpu_synchronize_state(cs);
1345 lpcr = env->spr[SPR_LPCR];
1346 lpcr &= ~s->mask;
1347 lpcr |= s->value;
1348 ppc_store_lpcr(cpu, lpcr);
1349 }
1350
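/* Apply @value under @mask to LPCR on every vCPU, synchronized via run_on_cpu() */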
void spapr_set_all_lpcrs(target_ulong value, target_ulong mask)
1352 {
1353 CPUState *cs;
1354 struct LPCRSyncState s = {
1355 .value = value,
1356 .mask = mask
1357 };
1358 CPU_FOREACH(cs) {
1359 run_on_cpu(cs, do_lpcr_sync, RUN_ON_CPU_HOST_PTR(&s));
1360 }
1361 }
1362
1363 /* May be used when the machine is not running */
void spapr_init_all_lpcrs(target_ulong value, target_ulong mask)
1365 {
1366 CPUState *cs;
1367 CPU_FOREACH(cs) {
1368 PowerPCCPU *cpu = POWERPC_CPU(cs);
1369 CPUPPCState *env = &cpu->env;
1370 target_ulong lpcr;
1371
1372 lpcr = env->spr[SPR_LPCR];
1373 lpcr &= ~(LPCR_HR | LPCR_UPRT);
1374 ppc_store_lpcr(cpu, lpcr);
1375 }
1376 }
1377
static bool spapr_get_pate(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu,
1379 target_ulong lpid, ppc_v3_pate_t *entry)
1380 {
1381 SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1382 SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
1383
1384 if (!spapr_cpu->in_nested) {
1385 assert(lpid == 0);
1386
1387 /* Copy PATE1:GR into PATE0:HR */
1388 entry->dw0 = spapr->patb_entry & PATE0_HR;
1389 entry->dw1 = spapr->patb_entry;
1390 return true;
1391 } else {
1392 if (spapr_nested_api(spapr) == NESTED_API_KVM_HV) {
1393 return spapr_get_pate_nested_hv(spapr, cpu, lpid, entry);
1394 } else if (spapr_nested_api(spapr) == NESTED_API_PAPR) {
1395 return spapr_get_pate_nested_papr(spapr, cpu, lpid, entry);
1396 } else {
1397 g_assert_not_reached();
1398 }
1399 }
1400 }
1401
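/* Helpers to locate and dirty-track HPTEs in the QEMU-allocated hash table */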
1402 #define HPTE(_table, _i) (void *)(((uint64_t *)(_table)) + ((_i) * 2))
1403 #define HPTE_VALID(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_VALID)
1404 #define HPTE_DIRTY(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_HPTE_DIRTY)
1405 #define CLEAN_HPTE(_hpte) ((*(uint64_t *)(_hpte)) &= tswap64(~HPTE64_V_HPTE_DIRTY))
1406 #define DIRTY_HPTE(_hpte) ((*(uint64_t *)(_hpte)) |= tswap64(HPTE64_V_HPTE_DIRTY))
1407
1408 /*
1409 * Get the fd to access the kernel htab, re-opening it if necessary
1410 */
static int get_htab_fd(SpaprMachineState *spapr)
1412 {
1413 Error *local_err = NULL;
1414
1415 if (spapr->htab_fd >= 0) {
1416 return spapr->htab_fd;
1417 }
1418
1419 spapr->htab_fd = kvmppc_get_htab_fd(false, 0, &local_err);
1420 if (spapr->htab_fd < 0) {
1421 error_report_err(local_err);
1422 }
1423
1424 return spapr->htab_fd;
1425 }
1426
void close_htab_fd(SpaprMachineState *spapr)
1428 {
1429 if (spapr->htab_fd >= 0) {
1430 close(spapr->htab_fd);
1431 }
1432 spapr->htab_fd = -1;
1433 }
1434
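/* Hash mask for the guest HPT: the number of PTEGs minus one */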
static hwaddr spapr_hpt_mask(PPCVirtualHypervisor *vhyp)
1436 {
1437 SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1438
1439 return HTAB_SIZE(spapr) / HASH_PTEG_SIZE_64 - 1;
1440 }
1441
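/*
 * Encode the userspace HPT for KVM PR: the host address of the table with
 * the size encoded in the low bits as htab_shift - 18 (as in SDR1's HTABSIZE).
 */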
static target_ulong spapr_encode_hpt_for_kvm_pr(PPCVirtualHypervisor *vhyp)
1443 {
1444 SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1445
1446 assert(kvm_enabled());
1447
1448 if (!spapr->htab) {
1449 return 0;
1450 }
1451
1452 return (target_ulong)(uintptr_t)spapr->htab | (spapr->htab_shift - 18);
1453 }
1454
static const ppc_hash_pte64_t *spapr_map_hptes(PPCVirtualHypervisor *vhyp,
1456 hwaddr ptex, int n)
1457 {
1458 SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1459 hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
1460
1461 if (!spapr->htab) {
1462 /*
1463 * HTAB is controlled by KVM. Fetch into temporary buffer
1464 */
1465 ppc_hash_pte64_t *hptes = g_malloc(n * HASH_PTE_SIZE_64);
1466 kvmppc_read_hptes(hptes, ptex, n);
1467 return hptes;
1468 }
1469
1470 /*
1471 * HTAB is controlled by QEMU. Just point to the internally
1472 * accessible PTEG.
1473 */
1474 return (const ppc_hash_pte64_t *)(spapr->htab + pte_offset);
1475 }
1476
static void spapr_unmap_hptes(PPCVirtualHypervisor *vhyp,
1478 const ppc_hash_pte64_t *hptes,
1479 hwaddr ptex, int n)
1480 {
1481 SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1482
1483 if (!spapr->htab) {
1484 g_free((void *)hptes);
1485 }
1486
1487 /* Nothing to do for qemu managed HPT */
1488 }
1489
void spapr_store_hpte(PowerPCCPU *cpu, hwaddr ptex,
1491 uint64_t pte0, uint64_t pte1)
1492 {
1493 SpaprMachineState *spapr = SPAPR_MACHINE(cpu->vhyp);
1494 hwaddr offset = ptex * HASH_PTE_SIZE_64;
1495
1496 if (!spapr->htab) {
1497 kvmppc_write_hpte(ptex, pte0, pte1);
1498 } else {
1499 if (pte0 & HPTE64_V_VALID) {
1500 stq_p(spapr->htab + offset + HPTE64_DW1, pte1);
1501 /*
1502 * When setting valid, we write PTE1 first. This ensures
1503 * proper synchronization with the reading code in
1504 * ppc_hash64_pteg_search()
1505 */
1506 smp_wmb();
1507 stq_p(spapr->htab + offset, pte0);
1508 } else {
1509 stq_p(spapr->htab + offset, pte0);
1510 /*
1511 * When clearing it we set PTE0 first. This ensures proper
1512 * synchronization with the reading code in
1513 * ppc_hash64_pteg_search()
1514 */
1515 smp_wmb();
1516 stq_p(spapr->htab + offset + HPTE64_DW1, pte1);
1517 }
1518 }
1519 }
1520
static void spapr_hpte_set_c(PPCVirtualHypervisor *vhyp, hwaddr ptex,
1522 uint64_t pte1)
1523 {
1524 hwaddr offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_C;
1525 SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1526
1527 if (!spapr->htab) {
1528 /* There should always be a hash table when this is called */
1529 error_report("spapr_hpte_set_c called with no hash table !");
1530 return;
1531 }
1532
1533 /* The HW performs a non-atomic byte update */
1534 stb_p(spapr->htab + offset, (pte1 & 0xff) | 0x80);
1535 }
1536
static void spapr_hpte_set_r(PPCVirtualHypervisor *vhyp, hwaddr ptex,
1538 uint64_t pte1)
1539 {
1540 hwaddr offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_R;
1541 SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1542
1543 if (!spapr->htab) {
1544 /* There should always be a hash table when this is called */
1545 error_report("spapr_hpte_set_r called with no hash table !");
1546 return;
1547 }
1548
1549 /* The HW performs a non-atomic byte update */
1550 stb_p(spapr->htab + offset, ((pte1 >> 8) & 0xff) | 0x01);
1551 }
1552
int spapr_hpt_shift_for_ramsize(uint64_t ramsize)
1554 {
1555 int shift;
1556
1557 /* We aim for a hash table of size 1/128 the size of RAM (rounded
1558 * up). The PAPR recommendation is actually 1/64 of RAM size, but
1559 * that's much more than is needed for Linux guests */
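    /*
     * Worked example (illustrative): for 16 GiB of RAM,
     * ctz64(pow2ceil(16 GiB)) is 34, so shift = 34 - 7 = 27,
     * i.e. a 128 MiB hash table.
     */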
1560 shift = ctz64(pow2ceil(ramsize)) - 7;
1561 shift = MAX(shift, 18); /* Minimum architected size */
1562 shift = MIN(shift, 46); /* Maximum architected size */
1563 return shift;
1564 }
1565
void spapr_free_hpt(SpaprMachineState *spapr)
1567 {
1568 qemu_vfree(spapr->htab);
1569 spapr->htab = NULL;
1570 spapr->htab_shift = 0;
1571 close_htab_fd(spapr);
1572 }
1573
int spapr_reallocate_hpt(SpaprMachineState *spapr, int shift, Error **errp)
1575 {
1576 ERRP_GUARD();
1577 long rc;
1578
1579 /* Clean up any HPT info from a previous boot */
1580 spapr_free_hpt(spapr);
1581
1582 rc = kvmppc_reset_htab(shift);
1583
1584 if (rc == -EOPNOTSUPP) {
1585 error_setg(errp, "HPT not supported in nested guests");
1586 return -EOPNOTSUPP;
1587 }
1588
1589 if (rc < 0) {
1590 /* kernel-side HPT needed, but couldn't allocate one */
1591 error_setg_errno(errp, errno, "Failed to allocate KVM HPT of order %d",
1592 shift);
1593 error_append_hint(errp, "Try smaller maxmem?\n");
1594 return -errno;
1595 } else if (rc > 0) {
1596 /* kernel-side HPT allocated */
1597 if (rc != shift) {
1598 error_setg(errp,
1599 "Requested order %d HPT, but kernel allocated order %ld",
1600 shift, rc);
1601 error_append_hint(errp, "Try smaller maxmem?\n");
1602 return -ENOSPC;
1603 }
1604
1605 spapr->htab_shift = shift;
1606 spapr->htab = NULL;
1607 } else {
1608 /* kernel-side HPT not needed, allocate in userspace instead */
1609 size_t size = 1ULL << shift;
1610 int i;
1611
1612 spapr->htab = qemu_memalign(size, size);
1613 memset(spapr->htab, 0, size);
1614 spapr->htab_shift = shift;
1615
1616 for (i = 0; i < size / HASH_PTE_SIZE_64; i++) {
1617 DIRTY_HPTE(HPTE(spapr->htab, i));
1618 }
1619 }
1620 /* We're setting up a hash table, so that means we're not radix */
1621 spapr->patb_entry = 0;
1622 spapr_init_all_lpcrs(0, LPCR_HR | LPCR_UPRT);
1623 return 0;
1624 }
1625
void spapr_setup_hpt(SpaprMachineState *spapr)
1627 {
1628 int hpt_shift;
1629
1630 if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) {
1631 hpt_shift = spapr_hpt_shift_for_ramsize(MACHINE(spapr)->maxram_size);
1632 } else {
1633 uint64_t current_ram_size;
1634
1635 current_ram_size = MACHINE(spapr)->ram_size + get_plugged_memory_size();
1636 hpt_shift = spapr_hpt_shift_for_ramsize(current_ram_size);
1637 }
1638 spapr_reallocate_hpt(spapr, hpt_shift, &error_fatal);
1639
1640 if (kvm_enabled()) {
1641 hwaddr vrma_limit = kvmppc_vrma_limit(spapr->htab_shift);
1642
1643 /* Check our RMA fits in the possible VRMA */
1644 if (vrma_limit < spapr->rma_size) {
1645 error_report("Unable to create %" HWADDR_PRIu
1646 "MiB RMA (VRMA only allows %" HWADDR_PRIu "MiB",
1647 spapr->rma_size / MiB, vrma_limit / MiB);
1648 exit(EXIT_FAILURE);
1649 }
1650 }
1651 }
1652
void spapr_check_mmu_mode(bool guest_radix)
1654 {
1655 if (guest_radix) {
1656 if (kvm_enabled() && !kvmppc_has_cap_mmu_radix()) {
1657 error_report("Guest requested unavailable MMU mode (radix).");
1658 exit(EXIT_FAILURE);
1659 }
1660 } else {
1661 if (kvm_enabled() && kvmppc_has_cap_mmu_radix()
1662 && !kvmppc_has_cap_mmu_hash_v3()) {
1663 error_report("Guest requested unavailable MMU mode (hash).");
1664 exit(EXIT_FAILURE);
1665 }
1666 }
1667 }
1668
1669 static void spapr_machine_reset(MachineState *machine, ResetType type)
1670 {
1671 SpaprMachineState *spapr = SPAPR_MACHINE(machine);
1672 PowerPCCPU *first_ppc_cpu;
1673 hwaddr fdt_addr;
1674 void *fdt;
1675 int rc;
1676
1677 if (type != RESET_TYPE_SNAPSHOT_LOAD) {
1678 /*
1679 * Record-replay snapshot load must not consume random data; it was
1680 * already replayed from the initial machine reset.
1681 */
1682 qemu_guest_getrandom_nofail(spapr->fdt_rng_seed, 32);
1683 }
1684
1685 if (machine->cgs) {
1686 confidential_guest_kvm_reset(machine->cgs, &error_fatal);
1687 }
1688 spapr_caps_apply(spapr);
1689 spapr_nested_reset(spapr);
1690
1691 first_ppc_cpu = POWERPC_CPU(first_cpu);
1692 if (kvm_enabled() && kvmppc_has_cap_mmu_radix() &&
1693 ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, 0,
1694 spapr->max_compat_pvr)) {
1695 /*
1696 * If using KVM with radix mode available, VCPUs can be started
1697 * without a HPT because KVM will start them in radix mode.
1698 * Set the GR bit in PATE so that we know there is no HPT.
1699 */
1700 spapr->patb_entry = PATE1_GR;
1701 spapr_set_all_lpcrs(LPCR_HR | LPCR_UPRT, LPCR_HR | LPCR_UPRT);
1702 } else {
1703 spapr_setup_hpt(spapr);
1704 }
1705
1706 qemu_devices_reset(type);
1707
1708 spapr_ovec_cleanup(spapr->ov5_cas);
1709 spapr->ov5_cas = spapr_ovec_new();
1710
1711 ppc_init_compat_all(spapr->max_compat_pvr, &error_fatal);
1712
1713 /*
1714 * This is fixing some of the default configuration of the XIVE
1715 * devices. To be called after the reset of the machine devices.
1716 */
1717 spapr_irq_reset(spapr, &error_fatal);
1718
1719 /*
1720 * There is no CAS under qtest. Simulate one to please the code that
1721 * depends on spapr->ov5_cas. This is especially needed to test device
1722 * unplug, so we do that before resetting the DRCs.
1723 */
1724 if (qtest_enabled()) {
1725 spapr_ovec_cleanup(spapr->ov5_cas);
1726 spapr->ov5_cas = spapr_ovec_clone(spapr->ov5);
1727 }
1728
1729 spapr_nvdimm_finish_flushes();
1730
1731 /* DRC reset may cause a device to be unplugged. This will cause troubles
1732 * if this device is used by another device (eg, a running vhost backend
1733 * will crash QEMU if the DIMM holding the vring goes away). To avoid such
1734 * situations, we reset DRCs after all devices have been reset.
1735 */
1736 spapr_drc_reset_all(spapr);
1737
1738 spapr_clear_pending_events(spapr);
1739
1740 /*
1741 * We place the device tree just below either the top of the RMA,
1742 * or just below 2GB, whichever is lower, so that it can be
1743 * processed with 32-bit real mode code if necessary
1744 */
1745 fdt_addr = MIN(spapr->rma_size, FDT_MAX_ADDR) - FDT_MAX_SIZE;
1746
1747 fdt = spapr_build_fdt(spapr, true, FDT_MAX_SIZE);
1748 if (spapr->vof) {
1749 spapr_vof_reset(spapr, fdt, &error_fatal);
1750 /*
1751 * Do not pack the FDT as the client may change properties.
1752 * VOF client does not expect the FDT so we do not load it to the VM.
1753 */
1754 } else {
1755 rc = fdt_pack(fdt);
1756 /* Should only fail if we've built a corrupted tree */
1757 assert(rc == 0);
1758
1759 spapr_cpu_set_entry_state(first_ppc_cpu, SPAPR_ENTRY_POINT,
1760 0, fdt_addr, 0);
1761 cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt));
1762 }
1763 qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt));
1764
1765 g_free(spapr->fdt_blob);
1766 spapr->fdt_size = fdt_totalsize(fdt);
1767 spapr->fdt_initial_size = spapr->fdt_size;
1768 spapr->fdt_blob = fdt;
1769
1770 /* Set machine->fdt for 'dumpdtb' QMP/HMP command */
1771 machine->fdt = fdt;
1772
1773 /* Set up the entry state */
1774 first_ppc_cpu->env.gpr[5] = 0;
1775
1776 spapr->fwnmi_system_reset_addr = -1;
1777 spapr->fwnmi_machine_check_addr = -1;
1778 spapr->fwnmi_machine_check_interlock = -1;
1779
1780 /* Signal all vCPUs waiting on this condition */
1781 qemu_cond_broadcast(&spapr->fwnmi_machine_check_interlock_cond);
1782
1783 migrate_del_blocker(&spapr->fwnmi_migration_blocker);
1784 }
1785
1786 static void spapr_create_nvram(SpaprMachineState *spapr)
1787 {
1788 DeviceState *dev = qdev_new("spapr-nvram");
1789 DriveInfo *dinfo = drive_get(IF_PFLASH, 0, 0);
1790
1791 if (dinfo) {
1792 qdev_prop_set_drive_err(dev, "drive", blk_by_legacy_dinfo(dinfo),
1793 &error_fatal);
1794 }
1795
1796 qdev_realize_and_unref(dev, &spapr->vio_bus->bus, &error_fatal);
1797
1798 spapr->nvram = (struct SpaprNvram *)dev;
1799 }
1800
1801 static void spapr_rtc_create(SpaprMachineState *spapr)
1802 {
1803 object_initialize_child_with_props(OBJECT(spapr), "rtc", &spapr->rtc,
1804 sizeof(spapr->rtc), TYPE_SPAPR_RTC,
1805 &error_fatal, NULL);
1806 qdev_realize(DEVICE(&spapr->rtc), NULL, &error_fatal);
1807 object_property_add_alias(OBJECT(spapr), "rtc-time", OBJECT(&spapr->rtc),
1808 "date");
1809 }
1810
1811 /* Returns whether we want to use VGA or not */
1812 static bool spapr_vga_init(PCIBus *pci_bus, Error **errp)
1813 {
1814 vga_interface_created = true;
1815 switch (vga_interface_type) {
1816 case VGA_NONE:
1817 return false;
1818 case VGA_DEVICE:
1819 return true;
1820 case VGA_STD:
1821 case VGA_VIRTIO:
1822 case VGA_CIRRUS:
1823 return pci_vga_init(pci_bus) != NULL;
1824 default:
1825 error_setg(errp,
1826 "Unsupported VGA mode, only -vga std or -vga virtio is supported");
1827 return false;
1828 }
1829 }
1830
1831 static int spapr_pre_load(void *opaque)
1832 {
1833 int rc;
1834
1835 rc = spapr_caps_pre_load(opaque);
1836 if (rc) {
1837 return rc;
1838 }
1839
1840 return 0;
1841 }
1842
1843 static int spapr_post_load(void *opaque, int version_id)
1844 {
1845 SpaprMachineState *spapr = (SpaprMachineState *)opaque;
1846 int err = 0;
1847
1848 err = spapr_caps_post_migration(spapr);
1849 if (err) {
1850 return err;
1851 }
1852
1853 /*
1854 * In earlier versions, there was no separate qdev for the PAPR
1855 * RTC, so the RTC offset was stored directly in sPAPREnvironment.
1856 * So when migrating from those versions, poke the incoming offset
1857 * value into the RTC device
1858 */
1859 if (version_id < 3) {
1860 err = spapr_rtc_import_offset(&spapr->rtc, spapr->rtc_offset);
1861 if (err) {
1862 return err;
1863 }
1864 }
1865
1866 if (kvm_enabled() && spapr->patb_entry) {
1867 PowerPCCPU *cpu = POWERPC_CPU(first_cpu);
1868 bool radix = !!(spapr->patb_entry & PATE1_GR);
1869 bool gtse = !!(cpu->env.spr[SPR_LPCR] & LPCR_GTSE);
1870
1871 /*
1872 * Update LPCR:HR and UPRT as they may not be set properly in
1873 * the stream
1874 */
1875 spapr_set_all_lpcrs(radix ? (LPCR_HR | LPCR_UPRT) : 0,
1876 LPCR_HR | LPCR_UPRT);
1877
1878 err = kvmppc_configure_v3_mmu(cpu, radix, gtse, spapr->patb_entry);
1879 if (err) {
1880 error_report("Process table config unsupported by the host");
1881 return -EINVAL;
1882 }
1883 }
1884
1885 err = spapr_irq_post_load(spapr, version_id);
1886 if (err) {
1887 return err;
1888 }
1889
1890 return err;
1891 }
1892
1893 static int spapr_pre_save(void *opaque)
1894 {
1895 int rc;
1896
1897 rc = spapr_caps_pre_save(opaque);
1898 if (rc) {
1899 return rc;
1900 }
1901
1902 return 0;
1903 }
1904
1905 static bool version_before_3(void *opaque, int version_id)
1906 {
1907 return version_id < 3;
1908 }
1909
1910 static bool spapr_pending_events_needed(void *opaque)
1911 {
1912 SpaprMachineState *spapr = (SpaprMachineState *)opaque;
1913 return !QTAILQ_EMPTY(&spapr->pending_events);
1914 }
1915
1916 static const VMStateDescription vmstate_spapr_event_entry = {
1917 .name = "spapr_event_log_entry",
1918 .version_id = 1,
1919 .minimum_version_id = 1,
1920 .fields = (const VMStateField[]) {
1921 VMSTATE_UINT32(summary, SpaprEventLogEntry),
1922 VMSTATE_UINT32(extended_length, SpaprEventLogEntry),
1923 VMSTATE_VBUFFER_ALLOC_UINT32(extended_log, SpaprEventLogEntry, 0,
1924 NULL, extended_length),
1925 VMSTATE_END_OF_LIST()
1926 },
1927 };
1928
1929 static const VMStateDescription vmstate_spapr_pending_events = {
1930 .name = "spapr_pending_events",
1931 .version_id = 1,
1932 .minimum_version_id = 1,
1933 .needed = spapr_pending_events_needed,
1934 .fields = (const VMStateField[]) {
1935 VMSTATE_QTAILQ_V(pending_events, SpaprMachineState, 1,
1936 vmstate_spapr_event_entry, SpaprEventLogEntry, next),
1937 VMSTATE_END_OF_LIST()
1938 },
1939 };
1940
1941 static bool spapr_ov5_cas_needed(void *opaque)
1942 {
1943 SpaprMachineState *spapr = opaque;
1944 SpaprOptionVector *ov5_mask = spapr_ovec_new();
1945 bool cas_needed;
1946
1947 /* Prior to the introduction of SpaprOptionVector, we had two option
1948 * vectors we dealt with: OV5_FORM1_AFFINITY, and OV5_DRCONF_MEMORY.
1949 * Both of these options encode machine topology into the device-tree
1950 * in such a way that the now-booted OS should still be able to interact
1951 * appropriately with QEMU regardless of what options were actually
1952 * negotiated on the source side.
1953 *
1954 * As such, we can avoid migrating the CAS-negotiated options if these
1955 * are the only options available on the current machine/platform.
1956 * Since these are the only options available for pseries-2.7 and
1957 * earlier, this allows us to maintain old->new/new->old migration
1958 * compatibility.
1959 *
1960 * For QEMU 2.8+, there are additional CAS-negotiatable options available
1961 * via default pseries-2.8 machines and explicit command-line parameters.
1962 * Some of these options, like OV5_HP_EVT, *do* require QEMU to be aware
1963 * of the actual CAS-negotiated values to continue working properly. For
1964 * example, availability of memory unplug depends on knowing whether
1965 * OV5_HP_EVT was negotiated via CAS.
1966 *
1967 * Thus, for any cases where the set of available CAS-negotiatable
1968 * options extends beyond OV5_FORM1_AFFINITY and OV5_DRCONF_MEMORY, we
1969 * include the CAS-negotiated options in the migration stream, unless
1970 * they affect boot time behaviour only.
1971 */
1972 spapr_ovec_set(ov5_mask, OV5_FORM1_AFFINITY);
1973 spapr_ovec_set(ov5_mask, OV5_DRCONF_MEMORY);
1974 spapr_ovec_set(ov5_mask, OV5_DRMEM_V2);
1975
1976 /* We need extra information if we have any bits outside the mask
1977 * defined above */
1978 cas_needed = !spapr_ovec_subset(spapr->ov5, ov5_mask);
1979
1980 spapr_ovec_cleanup(ov5_mask);
1981
1982 return cas_needed;
1983 }
1984
1985 static const VMStateDescription vmstate_spapr_ov5_cas = {
1986 .name = "spapr_option_vector_ov5_cas",
1987 .version_id = 1,
1988 .minimum_version_id = 1,
1989 .needed = spapr_ov5_cas_needed,
1990 .fields = (const VMStateField[]) {
1991 VMSTATE_STRUCT_POINTER_V(ov5_cas, SpaprMachineState, 1,
1992 vmstate_spapr_ovec, SpaprOptionVector),
1993 VMSTATE_END_OF_LIST()
1994 },
1995 };
1996
1997 static bool spapr_patb_entry_needed(void *opaque)
1998 {
1999 SpaprMachineState *spapr = opaque;
2000
2001 return !!spapr->patb_entry;
2002 }
2003
2004 static const VMStateDescription vmstate_spapr_patb_entry = {
2005 .name = "spapr_patb_entry",
2006 .version_id = 1,
2007 .minimum_version_id = 1,
2008 .needed = spapr_patb_entry_needed,
2009 .fields = (const VMStateField[]) {
2010 VMSTATE_UINT64(patb_entry, SpaprMachineState),
2011 VMSTATE_END_OF_LIST()
2012 },
2013 };
2014
2015 static bool spapr_irq_map_needed(void *opaque)
2016 {
2017 SpaprMachineState *spapr = opaque;
2018
2019 return spapr->irq_map && !bitmap_empty(spapr->irq_map, spapr->irq_map_nr);
2020 }
2021
2022 static const VMStateDescription vmstate_spapr_irq_map = {
2023 .name = "spapr_irq_map",
2024 .version_id = 1,
2025 .minimum_version_id = 1,
2026 .needed = spapr_irq_map_needed,
2027 .fields = (const VMStateField[]) {
2028 VMSTATE_BITMAP(irq_map, SpaprMachineState, 0, irq_map_nr),
2029 VMSTATE_END_OF_LIST()
2030 },
2031 };
2032
2033 static bool spapr_dtb_needed(void *opaque)
2034 {
2035 SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(opaque);
2036
2037 return smc->update_dt_enabled;
2038 }
2039
2040 static int spapr_dtb_pre_load(void *opaque)
2041 {
2042 SpaprMachineState *spapr = (SpaprMachineState *)opaque;
2043
2044 g_free(spapr->fdt_blob);
2045 spapr->fdt_blob = NULL;
2046 spapr->fdt_size = 0;
2047
2048 return 0;
2049 }
2050
2051 static const VMStateDescription vmstate_spapr_dtb = {
2052 .name = "spapr_dtb",
2053 .version_id = 1,
2054 .minimum_version_id = 1,
2055 .needed = spapr_dtb_needed,
2056 .pre_load = spapr_dtb_pre_load,
2057 .fields = (const VMStateField[]) {
2058 VMSTATE_UINT32(fdt_initial_size, SpaprMachineState),
2059 VMSTATE_UINT32(fdt_size, SpaprMachineState),
2060 VMSTATE_VBUFFER_ALLOC_UINT32(fdt_blob, SpaprMachineState, 0, NULL,
2061 fdt_size),
2062 VMSTATE_END_OF_LIST()
2063 },
2064 };
2065
2066 static bool spapr_fwnmi_needed(void *opaque)
2067 {
2068 SpaprMachineState *spapr = (SpaprMachineState *)opaque;
2069
2070 return spapr->fwnmi_machine_check_addr != -1;
2071 }
2072
2073 static int spapr_fwnmi_pre_save(void *opaque)
2074 {
2075 SpaprMachineState *spapr = (SpaprMachineState *)opaque;
2076
2077 /*
2078 * Check if machine check handling is in progress and print a
2079 * warning message.
2080 */
2081 if (spapr->fwnmi_machine_check_interlock != -1) {
2082 warn_report("A machine check is being handled during migration. The "
2083 "handler may run and log a hardware error on the destination");
2084 }
2085
2086 return 0;
2087 }
2088
2089 static const VMStateDescription vmstate_spapr_fwnmi = {
2090 .name = "spapr_fwnmi",
2091 .version_id = 1,
2092 .minimum_version_id = 1,
2093 .needed = spapr_fwnmi_needed,
2094 .pre_save = spapr_fwnmi_pre_save,
2095 .fields = (const VMStateField[]) {
2096 VMSTATE_UINT64(fwnmi_system_reset_addr, SpaprMachineState),
2097 VMSTATE_UINT64(fwnmi_machine_check_addr, SpaprMachineState),
2098 VMSTATE_INT32(fwnmi_machine_check_interlock, SpaprMachineState),
2099 VMSTATE_END_OF_LIST()
2100 },
2101 };
2102
2103 static const VMStateDescription vmstate_spapr = {
2104 .name = "spapr",
2105 .version_id = 3,
2106 .minimum_version_id = 1,
2107 .pre_load = spapr_pre_load,
2108 .post_load = spapr_post_load,
2109 .pre_save = spapr_pre_save,
2110 .fields = (const VMStateField[]) {
2111 /* used to be @next_irq */
2112 VMSTATE_UNUSED_BUFFER(version_before_3, 0, 4),
2113
2114 /* RTC offset */
2115 VMSTATE_UINT64_TEST(rtc_offset, SpaprMachineState, version_before_3),
2116
2117 VMSTATE_PPC_TIMEBASE_V(tb, SpaprMachineState, 2),
2118 VMSTATE_END_OF_LIST()
2119 },
2120 .subsections = (const VMStateDescription * const []) {
2121 &vmstate_spapr_ov5_cas,
2122 &vmstate_spapr_patb_entry,
2123 &vmstate_spapr_pending_events,
2124 &vmstate_spapr_cap_htm,
2125 &vmstate_spapr_cap_vsx,
2126 &vmstate_spapr_cap_dfp,
2127 &vmstate_spapr_cap_cfpc,
2128 &vmstate_spapr_cap_sbbc,
2129 &vmstate_spapr_cap_ibs,
2130 &vmstate_spapr_cap_hpt_maxpagesize,
2131 &vmstate_spapr_irq_map,
2132 &vmstate_spapr_cap_nested_kvm_hv,
2133 &vmstate_spapr_dtb,
2134 &vmstate_spapr_cap_large_decr,
2135 &vmstate_spapr_cap_ccf_assist,
2136 &vmstate_spapr_cap_fwnmi,
2137 &vmstate_spapr_fwnmi,
2138 &vmstate_spapr_cap_rpt_invalidate,
2139 &vmstate_spapr_cap_ail_mode_3,
2140 &vmstate_spapr_cap_nested_papr,
2141 NULL
2142 }
2143 };
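/*
 * Each subsection listed above is only put on the wire when its
 * .needed callback returns true on the source, so a destination that
 * does not know about a given subsection can still accept the stream
 * as long as the corresponding feature is unused. A minimal subsection
 * follows this pattern (the "foo" names are illustrative only and do
 * not exist in this file):
 *
 *   static bool foo_needed(void *opaque)
 *   {
 *       return ((SpaprMachineState *)opaque)->foo_enabled;
 *   }
 *
 *   static const VMStateDescription vmstate_spapr_foo = {
 *       .name = "spapr_foo",
 *       .version_id = 1,
 *       .minimum_version_id = 1,
 *       .needed = foo_needed,
 *       .fields = (const VMStateField[]) {
 *           VMSTATE_UINT32(foo, SpaprMachineState),
 *           VMSTATE_END_OF_LIST()
 *       },
 *   };
 */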
2144
2145 static int htab_save_setup(QEMUFile *f, void *opaque, Error **errp)
2146 {
2147 SpaprMachineState *spapr = opaque;
2148
2149 /* "Iteration" header */
2150 if (!spapr->htab_shift) {
2151 qemu_put_be32(f, -1);
2152 } else {
2153 qemu_put_be32(f, spapr->htab_shift);
2154 }
2155
2156 if (spapr->htab) {
2157 spapr->htab_save_index = 0;
2158 spapr->htab_first_pass = true;
2159 } else {
2160 if (spapr->htab_shift) {
2161 assert(kvm_enabled());
2162 }
2163 }
2164
2165
2166 return 0;
2167 }
2168
2169 static void htab_save_chunk(QEMUFile *f, SpaprMachineState *spapr,
2170 int chunkstart, int n_valid, int n_invalid)
2171 {
2172 qemu_put_be32(f, chunkstart);
2173 qemu_put_be16(f, n_valid);
2174 qemu_put_be16(f, n_invalid);
2175 qemu_put_buffer(f, HPTE(spapr->htab, chunkstart),
2176 HASH_PTE_SIZE_64 * n_valid);
2177 }
2178
2179 static void htab_save_end_marker(QEMUFile *f)
2180 {
2181 qemu_put_be32(f, 0);
2182 qemu_put_be16(f, 0);
2183 qemu_put_be16(f, 0);
2184 }
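/*
 * As written by the helpers above (and consumed by htab_load() below),
 * each iteration of the HPT stream is a sequence of chunks of the form
 *
 *   be32 chunkstart   first HPTE slot index covered by the chunk
 *   be16 n_valid      number of valid HPTEs whose raw contents follow
 *   be16 n_invalid    number of HPTEs the destination should clear
 *   n_valid * HASH_PTE_SIZE_64 bytes of HPTE data
 *
 * terminated by an all-zero header from htab_save_end_marker().
 */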
2185
2186 static void htab_save_first_pass(QEMUFile *f, SpaprMachineState *spapr,
2187 int64_t max_ns)
2188 {
2189 bool has_timeout = max_ns != -1;
2190 int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
2191 int index = spapr->htab_save_index;
2192 int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2193
2194 assert(spapr->htab_first_pass);
2195
2196 do {
2197 int chunkstart;
2198
2199 /* Consume invalid HPTEs */
2200 while ((index < htabslots)
2201 && !HPTE_VALID(HPTE(spapr->htab, index))) {
2202 CLEAN_HPTE(HPTE(spapr->htab, index));
2203 index++;
2204 }
2205
2206 /* Consume valid HPTEs */
2207 chunkstart = index;
2208 while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
2209 && HPTE_VALID(HPTE(spapr->htab, index))) {
2210 CLEAN_HPTE(HPTE(spapr->htab, index));
2211 index++;
2212 }
2213
2214 if (index > chunkstart) {
2215 int n_valid = index - chunkstart;
2216
2217 htab_save_chunk(f, spapr, chunkstart, n_valid, 0);
2218
2219 if (has_timeout &&
2220 (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
2221 break;
2222 }
2223 }
2224 } while ((index < htabslots) && !migration_rate_exceeded(f));
2225
2226 if (index >= htabslots) {
2227 assert(index == htabslots);
2228 index = 0;
2229 spapr->htab_first_pass = false;
2230 }
2231 spapr->htab_save_index = index;
2232 }
2233
2234 static int htab_save_later_pass(QEMUFile *f, SpaprMachineState *spapr,
2235 int64_t max_ns)
2236 {
2237 bool final = max_ns < 0;
2238 int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
2239 int examined = 0, sent = 0;
2240 int index = spapr->htab_save_index;
2241 int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2242
2243 assert(!spapr->htab_first_pass);
2244
2245 do {
2246 int chunkstart, invalidstart;
2247
2248 /* Consume non-dirty HPTEs */
2249 while ((index < htabslots)
2250 && !HPTE_DIRTY(HPTE(spapr->htab, index))) {
2251 index++;
2252 examined++;
2253 }
2254
2255 chunkstart = index;
2256 /* Consume valid dirty HPTEs */
2257 while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
2258 && HPTE_DIRTY(HPTE(spapr->htab, index))
2259 && HPTE_VALID(HPTE(spapr->htab, index))) {
2260 CLEAN_HPTE(HPTE(spapr->htab, index));
2261 index++;
2262 examined++;
2263 }
2264
2265 invalidstart = index;
2266 /* Consume invalid dirty HPTEs */
2267 while ((index < htabslots) && (index - invalidstart < USHRT_MAX)
2268 && HPTE_DIRTY(HPTE(spapr->htab, index))
2269 && !HPTE_VALID(HPTE(spapr->htab, index))) {
2270 CLEAN_HPTE(HPTE(spapr->htab, index));
2271 index++;
2272 examined++;
2273 }
2274
2275 if (index > chunkstart) {
2276 int n_valid = invalidstart - chunkstart;
2277 int n_invalid = index - invalidstart;
2278
2279 htab_save_chunk(f, spapr, chunkstart, n_valid, n_invalid);
2280 sent += index - chunkstart;
2281
2282 if (!final && (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
2283 break;
2284 }
2285 }
2286
2287 if (examined >= htabslots) {
2288 break;
2289 }
2290
2291 if (index >= htabslots) {
2292 assert(index == htabslots);
2293 index = 0;
2294 }
2295 } while ((examined < htabslots) && (!migration_rate_exceeded(f) || final));
2296
2297 if (index >= htabslots) {
2298 assert(index == htabslots);
2299 index = 0;
2300 }
2301
2302 spapr->htab_save_index = index;
2303
2304 return (examined >= htabslots) && (sent == 0) ? 1 : 0;
2305 }
2306
2307 #define MAX_ITERATION_NS 5000000 /* 5 ms */
2308 #define MAX_KVM_BUF_SIZE 2048
2309
2310 static int htab_save_iterate(QEMUFile *f, void *opaque)
2311 {
2312 SpaprMachineState *spapr = opaque;
2313 int fd;
2314 int rc = 0;
2315
2316 /* Iteration header */
2317 if (!spapr->htab_shift) {
2318 qemu_put_be32(f, -1);
2319 return 1;
2320 } else {
2321 qemu_put_be32(f, 0);
2322 }
2323
2324 if (!spapr->htab) {
2325 assert(kvm_enabled());
2326
2327 fd = get_htab_fd(spapr);
2328 if (fd < 0) {
2329 return fd;
2330 }
2331
2332 rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, MAX_ITERATION_NS);
2333 if (rc < 0) {
2334 return rc;
2335 }
2336 } else if (spapr->htab_first_pass) {
2337 htab_save_first_pass(f, spapr, MAX_ITERATION_NS);
2338 } else {
2339 rc = htab_save_later_pass(f, spapr, MAX_ITERATION_NS);
2340 }
2341
2342 htab_save_end_marker(f);
2343
2344 return rc;
2345 }
2346
2347 static int htab_save_complete(QEMUFile *f, void *opaque)
2348 {
2349 SpaprMachineState *spapr = opaque;
2350 int fd;
2351
2352 /* Iteration header */
2353 if (!spapr->htab_shift) {
2354 qemu_put_be32(f, -1);
2355 return 0;
2356 } else {
2357 qemu_put_be32(f, 0);
2358 }
2359
2360 if (!spapr->htab) {
2361 int rc;
2362
2363 assert(kvm_enabled());
2364
2365 fd = get_htab_fd(spapr);
2366 if (fd < 0) {
2367 return fd;
2368 }
2369
2370 rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, -1);
2371 if (rc < 0) {
2372 return rc;
2373 }
2374 } else {
2375 if (spapr->htab_first_pass) {
2376 htab_save_first_pass(f, spapr, -1);
2377 }
2378 htab_save_later_pass(f, spapr, -1);
2379 }
2380
2381 /* End marker */
2382 htab_save_end_marker(f);
2383
2384 return 0;
2385 }
2386
2387 static int htab_load(QEMUFile *f, void *opaque, int version_id)
2388 {
2389 SpaprMachineState *spapr = opaque;
2390 uint32_t section_hdr;
2391 int fd = -1;
2392 Error *local_err = NULL;
2393
2394 if (version_id < 1 || version_id > 1) {
2395 error_report("htab_load() bad version");
2396 return -EINVAL;
2397 }
2398
2399 section_hdr = qemu_get_be32(f);
2400
2401 if (section_hdr == -1) {
2402 spapr_free_hpt(spapr);
2403 return 0;
2404 }
2405
2406 if (section_hdr) {
2407 int ret;
2408
2409 /* First section gives the htab size */
2410 ret = spapr_reallocate_hpt(spapr, section_hdr, &local_err);
2411 if (ret < 0) {
2412 error_report_err(local_err);
2413 return ret;
2414 }
2415 return 0;
2416 }
2417
2418 if (!spapr->htab) {
2419 assert(kvm_enabled());
2420
2421 fd = kvmppc_get_htab_fd(true, 0, &local_err);
2422 if (fd < 0) {
2423 error_report_err(local_err);
2424 return fd;
2425 }
2426 }
2427
2428 while (true) {
2429 uint32_t index;
2430 uint16_t n_valid, n_invalid;
2431
2432 index = qemu_get_be32(f);
2433 n_valid = qemu_get_be16(f);
2434 n_invalid = qemu_get_be16(f);
2435
2436 if ((index == 0) && (n_valid == 0) && (n_invalid == 0)) {
2437 /* End of Stream */
2438 break;
2439 }
2440
2441 if ((index + n_valid + n_invalid) >
2442 (HTAB_SIZE(spapr) / HASH_PTE_SIZE_64)) {
2443 /* Bad index in stream */
2444 error_report(
2445 "htab_load() bad index %d (%hd+%hd entries) in htab stream (htab_shift=%d)",
2446 index, n_valid, n_invalid, spapr->htab_shift);
2447 return -EINVAL;
2448 }
2449
2450 if (spapr->htab) {
2451 if (n_valid) {
2452 qemu_get_buffer(f, HPTE(spapr->htab, index),
2453 HASH_PTE_SIZE_64 * n_valid);
2454 }
2455 if (n_invalid) {
2456 memset(HPTE(spapr->htab, index + n_valid), 0,
2457 HASH_PTE_SIZE_64 * n_invalid);
2458 }
2459 } else {
2460 int rc;
2461
2462 assert(fd >= 0);
2463
2464 rc = kvmppc_load_htab_chunk(f, fd, index, n_valid, n_invalid,
2465 &local_err);
2466 if (rc < 0) {
2467 error_report_err(local_err);
2468 return rc;
2469 }
2470 }
2471 }
2472
2473 if (!spapr->htab) {
2474 assert(fd >= 0);
2475 close(fd);
2476 }
2477
2478 return 0;
2479 }
2480
2481 static void htab_save_cleanup(void *opaque)
2482 {
2483 SpaprMachineState *spapr = opaque;
2484
2485 close_htab_fd(spapr);
2486 }
2487
2488 static SaveVMHandlers savevm_htab_handlers = {
2489 .save_setup = htab_save_setup,
2490 .save_live_iterate = htab_save_iterate,
2491 .save_live_complete_precopy = htab_save_complete,
2492 .save_cleanup = htab_save_cleanup,
2493 .load_state = htab_load,
2494 };
2495
2496 static void spapr_boot_set(void *opaque, const char *boot_device,
2497 Error **errp)
2498 {
2499 SpaprMachineState *spapr = SPAPR_MACHINE(opaque);
2500
2501 g_free(spapr->boot_device);
2502 spapr->boot_device = g_strdup(boot_device);
2503 }
2504
2505 static void spapr_create_lmb_dr_connectors(SpaprMachineState *spapr)
2506 {
2507 MachineState *machine = MACHINE(spapr);
2508 uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
2509 uint32_t nr_lmbs = (machine->maxram_size - machine->ram_size)/lmb_size;
2510 int i;
2511
2512 g_assert(!nr_lmbs || machine->device_memory);
2513 for (i = 0; i < nr_lmbs; i++) {
2514 uint64_t addr;
2515
2516 addr = i * lmb_size + machine->device_memory->base;
2517 spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_LMB,
2518 addr / lmb_size);
2519 }
2520 }
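/*
 * Worked example (hypothetical sizes): with -m 4G,maxmem=8G and the
 * 256 MiB SPAPR_MEMORY_BLOCK_SIZE, nr_lmbs is (8G - 4G) / 256M = 16,
 * so sixteen LMB DRCs are created, each identified by its address in
 * device memory divided by the block size.
 */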
2521
2522 /*
2523 * If RAM size, maxmem size and individual node mem sizes aren't aligned
2524 * to SPAPR_MEMORY_BLOCK_SIZE(256MB), then refuse to start the guest
2525 * since we can't support such unaligned sizes with DRCONF_MEMORY.
2526 */
2527 static void spapr_validate_node_memory(MachineState *machine, Error **errp)
2528 {
2529 int i;
2530
2531 if (machine->ram_size % SPAPR_MEMORY_BLOCK_SIZE) {
2532 error_setg(errp, "Memory size 0x" RAM_ADDR_FMT
2533 " is not aligned to %" PRIu64 " MiB",
2534 machine->ram_size,
2535 SPAPR_MEMORY_BLOCK_SIZE / MiB);
2536 return;
2537 }
2538
2539 if (machine->maxram_size % SPAPR_MEMORY_BLOCK_SIZE) {
2540 error_setg(errp, "Maximum memory size 0x" RAM_ADDR_FMT
2541 " is not aligned to %" PRIu64 " MiB",
2542 machine->maxram_size,
2543 SPAPR_MEMORY_BLOCK_SIZE / MiB);
2544 return;
2545 }
2546
2547 for (i = 0; i < machine->numa_state->num_nodes; i++) {
2548 if (machine->numa_state->nodes[i].node_mem % SPAPR_MEMORY_BLOCK_SIZE) {
2549 error_setg(errp,
2550 "Node %d memory size 0x%" PRIx64
2551 " is not aligned to %" PRIu64 " MiB",
2552 i, machine->numa_state->nodes[i].node_mem,
2553 SPAPR_MEMORY_BLOCK_SIZE / MiB);
2554 return;
2555 }
2556 }
2557 }
2558
2559 /* find cpu slot in machine->possible_cpus by core_id */
2560 static CPUArchId *spapr_find_cpu_slot(MachineState *ms, uint32_t id, int *idx)
2561 {
2562 int index = id / ms->smp.threads;
2563
2564 if (index >= ms->possible_cpus->len) {
2565 return NULL;
2566 }
2567 if (idx) {
2568 *idx = index;
2569 }
2570 return &ms->possible_cpus->cpus[index];
2571 }
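/*
 * Illustrative mapping, assuming 8 threads per core: core_id 0, 8 and
 * 16 land in possible_cpus slots 0, 1 and 2; an id beyond the last
 * slot returns NULL so callers can detect an out-of-range core.
 */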
2572
2573 static void spapr_set_vsmt_mode(SpaprMachineState *spapr, Error **errp)
2574 {
2575 MachineState *ms = MACHINE(spapr);
2576 SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
2577 Error *local_err = NULL;
2578 bool vsmt_user = !!spapr->vsmt;
2579 int kvm_smt = kvmppc_smt_threads();
2580 int ret;
2581 unsigned int smp_threads = ms->smp.threads;
2582
2583 if (tcg_enabled()) {
2584 if (smp_threads > 1 &&
2585 !ppc_type_check_compat(ms->cpu_type, CPU_POWERPC_LOGICAL_2_07, 0,
2586 spapr->max_compat_pvr)) {
2587 error_setg(errp, "TCG only supports SMT on POWER8 or newer CPUs");
2588 return;
2589 }
2590
2591 if (smp_threads > 8) {
2592 error_setg(errp, "TCG cannot support more than 8 threads/core "
2593 "on a pseries machine");
2594 return;
2595 }
2596 }
2597 if (!is_power_of_2(smp_threads)) {
2598 error_setg(errp, "Cannot support %d threads/core on a pseries "
2599 "machine because it must be a power of 2", smp_threads);
2600 return;
2601 }
2602
2603 /* Determine the VSMT mode to use: */
2604 if (vsmt_user) {
2605 if (spapr->vsmt < smp_threads) {
2606 error_setg(errp, "Cannot support VSMT mode %d"
2607 " because it must be >= threads/core (%d)",
2608 spapr->vsmt, smp_threads);
2609 return;
2610 }
2611 /* In this case, spapr->vsmt has been set by the command line */
2612 } else if (!smc->smp_threads_vsmt) {
2613 /*
2614 * Default VSMT value is tricky, because we need it to be as
2615 * consistent as possible (for migration), but this requires
2616 * changing it for at least some existing cases. We pick 8 as
2617 * the value that we'd get with KVM on POWER8, the
2618 * overwhelmingly common case in production systems.
2619 */
2620 spapr->vsmt = MAX(8, smp_threads);
2621 } else {
2622 spapr->vsmt = smp_threads;
2623 }
2624
2625 /* KVM: If necessary, set the SMT mode: */
2626 if (kvm_enabled() && (spapr->vsmt != kvm_smt)) {
2627 ret = kvmppc_set_smt_threads(spapr->vsmt);
2628 if (ret) {
2629 /* Looks like KVM isn't able to change VSMT mode */
2630 error_setg(&local_err,
2631 "Failed to set KVM's VSMT mode to %d (errno %d)",
2632 spapr->vsmt, ret);
2633 /* We can live with that if the default one is big enough
2634 * for the number of threads, and a submultiple of the one
2635 * we want. In this case we'll waste some vcpu ids, but
2636 * behaviour will be correct */
2637 if ((kvm_smt >= smp_threads) && ((spapr->vsmt % kvm_smt) == 0)) {
2638 warn_report_err(local_err);
2639 } else {
2640 if (!vsmt_user) {
2641 error_append_hint(&local_err,
2642 "On PPC, a VM with %d threads/core"
2643 " on a host with %d threads/core"
2644 " requires the use of VSMT mode %d.\n",
2645 smp_threads, kvm_smt, spapr->vsmt);
2646 }
2647 kvmppc_error_append_smt_possible_hint(&local_err);
2648 error_propagate(errp, local_err);
2649 }
2650 }
2651 }
2652 /* else TCG: nothing to do currently */
2653 }
2654
2655 static void spapr_init_cpus(SpaprMachineState *spapr)
2656 {
2657 MachineState *machine = MACHINE(spapr);
2658 MachineClass *mc = MACHINE_GET_CLASS(machine);
2659 const char *type = spapr_get_cpu_core_type(machine->cpu_type);
2660 const CPUArchIdList *possible_cpus;
2661 unsigned int smp_cpus = machine->smp.cpus;
2662 unsigned int smp_threads = machine->smp.threads;
2663 unsigned int max_cpus = machine->smp.max_cpus;
2664 int boot_cores_nr = smp_cpus / smp_threads;
2665 int i;
2666
2667 possible_cpus = mc->possible_cpu_arch_ids(machine);
2668 if (mc->has_hotpluggable_cpus) {
2669 if (smp_cpus % smp_threads) {
2670 error_report("smp_cpus (%u) must be multiple of threads (%u)",
2671 smp_cpus, smp_threads);
2672 exit(1);
2673 }
2674 if (max_cpus % smp_threads) {
2675 error_report("max_cpus (%u) must be multiple of threads (%u)",
2676 max_cpus, smp_threads);
2677 exit(1);
2678 }
2679 } else {
2680 if (max_cpus != smp_cpus) {
2681 error_report("This machine version does not support CPU hotplug");
2682 exit(1);
2683 }
2684 boot_cores_nr = possible_cpus->len;
2685 }
2686
2687 for (i = 0; i < possible_cpus->len; i++) {
2688 int core_id = i * smp_threads;
2689
2690 if (mc->has_hotpluggable_cpus) {
2691 spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_CPU,
2692 spapr_vcpu_id(spapr, core_id));
2693 }
2694
2695 if (i < boot_cores_nr) {
2696 Object *core = object_new(type);
2697 int nr_threads = smp_threads;
2698
2699 /* Handle the partially filled core for older machine types */
2700 if ((i + 1) * smp_threads >= smp_cpus) {
2701 nr_threads = smp_cpus - i * smp_threads;
2702 }
2703
2704 object_property_set_int(core, "nr-threads", nr_threads,
2705 &error_fatal);
2706 object_property_set_int(core, CPU_CORE_PROP_CORE_ID, core_id,
2707 &error_fatal);
2708 qdev_realize(DEVICE(core), NULL, &error_fatal);
2709
2710 object_unref(core);
2711 }
2712 }
2713 }
2714
2715 static PCIHostState *spapr_create_default_phb(void)
2716 {
2717 DeviceState *dev;
2718
2719 dev = qdev_new(TYPE_SPAPR_PCI_HOST_BRIDGE);
2720 qdev_prop_set_uint32(dev, "index", 0);
2721 sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
2722
2723 return PCI_HOST_BRIDGE(dev);
2724 }
2725
2726 static hwaddr spapr_rma_size(SpaprMachineState *spapr, Error **errp)
2727 {
2728 MachineState *machine = MACHINE(spapr);
2729 SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
2730 hwaddr rma_size = machine->ram_size;
2731 hwaddr node0_size = spapr_node0_size(machine);
2732
2733 /* RMA has to fit in the first NUMA node */
2734 rma_size = MIN(rma_size, node0_size);
2735
2736 /*
2737 * VRMA access is via a special 1TiB SLB mapping, so the RMA can
2738 * never exceed that
2739 */
2740 rma_size = MIN(rma_size, 1 * TiB);
2741
2742 /*
2743 * Clamp the RMA size based on machine type. This is for
2744 * migration compatibility with older qemu versions, which limited
2745 * the RMA size for complicated and mostly bad reasons.
2746 */
2747 if (smc->rma_limit) {
2748 rma_size = MIN(rma_size, smc->rma_limit);
2749 }
2750
2751 if (rma_size < MIN_RMA_SLOF) {
2752 error_setg(errp,
2753 "pSeries SLOF firmware requires >= %" HWADDR_PRIu
2754 " MiB guest RMA (Real Mode Area memory)",
2755 MIN_RMA_SLOF / MiB);
2756 return 0;
2757 }
2758
2759 return rma_size;
2760 }
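/*
 * Worked example (hypothetical layout): a guest with 16 GiB of RAM but
 * only 2 GiB on NUMA node 0 gets a 2 GiB RMA; a machine class that
 * sets rma_limit for compatibility may clamp it further, and anything
 * below MIN_RMA_SLOF is rejected outright.
 */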
2761
2762 static void spapr_create_nvdimm_dr_connectors(SpaprMachineState *spapr)
2763 {
2764 MachineState *machine = MACHINE(spapr);
2765 int i;
2766
2767 for (i = 0; i < machine->ram_slots; i++) {
2768 spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_PMEM, i);
2769 }
2770 }
2771
2772 /* pSeries LPAR / sPAPR hardware init */
2773 static void spapr_machine_init(MachineState *machine)
2774 {
2775 SpaprMachineState *spapr = SPAPR_MACHINE(machine);
2776 SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
2777 MachineClass *mc = MACHINE_GET_CLASS(machine);
2778 const char *bios_default = spapr->vof ? FW_FILE_NAME_VOF : FW_FILE_NAME;
2779 const char *bios_name = machine->firmware ?: bios_default;
2780 g_autofree char *filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
2781 const char *kernel_filename = machine->kernel_filename;
2782 const char *initrd_filename = machine->initrd_filename;
2783 PCIHostState *phb;
2784 bool has_vga;
2785 int i;
2786 MemoryRegion *sysmem = get_system_memory();
2787 long load_limit, fw_size;
2788 Error *resize_hpt_err = NULL;
2789 NICInfo *nd;
2790
2791 if (!filename) {
2792 error_report("Could not find LPAR firmware '%s'", bios_name);
2793 exit(1);
2794 }
2795 fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE);
2796 if (fw_size <= 0) {
2797 error_report("Could not load LPAR firmware '%s'", filename);
2798 exit(1);
2799 }
2800
2801 /*
2802 * if Secure VM (PEF) support is configured, then initialize it
2803 */
2804 if (machine->cgs) {
2805 confidential_guest_kvm_init(machine->cgs, &error_fatal);
2806 }
2807
2808 msi_nonbroken = true;
2809
2810 QLIST_INIT(&spapr->phbs);
2811 QTAILQ_INIT(&spapr->pending_dimm_unplugs);
2812
2813 /* Determine capabilities to run with */
2814 spapr_caps_init(spapr);
2815
2816 kvmppc_check_papr_resize_hpt(&resize_hpt_err);
2817 if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DEFAULT) {
2818 /*
2819 * If the user explicitly requested a mode we should either
2820 * supply it, or fail completely (which we do below). But if
2821 * it's not set explicitly, we reset our mode to something
2822 * that works
2823 */
2824 if (resize_hpt_err) {
2825 spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED;
2826 error_free(resize_hpt_err);
2827 resize_hpt_err = NULL;
2828 } else {
2829 spapr->resize_hpt = smc->resize_hpt_default;
2830 }
2831 }
2832
2833 assert(spapr->resize_hpt != SPAPR_RESIZE_HPT_DEFAULT);
2834
2835 if ((spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) && resize_hpt_err) {
2836 /*
2837 * User requested HPT resize, but this host can't supply it. Bail out
2838 */
2839 error_report_err(resize_hpt_err);
2840 exit(1);
2841 }
2842 error_free(resize_hpt_err);
2843
2844 spapr->rma_size = spapr_rma_size(spapr, &error_fatal);
2845
2846 /* Setup a load limit for the ramdisk leaving room for SLOF and FDT */
2847 load_limit = MIN(spapr->rma_size, FDT_MAX_ADDR) - FW_OVERHEAD;
2848
2849 /*
2850 * VSMT must be set in order to be able to compute VCPU ids, ie to
2851 * call spapr_max_server_number() or spapr_vcpu_id().
2852 */
2853 spapr_set_vsmt_mode(spapr, &error_fatal);
2854
2855 /* Set up Interrupt Controller before we create the VCPUs */
2856 spapr_irq_init(spapr, &error_fatal);
2857
2858 /* Set up containers for ibm,client-architecture-support negotiated options
2859 */
2860 spapr->ov5 = spapr_ovec_new();
2861 spapr->ov5_cas = spapr_ovec_new();
2862
2863 spapr_ovec_set(spapr->ov5, OV5_DRCONF_MEMORY);
2864 spapr_validate_node_memory(machine, &error_fatal);
2865
2866 spapr_ovec_set(spapr->ov5, OV5_FORM1_AFFINITY);
2867
2868 /* Do not advertise FORM2 NUMA support for pseries-6.1 and older */
2869 if (!smc->pre_6_2_numa_affinity) {
2870 spapr_ovec_set(spapr->ov5, OV5_FORM2_AFFINITY);
2871 }
2872
2873 /* advertise support for dedicated HP event source to guests */
2874 if (spapr->use_hotplug_event_source) {
2875 spapr_ovec_set(spapr->ov5, OV5_HP_EVT);
2876 }
2877
2878 /* advertise support for HPT resizing */
2879 if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) {
2880 spapr_ovec_set(spapr->ov5, OV5_HPT_RESIZE);
2881 }
2882
2883 /* advertise support for ibm,dynamic-memory-v2 */
2884 spapr_ovec_set(spapr->ov5, OV5_DRMEM_V2);
2885
2886 /* advertise XIVE on POWER9 machines */
2887 if (spapr->irq->xive) {
2888 spapr_ovec_set(spapr->ov5, OV5_XIVE_EXPLOIT);
2889 }
2890
2891 /* init CPUs */
2892 spapr_init_cpus(spapr);
2893
2894 /* Init numa_assoc_array */
2895 spapr_numa_associativity_init(spapr, machine);
2896
2897 if ((!kvm_enabled() || kvmppc_has_cap_mmu_radix()) &&
2898 ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, 0,
2899 spapr->max_compat_pvr)) {
2900 spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_300);
2901 /* KVM and TCG always allow GTSE with radix... */
2902 spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_GTSE);
2903 }
2904 /* ... but not with hash (currently). */
2905
2906 if (kvm_enabled()) {
2907 /* Enable H_LOGICAL_CI_* so SLOF can talk to in-kernel devices */
2908 kvmppc_enable_logical_ci_hcalls();
2909 kvmppc_enable_set_mode_hcall();
2910
2911 /* H_CLEAR_MOD/_REF are mandatory in PAPR, but off by default */
2912 kvmppc_enable_clear_ref_mod_hcalls();
2913
2914 /* Enable H_PAGE_INIT */
2915 kvmppc_enable_h_page_init();
2916 }
2917
2918 /* map RAM */
2919 memory_region_add_subregion(sysmem, 0, machine->ram);
2920
2921 /* initialize hotplug memory address space */
2922 if (machine->ram_size < machine->maxram_size) {
2923 ram_addr_t device_mem_size = machine->maxram_size - machine->ram_size;
2924 hwaddr device_mem_base;
2925
2926 /*
2927 * Limit the number of hotpluggable memory slots to half the number
2928 * of slots that KVM supports, leaving the other half for PCI and other
2929 * devices. However ensure that number of slots doesn't drop below 32.
2930 */
2931 int max_memslots = kvm_enabled() ? kvm_get_max_memslots() / 2 :
2932 SPAPR_MAX_RAM_SLOTS;
2933
2934 if (max_memslots < SPAPR_MAX_RAM_SLOTS) {
2935 max_memslots = SPAPR_MAX_RAM_SLOTS;
2936 }
2937 if (machine->ram_slots > max_memslots) {
2938 error_report("Specified number of memory slots %"
2939 PRIu64" exceeds max supported %d",
2940 machine->ram_slots, max_memslots);
2941 exit(1);
2942 }
2943
2944 device_mem_base = ROUND_UP(machine->ram_size, SPAPR_DEVICE_MEM_ALIGN);
2945 machine_memory_devices_init(machine, device_mem_base, device_mem_size);
2946 }
2947
2948 spapr_create_lmb_dr_connectors(spapr);
2949
2950 if (mc->nvdimm_supported) {
2951 spapr_create_nvdimm_dr_connectors(spapr);
2952 }
2953
2954 /* Set up RTAS event infrastructure */
2955 spapr_events_init(spapr);
2956
2957 /* Set up the RTC RTAS interfaces */
2958 spapr_rtc_create(spapr);
2959
2960 /* Set up VIO bus */
2961 spapr->vio_bus = spapr_vio_bus_init();
2962
2963 for (i = 0; serial_hd(i); i++) {
2964 spapr_vty_create(spapr->vio_bus, serial_hd(i));
2965 }
2966
2967 /* We always have at least the nvram device on VIO */
2968 spapr_create_nvram(spapr);
2969
2970 /*
2971 * Set up hotplug / dynamic-reconfiguration connectors. Top-level
2972 * connectors (described in root DT node's "ibm,drc-types" property)
2973 * are pre-initialized here. Additional child connectors (such as
2974 * connectors for a PHB's PCI slots) are added as needed during their
2975 * parent's realization.
2976 */
2977 if (smc->dr_phb_enabled) {
2978 for (i = 0; i < SPAPR_MAX_PHBS; i++) {
2979 spapr_dr_connector_new(OBJECT(machine), TYPE_SPAPR_DRC_PHB, i);
2980 }
2981 }
2982
2983 /* Set up PCI */
2984 spapr_pci_rtas_init();
2985
2986 phb = spapr_create_default_phb();
2987
2988 while ((nd = qemu_find_nic_info("spapr-vlan", true, "ibmveth"))) {
2989 spapr_vlan_create(spapr->vio_bus, nd);
2990 }
2991
2992 pci_init_nic_devices(phb->bus, NULL);
2993
2994 for (i = 0; i <= drive_get_max_bus(IF_SCSI); i++) {
2995 spapr_vscsi_create(spapr->vio_bus);
2996 }
2997
2998 /* Graphics */
2999 has_vga = spapr_vga_init(phb->bus, &error_fatal);
3000 if (has_vga) {
3001 spapr->want_stdout_path = !machine->enable_graphics;
3002 machine->usb |= defaults_enabled() && !machine->usb_disabled;
3003 } else {
3004 spapr->want_stdout_path = true;
3005 }
3006
3007 if (machine->usb) {
3008 pci_create_simple(phb->bus, -1, "nec-usb-xhci");
3009
3010 if (has_vga) {
3011 USBBus *usb_bus;
3012
3013 usb_bus = USB_BUS(object_resolve_type_unambiguous(TYPE_USB_BUS,
3014 &error_abort));
3015 usb_create_simple(usb_bus, "usb-kbd");
3016 usb_create_simple(usb_bus, "usb-mouse");
3017 }
3018 }
3019
3020 if (kernel_filename) {
3021 uint64_t loaded_addr = 0;
3022
3023 spapr->kernel_size = load_elf(kernel_filename, NULL,
3024 translate_kernel_address, spapr,
3025 NULL, &loaded_addr, NULL, NULL, 1,
3026 PPC_ELF_MACHINE, 0, 0);
3027 if (spapr->kernel_size == ELF_LOAD_WRONG_ENDIAN) {
3028 spapr->kernel_size = load_elf(kernel_filename, NULL,
3029 translate_kernel_address, spapr,
3030 NULL, &loaded_addr, NULL, NULL, 0,
3031 PPC_ELF_MACHINE, 0, 0);
3032 spapr->kernel_le = spapr->kernel_size > 0;
3033 }
3034 if (spapr->kernel_size < 0) {
3035 error_report("error loading %s: %s", kernel_filename,
3036 load_elf_strerror(spapr->kernel_size));
3037 exit(1);
3038 }
3039
3040 if (spapr->kernel_addr != loaded_addr) {
3041 warn_report("spapr: kernel_addr changed from 0x%"PRIx64
3042 " to 0x%"PRIx64,
3043 spapr->kernel_addr, loaded_addr);
3044 spapr->kernel_addr = loaded_addr;
3045 }
3046
3047 /* load initrd */
3048 if (initrd_filename) {
3049 /* Try to locate the initrd in the gap between the kernel
3050 * and the firmware. Add a bit of space just in case
3051 */
3052 spapr->initrd_base = (spapr->kernel_addr + spapr->kernel_size
3053 + 0x1ffff) & ~0xffff;
3054 spapr->initrd_size = load_image_targphys(initrd_filename,
3055 spapr->initrd_base,
3056 load_limit
3057 - spapr->initrd_base);
3058 if (spapr->initrd_size < 0) {
3059 error_report("could not load initial ram disk '%s'",
3060 initrd_filename);
3061 exit(1);
3062 }
3063 }
3064 }
3065
3066 /* FIXME: Should register things through the MachineState's qdev
3067 * interface, this is a legacy from the sPAPREnvironment structure
3068 * which predated MachineState but had a similar function */
3069 vmstate_register(NULL, 0, &vmstate_spapr, spapr);
3070 register_savevm_live("spapr/htab", VMSTATE_INSTANCE_ID_ANY, 1,
3071 &savevm_htab_handlers, spapr);
3072
3073 qbus_set_hotplug_handler(sysbus_get_default(), OBJECT(machine));
3074
3075 qemu_register_boot_set(spapr_boot_set, spapr);
3076
3077 /*
3078 * Nothing needs to be done to resume a suspended guest because
3079 * suspending does not change the machine state, so no need for
3080 * a ->wakeup method.
3081 */
3082 qemu_register_wakeup_support();
3083
3084 if (kvm_enabled()) {
3085 /* to stop and start vmclock */
3086 qemu_add_vm_change_state_handler(cpu_ppc_clock_vm_state_change,
3087 &spapr->tb);
3088
3089 kvmppc_spapr_enable_inkernel_multitce();
3090 }
3091
3092 qemu_cond_init(&spapr->fwnmi_machine_check_interlock_cond);
3093 if (spapr->vof) {
3094 spapr->vof->fw_size = fw_size; /* for claim() on itself */
3095 spapr_register_hypercall(KVMPPC_H_VOF_CLIENT, spapr_h_vof_client);
3096 }
3097
3098 spapr_watchdog_init(spapr);
3099 }
3100
3101 #define DEFAULT_KVM_TYPE "auto"
3102 static int spapr_kvm_type(MachineState *machine, const char *vm_type)
3103 {
3104 /*
3105 * The use of g_ascii_strcasecmp() for 'hv' and 'pr' is to
3106 * accommodate the 'HV' and 'PR' formats that exist in the
3107 * wild. The 'auto' mode is being introduced already as
3108 * lower-case, thus we don't need to bother checking for
3109 * "AUTO".
3110 */
3111 if (!vm_type || !strcmp(vm_type, DEFAULT_KVM_TYPE)) {
3112 return 0;
3113 }
3114
3115 if (!g_ascii_strcasecmp(vm_type, "hv")) {
3116 return 1;
3117 }
3118
3119 if (!g_ascii_strcasecmp(vm_type, "pr")) {
3120 return 2;
3121 }
3122
3123 error_report("Unknown kvm-type specified '%s'", vm_type);
3124 return -1;
3125 }
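/*
 * Illustrative use: "-machine pseries,kvm-type=hv" resolves to 1 and
 * "kvm-type=pr" to 2, which on Book3S hosts requests the HV or PR KVM
 * implementation respectively; the default "auto" returns 0 and lets
 * the kernel pick whichever is available.
 */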
3126
3127 /*
3128 * Implementation of an interface to adjust firmware path
3129 * for the bootindex property handling.
3130 */
3131 static char *spapr_get_fw_dev_path(FWPathProvider *p, BusState *bus,
3132 DeviceState *dev)
3133 {
3134 #define CAST(type, obj, name) \
3135 ((type *)object_dynamic_cast(OBJECT(obj), (name)))
3136 SCSIDevice *d = CAST(SCSIDevice, dev, TYPE_SCSI_DEVICE);
3137 SpaprPhbState *phb = CAST(SpaprPhbState, dev, TYPE_SPAPR_PCI_HOST_BRIDGE);
3138 VHostSCSICommon *vsc = CAST(VHostSCSICommon, dev, TYPE_VHOST_SCSI_COMMON);
3139 PCIDevice *pcidev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE);
3140
3141 if (d && bus) {
3142 void *spapr = CAST(void, bus->parent, "spapr-vscsi");
3143 VirtIOSCSI *virtio = CAST(VirtIOSCSI, bus->parent, TYPE_VIRTIO_SCSI);
3144 USBDevice *usb = CAST(USBDevice, bus->parent, TYPE_USB_DEVICE);
3145
3146 if (spapr) {
3147 /*
3148 * Replace "channel@0/disk@0,0" with "disk@8000000000000000":
3149 * In the top 16 bits of the 64-bit LUN, we use SRP luns of the form
3150 * 0x8000 | (target << 8) | (bus << 5) | lun
3151 * (see the "Logical unit addressing format" table in SAM5)
3152 */
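/*
 * Worked example (hypothetical device): target 1, channel 0, LUN 2
 * gives id = 0x8000 | (1 << 8) | (0 << 5) | 2 = 0x8102, so the
 * firmware path ends in "@8102000000000000".
 */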
3153 unsigned id = 0x8000 | (d->id << 8) | (d->channel << 5) | d->lun;
3154 return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
3155 (uint64_t)id << 48);
3156 } else if (virtio) {
3157 /*
3158 * We use SRP luns of the form 01000000 | (target << 8) | lun
3159 * in the top 32 bits of the 64-bit LUN
3160 * Note: the quote above is from SLOF and it is wrong,
3161 * the actual binding is:
3162 * swap 0100 or 10 << or 20 << ( target lun-id -- srplun )
3163 */
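/*
 * Worked example (hypothetical device): virtio-scsi target 2, LUN 3
 * gives id = 0x1000000 | (2 << 16) | 3 = 0x1020003, so the path ends
 * in "@102000300000000"; a LUN of 256 or more would also set the
 * 0x4000 flat-space bit.
 */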
3164 unsigned id = 0x1000000 | (d->id << 16) | d->lun;
3165 if (d->lun >= 256) {
3166 /* Use the LUN "flat space addressing method" */
3167 id |= 0x4000;
3168 }
3169 return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
3170 (uint64_t)id << 32);
3171 } else if (usb) {
3172 /*
3173 * We use SRP luns of the form 01000000 | (usb-port << 16) | lun
3174 * in the top 32 bits of the 64-bit LUN
3175 */
3176 unsigned usb_port = atoi(usb->port->path);
3177 unsigned id = 0x1000000 | (usb_port << 16) | d->lun;
3178 return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
3179 (uint64_t)id << 32);
3180 }
3181 }
3182
3183 /*
3184 * SLOF probes the USB devices, and if it recognizes that the device is a
3185 * storage device, it changes its name to "storage" instead of "usb-host",
3186 * and additionally adds a child node for the SCSI LUN, so the correct
3187 * boot path in SLOF is something like .../storage@1/disk@xxx instead.
3188 */
3189 if (strcmp("usb-host", qdev_fw_name(dev)) == 0) {
3190 USBDevice *usbdev = CAST(USBDevice, dev, TYPE_USB_DEVICE);
3191 if (usb_device_is_scsi_storage(usbdev)) {
3192 return g_strdup_printf("storage@%s/disk", usbdev->port->path);
3193 }
3194 }
3195
3196 if (phb) {
3197 /* Replace "pci" with "pci@800000020000000" */
3198 return g_strdup_printf("pci@%"PRIX64, phb->buid);
3199 }
3200
3201 if (vsc) {
3202 /* Same logic as virtio above */
3203 unsigned id = 0x1000000 | (vsc->target << 16) | vsc->lun;
3204 return g_strdup_printf("disk@%"PRIX64, (uint64_t)id << 32);
3205 }
3206
3207 if (g_str_equal("pci-bridge", qdev_fw_name(dev))) {
3208 /* SLOF uses "pci" instead of "pci-bridge" for PCI bridges */
3209 PCIDevice *pdev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE);
3210 return g_strdup_printf("pci@%x", PCI_SLOT(pdev->devfn));
3211 }
3212
3213 if (pcidev) {
3214 return spapr_pci_fw_dev_name(pcidev);
3215 }
3216
3217 return NULL;
3218 }
3219
3220 static char *spapr_get_kvm_type(Object *obj, Error **errp)
3221 {
3222 SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3223
3224 return g_strdup(spapr->kvm_type);
3225 }
3226
3227 static void spapr_set_kvm_type(Object *obj, const char *value, Error **errp)
3228 {
3229 SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3230
3231 g_free(spapr->kvm_type);
3232 spapr->kvm_type = g_strdup(value);
3233 }
3234
3235 static bool spapr_get_modern_hotplug_events(Object *obj, Error **errp)
3236 {
3237 SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3238
3239 return spapr->use_hotplug_event_source;
3240 }
3241
3242 static void spapr_set_modern_hotplug_events(Object *obj, bool value,
3243 Error **errp)
3244 {
3245 SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3246
3247 spapr->use_hotplug_event_source = value;
3248 }
3249
3250 static bool spapr_get_msix_emulation(Object *obj, Error **errp)
3251 {
3252 return true;
3253 }
3254
3255 static char *spapr_get_resize_hpt(Object *obj, Error **errp)
3256 {
3257 SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3258
3259 switch (spapr->resize_hpt) {
3260 case SPAPR_RESIZE_HPT_DEFAULT:
3261 return g_strdup("default");
3262 case SPAPR_RESIZE_HPT_DISABLED:
3263 return g_strdup("disabled");
3264 case SPAPR_RESIZE_HPT_ENABLED:
3265 return g_strdup("enabled");
3266 case SPAPR_RESIZE_HPT_REQUIRED:
3267 return g_strdup("required");
3268 }
3269 g_assert_not_reached();
3270 }
3271
3272 static void spapr_set_resize_hpt(Object *obj, const char *value, Error **errp)
3273 {
3274 SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3275
3276 if (strcmp(value, "default") == 0) {
3277 spapr->resize_hpt = SPAPR_RESIZE_HPT_DEFAULT;
3278 } else if (strcmp(value, "disabled") == 0) {
3279 spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED;
3280 } else if (strcmp(value, "enabled") == 0) {
3281 spapr->resize_hpt = SPAPR_RESIZE_HPT_ENABLED;
3282 } else if (strcmp(value, "required") == 0) {
3283 spapr->resize_hpt = SPAPR_RESIZE_HPT_REQUIRED;
3284 } else {
3285 error_setg(errp, "Bad value for \"resize-hpt\" property");
3286 }
3287 }
3288
3289 static bool spapr_get_vof(Object *obj, Error **errp)
3290 {
3291 SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3292
3293 return spapr->vof != NULL;
3294 }
3295
3296 static void spapr_set_vof(Object *obj, bool value, Error **errp)
3297 {
3298 SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3299
3300 if (spapr->vof) {
3301 vof_cleanup(spapr->vof);
3302 g_free(spapr->vof);
3303 spapr->vof = NULL;
3304 }
3305 if (!value) {
3306 return;
3307 }
3308 spapr->vof = g_malloc0(sizeof(*spapr->vof));
3309 }
3310
3311 static char *spapr_get_ic_mode(Object *obj, Error **errp)
3312 {
3313 SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3314
3315 if (spapr->irq == &spapr_irq_xics_legacy) {
3316 return g_strdup("legacy");
3317 } else if (spapr->irq == &spapr_irq_xics) {
3318 return g_strdup("xics");
3319 } else if (spapr->irq == &spapr_irq_xive) {
3320 return g_strdup("xive");
3321 } else if (spapr->irq == &spapr_irq_dual) {
3322 return g_strdup("dual");
3323 }
3324 g_assert_not_reached();
3325 }
3326
3327 static void spapr_set_ic_mode(Object *obj, const char *value, Error **errp)
3328 {
3329 SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3330
3331 if (SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
3332 error_setg(errp, "This machine only uses the legacy XICS backend, don't pass ic-mode");
3333 return;
3334 }
3335
3336 /* The legacy IRQ backend can not be set */
3337 if (strcmp(value, "xics") == 0) {
3338 spapr->irq = &spapr_irq_xics;
3339 } else if (strcmp(value, "xive") == 0) {
3340 spapr->irq = &spapr_irq_xive;
3341 } else if (strcmp(value, "dual") == 0) {
3342 spapr->irq = &spapr_irq_dual;
3343 } else {
3344 error_setg(errp, "Bad value for \"ic-mode\" property");
3345 }
3346 }
3347
3348 static char *spapr_get_host_model(Object *obj, Error **errp)
3349 {
3350 SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3351
3352 return g_strdup(spapr->host_model);
3353 }
3354
3355 static void spapr_set_host_model(Object *obj, const char *value, Error **errp)
3356 {
3357 SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3358
3359 g_free(spapr->host_model);
3360 spapr->host_model = g_strdup(value);
3361 }
3362
3363 static char *spapr_get_host_serial(Object *obj, Error **errp)
3364 {
3365 SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3366
3367 return g_strdup(spapr->host_serial);
3368 }
3369
3370 static void spapr_set_host_serial(Object *obj, const char *value, Error **errp)
3371 {
3372 SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3373
3374 g_free(spapr->host_serial);
3375 spapr->host_serial = g_strdup(value);
3376 }
3377
3378 static void spapr_instance_init(Object *obj)
3379 {
3380 SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3381 SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
3382 MachineState *ms = MACHINE(spapr);
3383 MachineClass *mc = MACHINE_GET_CLASS(ms);
3384
3385 /*
3386 * NVDIMM support went live in 5.1 without considering that, in
3387 * other archs, the user needs to enable NVDIMM support with the
3388 * 'nvdimm' machine option and the default behavior is NVDIMM
3389 * support disabled. It is too late to roll back to the standard
3390 * behavior without breaking 5.1 guests.
3391 */
3392 if (mc->nvdimm_supported) {
3393 ms->nvdimms_state->is_enabled = true;
3394 }
3395
3396 spapr->htab_fd = -1;
3397 spapr->use_hotplug_event_source = true;
3398 spapr->kvm_type = g_strdup(DEFAULT_KVM_TYPE);
3399 object_property_add_str(obj, "kvm-type",
3400 spapr_get_kvm_type, spapr_set_kvm_type);
3401 object_property_set_description(obj, "kvm-type",
3402 "Specifies the KVM virtualization mode (auto,"
3403 " hv, pr). Defaults to 'auto'. This mode will use"
3404 " any available KVM module loaded in the host,"
3405 " where kvm_hv takes precedence if both kvm_hv and"
3406 " kvm_pr are loaded.");
3407 object_property_add_bool(obj, "modern-hotplug-events",
3408 spapr_get_modern_hotplug_events,
3409 spapr_set_modern_hotplug_events);
3410 object_property_set_description(obj, "modern-hotplug-events",
3411 "Use dedicated hotplug event mechanism in"
3412 " place of standard EPOW events when possible"
3413 " (required for memory hot-unplug support)");
3414 ppc_compat_add_property(obj, "max-cpu-compat", &spapr->max_compat_pvr,
3415 "Maximum permitted CPU compatibility mode");
3416
3417 object_property_add_str(obj, "resize-hpt",
3418 spapr_get_resize_hpt, spapr_set_resize_hpt);
3419 object_property_set_description(obj, "resize-hpt",
3420 "Resizing of the Hash Page Table (enabled, disabled, required)");
3421 object_property_add_uint32_ptr(obj, "vsmt",
3422 &spapr->vsmt, OBJ_PROP_FLAG_READWRITE);
3423 object_property_set_description(obj, "vsmt",
3424 "Virtual SMT: KVM behaves as if this were"
3425 " the host's SMT mode");
3426
3427 object_property_add_bool(obj, "vfio-no-msix-emulation",
3428 spapr_get_msix_emulation, NULL);
3429
3430 object_property_add_uint64_ptr(obj, "kernel-addr",
3431 &spapr->kernel_addr, OBJ_PROP_FLAG_READWRITE);
3432 object_property_set_description(obj, "kernel-addr",
3433 stringify(KERNEL_LOAD_ADDR)
3434 " for -kernel is the default");
3435 spapr->kernel_addr = KERNEL_LOAD_ADDR;
3436
3437 object_property_add_bool(obj, "x-vof", spapr_get_vof, spapr_set_vof);
3438 object_property_set_description(obj, "x-vof",
3439 "Enable Virtual Open Firmware (experimental)");
3440
3441 /* The machine class defines the default interrupt controller mode */
3442 spapr->irq = smc->irq;
3443 object_property_add_str(obj, "ic-mode", spapr_get_ic_mode,
3444 spapr_set_ic_mode);
3445 object_property_set_description(obj, "ic-mode",
3446 "Specifies the interrupt controller mode (xics, xive, dual)");
3447
3448 object_property_add_str(obj, "host-model",
3449 spapr_get_host_model, spapr_set_host_model);
3450 object_property_set_description(obj, "host-model",
3451 "Host model to advertise in guest device tree");
3452 object_property_add_str(obj, "host-serial",
3453 spapr_get_host_serial, spapr_set_host_serial);
3454 object_property_set_description(obj, "host-serial",
3455 "Host serial number to advertise in guest device tree");
3456 }
3457
3458 static void spapr_machine_finalizefn(Object *obj)
3459 {
3460 SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3461
3462 g_free(spapr->kvm_type);
3463 }
3464
3465 void spapr_do_system_reset_on_cpu(CPUState *cs, run_on_cpu_data arg)
3466 {
3467 SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
3468 CPUPPCState *env = cpu_env(cs);
3469
3470 cpu_synchronize_state(cs);
3471 /* If FWNMI is inactive, addr will be -1, which will deliver to 0x100 */
3472 if (spapr->fwnmi_system_reset_addr != -1) {
3473 uint64_t rtas_addr, addr;
3474
3475 /* get rtas addr from fdt */
3476 rtas_addr = spapr_get_rtas_addr();
3477 if (!rtas_addr) {
3478 qemu_system_guest_panicked(NULL);
3479 return;
3480 }
3481
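        /*
         * Use a per-CPU pair of u64s just past the RTAS error log area:
         * save the old r3 in the first word, zero the second, and point
         * r3 at the slot so the firmware-registered reset vector can
         * retrieve it.
         */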
3482 addr = rtas_addr + RTAS_ERROR_LOG_MAX + cs->cpu_index * sizeof(uint64_t)*2;
3483 stq_be_phys(&address_space_memory, addr, env->gpr[3]);
3484 stq_be_phys(&address_space_memory, addr + sizeof(uint64_t), 0);
3485 env->gpr[3] = addr;
3486 }
3487 ppc_cpu_do_system_reset(cs);
3488 if (spapr->fwnmi_system_reset_addr != -1) {
3489 env->nip = spapr->fwnmi_system_reset_addr;
3490 }
3491 }
3492
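/*
 * Monitor-initiated NMI: implemented as a system reset broadcast to every
 * vCPU, so the cpu_index argument is intentionally ignored here.
 */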
3493 static void spapr_nmi(NMIState *n, int cpu_index, Error **errp)
3494 {
3495 CPUState *cs;
3496
3497 CPU_FOREACH(cs) {
3498 async_run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL);
3499 }
3500 }
3501
3502 int spapr_lmb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
3503 void *fdt, int *fdt_start_offset, Error **errp)
3504 {
3505 uint64_t addr;
3506 uint32_t node;
3507
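    /*
     * LMB DRC indices map 1:1 onto SPAPR_MEMORY_BLOCK_SIZE blocks, so the
     * guest physical address is simply the DRC index scaled by the block
     * size.
     */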
3508 addr = spapr_drc_index(drc) * SPAPR_MEMORY_BLOCK_SIZE;
3509 node = object_property_get_uint(OBJECT(drc->dev), PC_DIMM_NODE_PROP,
3510 &error_abort);
3511 *fdt_start_offset = spapr_dt_memory_node(spapr, fdt, node, addr,
3512 SPAPR_MEMORY_BLOCK_SIZE);
3513 return 0;
3514 }
3515
3516 static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
3517 bool dedicated_hp_event_source)
3518 {
3519 SpaprDrc *drc;
3520 uint32_t nr_lmbs = size/SPAPR_MEMORY_BLOCK_SIZE;
3521 int i;
3522 uint64_t addr = addr_start;
3523 bool hotplugged = spapr_drc_hotplugged(dev);
3524
3525 for (i = 0; i < nr_lmbs; i++) {
3526 drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3527 addr / SPAPR_MEMORY_BLOCK_SIZE);
3528 g_assert(drc);
3529
3530 /*
3531 * memory_device_get_free_addr() provided a range of free addresses
3532 * that doesn't overlap with any existing mapping at pre-plug. The
3533 * corresponding LMB DRCs are thus assumed to be all attachable.
3534 */
3535 spapr_drc_attach(drc, dev);
3536 if (!hotplugged) {
3537 spapr_drc_reset(drc);
3538 }
3539 addr += SPAPR_MEMORY_BLOCK_SIZE;
3540 }
3541     /*
3542      * Send a hotplug notification to the guest only for hotplugged memory.
3543      */
3544 if (hotplugged) {
3545 if (dedicated_hp_event_source) {
3546 drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3547 addr_start / SPAPR_MEMORY_BLOCK_SIZE);
3548 g_assert(drc);
3549 spapr_hotplug_req_add_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
3550 nr_lmbs,
3551 spapr_drc_index(drc));
3552 } else {
3553 spapr_hotplug_req_add_by_count(SPAPR_DR_CONNECTOR_TYPE_LMB,
3554 nr_lmbs);
3555 }
3556 }
3557 }
3558
3559 static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
3560 {
3561 SpaprMachineState *ms = SPAPR_MACHINE(hotplug_dev);
3562 PCDIMMDevice *dimm = PC_DIMM(dev);
3563 uint64_t size, addr;
3564 int64_t slot;
3565 bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM);
3566
3567 size = memory_device_get_region_size(MEMORY_DEVICE(dev), &error_abort);
3568
3569 pc_dimm_plug(dimm, MACHINE(ms));
3570
3571 if (!is_nvdimm) {
3572 addr = object_property_get_uint(OBJECT(dimm),
3573 PC_DIMM_ADDR_PROP, &error_abort);
3574 spapr_add_lmbs(dev, addr, size,
3575 spapr_ovec_test(ms->ov5_cas, OV5_HP_EVT));
3576 } else {
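        /*
         * NVDIMMs are not backed by LMB DRCs; they are advertised to the
         * guest by slot number instead.
         */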
3577 slot = object_property_get_int(OBJECT(dimm),
3578 PC_DIMM_SLOT_PROP, &error_abort);
3579         /* We should have a valid slot number at this point */
3580 g_assert(slot >= 0);
3581 spapr_add_nvdimm(dev, slot);
3582 }
3583 }
3584
3585 static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
3586 Error **errp)
3587 {
3588 SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
3589 bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM);
3590 PCDIMMDevice *dimm = PC_DIMM(dev);
3591 Error *local_err = NULL;
3592 uint64_t size;
3593 Object *memdev;
3594 hwaddr pagesize;
3595
3596 size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &local_err);
3597 if (local_err) {
3598 error_propagate(errp, local_err);
3599 return;
3600 }
3601
3602 if (is_nvdimm) {
3603 if (!spapr_nvdimm_validate(hotplug_dev, NVDIMM(dev), size, errp)) {
3604 return;
3605 }
3606 } else if (size % SPAPR_MEMORY_BLOCK_SIZE) {
3607 error_setg(errp, "Hotplugged memory size must be a multiple of "
3608 "%" PRIu64 " MB", SPAPR_MEMORY_BLOCK_SIZE / MiB);
3609 return;
3610 }
3611
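    /*
     * Reject backends whose host page size the guest cannot use; see
     * spapr_check_pagesize() for the actual constraint.
     */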
3612 memdev = object_property_get_link(OBJECT(dimm), PC_DIMM_MEMDEV_PROP,
3613 &error_abort);
3614 pagesize = host_memory_backend_pagesize(MEMORY_BACKEND(memdev));
3615 if (!spapr_check_pagesize(spapr, pagesize, errp)) {
3616 return;
3617 }
3618
3619 pc_dimm_pre_plug(dimm, MACHINE(hotplug_dev), errp);
3620 }
3621
3622 struct SpaprDimmState {
3623 PCDIMMDevice *dimm;
3624 uint32_t nr_lmbs;
3625 QTAILQ_ENTRY(SpaprDimmState) next;
3626 };
3627
3628 static SpaprDimmState *spapr_pending_dimm_unplugs_find(SpaprMachineState *s,
3629 PCDIMMDevice *dimm)
3630 {
3631 SpaprDimmState *dimm_state = NULL;
3632
3633 QTAILQ_FOREACH(dimm_state, &s->pending_dimm_unplugs, next) {
3634 if (dimm_state->dimm == dimm) {
3635 break;
3636 }
3637 }
3638 return dimm_state;
3639 }
3640
3641 static SpaprDimmState *spapr_pending_dimm_unplugs_add(SpaprMachineState *spapr,
3642 uint32_t nr_lmbs,
3643 PCDIMMDevice *dimm)
3644 {
3645 SpaprDimmState *ds = NULL;
3646
3647 /*
3648 * If this request is for a DIMM whose removal had failed earlier
3649      * (due to the guest's refusal to remove the LMBs), we would have this
3650      * DIMM already in the pending_dimm_unplugs list. In that
3651      * case, don't add it again.
3652 */
3653 ds = spapr_pending_dimm_unplugs_find(spapr, dimm);
3654 if (!ds) {
3655 ds = g_new0(SpaprDimmState, 1);
3656 ds->nr_lmbs = nr_lmbs;
3657 ds->dimm = dimm;
3658 QTAILQ_INSERT_HEAD(&spapr->pending_dimm_unplugs, ds, next);
3659 }
3660 return ds;
3661 }
3662
3663 static void spapr_pending_dimm_unplugs_remove(SpaprMachineState *spapr,
3664 SpaprDimmState *dimm_state)
3665 {
3666 QTAILQ_REMOVE(&spapr->pending_dimm_unplugs, dimm_state, next);
3667 g_free(dimm_state);
3668 }
3669
3670 static SpaprDimmState *spapr_recover_pending_dimm_state(SpaprMachineState *ms,
3671 PCDIMMDevice *dimm)
3672 {
3673 SpaprDrc *drc;
3674 uint64_t size = memory_device_get_region_size(MEMORY_DEVICE(dimm),
3675 &error_abort);
3676 uint32_t nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
3677 uint32_t avail_lmbs = 0;
3678 uint64_t addr_start, addr;
3679 int i;
3680
3681 addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP,
3682 &error_abort);
3683
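    /*
     * Count only the LMB DRCs that still have the DIMM attached: those are
     * the ones the guest has not released yet and that remain pending.
     */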
3684 addr = addr_start;
3685 for (i = 0; i < nr_lmbs; i++) {
3686 drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3687 addr / SPAPR_MEMORY_BLOCK_SIZE);
3688 g_assert(drc);
3689 if (drc->dev) {
3690 avail_lmbs++;
3691 }
3692 addr += SPAPR_MEMORY_BLOCK_SIZE;
3693 }
3694
3695 return spapr_pending_dimm_unplugs_add(ms, avail_lmbs, dimm);
3696 }
3697
3698 void spapr_memory_unplug_rollback(SpaprMachineState *spapr, DeviceState *dev)
3699 {
3700 SpaprDimmState *ds;
3701 PCDIMMDevice *dimm;
3702 SpaprDrc *drc;
3703 uint32_t nr_lmbs;
3704 uint64_t size, addr_start, addr;
3705 int i;
3706
3707 if (!dev) {
3708 return;
3709 }
3710
3711 dimm = PC_DIMM(dev);
3712 ds = spapr_pending_dimm_unplugs_find(spapr, dimm);
3713
3714 /*
3715 * 'ds == NULL' would mean that the DIMM doesn't have a pending
3716      * unplug state, but one of its DRCs is marked as unplug_requested.
3717 * This is bad and weird enough to g_assert() out.
3718 */
3719 g_assert(ds);
3720
3721 spapr_pending_dimm_unplugs_remove(spapr, ds);
3722
3723 size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &error_abort);
3724 nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
3725
3726 addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP,
3727 &error_abort);
3728
3729 addr = addr_start;
3730 for (i = 0; i < nr_lmbs; i++) {
3731 drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3732 addr / SPAPR_MEMORY_BLOCK_SIZE);
3733 g_assert(drc);
3734
3735 drc->unplug_requested = false;
3736 addr += SPAPR_MEMORY_BLOCK_SIZE;
3737 }
3738
3739 /*
3740      * Emit a QAPI event to report that the memory
3741      * hot-unplug was not successful.
3742 */
3743 qapi_event_send_device_unplug_guest_error(dev->id,
3744 dev->canonical_path);
3745 }
3746
3747 /* Callback to be called during DRC release. */
3748 void spapr_lmb_release(DeviceState *dev)
3749 {
3750 HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev);
3751 SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_ctrl);
3752 SpaprDimmState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev));
3753
3754 /* This information will get lost if a migration occurs
3755 * during the unplug process. In this case recover it. */
3756 if (ds == NULL) {
3757 ds = spapr_recover_pending_dimm_state(spapr, PC_DIMM(dev));
3758 g_assert(ds);
3759 /* The DRC being examined by the caller at least must be counted */
3760 g_assert(ds->nr_lmbs);
3761 }
3762
3763 if (--ds->nr_lmbs) {
3764 return;
3765 }
3766
3767 /*
3768 * Now that all the LMBs have been removed by the guest, call the
3769 * unplug handler chain. This can never fail.
3770 */
3771 hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
3772 object_unparent(OBJECT(dev));
3773 }
3774
3775 static void spapr_memory_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
3776 {
3777 SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
3778 SpaprDimmState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev));
3779
3780 /* We really shouldn't get this far without anything to unplug */
3781 g_assert(ds);
3782
3783 pc_dimm_unplug(PC_DIMM(dev), MACHINE(hotplug_dev));
3784 qdev_unrealize(dev);
3785 spapr_pending_dimm_unplugs_remove(spapr, ds);
3786 }
3787
3788 static void spapr_memory_unplug_request(HotplugHandler *hotplug_dev,
3789 DeviceState *dev, Error **errp)
3790 {
3791 SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
3792 PCDIMMDevice *dimm = PC_DIMM(dev);
3793 uint32_t nr_lmbs;
3794 uint64_t size, addr_start, addr;
3795 int i;
3796 SpaprDrc *drc;
3797
3798 if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) {
3799 error_setg(errp, "nvdimm device hot unplug is not supported yet.");
3800 return;
3801 }
3802
3803 size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &error_abort);
3804 nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
3805
3806 addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP,
3807 &error_abort);
3808
3809 /*
3810 * An existing pending dimm state for this DIMM means that there is an
3811 * unplug operation in progress, waiting for the spapr_lmb_release
3812 * callback to complete the job (BQL can't cover that far). In this case,
3813 * bail out to avoid detaching DRCs that were already released.
3814 */
3815 if (spapr_pending_dimm_unplugs_find(spapr, dimm)) {
3816 error_setg(errp, "Memory unplug already in progress for device %s",
3817 dev->id);
3818 return;
3819 }
3820
3821 spapr_pending_dimm_unplugs_add(spapr, nr_lmbs, dimm);
3822
3823 addr = addr_start;
3824 for (i = 0; i < nr_lmbs; i++) {
3825 drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3826 addr / SPAPR_MEMORY_BLOCK_SIZE);
3827 g_assert(drc);
3828
3829 spapr_drc_unplug_request(drc);
3830 addr += SPAPR_MEMORY_BLOCK_SIZE;
3831 }
3832
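    /*
     * Notify the guest once for the whole range, using the DRC index of the
     * first LMB as the anchor for the indexed remove-by-count event.
     */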
3833 drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3834 addr_start / SPAPR_MEMORY_BLOCK_SIZE);
3835 spapr_hotplug_req_remove_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
3836 nr_lmbs, spapr_drc_index(drc));
3837 }
3838
3839 /* Callback to be called during DRC release. */
3840 void spapr_core_release(DeviceState *dev)
3841 {
3842 HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev);
3843
3844 /* Call the unplug handler chain. This can never fail. */
3845 hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
3846 object_unparent(OBJECT(dev));
3847 }
3848
3849 static void spapr_core_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
3850 {
3851 MachineState *ms = MACHINE(hotplug_dev);
3852 CPUCore *cc = CPU_CORE(dev);
3853 CPUArchId *core_slot = spapr_find_cpu_slot(ms, cc->core_id, NULL);
3854
3855 assert(core_slot);
3856 core_slot->cpu = NULL;
3857 qdev_unrealize(dev);
3858 }
3859
3860 static
3861 void spapr_core_unplug_request(HotplugHandler *hotplug_dev, DeviceState *dev,
3862 Error **errp)
3863 {
3864 SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
3865 int index;
3866 SpaprDrc *drc;
3867 CPUCore *cc = CPU_CORE(dev);
3868
3869 if (!spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index)) {
3870 error_setg(errp, "Unable to find CPU core with core-id: %d",
3871 cc->core_id);
3872 return;
3873 }
3874 if (index == 0) {
3875 error_setg(errp, "Boot CPU core may not be unplugged");
3876 return;
3877 }
3878
3879 drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU,
3880 spapr_vcpu_id(spapr, cc->core_id));
3881 g_assert(drc);
3882
3883 if (!spapr_drc_unplug_requested(drc)) {
3884 spapr_drc_unplug_request(drc);
3885 }
3886
3887 /*
3888 * spapr_hotplug_req_remove_by_index is left unguarded, out of the
3889 * "!spapr_drc_unplug_requested" check, to allow for multiple IRQ
3890      * pulses removing the same CPU. Otherwise, in a failed hotunplug
3891 * attempt (e.g. the kernel will refuse to remove the last online
3892 * CPU), we will never attempt it again because unplug_requested
3893 * will still be 'true' in that case.
3894 */
3895 spapr_hotplug_req_remove_by_index(drc);
3896 }
3897
3898 int spapr_core_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
3899 void *fdt, int *fdt_start_offset, Error **errp)
3900 {
3901 SpaprCpuCore *core = SPAPR_CPU_CORE(drc->dev);
3902 CPUState *cs = CPU(core->threads[0]);
3903 PowerPCCPU *cpu = POWERPC_CPU(cs);
3904 DeviceClass *dc = DEVICE_GET_CLASS(cs);
3905 int id = spapr_get_vcpu_id(cpu);
3906 g_autofree char *nodename = NULL;
3907 int offset;
3908
3909 nodename = g_strdup_printf("%s@%x", dc->fw_name, id);
3910 offset = fdt_add_subnode(fdt, 0, nodename);
3911
3912 spapr_dt_cpu(cs, fdt, offset, spapr);
3913
3914 /*
3915 * spapr_dt_cpu() does not fill the 'name' property in the
3916 * CPU node. The function is called during boot process, before
3917 * and after CAS, and overwriting the 'name' property written
3918 * by SLOF is not allowed.
3919 *
3920 * Write it manually after spapr_dt_cpu(). This makes the hotplug
3921 * CPUs more compatible with the coldplugged ones, which have
3922 * the 'name' property. Linux Kernel also relies on this
3923 * property to identify CPU nodes.
3924 */
3925 _FDT((fdt_setprop_string(fdt, offset, "name", nodename)));
3926
3927 *fdt_start_offset = offset;
3928 return 0;
3929 }
3930
3931 static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
3932 {
3933 SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
3934 MachineClass *mc = MACHINE_GET_CLASS(spapr);
3935 SpaprCpuCore *core = SPAPR_CPU_CORE(OBJECT(dev));
3936 CPUCore *cc = CPU_CORE(dev);
3937 SpaprDrc *drc;
3938 CPUArchId *core_slot;
3939 int index;
3940 bool hotplugged = spapr_drc_hotplugged(dev);
3941 int i;
3942
3943 core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
3944 g_assert(core_slot); /* Already checked in spapr_core_pre_plug() */
3945
3946 drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU,
3947 spapr_vcpu_id(spapr, cc->core_id));
3948
3949 g_assert(drc || !mc->has_hotpluggable_cpus);
3950
3951 if (drc) {
3952 /*
3953          * spapr_core_pre_plug() already guarantees that this is a brand new
3954 * core being plugged into a free slot. Nothing should already
3955 * be attached to the corresponding DRC.
3956 */
3957 spapr_drc_attach(drc, dev);
3958
3959 if (hotplugged) {
3960 /*
3961 * Send hotplug notification interrupt to the guest only
3962 * in case of hotplugged CPUs.
3963 */
3964 spapr_hotplug_req_add_by_index(drc);
3965 } else {
3966 spapr_drc_reset(drc);
3967 }
3968 }
3969
3970 core_slot->cpu = CPU(dev);
3971
3972 /*
3973 * Set compatibility mode to match the boot CPU, which was either set
3974 * by the machine reset code or by CAS. This really shouldn't fail at
3975 * this point.
3976 */
3977 if (hotplugged) {
3978 for (i = 0; i < cc->nr_threads; i++) {
3979 ppc_set_compat(core->threads[i], POWERPC_CPU(first_cpu)->compat_pvr,
3980 &error_abort);
3981 }
3982 }
3983
3984 }
3985
3986 static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
3987 Error **errp)
3988 {
3989 MachineState *machine = MACHINE(OBJECT(hotplug_dev));
3990 MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev);
3991 CPUCore *cc = CPU_CORE(dev);
3992 const char *base_core_type = spapr_get_cpu_core_type(machine->cpu_type);
3993 const char *type = object_get_typename(OBJECT(dev));
3994 CPUArchId *core_slot;
3995 int index;
3996 unsigned int smp_threads = machine->smp.threads;
3997
3998 if (dev->hotplugged && !mc->has_hotpluggable_cpus) {
3999 error_setg(errp, "CPU hotplug not supported for this machine");
4000 return;
4001 }
4002
4003 if (strcmp(base_core_type, type)) {
4004 error_setg(errp, "CPU core type should be %s", base_core_type);
4005 return;
4006 }
4007
4008 if (cc->core_id % smp_threads) {
4009 error_setg(errp, "invalid core id %d", cc->core_id);
4010 return;
4011 }
4012
4013 /*
4014 * In general we should have homogeneous threads-per-core, but old
4015 * (pre hotplug support) machine types allow the last core to have
4016 * reduced threads as a compatibility hack for when we allowed
4017 * total vcpus not a multiple of threads-per-core.
4018 */
4019 if (mc->has_hotpluggable_cpus && (cc->nr_threads != smp_threads)) {
4020 error_setg(errp, "invalid nr-threads %d, must be %d", cc->nr_threads,
4021 smp_threads);
4022 return;
4023 }
4024
4025 core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
4026 if (!core_slot) {
4027 error_setg(errp, "core id %d out of range", cc->core_id);
4028 return;
4029 }
4030
4031 if (core_slot->cpu) {
4032 error_setg(errp, "core %d already populated", cc->core_id);
4033 return;
4034 }
4035
4036 numa_cpu_pre_plug(core_slot, dev, errp);
4037 }
4038
4039 int spapr_phb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
4040 void *fdt, int *fdt_start_offset, Error **errp)
4041 {
4042 SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(drc->dev);
4043 int intc_phandle;
4044
4045 intc_phandle = spapr_irq_get_phandle(spapr, spapr->fdt_blob, errp);
4046 if (intc_phandle <= 0) {
4047 return -1;
4048 }
4049
4050 if (spapr_dt_phb(spapr, sphb, intc_phandle, fdt, fdt_start_offset)) {
4051 error_setg(errp, "unable to create FDT node for PHB %d", sphb->index);
4052 return -1;
4053 }
4054
4055     /* Generally SLOF creates these; for hotplug it's up to QEMU */
4056 _FDT(fdt_setprop_string(fdt, *fdt_start_offset, "name", "pci"));
4057
4058 return 0;
4059 }
4060
4061 static bool spapr_phb_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
4062 Error **errp)
4063 {
4064 SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
4065 SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev);
4066 SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
4067 const unsigned windows_supported = spapr_phb_windows_supported(sphb);
4068 SpaprDrc *drc;
4069
4070 if (dev->hotplugged && !smc->dr_phb_enabled) {
4071 error_setg(errp, "PHB hotplug not supported for this machine");
4072 return false;
4073 }
4074
4075 if (sphb->index == (uint32_t)-1) {
4076 error_setg(errp, "\"index\" for PAPR PHB is mandatory");
4077 return false;
4078 }
4079
4080 drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index);
4081 if (drc && drc->dev) {
4082 error_setg(errp, "PHB %d already attached", sphb->index);
4083 return false;
4084 }
4085
4086 /*
4087 * This will check that sphb->index doesn't exceed the maximum number of
4088 * PHBs for the current machine type.
4089 */
4090 return
4091 smc->phb_placement(spapr, sphb->index,
4092 &sphb->buid, &sphb->io_win_addr,
4093 &sphb->mem_win_addr, &sphb->mem64_win_addr,
4094 windows_supported, sphb->dma_liobn,
4095 errp);
4096 }
4097
4098 static void spapr_phb_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
4099 {
4100 SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
4101 SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
4102 SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev);
4103 SpaprDrc *drc;
4104 bool hotplugged = spapr_drc_hotplugged(dev);
4105
4106 if (!smc->dr_phb_enabled) {
4107 return;
4108 }
4109
4110 drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index);
4111 /* hotplug hooks should check it's enabled before getting this far */
4112 assert(drc);
4113
4114 /* spapr_phb_pre_plug() already checked the DRC is attachable */
4115 spapr_drc_attach(drc, dev);
4116
4117 if (hotplugged) {
4118 spapr_hotplug_req_add_by_index(drc);
4119 } else {
4120 spapr_drc_reset(drc);
4121 }
4122 }
4123
4124 void spapr_phb_release(DeviceState *dev)
4125 {
4126 HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev);
4127
4128 hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
4129 object_unparent(OBJECT(dev));
4130 }
4131
4132 static void spapr_phb_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
4133 {
4134 qdev_unrealize(dev);
4135 }
4136
4137 static void spapr_phb_unplug_request(HotplugHandler *hotplug_dev,
4138 DeviceState *dev, Error **errp)
4139 {
4140 SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev);
4141 SpaprDrc *drc;
4142
4143 drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index);
4144 assert(drc);
4145
4146 if (!spapr_drc_unplug_requested(drc)) {
4147 spapr_drc_unplug_request(drc);
4148 spapr_hotplug_req_remove_by_index(drc);
4149 } else {
4150 error_setg(errp,
4151 "PCI Host Bridge unplug already in progress for device %s",
4152 dev->id);
4153 }
4154 }
4155
4156 static
4157 bool spapr_tpm_proxy_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
4158 Error **errp)
4159 {
4160 SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
4161
4162 if (spapr->tpm_proxy != NULL) {
4163 error_setg(errp, "Only one TPM proxy can be specified for this machine");
4164 return false;
4165 }
4166
4167 return true;
4168 }
4169
4170 static void spapr_tpm_proxy_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
4171 {
4172 SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
4173 SpaprTpmProxy *tpm_proxy = SPAPR_TPM_PROXY(dev);
4174
4175 /* Already checked in spapr_tpm_proxy_pre_plug() */
4176 g_assert(spapr->tpm_proxy == NULL);
4177
4178 spapr->tpm_proxy = tpm_proxy;
4179 }
4180
4181 static void spapr_tpm_proxy_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
4182 {
4183 SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
4184
4185 qdev_unrealize(dev);
4186 object_unparent(OBJECT(dev));
4187 spapr->tpm_proxy = NULL;
4188 }
4189
4190 static void spapr_machine_device_plug(HotplugHandler *hotplug_dev,
4191 DeviceState *dev, Error **errp)
4192 {
4193 if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
4194 spapr_memory_plug(hotplug_dev, dev);
4195 } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
4196 spapr_core_plug(hotplug_dev, dev);
4197 } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
4198 spapr_phb_plug(hotplug_dev, dev);
4199 } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
4200 spapr_tpm_proxy_plug(hotplug_dev, dev);
4201 }
4202 }
4203
4204 static void spapr_machine_device_unplug(HotplugHandler *hotplug_dev,
4205 DeviceState *dev, Error **errp)
4206 {
4207 if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
4208 spapr_memory_unplug(hotplug_dev, dev);
4209 } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
4210 spapr_core_unplug(hotplug_dev, dev);
4211 } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
4212 spapr_phb_unplug(hotplug_dev, dev);
4213 } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
4214 spapr_tpm_proxy_unplug(hotplug_dev, dev);
4215 }
4216 }
4217
4218 bool spapr_memory_hot_unplug_supported(SpaprMachineState *spapr)
4219 {
4220 return spapr_ovec_test(spapr->ov5_cas, OV5_HP_EVT) ||
4221 /*
4222 * CAS will process all pending unplug requests.
4223 *
4224 * HACK: a guest could theoretically have cleared all bits in OV5,
4225 * but none of the guests we care for do.
4226 */
4227 spapr_ovec_empty(spapr->ov5_cas);
4228 }
4229
4230 static void spapr_machine_device_unplug_request(HotplugHandler *hotplug_dev,
4231 DeviceState *dev, Error **errp)
4232 {
4233 SpaprMachineState *sms = SPAPR_MACHINE(OBJECT(hotplug_dev));
4234 MachineClass *mc = MACHINE_GET_CLASS(sms);
4235 SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4236
4237 if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
4238 if (spapr_memory_hot_unplug_supported(sms)) {
4239 spapr_memory_unplug_request(hotplug_dev, dev, errp);
4240 } else {
4241 error_setg(errp, "Memory hot unplug not supported for this guest");
4242 }
4243 } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
4244 if (!mc->has_hotpluggable_cpus) {
4245 error_setg(errp, "CPU hot unplug not supported on this machine");
4246 return;
4247 }
4248 spapr_core_unplug_request(hotplug_dev, dev, errp);
4249 } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
4250 if (!smc->dr_phb_enabled) {
4251 error_setg(errp, "PHB hot unplug not supported on this machine");
4252 return;
4253 }
4254 spapr_phb_unplug_request(hotplug_dev, dev, errp);
4255 } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
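        /* TPM proxy removal needs no guest negotiation; unplug it right away. */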
4256 spapr_tpm_proxy_unplug(hotplug_dev, dev);
4257 }
4258 }
4259
4260 static void spapr_machine_device_pre_plug(HotplugHandler *hotplug_dev,
4261 DeviceState *dev, Error **errp)
4262 {
4263 if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
4264 spapr_memory_pre_plug(hotplug_dev, dev, errp);
4265 } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
4266 spapr_core_pre_plug(hotplug_dev, dev, errp);
4267 } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
4268 spapr_phb_pre_plug(hotplug_dev, dev, errp);
4269 } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
4270 spapr_tpm_proxy_pre_plug(hotplug_dev, dev, errp);
4271 }
4272 }
4273
4274 static HotplugHandler *spapr_get_hotplug_handler(MachineState *machine,
4275 DeviceState *dev)
4276 {
4277 if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) ||
4278 object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE) ||
4279 object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE) ||
4280 object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
4281 return HOTPLUG_HANDLER(machine);
4282 }
4283 if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
4284 PCIDevice *pcidev = PCI_DEVICE(dev);
4285 PCIBus *root = pci_device_root_bus(pcidev);
4286 SpaprPhbState *phb =
4287 (SpaprPhbState *)object_dynamic_cast(OBJECT(BUS(root)->parent),
4288 TYPE_SPAPR_PCI_HOST_BRIDGE);
4289
4290 if (phb) {
4291 return HOTPLUG_HANDLER(phb);
4292 }
4293 }
4294 return NULL;
4295 }
4296
4297 static CpuInstanceProperties
4298 spapr_cpu_index_to_props(MachineState *machine, unsigned cpu_index)
4299 {
4300 CPUArchId *core_slot;
4301 MachineClass *mc = MACHINE_GET_CLASS(machine);
4302
4303 /* make sure possible_cpu are initialized */
4304 mc->possible_cpu_arch_ids(machine);
4305 /* get CPU core slot containing thread that matches cpu_index */
4306 core_slot = spapr_find_cpu_slot(machine, cpu_index, NULL);
4307 assert(core_slot);
4308 return core_slot->props;
4309 }
4310
4311 static int64_t spapr_get_default_cpu_node_id(const MachineState *ms, int idx)
4312 {
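    /*
     * Assign whole cores to NUMA nodes round-robin: consecutive blocks of
     * smp.cores core indexes map to consecutive nodes, wrapping around.
     */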
4313 return idx / ms->smp.cores % ms->numa_state->num_nodes;
4314 }
4315
4316 static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine)
4317 {
4318 int i;
4319 unsigned int smp_threads = machine->smp.threads;
4320 unsigned int smp_cpus = machine->smp.cpus;
4321 const char *core_type;
4322 int spapr_max_cores = machine->smp.max_cpus / smp_threads;
4323 MachineClass *mc = MACHINE_GET_CLASS(machine);
4324
4325 if (!mc->has_hotpluggable_cpus) {
4326 spapr_max_cores = QEMU_ALIGN_UP(smp_cpus, smp_threads) / smp_threads;
4327 }
4328 if (machine->possible_cpus) {
4329 assert(machine->possible_cpus->len == spapr_max_cores);
4330 return machine->possible_cpus;
4331 }
4332
4333 core_type = spapr_get_cpu_core_type(machine->cpu_type);
4334 if (!core_type) {
4335 error_report("Unable to find sPAPR CPU Core definition");
4336 exit(1);
4337 }
4338
4339 machine->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
4340 sizeof(CPUArchId) * spapr_max_cores);
4341 machine->possible_cpus->len = spapr_max_cores;
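    /*
     * Each possible CPU entry describes one core; its arch_id doubles as the
     * core-id and is therefore a multiple of threads-per-core.
     */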
4342 for (i = 0; i < machine->possible_cpus->len; i++) {
4343 int core_id = i * smp_threads;
4344
4345 machine->possible_cpus->cpus[i].type = core_type;
4346 machine->possible_cpus->cpus[i].vcpus_count = smp_threads;
4347 machine->possible_cpus->cpus[i].arch_id = core_id;
4348 machine->possible_cpus->cpus[i].props.has_core_id = true;
4349 machine->possible_cpus->cpus[i].props.core_id = core_id;
4350 }
4351 return machine->possible_cpus;
4352 }
4353
4354 static bool spapr_phb_placement(SpaprMachineState *spapr, uint32_t index,
4355 uint64_t *buid, hwaddr *pio,
4356 hwaddr *mmio32, hwaddr *mmio64,
4357 unsigned n_dma, uint32_t *liobns, Error **errp)
4358 {
4359 /*
4360 * New-style PHB window placement.
4361 *
4362      * Goal: give each PHB a large (1TiB), naturally aligned 64-bit MMIO
4363      * window, in addition to a 2GiB 32-bit MMIO window and a 64kiB PIO
4364      * window.
4365 *
4366 * Some guest kernels can't work with MMIO windows above 1<<46
4367 * (64TiB), so we place up to 31 PHBs in the area 32TiB..64TiB
4368 *
4369 * 32TiB..(33TiB+1984kiB) contains the 64kiB PIO windows for each
4370 * PHB stacked together. (32TiB+2GiB)..(32TiB+64GiB) contains the
4371 * 2GiB 32-bit MMIO windows for each PHB. Then 33..64TiB has the
4372 * 1TiB 64-bit MMIO windows for each PHB.
4373 */
4374 const uint64_t base_buid = 0x800000020000000ULL;
4375 int i;
4376
4377 /* Sanity check natural alignments */
4378 QEMU_BUILD_BUG_ON((SPAPR_PCI_BASE % SPAPR_PCI_MEM64_WIN_SIZE) != 0);
4379 QEMU_BUILD_BUG_ON((SPAPR_PCI_LIMIT % SPAPR_PCI_MEM64_WIN_SIZE) != 0);
4380 QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM64_WIN_SIZE % SPAPR_PCI_MEM32_WIN_SIZE) != 0);
4381 QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM32_WIN_SIZE % SPAPR_PCI_IO_WIN_SIZE) != 0);
4382 /* Sanity check bounds */
4383 QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_IO_WIN_SIZE) >
4384 SPAPR_PCI_MEM32_WIN_SIZE);
4385 QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_MEM32_WIN_SIZE) >
4386 SPAPR_PCI_MEM64_WIN_SIZE);
4387
4388 if (index >= SPAPR_MAX_PHBS) {
4389 error_setg(errp, "\"index\" for PAPR PHB is too large (max %llu)",
4390 SPAPR_MAX_PHBS - 1);
4391 return false;
4392 }
4393
4394 *buid = base_buid + index;
4395 for (i = 0; i < n_dma; ++i) {
4396 liobns[i] = SPAPR_PCI_LIOBN(index, i);
4397 }
4398
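    /*
     * Example (assuming SPAPR_PCI_BASE is the 32TiB boundary described
     * above): index 0 gets its PIO window at 32TiB, its 32-bit MMIO window
     * at 32TiB + 2GiB and its 64-bit MMIO window at 33TiB; each subsequent
     * index shifts those windows up by 64kiB, 2GiB and 1TiB respectively.
     */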
4399 *pio = SPAPR_PCI_BASE + index * SPAPR_PCI_IO_WIN_SIZE;
4400 *mmio32 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM32_WIN_SIZE;
4401 *mmio64 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM64_WIN_SIZE;
4402 return true;
4403 }
4404
4405 static ICSState *spapr_ics_get(XICSFabric *dev, int irq)
4406 {
4407 SpaprMachineState *spapr = SPAPR_MACHINE(dev);
4408
4409 return ics_valid_irq(spapr->ics, irq) ? spapr->ics : NULL;
4410 }
4411
4412 static void spapr_ics_resend(XICSFabric *dev)
4413 {
4414 SpaprMachineState *spapr = SPAPR_MACHINE(dev);
4415
4416 ics_resend(spapr->ics);
4417 }
4418
4419 static ICPState *spapr_icp_get(XICSFabric *xi, int vcpu_id)
4420 {
4421 PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);
4422
4423 return cpu ? spapr_cpu_state(cpu)->icp : NULL;
4424 }
4425
4426 static void spapr_pic_print_info(InterruptStatsProvider *obj, GString *buf)
4427 {
4428 SpaprMachineState *spapr = SPAPR_MACHINE(obj);
4429
4430 spapr_irq_print_info(spapr, buf);
4431 g_string_append_printf(buf, "irqchip: %s\n",
4432 kvm_irqchip_in_kernel() ? "in-kernel" : "emulated");
4433 }
4434
4435 /*
4436 * This is a XIVE only operation
4437 */
4438 static int spapr_match_nvt(XiveFabric *xfb, uint8_t format,
4439 uint8_t nvt_blk, uint32_t nvt_idx,
4440 bool cam_ignore, uint8_t priority,
4441 uint32_t logic_serv, XiveTCTXMatch *match)
4442 {
4443 SpaprMachineState *spapr = SPAPR_MACHINE(xfb);
4444 XivePresenter *xptr = XIVE_PRESENTER(spapr->active_intc);
4445 XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr);
4446 int count;
4447
4448 count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore,
4449 priority, logic_serv, match);
4450 if (count < 0) {
4451 return count;
4452 }
4453
4454 /*
4455 * When we implement the save and restore of the thread interrupt
4456 * contexts in the enter/exit CPU handlers of the machine and the
4457      * escalations in QEMU, we should be able to handle non-dispatched
4458      * vCPUs.
4459      *
4460      * Until this is done, the sPAPR machine should always find at least
4461      * one matching context.
4462 */
4463 if (count == 0) {
4464 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is not dispatched\n",
4465 nvt_blk, nvt_idx);
4466 }
4467
4468 return count;
4469 }
4470
4471 int spapr_get_vcpu_id(PowerPCCPU *cpu)
4472 {
4473 return cpu->vcpu_id;
4474 }
4475
4476 bool spapr_set_vcpu_id(PowerPCCPU *cpu, int cpu_index, Error **errp)
4477 {
4478 SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
4479 MachineState *ms = MACHINE(spapr);
4480 int vcpu_id;
4481
4482 vcpu_id = spapr_vcpu_id(spapr, cpu_index);
4483
4484 if (kvm_enabled() && !kvm_vcpu_id_is_valid(vcpu_id)) {
4485 error_setg(errp, "Can't create CPU with id %d in KVM", vcpu_id);
4486 error_append_hint(errp, "Adjust the number of cpus to %d "
4487 "or try to raise the number of threads per core\n",
4488 vcpu_id * ms->smp.threads / spapr->vsmt);
4489 return false;
4490 }
4491
4492 cpu->vcpu_id = vcpu_id;
4493 return true;
4494 }
4495
4496 PowerPCCPU *spapr_find_cpu(int vcpu_id)
4497 {
4498 CPUState *cs;
4499
4500 CPU_FOREACH(cs) {
4501 PowerPCCPU *cpu = POWERPC_CPU(cs);
4502
4503 if (spapr_get_vcpu_id(cpu) == vcpu_id) {
4504 return cpu;
4505 }
4506 }
4507
4508 return NULL;
4509 }
4510
4511 static bool spapr_cpu_in_nested(PowerPCCPU *cpu)
4512 {
4513 SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
4514
4515 return spapr_cpu->in_nested;
4516 }
4517
4518 static void spapr_cpu_exec_enter(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
4519 {
4520 SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
4521
4522 /* These are only called by TCG, KVM maintains dispatch state */
4523
4524 spapr_cpu->prod = false;
4525 if (spapr_cpu->vpa_addr) {
4526 CPUState *cs = CPU(cpu);
4527 uint32_t dispatch;
4528
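        /*
         * Bump the VPA dispatch counter; after a dispatch it must end up
         * even (and after a preemption, odd), so correct the parity if the
         * stored value is out of sync.
         */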
4529 dispatch = ldl_be_phys(cs->as,
4530 spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER);
4531 dispatch++;
4532 if ((dispatch & 1) != 0) {
4533 qemu_log_mask(LOG_GUEST_ERROR,
4534 "VPA: incorrect dispatch counter value for "
4535 "dispatched partition %u, correcting.\n", dispatch);
4536 dispatch++;
4537 }
4538 stl_be_phys(cs->as,
4539 spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, dispatch);
4540 }
4541 }
4542
4543 static void spapr_cpu_exec_exit(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
4544 {
4545 SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
4546
4547 if (spapr_cpu->vpa_addr) {
4548 CPUState *cs = CPU(cpu);
4549 uint32_t dispatch;
4550
4551 dispatch = ldl_be_phys(cs->as,
4552 spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER);
4553 dispatch++;
4554 if ((dispatch & 1) != 1) {
4555 qemu_log_mask(LOG_GUEST_ERROR,
4556 "VPA: incorrect dispatch counter value for "
4557 "preempted partition %u, correcting.\n", dispatch);
4558 dispatch++;
4559 }
4560 stl_be_phys(cs->as,
4561 spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, dispatch);
4562 }
4563 }
4564
4565 static void spapr_machine_class_init(ObjectClass *oc, void *data)
4566 {
4567 MachineClass *mc = MACHINE_CLASS(oc);
4568 SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(oc);
4569 FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc);
4570 NMIClass *nc = NMI_CLASS(oc);
4571 HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
4572 PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_CLASS(oc);
4573 XICSFabricClass *xic = XICS_FABRIC_CLASS(oc);
4574 InterruptStatsProviderClass *ispc = INTERRUPT_STATS_PROVIDER_CLASS(oc);
4575 XiveFabricClass *xfc = XIVE_FABRIC_CLASS(oc);
4576 VofMachineIfClass *vmc = VOF_MACHINE_CLASS(oc);
4577
4578 mc->desc = "pSeries Logical Partition (PAPR compliant)";
4579 mc->ignore_boot_device_suffixes = true;
4580
4581 /*
4582 * We set up the default / latest behaviour here. The class_init
4583 * functions for the specific versioned machine types can override
4584 * these details for backwards compatibility
4585 */
4586 mc->init = spapr_machine_init;
4587 mc->reset = spapr_machine_reset;
4588 mc->block_default_type = IF_SCSI;
4589
4590 /*
4591      * While KVM determines max cpus in kvm_init() using kvm_max_vcpus(),
4592      * in TCG the limit is restricted by the range of CPU IPIs available.
4593 */
4594 mc->max_cpus = SPAPR_IRQ_NR_IPIS;
4595
4596 mc->no_parallel = 1;
4597 mc->default_boot_order = "";
4598 mc->default_ram_size = 512 * MiB;
4599 mc->default_ram_id = "ppc_spapr.ram";
4600 mc->default_display = "std";
4601 mc->kvm_type = spapr_kvm_type;
4602 machine_class_allow_dynamic_sysbus_dev(mc, TYPE_SPAPR_PCI_HOST_BRIDGE);
4603 mc->pci_allow_0_address = true;
4604 assert(!mc->get_hotplug_handler);
4605 mc->get_hotplug_handler = spapr_get_hotplug_handler;
4606 hc->pre_plug = spapr_machine_device_pre_plug;
4607 hc->plug = spapr_machine_device_plug;
4608 mc->cpu_index_to_instance_props = spapr_cpu_index_to_props;
4609 mc->get_default_cpu_node_id = spapr_get_default_cpu_node_id;
4610 mc->possible_cpu_arch_ids = spapr_possible_cpu_arch_ids;
4611 hc->unplug_request = spapr_machine_device_unplug_request;
4612 hc->unplug = spapr_machine_device_unplug;
4613
4614 smc->update_dt_enabled = true;
4615 mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power10_v2.0");
4616 mc->has_hotpluggable_cpus = true;
4617 mc->nvdimm_supported = true;
4618 smc->resize_hpt_default = SPAPR_RESIZE_HPT_ENABLED;
4619 fwc->get_dev_path = spapr_get_fw_dev_path;
4620 nc->nmi_monitor_handler = spapr_nmi;
4621 smc->phb_placement = spapr_phb_placement;
4622 vhc->cpu_in_nested = spapr_cpu_in_nested;
4623 vhc->deliver_hv_excp = spapr_exit_nested;
4624 vhc->hypercall = emulate_spapr_hypercall;
4625 vhc->hpt_mask = spapr_hpt_mask;
4626 vhc->map_hptes = spapr_map_hptes;
4627 vhc->unmap_hptes = spapr_unmap_hptes;
4628 vhc->hpte_set_c = spapr_hpte_set_c;
4629 vhc->hpte_set_r = spapr_hpte_set_r;
4630 vhc->get_pate = spapr_get_pate;
4631 vhc->encode_hpt_for_kvm_pr = spapr_encode_hpt_for_kvm_pr;
4632 vhc->cpu_exec_enter = spapr_cpu_exec_enter;
4633 vhc->cpu_exec_exit = spapr_cpu_exec_exit;
4634 xic->ics_get = spapr_ics_get;
4635 xic->ics_resend = spapr_ics_resend;
4636 xic->icp_get = spapr_icp_get;
4637 ispc->print_info = spapr_pic_print_info;
4638     /* Force NUMA node memory size to be a multiple of
4639      * SPAPR_MEMORY_BLOCK_SIZE (256MiB) since that's the granularity
4640      * at which LMBs are represented and hot-added.
4641      */
4642 mc->numa_mem_align_shift = 28;
4643 mc->auto_enable_numa = true;
4644
4645 smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_OFF;
4646 smc->default_caps.caps[SPAPR_CAP_VSX] = SPAPR_CAP_ON;
4647 smc->default_caps.caps[SPAPR_CAP_DFP] = SPAPR_CAP_ON;
4648 smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_WORKAROUND;
4649 smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_WORKAROUND;
4650 smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_WORKAROUND;
4651 smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 16; /* 64kiB */
4652 smc->default_caps.caps[SPAPR_CAP_NESTED_KVM_HV] = SPAPR_CAP_OFF;
4653 smc->default_caps.caps[SPAPR_CAP_NESTED_PAPR] = SPAPR_CAP_OFF;
4654 smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_ON;
4655 smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_ON;
4656 smc->default_caps.caps[SPAPR_CAP_FWNMI] = SPAPR_CAP_ON;
4657 smc->default_caps.caps[SPAPR_CAP_RPT_INVALIDATE] = SPAPR_CAP_OFF;
4658
4659 /*
4660      * This cap specifies whether AIL mode 3 (set via the
4661      * H_SET_MODE hcall) is supported. The default is modified
4662 * by default_caps_with_cpu().
4663 */
4664 smc->default_caps.caps[SPAPR_CAP_AIL_MODE_3] = SPAPR_CAP_ON;
4665 spapr_caps_add_properties(smc);
4666 smc->irq = &spapr_irq_dual;
4667 smc->dr_phb_enabled = true;
4668 smc->linux_pci_probe = true;
4669 smc->smp_threads_vsmt = true;
4670 smc->nr_xirqs = SPAPR_NR_XIRQS;
4671 xfc->match_nvt = spapr_match_nvt;
4672 vmc->client_architecture_support = spapr_vof_client_architecture_support;
4673 vmc->quiesce = spapr_vof_quiesce;
4674 vmc->setprop = spapr_vof_setprop;
4675 }
4676
4677 static const TypeInfo spapr_machine_info = {
4678 .name = TYPE_SPAPR_MACHINE,
4679 .parent = TYPE_MACHINE,
4680 .abstract = true,
4681 .instance_size = sizeof(SpaprMachineState),
4682 .instance_init = spapr_instance_init,
4683 .instance_finalize = spapr_machine_finalizefn,
4684 .class_size = sizeof(SpaprMachineClass),
4685 .class_init = spapr_machine_class_init,
4686 .interfaces = (InterfaceInfo[]) {
4687 { TYPE_FW_PATH_PROVIDER },
4688 { TYPE_NMI },
4689 { TYPE_HOTPLUG_HANDLER },
4690 { TYPE_PPC_VIRTUAL_HYPERVISOR },
4691 { TYPE_XICS_FABRIC },
4692 { TYPE_INTERRUPT_STATS_PROVIDER },
4693 { TYPE_XIVE_FABRIC },
4694 { TYPE_VOF_MACHINE_IF },
4695 { }
4696 },
4697 };
4698
4699 static void spapr_machine_latest_class_options(MachineClass *mc)
4700 {
4701 mc->alias = "pseries";
4702 mc->is_default = true;
4703 }
4704
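/*
 * For one pseries-X.Y version this generates: a class_init that applies the
 * version's class options (plus deprecation handling and, for the latest
 * version, the "pseries" alias/default), its TypeInfo, and the registration
 * of that type.
 */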
4705 #define DEFINE_SPAPR_MACHINE_IMPL(latest, ...) \
4706 static void MACHINE_VER_SYM(class_init, spapr, __VA_ARGS__)( \
4707 ObjectClass *oc, \
4708 void *data) \
4709 { \
4710 MachineClass *mc = MACHINE_CLASS(oc); \
4711 MACHINE_VER_SYM(class_options, spapr, __VA_ARGS__)(mc); \
4712 MACHINE_VER_DEPRECATION(__VA_ARGS__); \
4713 if (latest) { \
4714 spapr_machine_latest_class_options(mc); \
4715 } \
4716 } \
4717 static const TypeInfo MACHINE_VER_SYM(info, spapr, __VA_ARGS__) = \
4718 { \
4719 .name = MACHINE_VER_TYPE_NAME("pseries", __VA_ARGS__), \
4720 .parent = TYPE_SPAPR_MACHINE, \
4721 .class_init = MACHINE_VER_SYM(class_init, spapr, __VA_ARGS__), \
4722 }; \
4723 static void MACHINE_VER_SYM(register, spapr, __VA_ARGS__)(void) \
4724 { \
4725 MACHINE_VER_DELETION(__VA_ARGS__); \
4726 type_register(&MACHINE_VER_SYM(info, spapr, __VA_ARGS__)); \
4727 } \
4728 type_init(MACHINE_VER_SYM(register, spapr, __VA_ARGS__))
4729
4730 #define DEFINE_SPAPR_MACHINE_AS_LATEST(major, minor) \
4731 DEFINE_SPAPR_MACHINE_IMPL(true, major, minor)
4732 #define DEFINE_SPAPR_MACHINE(major, minor) \
4733 DEFINE_SPAPR_MACHINE_IMPL(false, major, minor)
4734
4735 /*
4736 * pseries-9.2
4737 */
4738 static void spapr_machine_9_2_class_options(MachineClass *mc)
4739 {
4740 /* Defaults for the latest behaviour inherited from the base class */
4741 }
4742
4743 DEFINE_SPAPR_MACHINE_AS_LATEST(9, 2);
4744
4745 /*
4746 * pseries-9.1
4747 */
4748 static void spapr_machine_9_1_class_options(MachineClass *mc)
4749 {
4750 spapr_machine_9_2_class_options(mc);
4751 compat_props_add(mc->compat_props, hw_compat_9_1, hw_compat_9_1_len);
4752 }
4753
4754 DEFINE_SPAPR_MACHINE(9, 1);
4755
4756 /*
4757 * pseries-9.0
4758 */
4759 static void spapr_machine_9_0_class_options(MachineClass *mc)
4760 {
4761 spapr_machine_9_1_class_options(mc);
4762 compat_props_add(mc->compat_props, hw_compat_9_0, hw_compat_9_0_len);
4763 }
4764
4765 DEFINE_SPAPR_MACHINE(9, 0);
4766
4767 /*
4768 * pseries-8.2
4769 */
4770 static void spapr_machine_8_2_class_options(MachineClass *mc)
4771 {
4772 spapr_machine_9_0_class_options(mc);
4773 compat_props_add(mc->compat_props, hw_compat_8_2, hw_compat_8_2_len);
4774 }
4775
4776 DEFINE_SPAPR_MACHINE(8, 2);
4777
4778 /*
4779 * pseries-8.1
4780 */
4781 static void spapr_machine_8_1_class_options(MachineClass *mc)
4782 {
4783 spapr_machine_8_2_class_options(mc);
4784 compat_props_add(mc->compat_props, hw_compat_8_1, hw_compat_8_1_len);
4785 }
4786
4787 DEFINE_SPAPR_MACHINE(8, 1);
4788
4789 /*
4790 * pseries-8.0
4791 */
4792 static void spapr_machine_8_0_class_options(MachineClass *mc)
4793 {
4794 spapr_machine_8_1_class_options(mc);
4795 compat_props_add(mc->compat_props, hw_compat_8_0, hw_compat_8_0_len);
4796 }
4797
4798 DEFINE_SPAPR_MACHINE(8, 0);
4799
4800 /*
4801 * pseries-7.2
4802 */
4803 static void spapr_machine_7_2_class_options(MachineClass *mc)
4804 {
4805 spapr_machine_8_0_class_options(mc);
4806 compat_props_add(mc->compat_props, hw_compat_7_2, hw_compat_7_2_len);
4807 }
4808
4809 DEFINE_SPAPR_MACHINE(7, 2);
4810
4811 /*
4812 * pseries-7.1
4813 */
4814 static void spapr_machine_7_1_class_options(MachineClass *mc)
4815 {
4816 spapr_machine_7_2_class_options(mc);
4817 compat_props_add(mc->compat_props, hw_compat_7_1, hw_compat_7_1_len);
4818 }
4819
4820 DEFINE_SPAPR_MACHINE(7, 1);
4821
4822 /*
4823 * pseries-7.0
4824 */
4825 static void spapr_machine_7_0_class_options(MachineClass *mc)
4826 {
4827 spapr_machine_7_1_class_options(mc);
4828 compat_props_add(mc->compat_props, hw_compat_7_0, hw_compat_7_0_len);
4829 }
4830
4831 DEFINE_SPAPR_MACHINE(7, 0);
4832
4833 /*
4834 * pseries-6.2
4835 */
4836 static void spapr_machine_6_2_class_options(MachineClass *mc)
4837 {
4838 spapr_machine_7_0_class_options(mc);
4839 compat_props_add(mc->compat_props, hw_compat_6_2, hw_compat_6_2_len);
4840 }
4841
4842 DEFINE_SPAPR_MACHINE(6, 2);
4843
4844 /*
4845 * pseries-6.1
4846 */
4847 static void spapr_machine_6_1_class_options(MachineClass *mc)
4848 {
4849 SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4850
4851 spapr_machine_6_2_class_options(mc);
4852 compat_props_add(mc->compat_props, hw_compat_6_1, hw_compat_6_1_len);
4853 smc->pre_6_2_numa_affinity = true;
4854 mc->smp_props.prefer_sockets = true;
4855 }
4856
4857 DEFINE_SPAPR_MACHINE(6, 1);
4858
4859 /*
4860 * pseries-6.0
4861 */
4862 static void spapr_machine_6_0_class_options(MachineClass *mc)
4863 {
4864 spapr_machine_6_1_class_options(mc);
4865 compat_props_add(mc->compat_props, hw_compat_6_0, hw_compat_6_0_len);
4866 }
4867
4868 DEFINE_SPAPR_MACHINE(6, 0);
4869
4870 /*
4871 * pseries-5.2
4872 */
4873 static void spapr_machine_5_2_class_options(MachineClass *mc)
4874 {
4875 spapr_machine_6_0_class_options(mc);
4876 compat_props_add(mc->compat_props, hw_compat_5_2, hw_compat_5_2_len);
4877 }
4878
4879 DEFINE_SPAPR_MACHINE(5, 2);
4880
4881 /*
4882 * pseries-5.1
4883 */
4884 static void spapr_machine_5_1_class_options(MachineClass *mc)
4885 {
4886 SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4887
4888 spapr_machine_5_2_class_options(mc);
4889 compat_props_add(mc->compat_props, hw_compat_5_1, hw_compat_5_1_len);
4890 smc->pre_5_2_numa_associativity = true;
4891 }
4892
4893 DEFINE_SPAPR_MACHINE(5, 1);
4894
4895 /*
4896 * pseries-5.0
4897 */
4898 static void spapr_machine_5_0_class_options(MachineClass *mc)
4899 {
4900 SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4901 static GlobalProperty compat[] = {
4902 { TYPE_SPAPR_PCI_HOST_BRIDGE, "pre-5.1-associativity", "on" },
4903 };
4904
4905 spapr_machine_5_1_class_options(mc);
4906 compat_props_add(mc->compat_props, hw_compat_5_0, hw_compat_5_0_len);
4907 compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
4908 mc->numa_mem_supported = true;
4909 smc->pre_5_1_assoc_refpoints = true;
4910 }
4911
4912 DEFINE_SPAPR_MACHINE(5, 0);
4913
4914 /*
4915 * pseries-4.2
4916 */
4917 static void spapr_machine_4_2_class_options(MachineClass *mc)
4918 {
4919 SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4920
4921 spapr_machine_5_0_class_options(mc);
4922 compat_props_add(mc->compat_props, hw_compat_4_2, hw_compat_4_2_len);
4923 smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_OFF;
4924 smc->default_caps.caps[SPAPR_CAP_FWNMI] = SPAPR_CAP_OFF;
4925 smc->rma_limit = 16 * GiB;
4926 mc->nvdimm_supported = false;
4927 }
4928
4929 DEFINE_SPAPR_MACHINE(4, 2);
4930
4931 /*
4932 * pseries-4.1
4933 */
4934 static void spapr_machine_4_1_class_options(MachineClass *mc)
4935 {
4936 SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4937 static GlobalProperty compat[] = {
4938 /* Only allow 4kiB and 64kiB IOMMU pagesizes */
4939 { TYPE_SPAPR_PCI_HOST_BRIDGE, "pgsz", "0x11000" },
4940 };
4941
4942 spapr_machine_4_2_class_options(mc);
4943 smc->linux_pci_probe = false;
4944 smc->smp_threads_vsmt = false;
4945 compat_props_add(mc->compat_props, hw_compat_4_1, hw_compat_4_1_len);
4946 compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
4947 }
4948
4949 DEFINE_SPAPR_MACHINE(4, 1);
4950
4951 /*
4952 * pseries-4.0
4953 */
4954 static bool phb_placement_4_0(SpaprMachineState *spapr, uint32_t index,
4955 uint64_t *buid, hwaddr *pio,
4956 hwaddr *mmio32, hwaddr *mmio64,
4957 unsigned n_dma, uint32_t *liobns, Error **errp)
4958 {
4959 if (!spapr_phb_placement(spapr, index, buid, pio, mmio32, mmio64, n_dma,
4960 liobns, errp)) {
4961 return false;
4962 }
4963 return true;
4964 }
4965 static void spapr_machine_4_0_class_options(MachineClass *mc)
4966 {
4967 SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4968
4969 spapr_machine_4_1_class_options(mc);
4970 compat_props_add(mc->compat_props, hw_compat_4_0, hw_compat_4_0_len);
4971 smc->phb_placement = phb_placement_4_0;
4972 smc->irq = &spapr_irq_xics;
4973 smc->pre_4_1_migration = true;
4974 }
4975
4976 DEFINE_SPAPR_MACHINE(4, 0);
4977
4978 /*
4979 * pseries-3.1
4980 */
4981 static void spapr_machine_3_1_class_options(MachineClass *mc)
4982 {
4983 SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4984
4985 spapr_machine_4_0_class_options(mc);
4986 compat_props_add(mc->compat_props, hw_compat_3_1, hw_compat_3_1_len);
4987
4988 mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power8_v2.0");
4989 smc->update_dt_enabled = false;
4990 smc->dr_phb_enabled = false;
4991 smc->broken_host_serial_model = true;
4992 smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_BROKEN;
4993 smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_BROKEN;
4994 smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_BROKEN;
4995 smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_OFF;
4996 }
4997
4998 DEFINE_SPAPR_MACHINE(3, 1);
4999
5000 /*
5001 * pseries-3.0
5002 */
5003
5004 static void spapr_machine_3_0_class_options(MachineClass *mc)
5005 {
5006 SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
5007
5008 spapr_machine_3_1_class_options(mc);
5009 compat_props_add(mc->compat_props, hw_compat_3_0, hw_compat_3_0_len);
5010
5011 smc->legacy_irq_allocation = true;
5012 smc->nr_xirqs = 0x400;
5013 smc->irq = &spapr_irq_xics_legacy;
5014 }
5015
5016 DEFINE_SPAPR_MACHINE(3, 0);
5017
5018 static void spapr_machine_register_types(void)
5019 {
5020 type_register_static(&spapr_machine_info);
5021 }
5022
5023 type_init(spapr_machine_register_types)
5024