/*
 * CXL Type 3 (memory expander) device
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 *
 * SPDX-License-Identifier: GPL-v2-only
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "qapi/qapi-commands-cxl.h"
#include "hw/mem/memory-device.h"
#include "hw/mem/pc-dimm.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/pmem.h"
#include "qemu/range.h"
#include "qemu/rcu.h"
#include "qemu/guest-random.h"
#include "sysemu/hostmem.h"
#include "sysemu/numa.h"
#include "hw/cxl/cxl.h"
#include "hw/pci/msix.h"

/* DOE mailbox lengths are expressed in 32-bit DWORDs */
#define DWORD_BYTE 4

/* Default CDAT entries for a memory region */
enum {
    CT3_CDAT_DSMAS,
    CT3_CDAT_DSLBIS0,
    CT3_CDAT_DSLBIS1,
    CT3_CDAT_DSLBIS2,
    CT3_CDAT_DSLBIS3,
    CT3_CDAT_DSEMTS,
    CT3_CDAT_NUM_ENTRIES
};

/*
 * Build the fixed set of six CDAT structures (1 x DSMAS, 4 x DSLBIS,
 * 1 x DSEMTS) describing a single device memory region starting at
 * dpa_base.  On success ownership of the six heap entries is transferred
 * into cdat_table[CT3_CDAT_*]; on failure nothing is stored (the
 * g_autofree locals release anything already allocated).
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
                                         int dsmad_handle, MemoryRegion *mr,
                                         bool is_pmem, uint64_t dpa_base)
{
    g_autofree CDATDsmas *dsmas = NULL;
    g_autofree CDATDslbis *dslbis0 = NULL;
    g_autofree CDATDslbis *dslbis1 = NULL;
    g_autofree CDATDslbis *dslbis2 = NULL;
    g_autofree CDATDslbis *dslbis3 = NULL;
    g_autofree CDATDsemts *dsemts = NULL;

    dsmas = g_malloc(sizeof(*dsmas));
    if (!dsmas) {
        return -ENOMEM;
    }
    *dsmas = (CDATDsmas) {
        .header = {
            .type = CDAT_TYPE_DSMAS,
            .length = sizeof(*dsmas),
        },
        .DSMADhandle = dsmad_handle,
        .flags = is_pmem ? CDAT_DSMAS_FLAG_NV : 0,
        .DPA_base = dpa_base,
        .DPA_length = memory_region_size(mr),
    };

    /* For now, no memory side cache, plausiblish numbers */
    dslbis0 = g_malloc(sizeof(*dslbis0));
    if (!dslbis0) {
        return -ENOMEM;
    }
    *dslbis0 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis0),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_READ_LATENCY,
        .entry_base_unit = 10000, /* 10ns base */
        .entry[0] = 15, /* 150ns */
    };

    dslbis1 = g_malloc(sizeof(*dslbis1));
    if (!dslbis1) {
        return -ENOMEM;
    }
    *dslbis1 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis1),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_WRITE_LATENCY,
        .entry_base_unit = 10000,
        .entry[0] = 25, /* 250ns */
    };

    dslbis2 = g_malloc(sizeof(*dslbis2));
    if (!dslbis2) {
        return -ENOMEM;
    }
    *dslbis2 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis2),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_READ_BANDWIDTH,
        .entry_base_unit = 1000, /* GB/s */
        .entry[0] = 16,
    };

    dslbis3 = g_malloc(sizeof(*dslbis3));
    if (!dslbis3) {
        return -ENOMEM;
    }
    *dslbis3 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis3),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_WRITE_BANDWIDTH,
        .entry_base_unit = 1000, /* GB/s */
        .entry[0] = 16,
    };

    dsemts = g_malloc(sizeof(*dsemts));
    if (!dsemts) {
        return -ENOMEM;
    }
    *dsemts = (CDATDsemts) {
        .header = {
            .type = CDAT_TYPE_DSEMTS,
            .length = sizeof(*dsemts),
        },
        .DSMAS_handle = dsmad_handle,
        /*
         * NV: Reserved - the non volatile from DSMAS matters
         * V: EFI_MEMORY_SP
         */
        .EFI_memory_type_attr = is_pmem ? 2 : 1,
        .DPA_offset = 0,
        .DPA_length = memory_region_size(mr),
    };

    /* Header always at start of structure */
    cdat_table[CT3_CDAT_DSMAS] = g_steal_pointer(&dsmas);
    cdat_table[CT3_CDAT_DSLBIS0] = g_steal_pointer(&dslbis0);
    cdat_table[CT3_CDAT_DSLBIS1] = g_steal_pointer(&dslbis1);
    cdat_table[CT3_CDAT_DSLBIS2] = g_steal_pointer(&dslbis2);
    cdat_table[CT3_CDAT_DSLBIS3] = g_steal_pointer(&dslbis3);
    cdat_table[CT3_CDAT_DSEMTS] = g_steal_pointer(&dsemts);

    return 0;
}

/*
 * CDAT "build" callback: allocate *cdat_table and fill it with entries for
 * the volatile region (DPA 0 upward) followed by the persistent region
 * (placed immediately after the volatile one).  Returns the number of
 * entries on success, 0 when no backing memory is configured, or a
 * negative errno.  On the first build failure nothing has been stored in
 * the table, so returning rc directly leaks nothing.
 */
static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
{
    g_autofree CDATSubHeader **table = NULL;
    CXLType3Dev *ct3d = priv;
    MemoryRegion *volatile_mr = NULL, *nonvolatile_mr = NULL;
    int dsmad_handle = 0;
    int cur_ent = 0;
    int len = 0;
    int rc, i;

    if (!ct3d->hostpmem && !ct3d->hostvmem) {
        return 0;
    }

    if (ct3d->hostvmem) {
        volatile_mr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (!volatile_mr) {
            return -EINVAL;
        }
        len += CT3_CDAT_NUM_ENTRIES;
    }

    if (ct3d->hostpmem) {
        nonvolatile_mr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (!nonvolatile_mr) {
            return -EINVAL;
        }
        len += CT3_CDAT_NUM_ENTRIES;
    }

    table = g_malloc0(len * sizeof(*table));
    if (!table) {
        return -ENOMEM;
    }

    /* Now fill them in */
    if (volatile_mr) {
        rc = ct3_build_cdat_entries_for_mr(table, dsmad_handle++, volatile_mr,
                                           false, 0);
        if (rc < 0) {
            return rc;
        }
        cur_ent = CT3_CDAT_NUM_ENTRIES;
    }

    if (nonvolatile_mr) {
        /* Persistent DPA range sits directly above the volatile one */
        uint64_t base = volatile_mr ? memory_region_size(volatile_mr) : 0;
        rc = ct3_build_cdat_entries_for_mr(&(table[cur_ent]), dsmad_handle++,
                                           nonvolatile_mr, true, base);
        if (rc < 0) {
            goto error_cleanup;
        }
        cur_ent += CT3_CDAT_NUM_ENTRIES;
    }
    assert(len == cur_ent);

    *cdat_table = g_steal_pointer(&table);

    return len;
error_cleanup:
    for (i = 0; i < cur_ent; i++) {
        g_free(table[i]);
    }
    return rc;
}

/* CDAT "free" callback: release each entry, then the table itself. */
static void ct3_free_cdat_table(CDATSubHeader **cdat_table, int num, void *priv)
{
    int i;

    for (i = 0; i < num; i++) {
        g_free(cdat_table[i]);
    }
    g_free(cdat_table);
}

/*
 * DOE Table Access responder: copy the requested CDAT entry into the read
 * mailbox, chaining entry handles so the guest can walk the whole table.
 * Returns false to discard malformed (too-short) requests.
 */
static bool cxl_doe_cdat_rsp(DOECap *doe_cap)
{
    CDATObject *cdat = &CXL_TYPE3(doe_cap->pdev)->cxl_cstate.cdat;
    uint16_t ent;
    void *base;
    uint32_t len;
    CDATReq *req = pcie_doe_get_write_mbox_ptr(doe_cap);
    CDATRsp rsp;

    assert(cdat->entry_len);

    /* Discard if request length mismatched */
    if (pcie_doe_get_obj_len(req) <
        DIV_ROUND_UP(sizeof(CDATReq), DWORD_BYTE)) {
        return false;
    }

    ent = req->entry_handle;
    base = cdat->entry[ent].base;
    len = cdat->entry[ent].length;

    rsp = (CDATRsp) {
        .header = {
            .vendor_id = CXL_VENDOR_ID,
            .data_obj_type = CXL_DOE_TABLE_ACCESS,
            .reserved = 0x0,
            .length = DIV_ROUND_UP((sizeof(rsp) + len), DWORD_BYTE),
        },
        .rsp_code = CXL_DOE_TAB_RSP,
        .table_type = CXL_DOE_TAB_TYPE_CDAT,
        .entry_handle = (ent < cdat->entry_len - 1) ?
                        ent + 1 : CXL_DOE_TAB_ENT_MAX,
    };

    memcpy(doe_cap->read_mbox, &rsp, sizeof(rsp));
    memcpy(doe_cap->read_mbox + DIV_ROUND_UP(sizeof(rsp), DWORD_BYTE),
           base, len);

    doe_cap->read_mbox_len += rsp.header.length;

    return true;
}

/* Config-space read, giving the DOE mailbox registers first refusal. */
static uint32_t ct3d_config_read(PCIDevice *pci_dev, uint32_t addr, int size)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    uint32_t val;

    if (pcie_doe_read_config(&ct3d->doe_cdat, addr, size, &val)) {
        return val;
    }

    return pci_default_read_config(pci_dev, addr, size);
}

/* Config-space write, fanned out to DOE, default PCI and AER handling. */
static void ct3d_config_write(PCIDevice *pci_dev, uint32_t addr, uint32_t val,
                              int size)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);

    pcie_doe_write_config(&ct3d->doe_cdat, addr, val, size);
    pci_default_write_config(pci_dev, addr, val, size);
    pcie_aer_write_config(pci_dev, addr, val, size);
}

/*
 * Null value of all Fs suggested by IEEE RA guidelines for use of
 * EU, OUI and CID
 */
#define UI64_NULL ~(0ULL)

/*
 * Populate the CXL DVSECs (device, register locator, GPF, flex bus) in
 * config space.  Range 1 describes the volatile region when present,
 * otherwise the persistent one; range 2 is only used when both exist.
 */
static void build_dvsecs(CXLType3Dev *ct3d)
{
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    uint8_t *dvsec;
    uint32_t range1_size_hi, range1_size_lo,
             range1_base_hi = 0, range1_base_lo = 0,
             range2_size_hi = 0, range2_size_lo = 0,
             range2_base_hi = 0, range2_base_lo = 0;

    /*
     * Volatile memory is mapped as (0x0)
     * Persistent memory is mapped at (volatile->size)
     */
    if (ct3d->hostvmem) {
        range1_size_hi = ct3d->hostvmem->size >> 32;
        range1_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                         (ct3d->hostvmem->size & 0xF0000000);
        if (ct3d->hostpmem) {
            range2_size_hi = ct3d->hostpmem->size >> 32;
            range2_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                             (ct3d->hostpmem->size & 0xF0000000);
        }
    } else {
        range1_size_hi = ct3d->hostpmem->size >> 32;
        range1_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                         (ct3d->hostpmem->size & 0xF0000000);
    }

    dvsec = (uint8_t *)&(CXLDVSECDevice){
        .cap = 0x1e,
        .ctrl = 0x2,
        .status2 = 0x2,
        .range1_size_hi = range1_size_hi,
        .range1_size_lo = range1_size_lo,
        .range1_base_hi = range1_base_hi,
        .range1_base_lo = range1_base_lo,
        .range2_size_hi = range2_size_hi,
        .range2_size_lo = range2_size_lo,
        .range2_base_hi = range2_base_hi,
        .range2_base_lo = range2_base_lo,
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               PCIE_CXL_DEVICE_DVSEC_LENGTH,
                               PCIE_CXL_DEVICE_DVSEC,
                               PCIE_CXL2_DEVICE_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){
        .rsvd = 0,
        .reg0_base_lo = RBI_COMPONENT_REG | CXL_COMPONENT_REG_BAR_IDX,
        .reg0_base_hi = 0,
        .reg1_base_lo = RBI_CXL_DEVICE_REG | CXL_DEVICE_REG_BAR_IDX,
        .reg1_base_hi = 0,
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               REG_LOC_DVSEC_LENGTH, REG_LOC_DVSEC,
                               REG_LOC_DVSEC_REVID, dvsec);
    dvsec = (uint8_t *)&(CXLDVSECDeviceGPF){
        .phase2_duration = 0x603, /* 3 seconds */
        .phase2_power = 0x33, /* 0x33 miliwatts */
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               GPF_DEVICE_DVSEC_LENGTH, GPF_DEVICE_DVSEC,
                               GPF_DEVICE_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECPortFlexBus){
        .cap = 0x26, /* 68B, IO, Mem, non-MLD */
        .ctrl = 0x02, /* IO always enabled */
        .status = 0x26, /* same as capabilities */
        .rcvd_mod_ts_data_phase1 = 0xef, /* WTF? */
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               PCIE_FLEXBUS_PORT_DVSEC_LENGTH_2_0,
                               PCIE_FLEXBUS_PORT_DVSEC,
                               PCIE_FLEXBUS_PORT_DVSEC_REVID_2_0, dvsec);
}

/* Mark HDM decoder 'which' as committed (and clear any error bit). */
static void hdm_decoder_commit(CXLType3Dev *ct3d, int which)
{
    /* Register stride between successive decoders' register sets */
    int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
    ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
    uint32_t *cache_mem = cregs->cache_mem_registers;
    uint32_t ctrl;

    ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc);
    /* TODO: Sanity checks that the decoder is possible */
    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, ERR, 0);
    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);

    stl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc, ctrl);
}

/* Clear the COMMITTED (and ERR) bits of HDM decoder 'which'. */
static void hdm_decoder_uncommit(CXLType3Dev *ct3d, int which)
{
    int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
    ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
    uint32_t *cache_mem = cregs->cache_mem_registers;
    uint32_t ctrl;

    ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc);

    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, ERR, 0);
    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 0);

    stl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc, ctrl);
}

/*
 * Map a QMP uncorrectable error enum onto the RAS status bit number.
 * Returns -EINVAL for unknown values.
 */
static int ct3d_qmp_uncor_err_to_cxl(CxlUncorErrorType qmp_err)
{
    switch (qmp_err) {
    case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_DATA_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_ADDRESS_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_ADDRESS_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_BE_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_BE_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_ECC:
        return CXL_RAS_UNC_ERR_CACHE_DATA_ECC;
    case CXL_UNCOR_ERROR_TYPE_MEM_DATA_PARITY:
        return CXL_RAS_UNC_ERR_MEM_DATA_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_ADDRESS_PARITY:
        return CXL_RAS_UNC_ERR_MEM_ADDRESS_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_BE_PARITY:
        return CXL_RAS_UNC_ERR_MEM_BE_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_DATA_ECC:
        return CXL_RAS_UNC_ERR_MEM_DATA_ECC;
    case CXL_UNCOR_ERROR_TYPE_REINIT_THRESHOLD:
        return CXL_RAS_UNC_ERR_REINIT_THRESHOLD;
    case CXL_UNCOR_ERROR_TYPE_RSVD_ENCODING:
        return CXL_RAS_UNC_ERR_RSVD_ENCODING;
    case CXL_UNCOR_ERROR_TYPE_POISON_RECEIVED:
        return CXL_RAS_UNC_ERR_POISON_RECEIVED;
    case CXL_UNCOR_ERROR_TYPE_RECEIVER_OVERFLOW:
        return CXL_RAS_UNC_ERR_RECEIVER_OVERFLOW;
    case CXL_UNCOR_ERROR_TYPE_INTERNAL:
        return CXL_RAS_UNC_ERR_INTERNAL;
    case CXL_UNCOR_ERROR_TYPE_CXL_IDE_TX:
        return CXL_RAS_UNC_ERR_CXL_IDE_TX;
    case CXL_UNCOR_ERROR_TYPE_CXL_IDE_RX:
        return CXL_RAS_UNC_ERR_CXL_IDE_RX;
    default:
        return -EINVAL;
    }
}

/*
 * Map a QMP correctable error enum onto the RAS status bit number.
 * Returns -EINVAL for unknown values.
 */
static int ct3d_qmp_cor_err_to_cxl(CxlCorErrorType qmp_err)
{
    switch (qmp_err) {
    case CXL_COR_ERROR_TYPE_CACHE_DATA_ECC:
        return CXL_RAS_COR_ERR_CACHE_DATA_ECC;
    case CXL_COR_ERROR_TYPE_MEM_DATA_ECC:
        return CXL_RAS_COR_ERR_MEM_DATA_ECC;
    case CXL_COR_ERROR_TYPE_CRC_THRESHOLD:
        return CXL_RAS_COR_ERR_CRC_THRESHOLD;
    case CXL_COR_ERROR_TYPE_RETRY_THRESHOLD:
        return CXL_RAS_COR_ERR_RETRY_THRESHOLD;
    case CXL_COR_ERROR_TYPE_CACHE_POISON_RECEIVED:
        return CXL_RAS_COR_ERR_CACHE_POISON_RECEIVED;
    case CXL_COR_ERROR_TYPE_MEM_POISON_RECEIVED:
        return CXL_RAS_COR_ERR_MEM_POISON_RECEIVED;
    case CXL_COR_ERROR_TYPE_PHYSICAL:
        return CXL_RAS_COR_ERR_PHYSICAL;
    default:
        return -EINVAL;
    }
}

/*
 * MMIO write handler for the component (cache-mem) register block:
 * handles HDM decoder commit/uncommit and the RW1C semantics of the
 * RAS error status registers; everything else is a plain store.
 */
static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value,
                           unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;
    CXLType3Dev *ct3d = container_of(cxl_cstate, CXLType3Dev, cxl_cstate);
    uint32_t *cache_mem = cregs->cache_mem_registers;
    bool should_commit = false;
    bool should_uncommit = false;
    int which_hdm = -1;

    assert(size == 4);
    g_assert(offset < CXL2_COMPONENT_CM_REGION_SIZE);

    switch (offset) {
    case A_CXL_HDM_DECODER0_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        which_hdm = 0;
        break;
    case A_CXL_HDM_DECODER1_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        which_hdm = 1;
        break;
    case A_CXL_HDM_DECODER2_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        which_hdm = 2;
        break;
    case A_CXL_HDM_DECODER3_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        which_hdm = 3;
        break;
    case A_CXL_RAS_UNC_ERR_STATUS:
    {
        uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL);
        uint32_t fe = FIELD_EX32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                 FIRST_ERROR_POINTER);
        CXLError *cxl_err;
        uint32_t unc_err;

        /*
         * If single bit written that corresponds to the first error
         * pointer being cleared, update the status and header log.
         */
        if (!QTAILQ_EMPTY(&ct3d->error_list)) {
            if ((1 << fe) ^ value) {
                CXLError *cxl_next;
                /*
                 * Software is using wrong flow for multiple header recording
                 * Following behavior in PCIe r6.0 and assuming multiple
                 * header support. Implementation defined choice to clear all
                 * matching records if more than one bit set - which corresponds
                 * closest to behavior of hardware not capable of multiple
                 * header recording.
                 */
                QTAILQ_FOREACH_SAFE(cxl_err, &ct3d->error_list, node,
                                    cxl_next) {
                    if ((1 << cxl_err->type) & value) {
                        QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node);
                        g_free(cxl_err);
                    }
                }
            } else {
                /* Done with previous FE, so drop from list */
                cxl_err = QTAILQ_FIRST(&ct3d->error_list);
                QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node);
                g_free(cxl_err);
            }

            /*
             * If there is another FE, then put that in place and update
             * the header log
             */
            if (!QTAILQ_EMPTY(&ct3d->error_list)) {
                uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0];
                int i;

                cxl_err = QTAILQ_FIRST(&ct3d->error_list);
                for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) {
                    stl_le_p(header_log + i, cxl_err->header[i]);
                }
                capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                     FIRST_ERROR_POINTER, cxl_err->type);
            } else {
                /*
                 * If no more errors, then follow recommendation of PCI spec
                 * r6.0 6.2.4.2 to set the first error pointer to a status
                 * bit that will never be used.
                 */
                capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                     FIRST_ERROR_POINTER,
                                     CXL_RAS_UNC_ERR_CXL_UNUSED);
            }
            stl_le_p((uint8_t *)cache_mem + A_CXL_RAS_ERR_CAP_CTRL, capctrl);
        }
        /* Recompute the status register from the remaining error records */
        unc_err = 0;
        QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) {
            unc_err |= 1 << cxl_err->type;
        }
        stl_le_p((uint8_t *)cache_mem + offset, unc_err);

        return;
    }
    case A_CXL_RAS_COR_ERR_STATUS:
    {
        /* RW1C: written bits clear the corresponding status bits */
        uint32_t rw1c = value;
        uint32_t temp = ldl_le_p((uint8_t *)cache_mem + offset);
        temp &= ~rw1c;
        stl_le_p((uint8_t *)cache_mem + offset, temp);
        return;
    }
    default:
        break;
    }

    stl_le_p((uint8_t *)cache_mem + offset, value);
    if (should_commit) {
        hdm_decoder_commit(ct3d, which_hdm);
    } else if (should_uncommit) {
        hdm_decoder_uncommit(ct3d, which_hdm);
    }
}

/*
 * Validate the memdev properties and set up the DPA address spaces:
 * volatile first (when present), persistent after it.  The legacy
 * "memdev" property is treated as persistent memory.  Returns true on
 * success; on failure sets errp and returns false.
 */
static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
{
    DeviceState *ds = DEVICE(ct3d);

    if (!ct3d->hostmem && !ct3d->hostvmem && !ct3d->hostpmem) {
        error_setg(errp, "at least one memdev property must be set");
        return false;
    } else if (ct3d->hostmem && ct3d->hostpmem) {
        error_setg(errp, "[memdev] cannot be used with new "
                         "[persistent-memdev] property");
        return false;
    } else if (ct3d->hostmem) {
        /* Use of hostmem property implies pmem */
        ct3d->hostpmem = ct3d->hostmem;
        ct3d->hostmem = NULL;
    }

    if (ct3d->hostpmem && !ct3d->lsa) {
        error_setg(errp, "lsa property must be set for persistent devices");
        return false;
    }

    if (ct3d->hostvmem) {
        MemoryRegion *vmr;
        char *v_name;

        vmr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (!vmr) {
            error_setg(errp, "volatile memdev must have backing device");
            return false;
        }
        memory_region_set_nonvolatile(vmr, false);
        memory_region_set_enabled(vmr, true);
        host_memory_backend_set_mapped(ct3d->hostvmem, true);
        if (ds->id) {
            v_name =
g_strdup_printf("cxl-type3-dpa-vmem-space:%s", ds->id); 641 } else { 642 v_name = g_strdup("cxl-type3-dpa-vmem-space"); 643 } 644 address_space_init(&ct3d->hostvmem_as, vmr, v_name); 645 ct3d->cxl_dstate.vmem_size = memory_region_size(vmr); 646 ct3d->cxl_dstate.mem_size += memory_region_size(vmr); 647 g_free(v_name); 648 } 649 650 if (ct3d->hostpmem) { 651 MemoryRegion *pmr; 652 char *p_name; 653 654 pmr = host_memory_backend_get_memory(ct3d->hostpmem); 655 if (!pmr) { 656 error_setg(errp, "persistent memdev must have backing device"); 657 return false; 658 } 659 memory_region_set_nonvolatile(pmr, true); 660 memory_region_set_enabled(pmr, true); 661 host_memory_backend_set_mapped(ct3d->hostpmem, true); 662 if (ds->id) { 663 p_name = g_strdup_printf("cxl-type3-dpa-pmem-space:%s", ds->id); 664 } else { 665 p_name = g_strdup("cxl-type3-dpa-pmem-space"); 666 } 667 address_space_init(&ct3d->hostpmem_as, pmr, p_name); 668 ct3d->cxl_dstate.pmem_size = memory_region_size(pmr); 669 ct3d->cxl_dstate.mem_size += memory_region_size(pmr); 670 g_free(p_name); 671 } 672 673 return true; 674 } 675 676 static DOEProtocol doe_cdat_prot[] = { 677 { CXL_VENDOR_ID, CXL_DOE_TABLE_ACCESS, cxl_doe_cdat_rsp }, 678 { } 679 }; 680 681 static void ct3_realize(PCIDevice *pci_dev, Error **errp) 682 { 683 CXLType3Dev *ct3d = CXL_TYPE3(pci_dev); 684 CXLComponentState *cxl_cstate = &ct3d->cxl_cstate; 685 ComponentRegisters *regs = &cxl_cstate->crb; 686 MemoryRegion *mr = ®s->component_registers; 687 uint8_t *pci_conf = pci_dev->config; 688 unsigned short msix_num = 6; 689 int i, rc; 690 691 QTAILQ_INIT(&ct3d->error_list); 692 693 if (!cxl_setup_memory(ct3d, errp)) { 694 return; 695 } 696 697 pci_config_set_prog_interface(pci_conf, 0x10); 698 699 pcie_endpoint_cap_init(pci_dev, 0x80); 700 if (ct3d->sn != UI64_NULL) { 701 pcie_dev_ser_num_init(pci_dev, 0x100, ct3d->sn); 702 cxl_cstate->dvsec_offset = 0x100 + 0x0c; 703 } else { 704 cxl_cstate->dvsec_offset = 0x100; 705 } 706 707 ct3d->cxl_cstate.pdev 
= pci_dev; 708 build_dvsecs(ct3d); 709 710 regs->special_ops = g_new0(MemoryRegionOps, 1); 711 regs->special_ops->write = ct3d_reg_write; 712 713 cxl_component_register_block_init(OBJECT(pci_dev), cxl_cstate, 714 TYPE_CXL_TYPE3); 715 716 pci_register_bar( 717 pci_dev, CXL_COMPONENT_REG_BAR_IDX, 718 PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64, mr); 719 720 cxl_device_register_block_init(OBJECT(pci_dev), &ct3d->cxl_dstate, 721 &ct3d->cci); 722 pci_register_bar(pci_dev, CXL_DEVICE_REG_BAR_IDX, 723 PCI_BASE_ADDRESS_SPACE_MEMORY | 724 PCI_BASE_ADDRESS_MEM_TYPE_64, 725 &ct3d->cxl_dstate.device_registers); 726 727 /* MSI(-X) Initialization */ 728 rc = msix_init_exclusive_bar(pci_dev, msix_num, 4, NULL); 729 if (rc) { 730 goto err_address_space_free; 731 } 732 for (i = 0; i < msix_num; i++) { 733 msix_vector_use(pci_dev, i); 734 } 735 736 /* DOE Initialization */ 737 pcie_doe_init(pci_dev, &ct3d->doe_cdat, 0x190, doe_cdat_prot, true, 0); 738 739 cxl_cstate->cdat.build_cdat_table = ct3_build_cdat_table; 740 cxl_cstate->cdat.free_cdat_table = ct3_free_cdat_table; 741 cxl_cstate->cdat.private = ct3d; 742 cxl_doe_cdat_init(cxl_cstate, errp); 743 if (*errp) { 744 goto err_free_special_ops; 745 } 746 747 pcie_cap_deverr_init(pci_dev); 748 /* Leave a bit of room for expansion */ 749 rc = pcie_aer_init(pci_dev, PCI_ERR_VER, 0x200, PCI_ERR_SIZEOF, NULL); 750 if (rc) { 751 goto err_release_cdat; 752 } 753 cxl_event_init(&ct3d->cxl_dstate, 2); 754 755 return; 756 757 err_release_cdat: 758 cxl_doe_cdat_release(cxl_cstate); 759 err_free_special_ops: 760 g_free(regs->special_ops); 761 err_address_space_free: 762 if (ct3d->hostpmem) { 763 address_space_destroy(&ct3d->hostpmem_as); 764 } 765 if (ct3d->hostvmem) { 766 address_space_destroy(&ct3d->hostvmem_as); 767 } 768 return; 769 } 770 771 static void ct3_exit(PCIDevice *pci_dev) 772 { 773 CXLType3Dev *ct3d = CXL_TYPE3(pci_dev); 774 CXLComponentState *cxl_cstate = &ct3d->cxl_cstate; 775 ComponentRegisters *regs = 
        &cxl_cstate->crb;

    pcie_aer_exit(pci_dev);
    cxl_doe_cdat_release(cxl_cstate);
    g_free(regs->special_ops);
    if (ct3d->hostpmem) {
        address_space_destroy(&ct3d->hostpmem_as);
    }
    if (ct3d->hostvmem) {
        address_space_destroy(&ct3d->hostvmem_as);
    }
}

/*
 * Walk the committed HDM decoders and translate a host physical address
 * into a device physical address, undoing the interleave (IW/IG) applied
 * by the decoder that claims the address.  Returns false if no committed
 * decoder covers host_addr.
 */
static bool cxl_type3_dpa(CXLType3Dev *ct3d, hwaddr host_addr, uint64_t *dpa)
{
    int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
    uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers;
    unsigned int hdm_count;
    uint32_t cap;
    uint64_t dpa_base = 0;
    int i;

    cap = ldl_le_p(cache_mem + R_CXL_HDM_DECODER_CAPABILITY);
    hdm_count = cxl_decoder_count_dec(FIELD_EX32(cap,
                                                 CXL_HDM_DECODER_CAPABILITY,
                                                 DECODER_COUNT));

    for (i = 0; i < hdm_count; i++) {
        uint64_t decoder_base, decoder_size, hpa_offset, skip;
        uint32_t hdm_ctrl, low, high;
        int ig, iw;

        /* Base/size/skip registers carry bits 63:28; low bits are flags */
        low = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_BASE_LO + i * hdm_inc);
        high = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_BASE_HI + i * hdm_inc);
        decoder_base = ((uint64_t)high << 32) | (low & 0xf0000000);

        low = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_SIZE_LO + i * hdm_inc);
        high = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_SIZE_HI + i * hdm_inc);
        decoder_size = ((uint64_t)high << 32) | (low & 0xf0000000);

        low = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_DPA_SKIP_LO +
                       i * hdm_inc);
        high = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_DPA_SKIP_HI +
                        i * hdm_inc);
        skip = ((uint64_t)high << 32) | (low & 0xf0000000);
        dpa_base += skip;

        hpa_offset = (uint64_t)host_addr - decoder_base;

        hdm_ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + i * hdm_inc);
        iw = FIELD_EX32(hdm_ctrl, CXL_HDM_DECODER0_CTRL, IW);
        ig = FIELD_EX32(hdm_ctrl, CXL_HDM_DECODER0_CTRL, IG);
        /* Decoders must be committed in order; stop at the first gap */
        if (!FIELD_EX32(hdm_ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED)) {
            return false;
        }
        if (((uint64_t)host_addr < decoder_base) ||
            (hpa_offset >= decoder_size)) {
            /* Not this decoder; account for this device's share of it */
            dpa_base += decoder_size /
                cxl_interleave_ways_dec(iw, &error_fatal);
            continue;
        }

        /* Remove the interleave: keep low 8+ig bits, shift the rest down */
        *dpa = dpa_base +
            ((MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) |
             ((MAKE_64BIT_MASK(8 + ig + iw, 64 - 8 - ig - iw) & hpa_offset)
              >> iw));

        return true;
    }
    return false;
}

/*
 * Resolve a host physical address to the backing address space
 * (volatile or persistent) and the offset within it.  Returns 0 on
 * success, -ENODEV with no backing memory, -EINVAL when no decoder
 * claims the address or the DPA is out of range.
 */
static int cxl_type3_hpa_to_as_and_dpa(CXLType3Dev *ct3d,
                                       hwaddr host_addr,
                                       unsigned int size,
                                       AddressSpace **as,
                                       uint64_t *dpa_offset)
{
    MemoryRegion *vmr = NULL, *pmr = NULL;

    if (ct3d->hostvmem) {
        vmr = host_memory_backend_get_memory(ct3d->hostvmem);
    }
    if (ct3d->hostpmem) {
        pmr = host_memory_backend_get_memory(ct3d->hostpmem);
    }

    if (!vmr && !pmr) {
        return -ENODEV;
    }

    if (!cxl_type3_dpa(ct3d, host_addr, dpa_offset)) {
        return -EINVAL;
    }

    if (*dpa_offset > ct3d->cxl_dstate.mem_size) {
        return -EINVAL;
    }

    /* Volatile memory occupies DPA 0..vmem_size; pmem follows it */
    if (vmr) {
        if (*dpa_offset < memory_region_size(vmr)) {
            *as = &ct3d->hostvmem_as;
        } else {
            *as = &ct3d->hostpmem_as;
            *dpa_offset -= memory_region_size(vmr);
        }
    } else {
        *as = &ct3d->hostpmem_as;
    }

    return 0;
}

/*
 * Memory read on behalf of the host: translate and forward to the
 * backing address space.  While a sanitize command is running, reads
 * return random data instead of device contents.
 */
MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data,
                           unsigned size, MemTxAttrs attrs)
{
    CXLType3Dev *ct3d = CXL_TYPE3(d);
    uint64_t dpa_offset = 0;
    AddressSpace *as = NULL;
    int res;

    res = cxl_type3_hpa_to_as_and_dpa(ct3d, host_addr, size,
                                      &as, &dpa_offset);
    if (res) {
        return MEMTX_ERROR;
    }

    if (sanitize_running(&ct3d->cci)) {
        qemu_guest_getrandom_nofail(data, size);
        return MEMTX_OK;
    }

    return address_space_read(as, dpa_offset, attrs, data, size);
}

/*
 * Memory write on behalf of the host: translate and forward.  Writes
 * during sanitize are silently dropped (reported as success).
 */
MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data,
                            unsigned size, MemTxAttrs attrs)
{
    CXLType3Dev *ct3d = CXL_TYPE3(d);
    uint64_t dpa_offset = 0;
    AddressSpace *as = NULL;
916 int res; 917 918 res = cxl_type3_hpa_to_as_and_dpa(ct3d, host_addr, size, 919 &as, &dpa_offset); 920 if (res) { 921 return MEMTX_ERROR; 922 } 923 924 if (sanitize_running(&ct3d->cci)) { 925 return MEMTX_OK; 926 } 927 928 return address_space_write(as, dpa_offset, attrs, &data, size); 929 } 930 931 static void ct3d_reset(DeviceState *dev) 932 { 933 CXLType3Dev *ct3d = CXL_TYPE3(dev); 934 uint32_t *reg_state = ct3d->cxl_cstate.crb.cache_mem_registers; 935 uint32_t *write_msk = ct3d->cxl_cstate.crb.cache_mem_regs_write_mask; 936 937 cxl_component_register_init_common(reg_state, write_msk, CXL2_TYPE3_DEVICE); 938 cxl_device_register_init_t3(ct3d); 939 } 940 941 static Property ct3_props[] = { 942 DEFINE_PROP_LINK("memdev", CXLType3Dev, hostmem, TYPE_MEMORY_BACKEND, 943 HostMemoryBackend *), /* for backward compatibility */ 944 DEFINE_PROP_LINK("persistent-memdev", CXLType3Dev, hostpmem, 945 TYPE_MEMORY_BACKEND, HostMemoryBackend *), 946 DEFINE_PROP_LINK("volatile-memdev", CXLType3Dev, hostvmem, 947 TYPE_MEMORY_BACKEND, HostMemoryBackend *), 948 DEFINE_PROP_LINK("lsa", CXLType3Dev, lsa, TYPE_MEMORY_BACKEND, 949 HostMemoryBackend *), 950 DEFINE_PROP_UINT64("sn", CXLType3Dev, sn, UI64_NULL), 951 DEFINE_PROP_STRING("cdat", CXLType3Dev, cxl_cstate.cdat.filename), 952 DEFINE_PROP_END_OF_LIST(), 953 }; 954 955 static uint64_t get_lsa_size(CXLType3Dev *ct3d) 956 { 957 MemoryRegion *mr; 958 959 if (!ct3d->lsa) { 960 return 0; 961 } 962 963 mr = host_memory_backend_get_memory(ct3d->lsa); 964 return memory_region_size(mr); 965 } 966 967 static void validate_lsa_access(MemoryRegion *mr, uint64_t size, 968 uint64_t offset) 969 { 970 assert(offset + size <= memory_region_size(mr)); 971 assert(offset + size > offset); 972 } 973 974 static uint64_t get_lsa(CXLType3Dev *ct3d, void *buf, uint64_t size, 975 uint64_t offset) 976 { 977 MemoryRegion *mr; 978 void *lsa; 979 980 if (!ct3d->lsa) { 981 return 0; 982 } 983 984 mr = host_memory_backend_get_memory(ct3d->lsa); 985 
validate_lsa_access(mr, size, offset); 986 987 lsa = memory_region_get_ram_ptr(mr) + offset; 988 memcpy(buf, lsa, size); 989 990 return size; 991 } 992 993 static void set_lsa(CXLType3Dev *ct3d, const void *buf, uint64_t size, 994 uint64_t offset) 995 { 996 MemoryRegion *mr; 997 void *lsa; 998 999 if (!ct3d->lsa) { 1000 return; 1001 } 1002 1003 mr = host_memory_backend_get_memory(ct3d->lsa); 1004 validate_lsa_access(mr, size, offset); 1005 1006 lsa = memory_region_get_ram_ptr(mr) + offset; 1007 memcpy(lsa, buf, size); 1008 memory_region_set_dirty(mr, offset, size); 1009 1010 /* 1011 * Just like the PMEM, if the guest is not allowed to exit gracefully, label 1012 * updates will get lost. 1013 */ 1014 } 1015 1016 static bool set_cacheline(CXLType3Dev *ct3d, uint64_t dpa_offset, uint8_t *data) 1017 { 1018 MemoryRegion *vmr = NULL, *pmr = NULL; 1019 AddressSpace *as; 1020 1021 if (ct3d->hostvmem) { 1022 vmr = host_memory_backend_get_memory(ct3d->hostvmem); 1023 } 1024 if (ct3d->hostpmem) { 1025 pmr = host_memory_backend_get_memory(ct3d->hostpmem); 1026 } 1027 1028 if (!vmr && !pmr) { 1029 return false; 1030 } 1031 1032 if (dpa_offset + CXL_CACHE_LINE_SIZE > ct3d->cxl_dstate.mem_size) { 1033 return false; 1034 } 1035 1036 if (vmr) { 1037 if (dpa_offset < memory_region_size(vmr)) { 1038 as = &ct3d->hostvmem_as; 1039 } else { 1040 as = &ct3d->hostpmem_as; 1041 dpa_offset -= memory_region_size(vmr); 1042 } 1043 } else { 1044 as = &ct3d->hostpmem_as; 1045 } 1046 1047 address_space_write(as, dpa_offset, MEMTXATTRS_UNSPECIFIED, &data, 1048 CXL_CACHE_LINE_SIZE); 1049 return true; 1050 } 1051 1052 void cxl_set_poison_list_overflowed(CXLType3Dev *ct3d) 1053 { 1054 ct3d->poison_list_overflowed = true; 1055 ct3d->poison_list_overflow_ts = 1056 cxl_device_get_timestamp(&ct3d->cxl_dstate); 1057 } 1058 1059 void qmp_cxl_inject_poison(const char *path, uint64_t start, uint64_t length, 1060 Error **errp) 1061 { 1062 Object *obj = object_resolve_path(path, NULL); 1063 CXLType3Dev *ct3d; 
1064 CXLPoison *p; 1065 1066 if (length % 64) { 1067 error_setg(errp, "Poison injection must be in multiples of 64 bytes"); 1068 return; 1069 } 1070 if (start % 64) { 1071 error_setg(errp, "Poison start address must be 64 byte aligned"); 1072 return; 1073 } 1074 if (!obj) { 1075 error_setg(errp, "Unable to resolve path"); 1076 return; 1077 } 1078 if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) { 1079 error_setg(errp, "Path does not point to a CXL type 3 device"); 1080 return; 1081 } 1082 1083 ct3d = CXL_TYPE3(obj); 1084 1085 QLIST_FOREACH(p, &ct3d->poison_list, node) { 1086 if (((start >= p->start) && (start < p->start + p->length)) || 1087 ((start + length > p->start) && 1088 (start + length <= p->start + p->length))) { 1089 error_setg(errp, 1090 "Overlap with existing poisoned region not supported"); 1091 return; 1092 } 1093 } 1094 1095 if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) { 1096 cxl_set_poison_list_overflowed(ct3d); 1097 return; 1098 } 1099 1100 p = g_new0(CXLPoison, 1); 1101 p->length = length; 1102 p->start = start; 1103 /* Different from injected via the mbox */ 1104 p->type = CXL_POISON_TYPE_INTERNAL; 1105 1106 QLIST_INSERT_HEAD(&ct3d->poison_list, p, node); 1107 ct3d->poison_list_cnt++; 1108 } 1109 1110 /* For uncorrectable errors include support for multiple header recording */ 1111 void qmp_cxl_inject_uncorrectable_errors(const char *path, 1112 CXLUncorErrorRecordList *errors, 1113 Error **errp) 1114 { 1115 Object *obj = object_resolve_path(path, NULL); 1116 static PCIEAERErr err = {}; 1117 CXLType3Dev *ct3d; 1118 CXLError *cxl_err; 1119 uint32_t *reg_state; 1120 uint32_t unc_err; 1121 bool first; 1122 1123 if (!obj) { 1124 error_setg(errp, "Unable to resolve path"); 1125 return; 1126 } 1127 1128 if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) { 1129 error_setg(errp, "Path does not point to a CXL type 3 device"); 1130 return; 1131 } 1132 1133 err.status = PCI_ERR_UNC_INTN; 1134 err.source_id = pci_requester_id(PCI_DEVICE(obj)); 1135 err.flags 
= 0; 1136 1137 ct3d = CXL_TYPE3(obj); 1138 1139 first = QTAILQ_EMPTY(&ct3d->error_list); 1140 reg_state = ct3d->cxl_cstate.crb.cache_mem_registers; 1141 while (errors) { 1142 uint32List *header = errors->value->header; 1143 uint8_t header_count = 0; 1144 int cxl_err_code; 1145 1146 cxl_err_code = ct3d_qmp_uncor_err_to_cxl(errors->value->type); 1147 if (cxl_err_code < 0) { 1148 error_setg(errp, "Unknown error code"); 1149 return; 1150 } 1151 1152 /* If the error is masked, nothing to do here */ 1153 if (!((1 << cxl_err_code) & 1154 ~ldl_le_p(reg_state + R_CXL_RAS_UNC_ERR_MASK))) { 1155 errors = errors->next; 1156 continue; 1157 } 1158 1159 cxl_err = g_malloc0(sizeof(*cxl_err)); 1160 if (!cxl_err) { 1161 return; 1162 } 1163 1164 cxl_err->type = cxl_err_code; 1165 while (header && header_count < 32) { 1166 cxl_err->header[header_count++] = header->value; 1167 header = header->next; 1168 } 1169 if (header_count > 32) { 1170 error_setg(errp, "Header must be 32 DWORD or less"); 1171 return; 1172 } 1173 QTAILQ_INSERT_TAIL(&ct3d->error_list, cxl_err, node); 1174 1175 errors = errors->next; 1176 } 1177 1178 if (first && !QTAILQ_EMPTY(&ct3d->error_list)) { 1179 uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers; 1180 uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL); 1181 uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0]; 1182 int i; 1183 1184 cxl_err = QTAILQ_FIRST(&ct3d->error_list); 1185 for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) { 1186 stl_le_p(header_log + i, cxl_err->header[i]); 1187 } 1188 1189 capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL, 1190 FIRST_ERROR_POINTER, cxl_err->type); 1191 stl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL, capctrl); 1192 } 1193 1194 unc_err = 0; 1195 QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) { 1196 unc_err |= (1 << cxl_err->type); 1197 } 1198 if (!unc_err) { 1199 return; 1200 } 1201 1202 stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, unc_err); 1203 pcie_aer_inject_error(PCI_DEVICE(obj), &err); 
1204 1205 return; 1206 } 1207 1208 void qmp_cxl_inject_correctable_error(const char *path, CxlCorErrorType type, 1209 Error **errp) 1210 { 1211 static PCIEAERErr err = {}; 1212 Object *obj = object_resolve_path(path, NULL); 1213 CXLType3Dev *ct3d; 1214 uint32_t *reg_state; 1215 uint32_t cor_err; 1216 int cxl_err_type; 1217 1218 if (!obj) { 1219 error_setg(errp, "Unable to resolve path"); 1220 return; 1221 } 1222 if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) { 1223 error_setg(errp, "Path does not point to a CXL type 3 device"); 1224 return; 1225 } 1226 1227 err.status = PCI_ERR_COR_INTERNAL; 1228 err.source_id = pci_requester_id(PCI_DEVICE(obj)); 1229 err.flags = PCIE_AER_ERR_IS_CORRECTABLE; 1230 1231 ct3d = CXL_TYPE3(obj); 1232 reg_state = ct3d->cxl_cstate.crb.cache_mem_registers; 1233 cor_err = ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS); 1234 1235 cxl_err_type = ct3d_qmp_cor_err_to_cxl(type); 1236 if (cxl_err_type < 0) { 1237 error_setg(errp, "Invalid COR error"); 1238 return; 1239 } 1240 /* If the error is masked, nothting to do here */ 1241 if (!((1 << cxl_err_type) & 1242 ~ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_MASK))) { 1243 return; 1244 } 1245 1246 cor_err |= (1 << cxl_err_type); 1247 stl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS, cor_err); 1248 1249 pcie_aer_inject_error(PCI_DEVICE(obj), &err); 1250 } 1251 1252 static void cxl_assign_event_header(CXLEventRecordHdr *hdr, 1253 const QemuUUID *uuid, uint32_t flags, 1254 uint8_t length, uint64_t timestamp) 1255 { 1256 st24_le_p(&hdr->flags, flags); 1257 hdr->length = length; 1258 memcpy(&hdr->id, uuid, sizeof(hdr->id)); 1259 stq_le_p(&hdr->timestamp, timestamp); 1260 } 1261 1262 static const QemuUUID gen_media_uuid = { 1263 .data = UUID(0xfbcd0a77, 0xc260, 0x417f, 1264 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6), 1265 }; 1266 1267 static const QemuUUID dram_uuid = { 1268 .data = UUID(0x601dcbb3, 0x9c06, 0x4eab, 0xb8, 0xaf, 1269 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24), 1270 }; 1271 1272 static const QemuUUID 
memory_module_uuid = { 1273 .data = UUID(0xfe927475, 0xdd59, 0x4339, 0xa5, 0x86, 1274 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74), 1275 }; 1276 1277 #define CXL_GMER_VALID_CHANNEL BIT(0) 1278 #define CXL_GMER_VALID_RANK BIT(1) 1279 #define CXL_GMER_VALID_DEVICE BIT(2) 1280 #define CXL_GMER_VALID_COMPONENT BIT(3) 1281 1282 static int ct3d_qmp_cxl_event_log_enc(CxlEventLog log) 1283 { 1284 switch (log) { 1285 case CXL_EVENT_LOG_INFORMATIONAL: 1286 return CXL_EVENT_TYPE_INFO; 1287 case CXL_EVENT_LOG_WARNING: 1288 return CXL_EVENT_TYPE_WARN; 1289 case CXL_EVENT_LOG_FAILURE: 1290 return CXL_EVENT_TYPE_FAIL; 1291 case CXL_EVENT_LOG_FATAL: 1292 return CXL_EVENT_TYPE_FATAL; 1293 /* DCD not yet supported */ 1294 default: 1295 return -EINVAL; 1296 } 1297 } 1298 /* Component ID is device specific. Define this as a string. */ 1299 void qmp_cxl_inject_general_media_event(const char *path, CxlEventLog log, 1300 uint8_t flags, uint64_t dpa, 1301 uint8_t descriptor, uint8_t type, 1302 uint8_t transaction_type, 1303 bool has_channel, uint8_t channel, 1304 bool has_rank, uint8_t rank, 1305 bool has_device, uint32_t device, 1306 const char *component_id, 1307 Error **errp) 1308 { 1309 Object *obj = object_resolve_path(path, NULL); 1310 CXLEventGenMedia gem; 1311 CXLEventRecordHdr *hdr = &gem.hdr; 1312 CXLDeviceState *cxlds; 1313 CXLType3Dev *ct3d; 1314 uint16_t valid_flags = 0; 1315 uint8_t enc_log; 1316 int rc; 1317 1318 if (!obj) { 1319 error_setg(errp, "Unable to resolve path"); 1320 return; 1321 } 1322 if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) { 1323 error_setg(errp, "Path does not point to a CXL type 3 device"); 1324 return; 1325 } 1326 ct3d = CXL_TYPE3(obj); 1327 cxlds = &ct3d->cxl_dstate; 1328 1329 rc = ct3d_qmp_cxl_event_log_enc(log); 1330 if (rc < 0) { 1331 error_setg(errp, "Unhandled error log type"); 1332 return; 1333 } 1334 enc_log = rc; 1335 1336 memset(&gem, 0, sizeof(gem)); 1337 cxl_assign_event_header(hdr, &gen_media_uuid, flags, sizeof(gem), 1338 
cxl_device_get_timestamp(&ct3d->cxl_dstate)); 1339 1340 stq_le_p(&gem.phys_addr, dpa); 1341 gem.descriptor = descriptor; 1342 gem.type = type; 1343 gem.transaction_type = transaction_type; 1344 1345 if (has_channel) { 1346 gem.channel = channel; 1347 valid_flags |= CXL_GMER_VALID_CHANNEL; 1348 } 1349 1350 if (has_rank) { 1351 gem.rank = rank; 1352 valid_flags |= CXL_GMER_VALID_RANK; 1353 } 1354 1355 if (has_device) { 1356 st24_le_p(gem.device, device); 1357 valid_flags |= CXL_GMER_VALID_DEVICE; 1358 } 1359 1360 if (component_id) { 1361 strncpy((char *)gem.component_id, component_id, 1362 sizeof(gem.component_id) - 1); 1363 valid_flags |= CXL_GMER_VALID_COMPONENT; 1364 } 1365 1366 stw_le_p(&gem.validity_flags, valid_flags); 1367 1368 if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&gem)) { 1369 cxl_event_irq_assert(ct3d); 1370 } 1371 } 1372 1373 #define CXL_DRAM_VALID_CHANNEL BIT(0) 1374 #define CXL_DRAM_VALID_RANK BIT(1) 1375 #define CXL_DRAM_VALID_NIBBLE_MASK BIT(2) 1376 #define CXL_DRAM_VALID_BANK_GROUP BIT(3) 1377 #define CXL_DRAM_VALID_BANK BIT(4) 1378 #define CXL_DRAM_VALID_ROW BIT(5) 1379 #define CXL_DRAM_VALID_COLUMN BIT(6) 1380 #define CXL_DRAM_VALID_CORRECTION_MASK BIT(7) 1381 1382 void qmp_cxl_inject_dram_event(const char *path, CxlEventLog log, uint8_t flags, 1383 uint64_t dpa, uint8_t descriptor, 1384 uint8_t type, uint8_t transaction_type, 1385 bool has_channel, uint8_t channel, 1386 bool has_rank, uint8_t rank, 1387 bool has_nibble_mask, uint32_t nibble_mask, 1388 bool has_bank_group, uint8_t bank_group, 1389 bool has_bank, uint8_t bank, 1390 bool has_row, uint32_t row, 1391 bool has_column, uint16_t column, 1392 bool has_correction_mask, 1393 uint64List *correction_mask, 1394 Error **errp) 1395 { 1396 Object *obj = object_resolve_path(path, NULL); 1397 CXLEventDram dram; 1398 CXLEventRecordHdr *hdr = &dram.hdr; 1399 CXLDeviceState *cxlds; 1400 CXLType3Dev *ct3d; 1401 uint16_t valid_flags = 0; 1402 uint8_t enc_log; 1403 int rc; 1404 1405 if 
(!obj) { 1406 error_setg(errp, "Unable to resolve path"); 1407 return; 1408 } 1409 if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) { 1410 error_setg(errp, "Path does not point to a CXL type 3 device"); 1411 return; 1412 } 1413 ct3d = CXL_TYPE3(obj); 1414 cxlds = &ct3d->cxl_dstate; 1415 1416 rc = ct3d_qmp_cxl_event_log_enc(log); 1417 if (rc < 0) { 1418 error_setg(errp, "Unhandled error log type"); 1419 return; 1420 } 1421 enc_log = rc; 1422 1423 memset(&dram, 0, sizeof(dram)); 1424 cxl_assign_event_header(hdr, &dram_uuid, flags, sizeof(dram), 1425 cxl_device_get_timestamp(&ct3d->cxl_dstate)); 1426 stq_le_p(&dram.phys_addr, dpa); 1427 dram.descriptor = descriptor; 1428 dram.type = type; 1429 dram.transaction_type = transaction_type; 1430 1431 if (has_channel) { 1432 dram.channel = channel; 1433 valid_flags |= CXL_DRAM_VALID_CHANNEL; 1434 } 1435 1436 if (has_rank) { 1437 dram.rank = rank; 1438 valid_flags |= CXL_DRAM_VALID_RANK; 1439 } 1440 1441 if (has_nibble_mask) { 1442 st24_le_p(dram.nibble_mask, nibble_mask); 1443 valid_flags |= CXL_DRAM_VALID_NIBBLE_MASK; 1444 } 1445 1446 if (has_bank_group) { 1447 dram.bank_group = bank_group; 1448 valid_flags |= CXL_DRAM_VALID_BANK_GROUP; 1449 } 1450 1451 if (has_bank) { 1452 dram.bank = bank; 1453 valid_flags |= CXL_DRAM_VALID_BANK; 1454 } 1455 1456 if (has_row) { 1457 st24_le_p(dram.row, row); 1458 valid_flags |= CXL_DRAM_VALID_ROW; 1459 } 1460 1461 if (has_column) { 1462 stw_le_p(&dram.column, column); 1463 valid_flags |= CXL_DRAM_VALID_COLUMN; 1464 } 1465 1466 if (has_correction_mask) { 1467 int count = 0; 1468 while (correction_mask && count < 4) { 1469 stq_le_p(&dram.correction_mask[count], 1470 correction_mask->value); 1471 count++; 1472 correction_mask = correction_mask->next; 1473 } 1474 valid_flags |= CXL_DRAM_VALID_CORRECTION_MASK; 1475 } 1476 1477 stw_le_p(&dram.validity_flags, valid_flags); 1478 1479 if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&dram)) { 1480 cxl_event_irq_assert(ct3d); 1481 } 1482 
return; 1483 } 1484 1485 void qmp_cxl_inject_memory_module_event(const char *path, CxlEventLog log, 1486 uint8_t flags, uint8_t type, 1487 uint8_t health_status, 1488 uint8_t media_status, 1489 uint8_t additional_status, 1490 uint8_t life_used, 1491 int16_t temperature, 1492 uint32_t dirty_shutdown_count, 1493 uint32_t corrected_volatile_error_count, 1494 uint32_t corrected_persist_error_count, 1495 Error **errp) 1496 { 1497 Object *obj = object_resolve_path(path, NULL); 1498 CXLEventMemoryModule module; 1499 CXLEventRecordHdr *hdr = &module.hdr; 1500 CXLDeviceState *cxlds; 1501 CXLType3Dev *ct3d; 1502 uint8_t enc_log; 1503 int rc; 1504 1505 if (!obj) { 1506 error_setg(errp, "Unable to resolve path"); 1507 return; 1508 } 1509 if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) { 1510 error_setg(errp, "Path does not point to a CXL type 3 device"); 1511 return; 1512 } 1513 ct3d = CXL_TYPE3(obj); 1514 cxlds = &ct3d->cxl_dstate; 1515 1516 rc = ct3d_qmp_cxl_event_log_enc(log); 1517 if (rc < 0) { 1518 error_setg(errp, "Unhandled error log type"); 1519 return; 1520 } 1521 enc_log = rc; 1522 1523 memset(&module, 0, sizeof(module)); 1524 cxl_assign_event_header(hdr, &memory_module_uuid, flags, sizeof(module), 1525 cxl_device_get_timestamp(&ct3d->cxl_dstate)); 1526 1527 module.type = type; 1528 module.health_status = health_status; 1529 module.media_status = media_status; 1530 module.additional_status = additional_status; 1531 module.life_used = life_used; 1532 stw_le_p(&module.temperature, temperature); 1533 stl_le_p(&module.dirty_shutdown_count, dirty_shutdown_count); 1534 stl_le_p(&module.corrected_volatile_error_count, 1535 corrected_volatile_error_count); 1536 stl_le_p(&module.corrected_persistent_error_count, 1537 corrected_persist_error_count); 1538 1539 if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&module)) { 1540 cxl_event_irq_assert(ct3d); 1541 } 1542 } 1543 1544 static void ct3_class_init(ObjectClass *oc, void *data) 1545 { 1546 DeviceClass *dc = 
DEVICE_CLASS(oc); 1547 PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc); 1548 CXLType3Class *cvc = CXL_TYPE3_CLASS(oc); 1549 1550 pc->realize = ct3_realize; 1551 pc->exit = ct3_exit; 1552 pc->class_id = PCI_CLASS_MEMORY_CXL; 1553 pc->vendor_id = PCI_VENDOR_ID_INTEL; 1554 pc->device_id = 0xd93; /* LVF for now */ 1555 pc->revision = 1; 1556 1557 pc->config_write = ct3d_config_write; 1558 pc->config_read = ct3d_config_read; 1559 1560 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); 1561 dc->desc = "CXL Memory Device (Type 3)"; 1562 dc->reset = ct3d_reset; 1563 device_class_set_props(dc, ct3_props); 1564 1565 cvc->get_lsa_size = get_lsa_size; 1566 cvc->get_lsa = get_lsa; 1567 cvc->set_lsa = set_lsa; 1568 cvc->set_cacheline = set_cacheline; 1569 } 1570 1571 static const TypeInfo ct3d_info = { 1572 .name = TYPE_CXL_TYPE3, 1573 .parent = TYPE_PCI_DEVICE, 1574 .class_size = sizeof(struct CXLType3Class), 1575 .class_init = ct3_class_init, 1576 .instance_size = sizeof(CXLType3Dev), 1577 .interfaces = (InterfaceInfo[]) { 1578 { INTERFACE_CXL_DEVICE }, 1579 { INTERFACE_PCIE_DEVICE }, 1580 {} 1581 }, 1582 }; 1583 1584 static void ct3d_registers(void) 1585 { 1586 type_register_static(&ct3d_info); 1587 } 1588 1589 type_init(ct3d_registers); 1590