xref: /qemu/hw/mem/cxl_type3.c (revision 5db05230)
/*
 * CXL Type 3 (memory expander) device
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "qapi/qapi-commands-cxl.h"
#include "hw/mem/memory-device.h"
#include "hw/mem/pc-dimm.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/pmem.h"
#include "qemu/range.h"
#include "qemu/rcu.h"
#include "qemu/guest-random.h"
#include "sysemu/hostmem.h"
#include "sysemu/numa.h"
#include "hw/cxl/cxl.h"
#include "hw/pci/msix.h"

#define DWORD_BYTE 4

/* Default CDAT entries for a memory region */
enum {
    CT3_CDAT_DSMAS,
    CT3_CDAT_DSLBIS0,
    CT3_CDAT_DSLBIS1,
    CT3_CDAT_DSLBIS2,
    CT3_CDAT_DSLBIS3,
    CT3_CDAT_DSEMTS,
    CT3_CDAT_NUM_ENTRIES
};

static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
                                         int dsmad_handle, MemoryRegion *mr,
                                         bool is_pmem, uint64_t dpa_base)
{
    g_autofree CDATDsmas *dsmas = NULL;
    g_autofree CDATDslbis *dslbis0 = NULL;
    g_autofree CDATDslbis *dslbis1 = NULL;
    g_autofree CDATDslbis *dslbis2 = NULL;
    g_autofree CDATDslbis *dslbis3 = NULL;
    g_autofree CDATDsemts *dsemts = NULL;

    dsmas = g_malloc(sizeof(*dsmas));
    if (!dsmas) {
        return -ENOMEM;
    }
    *dsmas = (CDATDsmas) {
        .header = {
            .type = CDAT_TYPE_DSMAS,
            .length = sizeof(*dsmas),
        },
        .DSMADhandle = dsmad_handle,
        .flags = is_pmem ? CDAT_DSMAS_FLAG_NV : 0,
        .DPA_base = dpa_base,
        .DPA_length = memory_region_size(mr),
    };

    /* For now, no memory side cache, plausiblish numbers */
    dslbis0 = g_malloc(sizeof(*dslbis0));
    if (!dslbis0) {
        return -ENOMEM;
    }
    *dslbis0 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis0),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_READ_LATENCY,
        .entry_base_unit = 10000, /* 10ns base */
        .entry[0] = 15, /* 150ns */
    };

    dslbis1 = g_malloc(sizeof(*dslbis1));
    if (!dslbis1) {
        return -ENOMEM;
    }
    *dslbis1 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis1),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_WRITE_LATENCY,
        .entry_base_unit = 10000,
        .entry[0] = 25, /* 250ns */
    };

    dslbis2 = g_malloc(sizeof(*dslbis2));
    if (!dslbis2) {
        return -ENOMEM;
    }
    *dslbis2 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis2),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_READ_BANDWIDTH,
        .entry_base_unit = 1000, /* GB/s */
        .entry[0] = 16,
    };

    dslbis3 = g_malloc(sizeof(*dslbis3));
    if (!dslbis3) {
        return -ENOMEM;
    }
    *dslbis3 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis3),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_WRITE_BANDWIDTH,
        .entry_base_unit = 1000, /* GB/s */
        .entry[0] = 16,
    };

    dsemts = g_malloc(sizeof(*dsemts));
    if (!dsemts) {
        return -ENOMEM;
    }
    *dsemts = (CDATDsemts) {
        .header = {
            .type = CDAT_TYPE_DSEMTS,
            .length = sizeof(*dsemts),
        },
        .DSMAS_handle = dsmad_handle,
        /*
         * NV: Reserved - the non-volatile flag from DSMAS matters.
         * V: EFI_MEMORY_SP
         */
        .EFI_memory_type_attr = is_pmem ? 2 : 1,
        .DPA_offset = 0,
        .DPA_length = memory_region_size(mr),
    };

    /* Header always at start of structure */
    cdat_table[CT3_CDAT_DSMAS] = g_steal_pointer(&dsmas);
    cdat_table[CT3_CDAT_DSLBIS0] = g_steal_pointer(&dslbis0);
    cdat_table[CT3_CDAT_DSLBIS1] = g_steal_pointer(&dslbis1);
    cdat_table[CT3_CDAT_DSLBIS2] = g_steal_pointer(&dslbis2);
    cdat_table[CT3_CDAT_DSLBIS3] = g_steal_pointer(&dslbis3);
    cdat_table[CT3_CDAT_DSEMTS] = g_steal_pointer(&dsemts);

    return 0;
}
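
/*
 * Each backing memory region is thus described by six CDAT entries: one
 * DSMAS giving its DPA range, four DSLBIS entries (read/write latency and
 * read/write bandwidth) tied to the same DSMAD handle, and one DSEMTS
 * giving the EFI memory type. A device with both a volatile and a
 * persistent backend therefore reports 2 * CT3_CDAT_NUM_ENTRIES entries,
 * with the persistent range based directly after the volatile one.
 */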

static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
{
    g_autofree CDATSubHeader **table = NULL;
    CXLType3Dev *ct3d = priv;
    MemoryRegion *volatile_mr = NULL, *nonvolatile_mr = NULL;
    int dsmad_handle = 0;
    int cur_ent = 0;
    int len = 0;
    int rc, i;

    if (!ct3d->hostpmem && !ct3d->hostvmem) {
        return 0;
    }

    if (ct3d->hostvmem) {
        volatile_mr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (!volatile_mr) {
            return -EINVAL;
        }
        len += CT3_CDAT_NUM_ENTRIES;
    }

    if (ct3d->hostpmem) {
        nonvolatile_mr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (!nonvolatile_mr) {
            return -EINVAL;
        }
        len += CT3_CDAT_NUM_ENTRIES;
    }

    table = g_malloc0(len * sizeof(*table));
    if (!table) {
        return -ENOMEM;
    }

    /* Now fill them in */
    if (volatile_mr) {
        rc = ct3_build_cdat_entries_for_mr(table, dsmad_handle++, volatile_mr,
                                           false, 0);
        if (rc < 0) {
            return rc;
        }
        cur_ent = CT3_CDAT_NUM_ENTRIES;
    }

    if (nonvolatile_mr) {
        uint64_t base = volatile_mr ? memory_region_size(volatile_mr) : 0;
        rc = ct3_build_cdat_entries_for_mr(&(table[cur_ent]), dsmad_handle++,
                                           nonvolatile_mr, true, base);
        if (rc < 0) {
            goto error_cleanup;
        }
        cur_ent += CT3_CDAT_NUM_ENTRIES;
    }
    assert(len == cur_ent);

    *cdat_table = g_steal_pointer(&table);

    return len;
error_cleanup:
    for (i = 0; i < cur_ent; i++) {
        g_free(table[i]);
    }
    return rc;
}

static void ct3_free_cdat_table(CDATSubHeader **cdat_table, int num, void *priv)
{
    int i;

    for (i = 0; i < num; i++) {
        g_free(cdat_table[i]);
    }
    g_free(cdat_table);
}

static bool cxl_doe_cdat_rsp(DOECap *doe_cap)
{
    CDATObject *cdat = &CXL_TYPE3(doe_cap->pdev)->cxl_cstate.cdat;
    uint16_t ent;
    void *base;
    uint32_t len;
    CDATReq *req = pcie_doe_get_write_mbox_ptr(doe_cap);
    CDATRsp rsp;

    assert(cdat->entry_len);

    /* Discard if request length mismatched */
    if (pcie_doe_get_obj_len(req) <
        DIV_ROUND_UP(sizeof(CDATReq), DWORD_BYTE)) {
        return false;
    }

    ent = req->entry_handle;
    base = cdat->entry[ent].base;
    len = cdat->entry[ent].length;

    rsp = (CDATRsp) {
        .header = {
            .vendor_id = CXL_VENDOR_ID,
            .data_obj_type = CXL_DOE_TABLE_ACCESS,
            .reserved = 0x0,
            .length = DIV_ROUND_UP((sizeof(rsp) + len), DWORD_BYTE),
        },
        .rsp_code = CXL_DOE_TAB_RSP,
        .table_type = CXL_DOE_TAB_TYPE_CDAT,
        .entry_handle = (ent < cdat->entry_len - 1) ?
                        ent + 1 : CXL_DOE_TAB_ENT_MAX,
    };

    memcpy(doe_cap->read_mbox, &rsp, sizeof(rsp));
    memcpy(doe_cap->read_mbox + DIV_ROUND_UP(sizeof(rsp), DWORD_BYTE),
           base, len);

    doe_cap->read_mbox_len += rsp.header.length;

    return true;
}
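
/*
 * CDAT table access over DOE returns one entry per exchange: the requester
 * starts at entry_handle 0 and follows the handle returned in each
 * response. For example, with a six-entry table the returned handles chain
 * 1, 2, 3, 4, 5 and finally CXL_DOE_TAB_ENT_MAX, which tells the requester
 * the table is complete.
 */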

static uint32_t ct3d_config_read(PCIDevice *pci_dev, uint32_t addr, int size)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    uint32_t val;

    if (pcie_doe_read_config(&ct3d->doe_cdat, addr, size, &val)) {
        return val;
    }

    return pci_default_read_config(pci_dev, addr, size);
}

static void ct3d_config_write(PCIDevice *pci_dev, uint32_t addr, uint32_t val,
                              int size)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);

    pcie_doe_write_config(&ct3d->doe_cdat, addr, val, size);
    pci_default_write_config(pci_dev, addr, val, size);
    pcie_aer_write_config(pci_dev, addr, val, size);
}

/*
 * Null value of all Fs suggested by IEEE RA guidelines for use of
 * EU, OUI and CID
 */
#define UI64_NULL ~(0ULL)

static void build_dvsecs(CXLType3Dev *ct3d)
{
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    uint8_t *dvsec;
    uint32_t range1_size_hi, range1_size_lo,
             range1_base_hi = 0, range1_base_lo = 0,
             range2_size_hi = 0, range2_size_lo = 0,
             range2_base_hi = 0, range2_base_lo = 0;
    /*
     * Volatile memory is mapped at 0x0.
     * Persistent memory is mapped at volatile->size.
     */
    if (ct3d->hostvmem) {
        range1_size_hi = ct3d->hostvmem->size >> 32;
        range1_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                         (ct3d->hostvmem->size & 0xF0000000);
        if (ct3d->hostpmem) {
            range2_size_hi = ct3d->hostpmem->size >> 32;
            range2_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                             (ct3d->hostpmem->size & 0xF0000000);
        }
    } else {
        range1_size_hi = ct3d->hostpmem->size >> 32;
        range1_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                         (ct3d->hostpmem->size & 0xF0000000);
    }
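
    /*
     * The size low dwords above encode, per the CXL 2.0 Device DVSEC range
     * registers (field names assumed from the spec): Memory_Info_Valid
     * (bit 0), Memory_Active (bit 1), a media-type field (2 << 2) and a
     * class field (2 << 5), with bits [31:28] of the range size kept in
     * place, i.e. range sizes are 256 MiB granular. For example a 1 GiB
     * volatile backend gives range1_size_hi = 0 and
     * range1_size_lo = 0x4000004b.
     */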

    dvsec = (uint8_t *)&(CXLDVSECDevice){
        .cap = 0x1e,
        .ctrl = 0x2,
        .status2 = 0x2,
        .range1_size_hi = range1_size_hi,
        .range1_size_lo = range1_size_lo,
        .range1_base_hi = range1_base_hi,
        .range1_base_lo = range1_base_lo,
        .range2_size_hi = range2_size_hi,
        .range2_size_lo = range2_size_lo,
        .range2_base_hi = range2_base_hi,
        .range2_base_lo = range2_base_lo,
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               PCIE_CXL_DEVICE_DVSEC_LENGTH,
                               PCIE_CXL_DEVICE_DVSEC,
                               PCIE_CXL2_DEVICE_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){
        .rsvd         = 0,
        .reg0_base_lo = RBI_COMPONENT_REG | CXL_COMPONENT_REG_BAR_IDX,
        .reg0_base_hi = 0,
        .reg1_base_lo = RBI_CXL_DEVICE_REG | CXL_DEVICE_REG_BAR_IDX,
        .reg1_base_hi = 0,
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               REG_LOC_DVSEC_LENGTH, REG_LOC_DVSEC,
                               REG_LOC_DVSEC_REVID, dvsec);
    dvsec = (uint8_t *)&(CXLDVSECDeviceGPF){
        .phase2_duration = 0x603, /* 3 seconds */
        .phase2_power = 0x33, /* 0x33 milliwatts */
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               GPF_DEVICE_DVSEC_LENGTH, GPF_DEVICE_DVSEC,
                               GPF_DEVICE_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECPortFlexBus){
        .cap                     = 0x26, /* 68B, IO, Mem, non-MLD */
        .ctrl                    = 0x02, /* IO always enabled */
        .status                  = 0x26, /* same as capabilities */
        .rcvd_mod_ts_data_phase1 = 0xef, /* Value unclear */
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               PCIE_FLEXBUS_PORT_DVSEC_LENGTH_2_0,
                               PCIE_FLEXBUS_PORT_DVSEC,
                               PCIE_FLEXBUS_PORT_DVSEC_REVID_2_0, dvsec);
}

static void hdm_decoder_commit(CXLType3Dev *ct3d, int which)
{
    int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
    ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
    uint32_t *cache_mem = cregs->cache_mem_registers;
    uint32_t ctrl;

    ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc);
    /* TODO: Sanity checks that the decoder is possible */
    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, ERR, 0);
    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);

    stl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc, ctrl);
}

static void hdm_decoder_uncommit(CXLType3Dev *ct3d, int which)
{
    int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
    ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
    uint32_t *cache_mem = cregs->cache_mem_registers;
    uint32_t ctrl;

    ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc);

    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, ERR, 0);
    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 0);

    stl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc, ctrl);
}

static int ct3d_qmp_uncor_err_to_cxl(CxlUncorErrorType qmp_err)
{
    switch (qmp_err) {
    case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_DATA_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_ADDRESS_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_ADDRESS_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_BE_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_BE_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_ECC:
        return CXL_RAS_UNC_ERR_CACHE_DATA_ECC;
    case CXL_UNCOR_ERROR_TYPE_MEM_DATA_PARITY:
        return CXL_RAS_UNC_ERR_MEM_DATA_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_ADDRESS_PARITY:
        return CXL_RAS_UNC_ERR_MEM_ADDRESS_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_BE_PARITY:
        return CXL_RAS_UNC_ERR_MEM_BE_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_DATA_ECC:
        return CXL_RAS_UNC_ERR_MEM_DATA_ECC;
    case CXL_UNCOR_ERROR_TYPE_REINIT_THRESHOLD:
        return CXL_RAS_UNC_ERR_REINIT_THRESHOLD;
    case CXL_UNCOR_ERROR_TYPE_RSVD_ENCODING:
        return CXL_RAS_UNC_ERR_RSVD_ENCODING;
    case CXL_UNCOR_ERROR_TYPE_POISON_RECEIVED:
        return CXL_RAS_UNC_ERR_POISON_RECEIVED;
    case CXL_UNCOR_ERROR_TYPE_RECEIVER_OVERFLOW:
        return CXL_RAS_UNC_ERR_RECEIVER_OVERFLOW;
    case CXL_UNCOR_ERROR_TYPE_INTERNAL:
        return CXL_RAS_UNC_ERR_INTERNAL;
    case CXL_UNCOR_ERROR_TYPE_CXL_IDE_TX:
        return CXL_RAS_UNC_ERR_CXL_IDE_TX;
    case CXL_UNCOR_ERROR_TYPE_CXL_IDE_RX:
        return CXL_RAS_UNC_ERR_CXL_IDE_RX;
    default:
        return -EINVAL;
    }
}

static int ct3d_qmp_cor_err_to_cxl(CxlCorErrorType qmp_err)
{
    switch (qmp_err) {
    case CXL_COR_ERROR_TYPE_CACHE_DATA_ECC:
        return CXL_RAS_COR_ERR_CACHE_DATA_ECC;
    case CXL_COR_ERROR_TYPE_MEM_DATA_ECC:
        return CXL_RAS_COR_ERR_MEM_DATA_ECC;
    case CXL_COR_ERROR_TYPE_CRC_THRESHOLD:
        return CXL_RAS_COR_ERR_CRC_THRESHOLD;
    case CXL_COR_ERROR_TYPE_RETRY_THRESHOLD:
        return CXL_RAS_COR_ERR_RETRY_THRESHOLD;
    case CXL_COR_ERROR_TYPE_CACHE_POISON_RECEIVED:
        return CXL_RAS_COR_ERR_CACHE_POISON_RECEIVED;
    case CXL_COR_ERROR_TYPE_MEM_POISON_RECEIVED:
        return CXL_RAS_COR_ERR_MEM_POISON_RECEIVED;
    case CXL_COR_ERROR_TYPE_PHYSICAL:
        return CXL_RAS_COR_ERR_PHYSICAL;
    default:
        return -EINVAL;
    }
}

static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value,
                           unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;
    CXLType3Dev *ct3d = container_of(cxl_cstate, CXLType3Dev, cxl_cstate);
    uint32_t *cache_mem = cregs->cache_mem_registers;
    bool should_commit = false;
    bool should_uncommit = false;
    int which_hdm = -1;

    assert(size == 4);
    g_assert(offset < CXL2_COMPONENT_CM_REGION_SIZE);

    switch (offset) {
    case A_CXL_HDM_DECODER0_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        which_hdm = 0;
        break;
    case A_CXL_HDM_DECODER1_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        which_hdm = 1;
        break;
    case A_CXL_HDM_DECODER2_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        which_hdm = 2;
        break;
    case A_CXL_HDM_DECODER3_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        which_hdm = 3;
        break;
    case A_CXL_RAS_UNC_ERR_STATUS:
    {
        uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL);
        uint32_t fe = FIELD_EX32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                 FIRST_ERROR_POINTER);
        CXLError *cxl_err;
        uint32_t unc_err;

        /*
         * If single bit written that corresponds to the first error
         * pointer being cleared, update the status and header log.
         */
        if (!QTAILQ_EMPTY(&ct3d->error_list)) {
            if ((1 << fe) ^ value) {
                CXLError *cxl_next;
                /*
                 * Software is using wrong flow for multiple header recording
                 * Following behavior in PCIe r6.0 and assuming multiple
                 * header support. Implementation defined choice to clear all
                 * matching records if more than one bit set - which corresponds
                 * closest to behavior of hardware not capable of multiple
                 * header recording.
                 */
                QTAILQ_FOREACH_SAFE(cxl_err, &ct3d->error_list, node,
                                    cxl_next) {
                    if ((1 << cxl_err->type) & value) {
                        QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node);
                        g_free(cxl_err);
                    }
                }
            } else {
                /* Done with previous FE, so drop from list */
                cxl_err = QTAILQ_FIRST(&ct3d->error_list);
                QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node);
                g_free(cxl_err);
            }

            /*
             * If there is another FE, then put that in place and update
             * the header log
             */
            if (!QTAILQ_EMPTY(&ct3d->error_list)) {
                uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0];
                int i;

                cxl_err = QTAILQ_FIRST(&ct3d->error_list);
                for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) {
                    stl_le_p(header_log + i, cxl_err->header[i]);
                }
                capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                     FIRST_ERROR_POINTER, cxl_err->type);
            } else {
                /*
                 * If no more errors, then follow recommendation of PCI spec
                 * r6.0 6.2.4.2 to set the first error pointer to a status
                 * bit that will never be used.
                 */
                capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                     FIRST_ERROR_POINTER,
                                     CXL_RAS_UNC_ERR_CXL_UNUSED);
            }
            stl_le_p((uint8_t *)cache_mem + A_CXL_RAS_ERR_CAP_CTRL, capctrl);
        }
        unc_err = 0;
        QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) {
            unc_err |= 1 << cxl_err->type;
        }
        stl_le_p((uint8_t *)cache_mem + offset, unc_err);

        return;
    }
    case A_CXL_RAS_COR_ERR_STATUS:
    {
        uint32_t rw1c = value;
        uint32_t temp = ldl_le_p((uint8_t *)cache_mem + offset);
        temp &= ~rw1c;
        stl_le_p((uint8_t *)cache_mem + offset, temp);
        return;
    }
    default:
        break;
    }

    stl_le_p((uint8_t *)cache_mem + offset, value);
    if (should_commit) {
        hdm_decoder_commit(ct3d, which_hdm);
    } else if (should_uncommit) {
        hdm_decoder_uncommit(ct3d, which_hdm);
    }
}

static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
{
    DeviceState *ds = DEVICE(ct3d);

    if (!ct3d->hostmem && !ct3d->hostvmem && !ct3d->hostpmem) {
        error_setg(errp, "at least one memdev property must be set");
        return false;
    } else if (ct3d->hostmem && ct3d->hostpmem) {
        error_setg(errp, "[memdev] cannot be used with new "
                         "[persistent-memdev] property");
        return false;
    } else if (ct3d->hostmem) {
        /* Use of hostmem property implies pmem */
        ct3d->hostpmem = ct3d->hostmem;
        ct3d->hostmem = NULL;
    }

    if (ct3d->hostpmem && !ct3d->lsa) {
        error_setg(errp, "lsa property must be set for persistent devices");
        return false;
    }

    if (ct3d->hostvmem) {
        MemoryRegion *vmr;
        char *v_name;

        vmr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (!vmr) {
            error_setg(errp, "volatile memdev must have backing device");
            return false;
        }
        memory_region_set_nonvolatile(vmr, false);
        memory_region_set_enabled(vmr, true);
        host_memory_backend_set_mapped(ct3d->hostvmem, true);
        if (ds->id) {
            v_name = g_strdup_printf("cxl-type3-dpa-vmem-space:%s", ds->id);
        } else {
            v_name = g_strdup("cxl-type3-dpa-vmem-space");
        }
        address_space_init(&ct3d->hostvmem_as, vmr, v_name);
        ct3d->cxl_dstate.vmem_size = memory_region_size(vmr);
        ct3d->cxl_dstate.mem_size += memory_region_size(vmr);
        g_free(v_name);
    }

    if (ct3d->hostpmem) {
        MemoryRegion *pmr;
        char *p_name;

        pmr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (!pmr) {
            error_setg(errp, "persistent memdev must have backing device");
            return false;
        }
        memory_region_set_nonvolatile(pmr, true);
        memory_region_set_enabled(pmr, true);
        host_memory_backend_set_mapped(ct3d->hostpmem, true);
        if (ds->id) {
            p_name = g_strdup_printf("cxl-type3-dpa-pmem-space:%s", ds->id);
        } else {
            p_name = g_strdup("cxl-type3-dpa-pmem-space");
        }
        address_space_init(&ct3d->hostpmem_as, pmr, p_name);
        ct3d->cxl_dstate.pmem_size = memory_region_size(pmr);
        ct3d->cxl_dstate.mem_size += memory_region_size(pmr);
        g_free(p_name);
    }

    return true;
}

static DOEProtocol doe_cdat_prot[] = {
    { CXL_VENDOR_ID, CXL_DOE_TABLE_ACCESS, cxl_doe_cdat_rsp },
    { }
};

static void ct3_realize(PCIDevice *pci_dev, Error **errp)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    ComponentRegisters *regs = &cxl_cstate->crb;
    MemoryRegion *mr = &regs->component_registers;
    uint8_t *pci_conf = pci_dev->config;
    unsigned short msix_num = 6;
    int i, rc;

    QTAILQ_INIT(&ct3d->error_list);

    if (!cxl_setup_memory(ct3d, errp)) {
        return;
    }

    pci_config_set_prog_interface(pci_conf, 0x10);

    pcie_endpoint_cap_init(pci_dev, 0x80);
    if (ct3d->sn != UI64_NULL) {
        pcie_dev_ser_num_init(pci_dev, 0x100, ct3d->sn);
        cxl_cstate->dvsec_offset = 0x100 + 0x0c;
    } else {
        cxl_cstate->dvsec_offset = 0x100;
    }

    ct3d->cxl_cstate.pdev = pci_dev;
    build_dvsecs(ct3d);

    regs->special_ops = g_new0(MemoryRegionOps, 1);
    regs->special_ops->write = ct3d_reg_write;

    cxl_component_register_block_init(OBJECT(pci_dev), cxl_cstate,
                                      TYPE_CXL_TYPE3);

    pci_register_bar(
        pci_dev, CXL_COMPONENT_REG_BAR_IDX,
        PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64, mr);

    cxl_device_register_block_init(OBJECT(pci_dev), &ct3d->cxl_dstate,
                                   &ct3d->cci);
    pci_register_bar(pci_dev, CXL_DEVICE_REG_BAR_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                         PCI_BASE_ADDRESS_MEM_TYPE_64,
                     &ct3d->cxl_dstate.device_registers);

    /* MSI(-X) Initialization */
    rc = msix_init_exclusive_bar(pci_dev, msix_num, 4, NULL);
    if (rc) {
        goto err_address_space_free;
    }
    for (i = 0; i < msix_num; i++) {
        msix_vector_use(pci_dev, i);
    }

    /* DOE Initialization */
    pcie_doe_init(pci_dev, &ct3d->doe_cdat, 0x190, doe_cdat_prot, true, 0);

    cxl_cstate->cdat.build_cdat_table = ct3_build_cdat_table;
    cxl_cstate->cdat.free_cdat_table = ct3_free_cdat_table;
    cxl_cstate->cdat.private = ct3d;
    cxl_doe_cdat_init(cxl_cstate, errp);
    if (*errp) {
        goto err_free_special_ops;
    }

    pcie_cap_deverr_init(pci_dev);
    /* Leave a bit of room for expansion */
    rc = pcie_aer_init(pci_dev, PCI_ERR_VER, 0x200, PCI_ERR_SIZEOF, NULL);
    if (rc) {
        goto err_release_cdat;
    }
    cxl_event_init(&ct3d->cxl_dstate, 2);

    return;

err_release_cdat:
    cxl_doe_cdat_release(cxl_cstate);
err_free_special_ops:
    g_free(regs->special_ops);
err_address_space_free:
    if (ct3d->hostpmem) {
        address_space_destroy(&ct3d->hostpmem_as);
    }
    if (ct3d->hostvmem) {
        address_space_destroy(&ct3d->hostvmem_as);
    }
    return;
}

static void ct3_exit(PCIDevice *pci_dev)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    ComponentRegisters *regs = &cxl_cstate->crb;

    pcie_aer_exit(pci_dev);
    cxl_doe_cdat_release(cxl_cstate);
    g_free(regs->special_ops);
    if (ct3d->hostpmem) {
        address_space_destroy(&ct3d->hostpmem_as);
    }
    if (ct3d->hostvmem) {
        address_space_destroy(&ct3d->hostvmem_as);
    }
}

static bool cxl_type3_dpa(CXLType3Dev *ct3d, hwaddr host_addr, uint64_t *dpa)
{
    int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
    uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers;
    unsigned int hdm_count;
    uint32_t cap;
    uint64_t dpa_base = 0;
    int i;

    cap = ldl_le_p(cache_mem + R_CXL_HDM_DECODER_CAPABILITY);
    hdm_count = cxl_decoder_count_dec(FIELD_EX32(cap,
                                                 CXL_HDM_DECODER_CAPABILITY,
                                                 DECODER_COUNT));

    for (i = 0; i < hdm_count; i++) {
        uint64_t decoder_base, decoder_size, hpa_offset, skip;
        uint32_t hdm_ctrl, low, high;
        int ig, iw;

        low = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_BASE_LO + i * hdm_inc);
        high = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_BASE_HI + i * hdm_inc);
        decoder_base = ((uint64_t)high << 32) | (low & 0xf0000000);

        low = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_SIZE_LO + i * hdm_inc);
        high = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_SIZE_HI + i * hdm_inc);
        decoder_size = ((uint64_t)high << 32) | (low & 0xf0000000);

        low = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_DPA_SKIP_LO +
                       i * hdm_inc);
        high = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_DPA_SKIP_HI +
                        i * hdm_inc);
        skip = ((uint64_t)high << 32) | (low & 0xf0000000);
        dpa_base += skip;

        hpa_offset = (uint64_t)host_addr - decoder_base;

        hdm_ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + i * hdm_inc);
        iw = FIELD_EX32(hdm_ctrl, CXL_HDM_DECODER0_CTRL, IW);
        ig = FIELD_EX32(hdm_ctrl, CXL_HDM_DECODER0_CTRL, IG);
        if (!FIELD_EX32(hdm_ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED)) {
            return false;
        }
        if (((uint64_t)host_addr < decoder_base) ||
            (hpa_offset >= decoder_size)) {
            dpa_base += decoder_size /
                cxl_interleave_ways_dec(iw, &error_fatal);
            continue;
        }

        *dpa = dpa_base +
            ((MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) |
             ((MAKE_64BIT_MASK(8 + ig + iw, 64 - 8 - ig - iw) & hpa_offset)
              >> iw));

        return true;
    }
    return false;
}
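
/*
 * Worked example of the decode above (values illustrative): with a decoder
 * committed for 2-way interleave (iw = 1) and 256-byte granularity (ig = 0),
 * an HPA offset of 0x300 keeps its low 8 + ig bits (0x00), drops the iw
 * interleave-selector bits starting at bit 8 + ig, and shifts the remainder
 * down: (0x300 & ~0x1ff) >> 1 = 0x100. The resulting DPA is
 * dpa_base + 0x100.
 */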

static int cxl_type3_hpa_to_as_and_dpa(CXLType3Dev *ct3d,
                                       hwaddr host_addr,
                                       unsigned int size,
                                       AddressSpace **as,
                                       uint64_t *dpa_offset)
{
    MemoryRegion *vmr = NULL, *pmr = NULL;

    if (ct3d->hostvmem) {
        vmr = host_memory_backend_get_memory(ct3d->hostvmem);
    }
    if (ct3d->hostpmem) {
        pmr = host_memory_backend_get_memory(ct3d->hostpmem);
    }

    if (!vmr && !pmr) {
        return -ENODEV;
    }

    if (!cxl_type3_dpa(ct3d, host_addr, dpa_offset)) {
        return -EINVAL;
    }

    if (*dpa_offset >= ct3d->cxl_dstate.mem_size) {
        return -EINVAL;
    }

    if (vmr) {
        if (*dpa_offset < memory_region_size(vmr)) {
            *as = &ct3d->hostvmem_as;
        } else {
            *as = &ct3d->hostpmem_as;
            *dpa_offset -= memory_region_size(vmr);
        }
    } else {
        *as = &ct3d->hostpmem_as;
    }

    return 0;
}

MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data,
                           unsigned size, MemTxAttrs attrs)
{
    CXLType3Dev *ct3d = CXL_TYPE3(d);
    uint64_t dpa_offset = 0;
    AddressSpace *as = NULL;
    int res;

    res = cxl_type3_hpa_to_as_and_dpa(ct3d, host_addr, size,
                                      &as, &dpa_offset);
    if (res) {
        return MEMTX_ERROR;
    }

    if (sanitize_running(&ct3d->cci)) {
        qemu_guest_getrandom_nofail(data, size);
        return MEMTX_OK;
    }

    return address_space_read(as, dpa_offset, attrs, data, size);
}

MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data,
                            unsigned size, MemTxAttrs attrs)
{
    CXLType3Dev *ct3d = CXL_TYPE3(d);
    uint64_t dpa_offset = 0;
    AddressSpace *as = NULL;
    int res;

    res = cxl_type3_hpa_to_as_and_dpa(ct3d, host_addr, size,
                                      &as, &dpa_offset);
    if (res) {
        return MEMTX_ERROR;
    }

    if (sanitize_running(&ct3d->cci)) {
        return MEMTX_OK;
    }

    return address_space_write(as, dpa_offset, attrs, &data, size);
}

static void ct3d_reset(DeviceState *dev)
{
    CXLType3Dev *ct3d = CXL_TYPE3(dev);
    uint32_t *reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    uint32_t *write_msk = ct3d->cxl_cstate.crb.cache_mem_regs_write_mask;

    cxl_component_register_init_common(reg_state, write_msk, CXL2_TYPE3_DEVICE);
    cxl_device_register_init_t3(ct3d);

    /*
     * Bring up an endpoint to target with MCTP over VDM.
     * This device is emulating an MLD with single LD for now.
     */
    cxl_initialize_t3_fm_owned_ld_mctpcci(&ct3d->vdm_fm_owned_ld_mctp_cci,
                                          DEVICE(ct3d), DEVICE(ct3d),
                                          512); /* Max payload made up */
    cxl_initialize_t3_ld_cci(&ct3d->ld0_cci, DEVICE(ct3d), DEVICE(ct3d),
                             512); /* Max payload made up */
}

static Property ct3_props[] = {
    DEFINE_PROP_LINK("memdev", CXLType3Dev, hostmem, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *), /* for backward compatibility */
    DEFINE_PROP_LINK("persistent-memdev", CXLType3Dev, hostpmem,
                     TYPE_MEMORY_BACKEND, HostMemoryBackend *),
    DEFINE_PROP_LINK("volatile-memdev", CXLType3Dev, hostvmem,
                     TYPE_MEMORY_BACKEND, HostMemoryBackend *),
    DEFINE_PROP_LINK("lsa", CXLType3Dev, lsa, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *),
    DEFINE_PROP_UINT64("sn", CXLType3Dev, sn, UI64_NULL),
    DEFINE_PROP_STRING("cdat", CXLType3Dev, cxl_cstate.cdat.filename),
    DEFINE_PROP_END_OF_LIST(),
};
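
/*
 * Illustrative command line for a persistent-memory instance of this
 * device (the backend ids pmem0 and lsa0 and the root port rp0 are
 * assumptions; they must be defined elsewhere on the command line):
 *
 *  -device cxl-type3,bus=rp0,persistent-memdev=pmem0,lsa=lsa0,id=cxl-mem0
 */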

static uint64_t get_lsa_size(CXLType3Dev *ct3d)
{
    MemoryRegion *mr;

    if (!ct3d->lsa) {
        return 0;
    }

    mr = host_memory_backend_get_memory(ct3d->lsa);
    return memory_region_size(mr);
}

static void validate_lsa_access(MemoryRegion *mr, uint64_t size,
                                uint64_t offset)
{
    assert(offset + size <= memory_region_size(mr));
    assert(offset + size > offset);
}

static uint64_t get_lsa(CXLType3Dev *ct3d, void *buf, uint64_t size,
                    uint64_t offset)
{
    MemoryRegion *mr;
    void *lsa;

    if (!ct3d->lsa) {
        return 0;
    }

    mr = host_memory_backend_get_memory(ct3d->lsa);
    validate_lsa_access(mr, size, offset);

    lsa = memory_region_get_ram_ptr(mr) + offset;
    memcpy(buf, lsa, size);

    return size;
}

static void set_lsa(CXLType3Dev *ct3d, const void *buf, uint64_t size,
                    uint64_t offset)
{
    MemoryRegion *mr;
    void *lsa;

    if (!ct3d->lsa) {
        return;
    }

    mr = host_memory_backend_get_memory(ct3d->lsa);
    validate_lsa_access(mr, size, offset);

    lsa = memory_region_get_ram_ptr(mr) + offset;
    memcpy(lsa, buf, size);
    memory_region_set_dirty(mr, offset, size);

    /*
     * Just like the PMEM, if the guest is not allowed to exit gracefully, label
     * updates will get lost.
     */
}

static bool set_cacheline(CXLType3Dev *ct3d, uint64_t dpa_offset, uint8_t *data)
{
    MemoryRegion *vmr = NULL, *pmr = NULL;
    AddressSpace *as;

    if (ct3d->hostvmem) {
        vmr = host_memory_backend_get_memory(ct3d->hostvmem);
    }
    if (ct3d->hostpmem) {
        pmr = host_memory_backend_get_memory(ct3d->hostpmem);
    }

    if (!vmr && !pmr) {
        return false;
    }

    if (dpa_offset + CXL_CACHE_LINE_SIZE > ct3d->cxl_dstate.mem_size) {
        return false;
    }

    if (vmr) {
        if (dpa_offset < memory_region_size(vmr)) {
            as = &ct3d->hostvmem_as;
        } else {
            as = &ct3d->hostpmem_as;
            dpa_offset -= memory_region_size(vmr);
        }
    } else {
        as = &ct3d->hostpmem_as;
    }

    address_space_write(as, dpa_offset, MEMTXATTRS_UNSPECIFIED, data,
                        CXL_CACHE_LINE_SIZE);
    return true;
}

void cxl_set_poison_list_overflowed(CXLType3Dev *ct3d)
{
    ct3d->poison_list_overflowed = true;
    ct3d->poison_list_overflow_ts =
        cxl_device_get_timestamp(&ct3d->cxl_dstate);
}

void qmp_cxl_inject_poison(const char *path, uint64_t start, uint64_t length,
                           Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLType3Dev *ct3d;
    CXLPoison *p;

    if (length % 64) {
        error_setg(errp, "Poison injection must be in multiples of 64 bytes");
        return;
    }
    if (start % 64) {
        error_setg(errp, "Poison start address must be 64 byte aligned");
        return;
    }
    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }

    ct3d = CXL_TYPE3(obj);

    QLIST_FOREACH(p, &ct3d->poison_list, node) {
        /* Reject any overlap, including full containment of a region */
        if ((start < p->start + p->length) &&
            (p->start < start + length)) {
            error_setg(errp,
                       "Overlap with existing poisoned region not supported");
            return;
        }
    }

    if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
        cxl_set_poison_list_overflowed(ct3d);
        return;
    }

    p = g_new0(CXLPoison, 1);
    p->length = length;
    p->start = start;
    /* Different from injected via the mbox */
    p->type = CXL_POISON_TYPE_INTERNAL;

    QLIST_INSERT_HEAD(&ct3d->poison_list, p, node);
    ct3d->poison_list_cnt++;
}
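
/*
 * Illustrative QMP invocation (the device id cxl-mem0 is an assumption;
 * start and length must be 64-byte aligned):
 *
 * { "execute": "cxl-inject-poison",
 *   "arguments": { "path": "/machine/peripheral/cxl-mem0",
 *                  "start": 2048, "length": 256 } }
 */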

/* For uncorrectable errors include support for multiple header recording */
void qmp_cxl_inject_uncorrectable_errors(const char *path,
                                         CXLUncorErrorRecordList *errors,
                                         Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    static PCIEAERErr err = {};
    CXLType3Dev *ct3d;
    CXLError *cxl_err;
    uint32_t *reg_state;
    uint32_t unc_err;
    bool first;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }

    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }

    err.status = PCI_ERR_UNC_INTN;
    err.source_id = pci_requester_id(PCI_DEVICE(obj));
    err.flags = 0;

    ct3d = CXL_TYPE3(obj);

    first = QTAILQ_EMPTY(&ct3d->error_list);
    reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    while (errors) {
        uint32List *header = errors->value->header;
        uint8_t header_count = 0;
        int cxl_err_code;

        cxl_err_code = ct3d_qmp_uncor_err_to_cxl(errors->value->type);
        if (cxl_err_code < 0) {
            error_setg(errp, "Unknown error code");
            return;
        }

        /* If the error is masked, nothing to do here */
        if (!((1 << cxl_err_code) &
              ~ldl_le_p(reg_state + R_CXL_RAS_UNC_ERR_MASK))) {
            errors = errors->next;
            continue;
        }

        cxl_err = g_malloc0(sizeof(*cxl_err));
        if (!cxl_err) {
            return;
        }

        cxl_err->type = cxl_err_code;
        while (header && header_count < 32) {
            cxl_err->header[header_count++] = header->value;
            header = header->next;
        }
        if (header) {
            /* The list was not exhausted, so more than 32 DWORDs given */
            error_setg(errp, "Header must be 32 DWORD or less");
            g_free(cxl_err);
            return;
        }
        QTAILQ_INSERT_TAIL(&ct3d->error_list, cxl_err, node);

        errors = errors->next;
    }

    if (first && !QTAILQ_EMPTY(&ct3d->error_list)) {
        uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers;
        uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL);
        uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0];
        int i;

        cxl_err = QTAILQ_FIRST(&ct3d->error_list);
        for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) {
            stl_le_p(header_log + i, cxl_err->header[i]);
        }

        capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                             FIRST_ERROR_POINTER, cxl_err->type);
        stl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL, capctrl);
    }

    unc_err = 0;
    QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) {
        unc_err |= (1 << cxl_err->type);
    }
    if (!unc_err) {
        return;
    }

    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, unc_err);
    pcie_aer_inject_error(PCI_DEVICE(obj), &err);
}
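
/*
 * Illustrative QMP invocation injecting a single uncorrectable error with
 * a three-DWORD header (device id and the QAPI enum spelling are
 * assumptions):
 *
 * { "execute": "cxl-inject-uncorrectable-errors",
 *   "arguments": {
 *     "path": "/machine/peripheral/cxl-mem0",
 *     "errors": [ { "type": "cache-data-parity",
 *                   "header": [ 0, 1, 2 ] } ] } }
 */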

void qmp_cxl_inject_correctable_error(const char *path, CxlCorErrorType type,
                                      Error **errp)
{
    static PCIEAERErr err = {};
    Object *obj = object_resolve_path(path, NULL);
    CXLType3Dev *ct3d;
    uint32_t *reg_state;
    uint32_t cor_err;
    int cxl_err_type;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }

    err.status = PCI_ERR_COR_INTERNAL;
    err.source_id = pci_requester_id(PCI_DEVICE(obj));
    err.flags = PCIE_AER_ERR_IS_CORRECTABLE;

    ct3d = CXL_TYPE3(obj);
    reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    cor_err = ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS);

    cxl_err_type = ct3d_qmp_cor_err_to_cxl(type);
    if (cxl_err_type < 0) {
        error_setg(errp, "Invalid COR error");
        return;
    }
    /* If the error is masked, nothing to do here */
    if (!((1 << cxl_err_type) &
          ~ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_MASK))) {
        return;
    }

    cor_err |= (1 << cxl_err_type);
    stl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS, cor_err);

    pcie_aer_inject_error(PCI_DEVICE(obj), &err);
}
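
/*
 * Illustrative QMP invocation (device id and enum spelling assumed):
 *
 * { "execute": "cxl-inject-correctable-error",
 *   "arguments": { "path": "/machine/peripheral/cxl-mem0",
 *                  "type": "mem-data-ecc" } }
 */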

static void cxl_assign_event_header(CXLEventRecordHdr *hdr,
                                    const QemuUUID *uuid, uint32_t flags,
                                    uint8_t length, uint64_t timestamp)
{
    st24_le_p(&hdr->flags, flags);
    hdr->length = length;
    memcpy(&hdr->id, uuid, sizeof(hdr->id));
    stq_le_p(&hdr->timestamp, timestamp);
}

static const QemuUUID gen_media_uuid = {
    .data = UUID(0xfbcd0a77, 0xc260, 0x417f,
                 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6),
};

static const QemuUUID dram_uuid = {
    .data = UUID(0x601dcbb3, 0x9c06, 0x4eab, 0xb8, 0xaf,
                 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24),
};

static const QemuUUID memory_module_uuid = {
    .data = UUID(0xfe927475, 0xdd59, 0x4339, 0xa5, 0x86,
                 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74),
};

#define CXL_GMER_VALID_CHANNEL                          BIT(0)
#define CXL_GMER_VALID_RANK                             BIT(1)
#define CXL_GMER_VALID_DEVICE                           BIT(2)
#define CXL_GMER_VALID_COMPONENT                        BIT(3)

static int ct3d_qmp_cxl_event_log_enc(CxlEventLog log)
{
    switch (log) {
    case CXL_EVENT_LOG_INFORMATIONAL:
        return CXL_EVENT_TYPE_INFO;
    case CXL_EVENT_LOG_WARNING:
        return CXL_EVENT_TYPE_WARN;
    case CXL_EVENT_LOG_FAILURE:
        return CXL_EVENT_TYPE_FAIL;
    case CXL_EVENT_LOG_FATAL:
        return CXL_EVENT_TYPE_FATAL;
    /* DCD not yet supported */
    default:
        return -EINVAL;
    }
}

/* Component ID is device specific.  Define this as a string. */
void qmp_cxl_inject_general_media_event(const char *path, CxlEventLog log,
                                        uint8_t flags, uint64_t dpa,
                                        uint8_t descriptor, uint8_t type,
                                        uint8_t transaction_type,
                                        bool has_channel, uint8_t channel,
                                        bool has_rank, uint8_t rank,
                                        bool has_device, uint32_t device,
                                        const char *component_id,
                                        Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLEventGenMedia gem;
    CXLEventRecordHdr *hdr = &gem.hdr;
    CXLDeviceState *cxlds;
    CXLType3Dev *ct3d;
    uint16_t valid_flags = 0;
    uint8_t enc_log;
    int rc;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }
    ct3d = CXL_TYPE3(obj);
    cxlds = &ct3d->cxl_dstate;

    rc = ct3d_qmp_cxl_event_log_enc(log);
    if (rc < 0) {
        error_setg(errp, "Unhandled error log type");
        return;
    }
    enc_log = rc;

    memset(&gem, 0, sizeof(gem));
    cxl_assign_event_header(hdr, &gen_media_uuid, flags, sizeof(gem),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));

    stq_le_p(&gem.phys_addr, dpa);
    gem.descriptor = descriptor;
    gem.type = type;
    gem.transaction_type = transaction_type;

    if (has_channel) {
        gem.channel = channel;
        valid_flags |= CXL_GMER_VALID_CHANNEL;
    }

    if (has_rank) {
        gem.rank = rank;
        valid_flags |= CXL_GMER_VALID_RANK;
    }

    if (has_device) {
        st24_le_p(gem.device, device);
        valid_flags |= CXL_GMER_VALID_DEVICE;
    }

    if (component_id) {
        strncpy((char *)gem.component_id, component_id,
                sizeof(gem.component_id) - 1);
        valid_flags |= CXL_GMER_VALID_COMPONENT;
    }

    stw_le_p(&gem.validity_flags, valid_flags);

    if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&gem)) {
        cxl_event_irq_assert(ct3d);
    }
}
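
/*
 * Illustrative QMP invocation; the numeric values follow the CXL General
 * Media event record encodings and the argument spellings per the QAPI
 * schema are assumptions:
 *
 * { "execute": "cxl-inject-general-media-event",
 *   "arguments": { "path": "/machine/peripheral/cxl-mem0",
 *                  "log": "informational", "flags": 1, "dpa": 4096,
 *                  "descriptor": 3, "type": 3, "transaction-type": 192,
 *                  "channel": 1, "rank": 0 } }
 */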

#define CXL_DRAM_VALID_CHANNEL                          BIT(0)
#define CXL_DRAM_VALID_RANK                             BIT(1)
#define CXL_DRAM_VALID_NIBBLE_MASK                      BIT(2)
#define CXL_DRAM_VALID_BANK_GROUP                       BIT(3)
#define CXL_DRAM_VALID_BANK                             BIT(4)
#define CXL_DRAM_VALID_ROW                              BIT(5)
#define CXL_DRAM_VALID_COLUMN                           BIT(6)
#define CXL_DRAM_VALID_CORRECTION_MASK                  BIT(7)

void qmp_cxl_inject_dram_event(const char *path, CxlEventLog log, uint8_t flags,
                               uint64_t dpa, uint8_t descriptor,
                               uint8_t type, uint8_t transaction_type,
                               bool has_channel, uint8_t channel,
                               bool has_rank, uint8_t rank,
                               bool has_nibble_mask, uint32_t nibble_mask,
                               bool has_bank_group, uint8_t bank_group,
                               bool has_bank, uint8_t bank,
                               bool has_row, uint32_t row,
                               bool has_column, uint16_t column,
                               bool has_correction_mask,
                               uint64List *correction_mask,
                               Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLEventDram dram;
    CXLEventRecordHdr *hdr = &dram.hdr;
    CXLDeviceState *cxlds;
    CXLType3Dev *ct3d;
    uint16_t valid_flags = 0;
    uint8_t enc_log;
    int rc;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }
    ct3d = CXL_TYPE3(obj);
    cxlds = &ct3d->cxl_dstate;

    rc = ct3d_qmp_cxl_event_log_enc(log);
    if (rc < 0) {
        error_setg(errp, "Unhandled error log type");
        return;
    }
    enc_log = rc;

    memset(&dram, 0, sizeof(dram));
    cxl_assign_event_header(hdr, &dram_uuid, flags, sizeof(dram),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));
    stq_le_p(&dram.phys_addr, dpa);
    dram.descriptor = descriptor;
    dram.type = type;
    dram.transaction_type = transaction_type;

    if (has_channel) {
        dram.channel = channel;
        valid_flags |= CXL_DRAM_VALID_CHANNEL;
    }

    if (has_rank) {
        dram.rank = rank;
        valid_flags |= CXL_DRAM_VALID_RANK;
    }

    if (has_nibble_mask) {
        st24_le_p(dram.nibble_mask, nibble_mask);
        valid_flags |= CXL_DRAM_VALID_NIBBLE_MASK;
    }

    if (has_bank_group) {
        dram.bank_group = bank_group;
        valid_flags |= CXL_DRAM_VALID_BANK_GROUP;
    }

    if (has_bank) {
        dram.bank = bank;
        valid_flags |= CXL_DRAM_VALID_BANK;
    }

    if (has_row) {
        st24_le_p(dram.row, row);
        valid_flags |= CXL_DRAM_VALID_ROW;
    }

    if (has_column) {
        stw_le_p(&dram.column, column);
        valid_flags |= CXL_DRAM_VALID_COLUMN;
    }

    if (has_correction_mask) {
        int count = 0;
        while (correction_mask && count < 4) {
            stq_le_p(&dram.correction_mask[count],
                     correction_mask->value);
            count++;
            correction_mask = correction_mask->next;
        }
        valid_flags |= CXL_DRAM_VALID_CORRECTION_MASK;
    }

    stw_le_p(&dram.validity_flags, valid_flags);

    if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&dram)) {
        cxl_event_irq_assert(ct3d);
    }
}
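
/*
 * Illustrative QMP invocation mirroring the general media case, with
 * DRAM-specific fields (argument spellings assumed):
 *
 * { "execute": "cxl-inject-dram-event",
 *   "arguments": { "path": "/machine/peripheral/cxl-mem0",
 *                  "log": "fatal", "flags": 1, "dpa": 4096,
 *                  "descriptor": 1, "type": 3, "transaction-type": 192,
 *                  "bank": 0, "bank-group": 1, "row": 2, "column": 3 } }
 */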

void qmp_cxl_inject_memory_module_event(const char *path, CxlEventLog log,
                                        uint8_t flags, uint8_t type,
                                        uint8_t health_status,
                                        uint8_t media_status,
                                        uint8_t additional_status,
                                        uint8_t life_used,
                                        int16_t temperature,
                                        uint32_t dirty_shutdown_count,
                                        uint32_t corrected_volatile_error_count,
                                        uint32_t corrected_persist_error_count,
                                        Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLEventMemoryModule module;
    CXLEventRecordHdr *hdr = &module.hdr;
    CXLDeviceState *cxlds;
    CXLType3Dev *ct3d;
    uint8_t enc_log;
    int rc;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }
    ct3d = CXL_TYPE3(obj);
    cxlds = &ct3d->cxl_dstate;

    rc = ct3d_qmp_cxl_event_log_enc(log);
    if (rc < 0) {
        error_setg(errp, "Unhandled error log type");
        return;
    }
    enc_log = rc;

    memset(&module, 0, sizeof(module));
    cxl_assign_event_header(hdr, &memory_module_uuid, flags, sizeof(module),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));

    module.type = type;
    module.health_status = health_status;
    module.media_status = media_status;
    module.additional_status = additional_status;
    module.life_used = life_used;
    stw_le_p(&module.temperature, temperature);
    stl_le_p(&module.dirty_shutdown_count, dirty_shutdown_count);
    stl_le_p(&module.corrected_volatile_error_count,
             corrected_volatile_error_count);
    stl_le_p(&module.corrected_persistent_error_count,
             corrected_persist_error_count);

    if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&module)) {
        cxl_event_irq_assert(ct3d);
    }
}
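
/*
 * Illustrative QMP invocation (argument spellings assumed):
 *
 * { "execute": "cxl-inject-memory-module-event",
 *   "arguments": { "path": "/machine/peripheral/cxl-mem0",
 *                  "log": "fatal", "flags": 1, "type": 0,
 *                  "health-status": 1, "media-status": 0,
 *                  "additional-status": 0, "life-used": 33,
 *                  "temperature": 3, "dirty-shutdown-count": 10,
 *                  "corrected-volatile-error-count": 420,
 *                  "corrected-persistent-error-count": 1 } }
 */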

static void ct3_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
    CXLType3Class *cvc = CXL_TYPE3_CLASS(oc);

    pc->realize = ct3_realize;
    pc->exit = ct3_exit;
    pc->class_id = PCI_CLASS_MEMORY_CXL;
    pc->vendor_id = PCI_VENDOR_ID_INTEL;
    pc->device_id = 0xd93; /* LVF for now */
    pc->revision = 1;

    pc->config_write = ct3d_config_write;
    pc->config_read = ct3d_config_read;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "CXL Memory Device (Type 3)";
    dc->reset = ct3d_reset;
    device_class_set_props(dc, ct3_props);

    cvc->get_lsa_size = get_lsa_size;
    cvc->get_lsa = get_lsa;
    cvc->set_lsa = set_lsa;
    cvc->set_cacheline = set_cacheline;
}

static const TypeInfo ct3d_info = {
    .name = TYPE_CXL_TYPE3,
    .parent = TYPE_PCI_DEVICE,
    .class_size = sizeof(struct CXLType3Class),
    .class_init = ct3_class_init,
    .instance_size = sizeof(CXLType3Dev),
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CXL_DEVICE },
        { INTERFACE_PCIE_DEVICE },
        {}
    },
};

static void ct3d_registers(void)
{
    type_register_static(&ct3d_info);
}

type_init(ct3d_registers);