xref: /qemu/hw/mem/cxl_type3.c (revision f8ed3648)
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "qapi/qapi-commands-cxl.h"
#include "hw/mem/memory-device.h"
#include "hw/mem/pc-dimm.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/pmem.h"
#include "qemu/range.h"
#include "qemu/rcu.h"
#include "sysemu/hostmem.h"
#include "sysemu/numa.h"
#include "hw/cxl/cxl.h"
#include "hw/pci/msix.h"

#define DWORD_BYTE 4

/* Default CDAT entries for a memory region */
enum {
    CT3_CDAT_DSMAS,
    CT3_CDAT_DSLBIS0,
    CT3_CDAT_DSLBIS1,
    CT3_CDAT_DSLBIS2,
    CT3_CDAT_DSLBIS3,
    CT3_CDAT_DSEMTS,
    CT3_CDAT_NUM_ENTRIES
};

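/*
 * Each memory region is described by one fixed set of CDAT entries: a DSMAS
 * giving the DPA range, four DSLBIS records carrying read/write latency and
 * bandwidth, and a DSEMTS mapping the range to an EFI memory type.
 */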
static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
                                         int dsmad_handle, MemoryRegion *mr,
                                         bool is_pmem, uint64_t dpa_base)
{
    g_autofree CDATDsmas *dsmas = NULL;
    g_autofree CDATDslbis *dslbis0 = NULL;
    g_autofree CDATDslbis *dslbis1 = NULL;
    g_autofree CDATDslbis *dslbis2 = NULL;
    g_autofree CDATDslbis *dslbis3 = NULL;
    g_autofree CDATDsemts *dsemts = NULL;

    dsmas = g_malloc(sizeof(*dsmas));
    if (!dsmas) {
        return -ENOMEM;
    }
    *dsmas = (CDATDsmas) {
        .header = {
            .type = CDAT_TYPE_DSMAS,
            .length = sizeof(*dsmas),
        },
        .DSMADhandle = dsmad_handle,
        .flags = is_pmem ? CDAT_DSMAS_FLAG_NV : 0,
        .DPA_base = dpa_base,
        .DPA_length = memory_region_size(mr),
    };

    /* For now, no memory-side cache; use plausible numbers */
    dslbis0 = g_malloc(sizeof(*dslbis0));
    if (!dslbis0) {
        return -ENOMEM;
    }
    *dslbis0 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis0),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_READ_LATENCY,
        .entry_base_unit = 10000, /* 10ns base */
        .entry[0] = 15, /* 150ns */
    };

    dslbis1 = g_malloc(sizeof(*dslbis1));
    if (!dslbis1) {
        return -ENOMEM;
    }
    *dslbis1 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis1),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_WRITE_LATENCY,
        .entry_base_unit = 10000,
        .entry[0] = 25, /* 250ns */
    };

    dslbis2 = g_malloc(sizeof(*dslbis2));
    if (!dslbis2) {
        return -ENOMEM;
    }
    *dslbis2 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis2),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_READ_BANDWIDTH,
        .entry_base_unit = 1000, /* GB/s */
        .entry[0] = 16,
    };

    dslbis3 = g_malloc(sizeof(*dslbis3));
    if (!dslbis3) {
        return -ENOMEM;
    }
    *dslbis3 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis3),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_WRITE_BANDWIDTH,
        .entry_base_unit = 1000, /* GB/s */
        .entry[0] = 16,
    };

    dsemts = g_malloc(sizeof(*dsemts));
    if (!dsemts) {
        return -ENOMEM;
    }
    *dsemts = (CDATDsemts) {
        .header = {
            .type = CDAT_TYPE_DSEMTS,
            .length = sizeof(*dsemts),
        },
        .DSMAS_handle = dsmad_handle,
        /*
         * NV: Reserved - the non-volatile flag from DSMAS is what matters
         * V: EFI_MEMORY_SP
         */
        .EFI_memory_type_attr = is_pmem ? 2 : 1,
        .DPA_offset = 0,
        .DPA_length = memory_region_size(mr),
    };

    /* Header always at start of structure */
    cdat_table[CT3_CDAT_DSMAS] = g_steal_pointer(&dsmas);
    cdat_table[CT3_CDAT_DSLBIS0] = g_steal_pointer(&dslbis0);
    cdat_table[CT3_CDAT_DSLBIS1] = g_steal_pointer(&dslbis1);
    cdat_table[CT3_CDAT_DSLBIS2] = g_steal_pointer(&dslbis2);
    cdat_table[CT3_CDAT_DSLBIS3] = g_steal_pointer(&dslbis3);
    cdat_table[CT3_CDAT_DSEMTS] = g_steal_pointer(&dsemts);

    return 0;
}

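/*
 * Build the full CDAT table: one CT3_CDAT_NUM_ENTRIES set per backend.
 * The volatile region (if any) is described first, starting at DPA 0; the
 * persistent region follows at a DPA base equal to the volatile size.
 */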
static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
{
    g_autofree CDATSubHeader **table = NULL;
    CXLType3Dev *ct3d = priv;
    MemoryRegion *volatile_mr = NULL, *nonvolatile_mr = NULL;
    int dsmad_handle = 0;
    int cur_ent = 0;
    int len = 0;
    int rc, i;

    if (!ct3d->hostpmem && !ct3d->hostvmem) {
        return 0;
    }

    if (ct3d->hostvmem) {
        volatile_mr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (!volatile_mr) {
            return -EINVAL;
        }
        len += CT3_CDAT_NUM_ENTRIES;
    }

    if (ct3d->hostpmem) {
        nonvolatile_mr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (!nonvolatile_mr) {
            return -EINVAL;
        }
        len += CT3_CDAT_NUM_ENTRIES;
    }

    table = g_malloc0(len * sizeof(*table));
    if (!table) {
        return -ENOMEM;
    }

    /* Now fill them in */
    if (volatile_mr) {
        rc = ct3_build_cdat_entries_for_mr(table, dsmad_handle++, volatile_mr,
                                           false, 0);
        if (rc < 0) {
            return rc;
        }
        cur_ent = CT3_CDAT_NUM_ENTRIES;
    }

    if (nonvolatile_mr) {
        rc = ct3_build_cdat_entries_for_mr(&(table[cur_ent]), dsmad_handle++,
                                           nonvolatile_mr, true,
                                           (volatile_mr ?
                                            memory_region_size(volatile_mr) : 0));
        if (rc < 0) {
            goto error_cleanup;
        }
        cur_ent += CT3_CDAT_NUM_ENTRIES;
    }
    assert(len == cur_ent);

    *cdat_table = g_steal_pointer(&table);

    return len;
error_cleanup:
    for (i = 0; i < cur_ent; i++) {
        g_free(table[i]);
    }
    return rc;
}

static void ct3_free_cdat_table(CDATSubHeader **cdat_table, int num, void *priv)
{
    int i;

    for (i = 0; i < num; i++) {
        g_free(cdat_table[i]);
    }
    g_free(cdat_table);
}

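/*
 * CXL Table Access DOE handler for the CDAT: each request names an entry
 * handle; the response returns that entry's bytes plus the handle of the
 * next entry, or CXL_DOE_TAB_ENT_MAX once the final entry has been read,
 * letting the host walk the whole table one entry at a time.
 */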
static bool cxl_doe_cdat_rsp(DOECap *doe_cap)
{
    CDATObject *cdat = &CXL_TYPE3(doe_cap->pdev)->cxl_cstate.cdat;
    uint16_t ent;
    void *base;
    uint32_t len;
    CDATReq *req = pcie_doe_get_write_mbox_ptr(doe_cap);
    CDATRsp rsp;

    assert(cdat->entry_len);

    /* Discard if request length mismatched */
    if (pcie_doe_get_obj_len(req) <
        DIV_ROUND_UP(sizeof(CDATReq), DWORD_BYTE)) {
        return false;
    }

    ent = req->entry_handle;
    base = cdat->entry[ent].base;
    len = cdat->entry[ent].length;

    rsp = (CDATRsp) {
        .header = {
            .vendor_id = CXL_VENDOR_ID,
            .data_obj_type = CXL_DOE_TABLE_ACCESS,
            .reserved = 0x0,
            .length = DIV_ROUND_UP((sizeof(rsp) + len), DWORD_BYTE),
        },
        .rsp_code = CXL_DOE_TAB_RSP,
        .table_type = CXL_DOE_TAB_TYPE_CDAT,
        .entry_handle = (ent < cdat->entry_len - 1) ?
                        ent + 1 : CXL_DOE_TAB_ENT_MAX,
    };

    memcpy(doe_cap->read_mbox, &rsp, sizeof(rsp));
    memcpy(doe_cap->read_mbox + DIV_ROUND_UP(sizeof(rsp), DWORD_BYTE),
           base, len);

    doe_cap->read_mbox_len += rsp.header.length;

    return true;
}

static uint32_t ct3d_config_read(PCIDevice *pci_dev, uint32_t addr, int size)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    uint32_t val;

    if (pcie_doe_read_config(&ct3d->doe_cdat, addr, size, &val)) {
        return val;
    }

    return pci_default_read_config(pci_dev, addr, size);
}

static void ct3d_config_write(PCIDevice *pci_dev, uint32_t addr, uint32_t val,
                              int size)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);

    pcie_doe_write_config(&ct3d->doe_cdat, addr, val, size);
    pci_default_write_config(pci_dev, addr, val, size);
    pcie_aer_write_config(pci_dev, addr, val, size);
}

/*
 * Null value of all Fs suggested by IEEE RA guidelines for use of
 * EUI, OUI and CID
 */
#define UI64_NULL ~(0ULL)

static void build_dvsecs(CXLType3Dev *ct3d)
{
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    uint8_t *dvsec;
    uint32_t range1_size_hi, range1_size_lo,
             range1_base_hi = 0, range1_base_lo = 0,
             range2_size_hi = 0, range2_size_lo = 0,
             range2_base_hi = 0, range2_base_lo = 0;

    /*
     * Volatile memory is mapped at 0x0.
     * Persistent memory is mapped at (volatile->size).
     */
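    /*
     * Sketch of the Range Size Low encoding below, assuming the CXL 2.0
     * Device DVSEC layout: bits[1:0] = Memory_Info_Valid | Memory_Active
     * (the 0x3), bits[4:2] = Media_Type (the 2 << 2), bits[7:5] =
     * Memory_Class (the 2 << 5), and bits[31:28] carry size bits 31:28
     * (hence the 0xF0000000 mask); the upper dword of the size goes in the
     * separate _hi register.
     */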
    if (ct3d->hostvmem) {
        range1_size_hi = ct3d->hostvmem->size >> 32;
        range1_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                         (ct3d->hostvmem->size & 0xF0000000);
        if (ct3d->hostpmem) {
            range2_size_hi = ct3d->hostpmem->size >> 32;
            range2_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                             (ct3d->hostpmem->size & 0xF0000000);
        }
    } else {
        range1_size_hi = ct3d->hostpmem->size >> 32;
        range1_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                         (ct3d->hostpmem->size & 0xF0000000);
    }

    dvsec = (uint8_t *)&(CXLDVSECDevice){
        .cap = 0x1e,
        .ctrl = 0x2,
        .status2 = 0x2,
        .range1_size_hi = range1_size_hi,
        .range1_size_lo = range1_size_lo,
        .range1_base_hi = range1_base_hi,
        .range1_base_lo = range1_base_lo,
        .range2_size_hi = range2_size_hi,
        .range2_size_lo = range2_size_lo,
        .range2_base_hi = range2_base_hi,
        .range2_base_lo = range2_base_lo,
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               PCIE_CXL_DEVICE_DVSEC_LENGTH,
                               PCIE_CXL_DEVICE_DVSEC,
                               PCIE_CXL2_DEVICE_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){
        .rsvd         = 0,
        .reg0_base_lo = RBI_COMPONENT_REG | CXL_COMPONENT_REG_BAR_IDX,
        .reg0_base_hi = 0,
        .reg1_base_lo = RBI_CXL_DEVICE_REG | CXL_DEVICE_REG_BAR_IDX,
        .reg1_base_hi = 0,
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               REG_LOC_DVSEC_LENGTH, REG_LOC_DVSEC,
                               REG_LOC_DVSEC_REVID, dvsec);
    dvsec = (uint8_t *)&(CXLDVSECDeviceGPF){
        .phase2_duration = 0x603, /* 3 seconds */
        .phase2_power = 0x33, /* 0x33 milliwatts */
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               GPF_DEVICE_DVSEC_LENGTH, GPF_DEVICE_DVSEC,
                               GPF_DEVICE_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECPortFlexBus){
        .cap                     = 0x26, /* 68B, IO, Mem, non-MLD */
        .ctrl                    = 0x02, /* IO always enabled */
        .status                  = 0x26, /* same as capabilities */
        .rcvd_mod_ts_data_phase1 = 0xef, /* unclear why this value is used */
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               PCIE_FLEXBUS_PORT_DVSEC_LENGTH_2_0,
                               PCIE_FLEXBUS_PORT_DVSEC,
                               PCIE_FLEXBUS_PORT_DVSEC_REVID_2_0, dvsec);
}

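/*
 * Software commits an HDM decoder by setting the COMMIT bit in its control
 * register; the model responds by reporting COMMITTED (or clearing it again
 * on uncommit) with ERR cleared. Only decoder 0 is modelled, hence the
 * asserts below.
 */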
static void hdm_decoder_commit(CXLType3Dev *ct3d, int which)
{
    ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
    uint32_t *cache_mem = cregs->cache_mem_registers;
    uint32_t ctrl;

    assert(which == 0);

    ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL);
    /* TODO: Sanity checks that the decoder is possible */
    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, ERR, 0);
    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);

    stl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL, ctrl);
}

static void hdm_decoder_uncommit(CXLType3Dev *ct3d, int which)
{
    ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
    uint32_t *cache_mem = cregs->cache_mem_registers;
    uint32_t ctrl;

    assert(which == 0);

    ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL);

    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, ERR, 0);
    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 0);

    stl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL, ctrl);
}

static int ct3d_qmp_uncor_err_to_cxl(CxlUncorErrorType qmp_err)
{
    switch (qmp_err) {
    case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_DATA_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_ADDRESS_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_ADDRESS_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_BE_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_BE_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_ECC:
        return CXL_RAS_UNC_ERR_CACHE_DATA_ECC;
    case CXL_UNCOR_ERROR_TYPE_MEM_DATA_PARITY:
        return CXL_RAS_UNC_ERR_MEM_DATA_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_ADDRESS_PARITY:
        return CXL_RAS_UNC_ERR_MEM_ADDRESS_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_BE_PARITY:
        return CXL_RAS_UNC_ERR_MEM_BE_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_DATA_ECC:
        return CXL_RAS_UNC_ERR_MEM_DATA_ECC;
    case CXL_UNCOR_ERROR_TYPE_REINIT_THRESHOLD:
        return CXL_RAS_UNC_ERR_REINIT_THRESHOLD;
    case CXL_UNCOR_ERROR_TYPE_RSVD_ENCODING:
        return CXL_RAS_UNC_ERR_RSVD_ENCODING;
    case CXL_UNCOR_ERROR_TYPE_POISON_RECEIVED:
        return CXL_RAS_UNC_ERR_POISON_RECEIVED;
    case CXL_UNCOR_ERROR_TYPE_RECEIVER_OVERFLOW:
        return CXL_RAS_UNC_ERR_RECEIVER_OVERFLOW;
    case CXL_UNCOR_ERROR_TYPE_INTERNAL:
        return CXL_RAS_UNC_ERR_INTERNAL;
    case CXL_UNCOR_ERROR_TYPE_CXL_IDE_TX:
        return CXL_RAS_UNC_ERR_CXL_IDE_TX;
    case CXL_UNCOR_ERROR_TYPE_CXL_IDE_RX:
        return CXL_RAS_UNC_ERR_CXL_IDE_RX;
    default:
        return -EINVAL;
    }
}

static int ct3d_qmp_cor_err_to_cxl(CxlCorErrorType qmp_err)
{
    switch (qmp_err) {
    case CXL_COR_ERROR_TYPE_CACHE_DATA_ECC:
        return CXL_RAS_COR_ERR_CACHE_DATA_ECC;
    case CXL_COR_ERROR_TYPE_MEM_DATA_ECC:
        return CXL_RAS_COR_ERR_MEM_DATA_ECC;
    case CXL_COR_ERROR_TYPE_CRC_THRESHOLD:
        return CXL_RAS_COR_ERR_CRC_THRESHOLD;
    case CXL_COR_ERROR_TYPE_RETRY_THRESHOLD:
        return CXL_RAS_COR_ERR_RETRY_THRESHOLD;
    case CXL_COR_ERROR_TYPE_CACHE_POISON_RECEIVED:
        return CXL_RAS_COR_ERR_CACHE_POISON_RECEIVED;
    case CXL_COR_ERROR_TYPE_MEM_POISON_RECEIVED:
        return CXL_RAS_COR_ERR_MEM_POISON_RECEIVED;
    case CXL_COR_ERROR_TYPE_PHYSICAL:
        return CXL_RAS_COR_ERR_PHYSICAL;
    default:
        return -EINVAL;
    }
}

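/*
 * MMIO write handler for the cache/mem component registers. HDM decoder 0
 * control writes trigger the commit/uncommit handshake; RAS status writes
 * get write-1-to-clear handling plus first-error-pointer and header-log
 * maintenance; everything else is a plain register store.
 */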
static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value,
                           unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;
    CXLType3Dev *ct3d = container_of(cxl_cstate, CXLType3Dev, cxl_cstate);
    uint32_t *cache_mem = cregs->cache_mem_registers;
    bool should_commit = false;
    bool should_uncommit = false;
    int which_hdm = -1;

    assert(size == 4);
    g_assert(offset < CXL2_COMPONENT_CM_REGION_SIZE);

    switch (offset) {
    case A_CXL_HDM_DECODER0_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        which_hdm = 0;
        break;
    case A_CXL_RAS_UNC_ERR_STATUS:
    {
        uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL);
        uint32_t fe = FIELD_EX32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                 FIRST_ERROR_POINTER);
        CXLError *cxl_err;
        uint32_t unc_err;

        /*
         * If a single bit is written, and it corresponds to the first
         * error pointer being cleared, update the status and header log.
         */
        if (!QTAILQ_EMPTY(&ct3d->error_list)) {
            if ((1 << fe) ^ value) {
                CXLError *cxl_next;
                /*
                 * Software is using the wrong flow for multiple header
                 * recording. Follow the behavior in PCIe r6.0, assuming
                 * multiple header support. It is an implementation-defined
                 * choice to clear all matching records if more than one bit
                 * is set - which corresponds most closely to the behavior
                 * of hardware that is not capable of multiple header
                 * recording.
                 */
                QTAILQ_FOREACH_SAFE(cxl_err, &ct3d->error_list, node, cxl_next) {
                    if ((1 << cxl_err->type) & value) {
                        QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node);
                        g_free(cxl_err);
                    }
                }
            } else {
                /* Done with previous FE, so drop from list */
                cxl_err = QTAILQ_FIRST(&ct3d->error_list);
                QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node);
                g_free(cxl_err);
            }

            /*
             * If there is another FE, then put that in place and update
             * the header log
             */
            if (!QTAILQ_EMPTY(&ct3d->error_list)) {
                uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0];
                int i;

                cxl_err = QTAILQ_FIRST(&ct3d->error_list);
                for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) {
                    stl_le_p(header_log + i, cxl_err->header[i]);
                }
                capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                     FIRST_ERROR_POINTER, cxl_err->type);
            } else {
                /*
                 * If there are no more errors, then follow the
                 * recommendation of PCI spec r6.0 6.2.4.2 to set the first
                 * error pointer to a status bit that will never be used.
                 */
                capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                     FIRST_ERROR_POINTER,
                                     CXL_RAS_UNC_ERR_CXL_UNUSED);
            }
            stl_le_p((uint8_t *)cache_mem + A_CXL_RAS_ERR_CAP_CTRL, capctrl);
        }
        unc_err = 0;
        QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) {
            unc_err |= 1 << cxl_err->type;
        }
        stl_le_p((uint8_t *)cache_mem + offset, unc_err);

        return;
    }
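    /* Correctable error status is RW1C: writing a 1 clears that bit */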
    case A_CXL_RAS_COR_ERR_STATUS:
    {
        uint32_t rw1c = value;
        uint32_t temp = ldl_le_p((uint8_t *)cache_mem + offset);
        temp &= ~rw1c;
        stl_le_p((uint8_t *)cache_mem + offset, temp);
        return;
    }
    default:
        break;
    }

    stl_le_p((uint8_t *)cache_mem + offset, value);
    if (should_commit) {
        hdm_decoder_commit(ct3d, which_hdm);
    } else if (should_uncommit) {
        hdm_decoder_uncommit(ct3d, which_hdm);
    }
}

static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
{
    DeviceState *ds = DEVICE(ct3d);

    if (!ct3d->hostmem && !ct3d->hostvmem && !ct3d->hostpmem) {
        error_setg(errp, "at least one memdev property must be set");
        return false;
    } else if (ct3d->hostmem && ct3d->hostpmem) {
        error_setg(errp, "[memdev] cannot be used with new "
                         "[persistent-memdev] property");
        return false;
    } else if (ct3d->hostmem) {
        /* Use of hostmem property implies pmem */
        ct3d->hostpmem = ct3d->hostmem;
        ct3d->hostmem = NULL;
    }

    if (ct3d->hostpmem && !ct3d->lsa) {
        error_setg(errp, "lsa property must be set for persistent devices");
        return false;
    }

    if (ct3d->hostvmem) {
        MemoryRegion *vmr;
        char *v_name;

        vmr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (!vmr) {
            error_setg(errp, "volatile memdev must have backing device");
            return false;
        }
        memory_region_set_nonvolatile(vmr, false);
        memory_region_set_enabled(vmr, true);
        host_memory_backend_set_mapped(ct3d->hostvmem, true);
        if (ds->id) {
            v_name = g_strdup_printf("cxl-type3-dpa-vmem-space:%s", ds->id);
        } else {
            v_name = g_strdup("cxl-type3-dpa-vmem-space");
        }
        address_space_init(&ct3d->hostvmem_as, vmr, v_name);
        ct3d->cxl_dstate.vmem_size = memory_region_size(vmr);
        ct3d->cxl_dstate.mem_size += memory_region_size(vmr);
        g_free(v_name);
    }

    if (ct3d->hostpmem) {
        MemoryRegion *pmr;
        char *p_name;

        pmr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (!pmr) {
            error_setg(errp, "persistent memdev must have backing device");
            return false;
        }
        memory_region_set_nonvolatile(pmr, true);
        memory_region_set_enabled(pmr, true);
        host_memory_backend_set_mapped(ct3d->hostpmem, true);
        if (ds->id) {
            p_name = g_strdup_printf("cxl-type3-dpa-pmem-space:%s", ds->id);
        } else {
            p_name = g_strdup("cxl-type3-dpa-pmem-space");
        }
        address_space_init(&ct3d->hostpmem_as, pmr, p_name);
        ct3d->cxl_dstate.pmem_size = memory_region_size(pmr);
        ct3d->cxl_dstate.mem_size += memory_region_size(pmr);
        g_free(p_name);
    }

    return true;
}

static DOEProtocol doe_cdat_prot[] = {
    { CXL_VENDOR_ID, CXL_DOE_TABLE_ACCESS, cxl_doe_cdat_rsp },
    { }
};

static void ct3_realize(PCIDevice *pci_dev, Error **errp)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    ComponentRegisters *regs = &cxl_cstate->crb;
    MemoryRegion *mr = &regs->component_registers;
    uint8_t *pci_conf = pci_dev->config;
    unsigned short msix_num = 6;
    int i, rc;

    QTAILQ_INIT(&ct3d->error_list);

    if (!cxl_setup_memory(ct3d, errp)) {
        return;
    }

    pci_config_set_prog_interface(pci_conf, 0x10);

    pcie_endpoint_cap_init(pci_dev, 0x80);
    if (ct3d->sn != UI64_NULL) {
        pcie_dev_ser_num_init(pci_dev, 0x100, ct3d->sn);
        cxl_cstate->dvsec_offset = 0x100 + 0x0c;
    } else {
        cxl_cstate->dvsec_offset = 0x100;
    }

    ct3d->cxl_cstate.pdev = pci_dev;
    build_dvsecs(ct3d);

    regs->special_ops = g_new0(MemoryRegionOps, 1);
    regs->special_ops->write = ct3d_reg_write;

    cxl_component_register_block_init(OBJECT(pci_dev), cxl_cstate,
                                      TYPE_CXL_TYPE3);

    pci_register_bar(
        pci_dev, CXL_COMPONENT_REG_BAR_IDX,
        PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64, mr);

    cxl_device_register_block_init(OBJECT(pci_dev), &ct3d->cxl_dstate);
    pci_register_bar(pci_dev, CXL_DEVICE_REG_BAR_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                         PCI_BASE_ADDRESS_MEM_TYPE_64,
                     &ct3d->cxl_dstate.device_registers);

    /* MSI(-X) Initialization */
    rc = msix_init_exclusive_bar(pci_dev, msix_num, 4, NULL);
    if (rc) {
        goto err_address_space_free;
    }
    for (i = 0; i < msix_num; i++) {
        msix_vector_use(pci_dev, i);
    }

    /* DOE Initialization */
    pcie_doe_init(pci_dev, &ct3d->doe_cdat, 0x190, doe_cdat_prot, true, 0);

    cxl_cstate->cdat.build_cdat_table = ct3_build_cdat_table;
    cxl_cstate->cdat.free_cdat_table = ct3_free_cdat_table;
    cxl_cstate->cdat.private = ct3d;
    cxl_doe_cdat_init(cxl_cstate, errp);
    if (*errp) {
        goto err_free_special_ops;
    }

    pcie_cap_deverr_init(pci_dev);
    /* Leave a bit of room for expansion */
    rc = pcie_aer_init(pci_dev, PCI_ERR_VER, 0x200, PCI_ERR_SIZEOF, NULL);
    if (rc) {
        goto err_release_cdat;
    }
    cxl_event_init(&ct3d->cxl_dstate, 2);

    return;

err_release_cdat:
    cxl_doe_cdat_release(cxl_cstate);
err_free_special_ops:
    g_free(regs->special_ops);
err_address_space_free:
    if (ct3d->hostpmem) {
        address_space_destroy(&ct3d->hostpmem_as);
    }
    if (ct3d->hostvmem) {
        address_space_destroy(&ct3d->hostvmem_as);
    }
    return;
}

static void ct3_exit(PCIDevice *pci_dev)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    ComponentRegisters *regs = &cxl_cstate->crb;

    pcie_aer_exit(pci_dev);
    cxl_doe_cdat_release(cxl_cstate);
    g_free(regs->special_ops);
    if (ct3d->hostpmem) {
        address_space_destroy(&ct3d->hostpmem_as);
    }
    if (ct3d->hostvmem) {
        address_space_destroy(&ct3d->hostvmem_as);
    }
}

/* TODO: Support multiple HDM decoders and DPA skip */
static bool cxl_type3_dpa(CXLType3Dev *ct3d, hwaddr host_addr, uint64_t *dpa)
{
    uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers;
    uint64_t decoder_base, decoder_size, hpa_offset;
    uint32_t hdm0_ctrl;
    int ig, iw;

    decoder_base = (((uint64_t)cache_mem[R_CXL_HDM_DECODER0_BASE_HI] << 32) |
                    cache_mem[R_CXL_HDM_DECODER0_BASE_LO]);
    if ((uint64_t)host_addr < decoder_base) {
        return false;
    }

    hpa_offset = (uint64_t)host_addr - decoder_base;

    decoder_size = ((uint64_t)cache_mem[R_CXL_HDM_DECODER0_SIZE_HI] << 32) |
        cache_mem[R_CXL_HDM_DECODER0_SIZE_LO];
    if (hpa_offset >= decoder_size) {
        return false;
    }

    hdm0_ctrl = cache_mem[R_CXL_HDM_DECODER0_CTRL];
    iw = FIELD_EX32(hdm0_ctrl, CXL_HDM_DECODER0_CTRL, IW);
    ig = FIELD_EX32(hdm0_ctrl, CXL_HDM_DECODER0_CTRL, IG);

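    /*
     * HPA -> DPA interleave decode: the interleave granularity is
     * 2^(ig + 8) bytes, so the low (ig + 8) bits pass through unchanged;
     * the iw bits above them select which target in the interleave set is
     * addressed and are stripped; the remaining high bits shift down by iw.
     * Worked example with ig = 0 (256-byte granularity) and iw = 1 (2-way):
     * HPA offset 0x300 keeps low byte 0x00, drops selector bit 8, and
     * shifts 0x200 right by 1, giving DPA 0x100.
     */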
    *dpa = (MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) |
        ((MAKE_64BIT_MASK(8 + ig + iw, 64 - 8 - ig - iw) & hpa_offset) >> iw);

    return true;
}

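/*
 * Route a host physical address to the backing AddressSpace and the offset
 * within it. The DPA map places volatile memory at [0, vmem_size) with
 * persistent memory immediately after, mirroring the CDAT/DVSEC layout
 * advertised elsewhere in this file.
 */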
static int cxl_type3_hpa_to_as_and_dpa(CXLType3Dev *ct3d,
                                       hwaddr host_addr,
                                       unsigned int size,
                                       AddressSpace **as,
                                       uint64_t *dpa_offset)
{
    MemoryRegion *vmr = NULL, *pmr = NULL;

    if (ct3d->hostvmem) {
        vmr = host_memory_backend_get_memory(ct3d->hostvmem);
    }
    if (ct3d->hostpmem) {
        pmr = host_memory_backend_get_memory(ct3d->hostpmem);
    }

    if (!vmr && !pmr) {
        return -ENODEV;
    }

    if (!cxl_type3_dpa(ct3d, host_addr, dpa_offset)) {
        return -EINVAL;
    }

    if (*dpa_offset > ct3d->cxl_dstate.mem_size) {
        return -EINVAL;
    }

    if (vmr) {
        if (*dpa_offset < memory_region_size(vmr)) {
            *as = &ct3d->hostvmem_as;
        } else {
            *as = &ct3d->hostpmem_as;
            *dpa_offset -= memory_region_size(vmr);
        }
    } else {
        *as = &ct3d->hostpmem_as;
    }

    return 0;
}

MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data,
                           unsigned size, MemTxAttrs attrs)
{
    uint64_t dpa_offset = 0;
    AddressSpace *as = NULL;
    int res;

    res = cxl_type3_hpa_to_as_and_dpa(CXL_TYPE3(d), host_addr, size,
                                      &as, &dpa_offset);
    if (res) {
        return MEMTX_ERROR;
    }

    return address_space_read(as, dpa_offset, attrs, data, size);
}

MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data,
                            unsigned size, MemTxAttrs attrs)
{
    uint64_t dpa_offset = 0;
    AddressSpace *as = NULL;
    int res;

    res = cxl_type3_hpa_to_as_and_dpa(CXL_TYPE3(d), host_addr, size,
                                      &as, &dpa_offset);
    if (res) {
        return MEMTX_ERROR;
    }

    return address_space_write(as, dpa_offset, attrs, &data, size);
}

static void ct3d_reset(DeviceState *dev)
{
    CXLType3Dev *ct3d = CXL_TYPE3(dev);
    uint32_t *reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    uint32_t *write_msk = ct3d->cxl_cstate.crb.cache_mem_regs_write_mask;

    cxl_component_register_init_common(reg_state, write_msk, CXL2_TYPE3_DEVICE);
    cxl_device_register_init_common(&ct3d->cxl_dstate);
}

static Property ct3_props[] = {
    DEFINE_PROP_LINK("memdev", CXLType3Dev, hostmem, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *), /* for backward compatibility */
    DEFINE_PROP_LINK("persistent-memdev", CXLType3Dev, hostpmem,
                     TYPE_MEMORY_BACKEND, HostMemoryBackend *),
    DEFINE_PROP_LINK("volatile-memdev", CXLType3Dev, hostvmem,
                     TYPE_MEMORY_BACKEND, HostMemoryBackend *),
    DEFINE_PROP_LINK("lsa", CXLType3Dev, lsa, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *),
    DEFINE_PROP_UINT64("sn", CXLType3Dev, sn, UI64_NULL),
    DEFINE_PROP_STRING("cdat", CXLType3Dev, cxl_cstate.cdat.filename),
    DEFINE_PROP_END_OF_LIST(),
};

889 
890 static uint64_t get_lsa_size(CXLType3Dev *ct3d)
891 {
892     MemoryRegion *mr;
893 
894     if (!ct3d->lsa) {
895         return 0;
896     }
897 
898     mr = host_memory_backend_get_memory(ct3d->lsa);
899     return memory_region_size(mr);
900 }
901 
902 static void validate_lsa_access(MemoryRegion *mr, uint64_t size,
903                                 uint64_t offset)
904 {
905     assert(offset + size <= memory_region_size(mr));
906     assert(offset + size > offset);
907 }
908 
909 static uint64_t get_lsa(CXLType3Dev *ct3d, void *buf, uint64_t size,
910                     uint64_t offset)
911 {
912     MemoryRegion *mr;
913     void *lsa;
914 
915     if (!ct3d->lsa) {
916         return 0;
917     }
918 
919     mr = host_memory_backend_get_memory(ct3d->lsa);
920     validate_lsa_access(mr, size, offset);
921 
922     lsa = memory_region_get_ram_ptr(mr) + offset;
923     memcpy(buf, lsa, size);
924 
925     return size;
926 }
927 
928 static void set_lsa(CXLType3Dev *ct3d, const void *buf, uint64_t size,
929                     uint64_t offset)
930 {
931     MemoryRegion *mr;
932     void *lsa;
933 
934     if (!ct3d->lsa) {
935         return;
936     }
937 
938     mr = host_memory_backend_get_memory(ct3d->lsa);
939     validate_lsa_access(mr, size, offset);
940 
941     lsa = memory_region_get_ram_ptr(mr) + offset;
942     memcpy(lsa, buf, size);
943     memory_region_set_dirty(mr, offset, size);

    /*
     * As with the PMEM itself, if the guest is not allowed to exit
     * gracefully, label updates will get lost.
     */
}

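/*
 * Direct backing-store write of a single cache line, bypassing the HDM
 * decode path; exposed via CXLType3Class (cvc->set_cacheline), presumably
 * for error-injection style callers. The DPA routing matches
 * cxl_type3_hpa_to_as_and_dpa(): volatile first, persistent after it.
 */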
static bool set_cacheline(CXLType3Dev *ct3d, uint64_t dpa_offset, uint8_t *data)
{
    MemoryRegion *vmr = NULL, *pmr = NULL;
    AddressSpace *as;

    if (ct3d->hostvmem) {
        vmr = host_memory_backend_get_memory(ct3d->hostvmem);
    }
    if (ct3d->hostpmem) {
        pmr = host_memory_backend_get_memory(ct3d->hostpmem);
    }

    if (!vmr && !pmr) {
        return false;
    }

    if (dpa_offset + CXL_CACHE_LINE_SIZE > ct3d->cxl_dstate.mem_size) {
        return false;
    }

    if (vmr) {
        if (dpa_offset < memory_region_size(vmr)) {
            as = &ct3d->hostvmem_as;
        } else {
            as = &ct3d->hostpmem_as;
            dpa_offset -= memory_region_size(vmr);
        }
    } else {
        as = &ct3d->hostpmem_as;
    }

    address_space_write(as, dpa_offset, MEMTXATTRS_UNSPECIFIED, data,
                        CXL_CACHE_LINE_SIZE);
    return true;
}

void cxl_set_poison_list_overflowed(CXLType3Dev *ct3d)
{
    ct3d->poison_list_overflowed = true;
    ct3d->poison_list_overflow_ts =
        cxl_device_get_timestamp(&ct3d->cxl_dstate);
}

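/*
 * QMP poison injection: ranges must be 64-byte aligned multiples of 64
 * bytes, must not overlap an existing entry, and are capped at
 * CXL_POISON_LIST_LIMIT records, beyond which the overflow flag and
 * timestamp are latched. A sketch of a QMP invocation, assuming the QAPI
 * command name cxl-inject-poison and a hypothetical device id cxl-mem0:
 *   { "execute": "cxl-inject-poison",
 *     "arguments": { "path": "/machine/peripheral/cxl-mem0",
 *                    "start": 2048, "length": 256 } }
 */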
void qmp_cxl_inject_poison(const char *path, uint64_t start, uint64_t length,
                           Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLType3Dev *ct3d;
    CXLPoison *p;

    if (length % 64) {
        error_setg(errp, "Poison injection must be in multiples of 64 bytes");
        return;
    }
    if (start % 64) {
        error_setg(errp, "Poison start address must be 64 byte aligned");
        return;
    }
    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }

    ct3d = CXL_TYPE3(obj);

    QLIST_FOREACH(p, &ct3d->poison_list, node) {
        if ((start < p->start + p->length) && (start + length > p->start)) {
            error_setg(errp,
                       "Overlap with existing poisoned region not supported");
            return;
        }
    }

    if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
        cxl_set_poison_list_overflowed(ct3d);
        return;
    }

    p = g_new0(CXLPoison, 1);
    p->length = length;
    p->start = start;
    /* Different from poison injected via the mailbox */
    p->type = CXL_POISON_TYPE_INTERNAL;

    QLIST_INSERT_HEAD(&ct3d->poison_list, p, node);
    ct3d->poison_list_cnt++;
}

/* For uncorrectable errors include support for multiple header recording */
void qmp_cxl_inject_uncorrectable_errors(const char *path,
                                         CXLUncorErrorRecordList *errors,
                                         Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    static PCIEAERErr err = {};
    CXLType3Dev *ct3d;
    CXLError *cxl_err;
    uint32_t *reg_state;
    uint32_t unc_err;
    bool first;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }

    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }

    err.status = PCI_ERR_UNC_INTN;
    err.source_id = pci_requester_id(PCI_DEVICE(obj));
    err.flags = 0;

    ct3d = CXL_TYPE3(obj);

    first = QTAILQ_EMPTY(&ct3d->error_list);
    reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    while (errors) {
        uint32List *header = errors->value->header;
        uint8_t header_count = 0;
        int cxl_err_code;

        cxl_err_code = ct3d_qmp_uncor_err_to_cxl(errors->value->type);
        if (cxl_err_code < 0) {
            error_setg(errp, "Unknown error code");
            return;
        }

        /* If the error is masked, nothing to do here */
        if (!((1 << cxl_err_code) &
              ~ldl_le_p(reg_state + R_CXL_RAS_UNC_ERR_MASK))) {
            errors = errors->next;
            continue;
        }

        cxl_err = g_malloc0(sizeof(*cxl_err));
        if (!cxl_err) {
            return;
        }

        cxl_err->type = cxl_err_code;
        while (header && header_count < 32) {
            cxl_err->header[header_count++] = header->value;
            header = header->next;
        }
        if (header) {
            error_setg(errp, "Header must be 32 DWORD or less");
            g_free(cxl_err);
            return;
        }
        QTAILQ_INSERT_TAIL(&ct3d->error_list, cxl_err, node);

        errors = errors->next;
    }

    if (first && !QTAILQ_EMPTY(&ct3d->error_list)) {
        uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers;
        uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL);
        uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0];
        int i;

        cxl_err = QTAILQ_FIRST(&ct3d->error_list);
        for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) {
            stl_le_p(header_log + i, cxl_err->header[i]);
        }

        capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                             FIRST_ERROR_POINTER, cxl_err->type);
        stl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL, capctrl);
    }

    unc_err = 0;
    QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) {
        unc_err |= (1 << cxl_err->type);
    }
    if (!unc_err) {
        return;
    }

    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, unc_err);
    pcie_aer_inject_error(PCI_DEVICE(obj), &err);

    return;
}

void qmp_cxl_inject_correctable_error(const char *path, CxlCorErrorType type,
                                      Error **errp)
{
    static PCIEAERErr err = {};
    Object *obj = object_resolve_path(path, NULL);
    CXLType3Dev *ct3d;
    uint32_t *reg_state;
    uint32_t cor_err;
    int cxl_err_type;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }

    err.status = PCI_ERR_COR_INTERNAL;
    err.source_id = pci_requester_id(PCI_DEVICE(obj));
    err.flags = PCIE_AER_ERR_IS_CORRECTABLE;

    ct3d = CXL_TYPE3(obj);
    reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    cor_err = ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS);

    cxl_err_type = ct3d_qmp_cor_err_to_cxl(type);
    if (cxl_err_type < 0) {
        error_setg(errp, "Invalid COR error");
        return;
    }
    /* If the error is masked, nothing to do here */
    if (!((1 << cxl_err_type) & ~ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_MASK))) {
        return;
    }

    cor_err |= (1 << cxl_err_type);
    stl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS, cor_err);

    pcie_aer_inject_error(PCI_DEVICE(obj), &err);
}

static void cxl_assign_event_header(CXLEventRecordHdr *hdr,
                                    const QemuUUID *uuid, uint32_t flags,
                                    uint8_t length, uint64_t timestamp)
{
    st24_le_p(&hdr->flags, flags);
    hdr->length = length;
    memcpy(&hdr->id, uuid, sizeof(hdr->id));
    stq_le_p(&hdr->timestamp, timestamp);
}

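/*
 * Event record identity UUIDs. These values are fixed by the CXL
 * specification's event record definitions (General Media, DRAM and
 * Memory Module events) rather than chosen by QEMU.
 */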
static const QemuUUID gen_media_uuid = {
    .data = UUID(0xfbcd0a77, 0xc260, 0x417f,
                 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6),
};

static const QemuUUID dram_uuid = {
    .data = UUID(0x601dcbb3, 0x9c06, 0x4eab, 0xb8, 0xaf,
                 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24),
};

static const QemuUUID memory_module_uuid = {
    .data = UUID(0xfe927475, 0xdd59, 0x4339, 0xa5, 0x86,
                 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74),
};

#define CXL_GMER_VALID_CHANNEL                          BIT(0)
#define CXL_GMER_VALID_RANK                             BIT(1)
#define CXL_GMER_VALID_DEVICE                           BIT(2)
#define CXL_GMER_VALID_COMPONENT                        BIT(3)

static int ct3d_qmp_cxl_event_log_enc(CxlEventLog log)
{
    switch (log) {
    case CXL_EVENT_LOG_INFORMATIONAL:
        return CXL_EVENT_TYPE_INFO;
    case CXL_EVENT_LOG_WARNING:
        return CXL_EVENT_TYPE_WARN;
    case CXL_EVENT_LOG_FAILURE:
        return CXL_EVENT_TYPE_FAIL;
    case CXL_EVENT_LOG_FATAL:
        return CXL_EVENT_TYPE_FATAL;
    /* DCD not yet supported */
    default:
        return -EINVAL;
    }
}

/* Component ID is device specific.  Define this as a string. */
void qmp_cxl_inject_general_media_event(const char *path, CxlEventLog log,
                                        uint8_t flags, uint64_t dpa,
                                        uint8_t descriptor, uint8_t type,
                                        uint8_t transaction_type,
                                        bool has_channel, uint8_t channel,
                                        bool has_rank, uint8_t rank,
                                        bool has_device, uint32_t device,
                                        const char *component_id,
                                        Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLEventGenMedia gem;
    CXLEventRecordHdr *hdr = &gem.hdr;
    CXLDeviceState *cxlds;
    CXLType3Dev *ct3d;
    uint16_t valid_flags = 0;
    uint8_t enc_log;
    int rc;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }
    ct3d = CXL_TYPE3(obj);
    cxlds = &ct3d->cxl_dstate;

    rc = ct3d_qmp_cxl_event_log_enc(log);
    if (rc < 0) {
        error_setg(errp, "Unhandled error log type");
        return;
    }
    enc_log = rc;

    memset(&gem, 0, sizeof(gem));
    cxl_assign_event_header(hdr, &gen_media_uuid, flags, sizeof(gem),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));

    stq_le_p(&gem.phys_addr, dpa);
    gem.descriptor = descriptor;
    gem.type = type;
    gem.transaction_type = transaction_type;

    if (has_channel) {
        gem.channel = channel;
        valid_flags |= CXL_GMER_VALID_CHANNEL;
    }

    if (has_rank) {
        gem.rank = rank;
        valid_flags |= CXL_GMER_VALID_RANK;
    }

    if (has_device) {
        st24_le_p(gem.device, device);
        valid_flags |= CXL_GMER_VALID_DEVICE;
    }

    if (component_id) {
        strncpy((char *)gem.component_id, component_id,
                sizeof(gem.component_id) - 1);
        valid_flags |= CXL_GMER_VALID_COMPONENT;
    }

    stw_le_p(&gem.validity_flags, valid_flags);

    if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&gem)) {
        cxl_event_irq_assert(ct3d);
    }
}

#define CXL_DRAM_VALID_CHANNEL                          BIT(0)
#define CXL_DRAM_VALID_RANK                             BIT(1)
#define CXL_DRAM_VALID_NIBBLE_MASK                      BIT(2)
#define CXL_DRAM_VALID_BANK_GROUP                       BIT(3)
#define CXL_DRAM_VALID_BANK                             BIT(4)
#define CXL_DRAM_VALID_ROW                              BIT(5)
#define CXL_DRAM_VALID_COLUMN                           BIT(6)
#define CXL_DRAM_VALID_CORRECTION_MASK                  BIT(7)

void qmp_cxl_inject_dram_event(const char *path, CxlEventLog log, uint8_t flags,
                               uint64_t dpa, uint8_t descriptor,
                               uint8_t type, uint8_t transaction_type,
                               bool has_channel, uint8_t channel,
                               bool has_rank, uint8_t rank,
                               bool has_nibble_mask, uint32_t nibble_mask,
                               bool has_bank_group, uint8_t bank_group,
                               bool has_bank, uint8_t bank,
                               bool has_row, uint32_t row,
                               bool has_column, uint16_t column,
                               bool has_correction_mask,
                               uint64List *correction_mask,
                               Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLEventDram dram;
    CXLEventRecordHdr *hdr = &dram.hdr;
    CXLDeviceState *cxlds;
    CXLType3Dev *ct3d;
    uint16_t valid_flags = 0;
    uint8_t enc_log;
    int rc;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }
    ct3d = CXL_TYPE3(obj);
    cxlds = &ct3d->cxl_dstate;

    rc = ct3d_qmp_cxl_event_log_enc(log);
    if (rc < 0) {
        error_setg(errp, "Unhandled error log type");
        return;
    }
    enc_log = rc;

    memset(&dram, 0, sizeof(dram));
    cxl_assign_event_header(hdr, &dram_uuid, flags, sizeof(dram),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));
    stq_le_p(&dram.phys_addr, dpa);
    dram.descriptor = descriptor;
    dram.type = type;
    dram.transaction_type = transaction_type;

    if (has_channel) {
        dram.channel = channel;
        valid_flags |= CXL_DRAM_VALID_CHANNEL;
    }

    if (has_rank) {
        dram.rank = rank;
        valid_flags |= CXL_DRAM_VALID_RANK;
    }

    if (has_nibble_mask) {
        st24_le_p(dram.nibble_mask, nibble_mask);
        valid_flags |= CXL_DRAM_VALID_NIBBLE_MASK;
    }

    if (has_bank_group) {
        dram.bank_group = bank_group;
        valid_flags |= CXL_DRAM_VALID_BANK_GROUP;
    }

    if (has_bank) {
        dram.bank = bank;
        valid_flags |= CXL_DRAM_VALID_BANK;
    }

    if (has_row) {
        st24_le_p(dram.row, row);
        valid_flags |= CXL_DRAM_VALID_ROW;
    }

    if (has_column) {
        stw_le_p(&dram.column, column);
        valid_flags |= CXL_DRAM_VALID_COLUMN;
    }

    if (has_correction_mask) {
        int count = 0;
        while (correction_mask && count < 4) {
            stq_le_p(&dram.correction_mask[count],
                     correction_mask->value);
            count++;
            correction_mask = correction_mask->next;
        }
        valid_flags |= CXL_DRAM_VALID_CORRECTION_MASK;
    }

    stw_le_p(&dram.validity_flags, valid_flags);

    if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&dram)) {
        cxl_event_irq_assert(ct3d);
    }
    return;
}

void qmp_cxl_inject_memory_module_event(const char *path, CxlEventLog log,
                                        uint8_t flags, uint8_t type,
                                        uint8_t health_status,
                                        uint8_t media_status,
                                        uint8_t additional_status,
                                        uint8_t life_used,
                                        int16_t temperature,
                                        uint32_t dirty_shutdown_count,
                                        uint32_t corrected_volatile_error_count,
                                        uint32_t corrected_persistent_error_count,
                                        Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLEventMemoryModule module;
    CXLEventRecordHdr *hdr = &module.hdr;
    CXLDeviceState *cxlds;
    CXLType3Dev *ct3d;
    uint8_t enc_log;
    int rc;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }
    ct3d = CXL_TYPE3(obj);
    cxlds = &ct3d->cxl_dstate;

    rc = ct3d_qmp_cxl_event_log_enc(log);
    if (rc < 0) {
        error_setg(errp, "Unhandled error log type");
        return;
    }
    enc_log = rc;

    memset(&module, 0, sizeof(module));
    cxl_assign_event_header(hdr, &memory_module_uuid, flags, sizeof(module),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));

    module.type = type;
    module.health_status = health_status;
    module.media_status = media_status;
    module.additional_status = additional_status;
    module.life_used = life_used;
    stw_le_p(&module.temperature, temperature);
    stl_le_p(&module.dirty_shutdown_count, dirty_shutdown_count);
    stl_le_p(&module.corrected_volatile_error_count,
             corrected_volatile_error_count);
    stl_le_p(&module.corrected_persistent_error_count,
             corrected_persistent_error_count);

    if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&module)) {
        cxl_event_irq_assert(ct3d);
    }
}

static void ct3_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
    CXLType3Class *cvc = CXL_TYPE3_CLASS(oc);

    pc->realize = ct3_realize;
    pc->exit = ct3_exit;
    pc->class_id = PCI_CLASS_MEMORY_CXL;
    pc->vendor_id = PCI_VENDOR_ID_INTEL;
    pc->device_id = 0xd93; /* LVF for now */
    pc->revision = 1;

    pc->config_write = ct3d_config_write;
    pc->config_read = ct3d_config_read;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "CXL Memory Device (Type 3)";
    dc->reset = ct3d_reset;
    device_class_set_props(dc, ct3_props);

    cvc->get_lsa_size = get_lsa_size;
    cvc->get_lsa = get_lsa;
    cvc->set_lsa = set_lsa;
    cvc->set_cacheline = set_cacheline;
}

static const TypeInfo ct3d_info = {
    .name = TYPE_CXL_TYPE3,
    .parent = TYPE_PCI_DEVICE,
    .class_size = sizeof(struct CXLType3Class),
    .class_init = ct3_class_init,
    .instance_size = sizeof(CXLType3Dev),
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CXL_DEVICE },
        { INTERFACE_PCIE_DEVICE },
        {}
    },
};

static void ct3d_registers(void)
{
    type_register_static(&ct3d_info);
}

type_init(ct3d_registers);