/*
 * CXL Type 3 (memory expander) device
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "qapi/qapi-commands-cxl.h"
#include "hw/mem/memory-device.h"
#include "hw/mem/pc-dimm.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/pmem.h"
#include "qemu/range.h"
#include "qemu/rcu.h"
#include "sysemu/hostmem.h"
#include "sysemu/numa.h"
#include "hw/cxl/cxl.h"
#include "hw/pci/msix.h"

#define DWORD_BYTE 4

/* Default CDAT entries for a memory region */
enum {
    CT3_CDAT_DSMAS,
    CT3_CDAT_DSLBIS0,
    CT3_CDAT_DSLBIS1,
    CT3_CDAT_DSLBIS2,
    CT3_CDAT_DSLBIS3,
    CT3_CDAT_DSEMTS,
    CT3_CDAT_NUM_ENTRIES
};

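/*
 * Build the default CDAT entries describing one device memory region:
 * a DSMAS for the DPA range, four DSLBIS entries (read/write latency and
 * read/write bandwidth) and a DSEMTS mapping the range to an EFI memory
 * type attribute. Ownership of the entries passes to cdat_table on success.
 */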
static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
                                         int dsmad_handle, MemoryRegion *mr,
                                         bool is_pmem, uint64_t dpa_base)
{
    g_autofree CDATDsmas *dsmas = NULL;
    g_autofree CDATDslbis *dslbis0 = NULL;
    g_autofree CDATDslbis *dslbis1 = NULL;
    g_autofree CDATDslbis *dslbis2 = NULL;
    g_autofree CDATDslbis *dslbis3 = NULL;
    g_autofree CDATDsemts *dsemts = NULL;

    dsmas = g_malloc(sizeof(*dsmas));
    if (!dsmas) {
        return -ENOMEM;
    }
    *dsmas = (CDATDsmas) {
        .header = {
            .type = CDAT_TYPE_DSMAS,
            .length = sizeof(*dsmas),
        },
        .DSMADhandle = dsmad_handle,
        .flags = is_pmem ? CDAT_DSMAS_FLAG_NV : 0,
        .DPA_base = dpa_base,
        .DPA_length = memory_region_size(mr),
    };

    /* No memory-side cache is modelled for now; use plausible numbers */
    dslbis0 = g_malloc(sizeof(*dslbis0));
    if (!dslbis0) {
        return -ENOMEM;
    }
    *dslbis0 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis0),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_READ_LATENCY,
        .entry_base_unit = 10000, /* 10ns base */
        .entry[0] = 15, /* 150ns */
    };

    dslbis1 = g_malloc(sizeof(*dslbis1));
    if (!dslbis1) {
        return -ENOMEM;
    }
    *dslbis1 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis1),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_WRITE_LATENCY,
        .entry_base_unit = 10000,
        .entry[0] = 25, /* 250ns */
    };

    dslbis2 = g_malloc(sizeof(*dslbis2));
    if (!dslbis2) {
        return -ENOMEM;
    }
    *dslbis2 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis2),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_READ_BANDWIDTH,
        .entry_base_unit = 1000, /* GB/s */
        .entry[0] = 16,
    };

    dslbis3 = g_malloc(sizeof(*dslbis3));
    if (!dslbis3) {
        return -ENOMEM;
    }
    *dslbis3 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis3),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_WRITE_BANDWIDTH,
        .entry_base_unit = 1000, /* GB/s */
        .entry[0] = 16,
    };

    dsemts = g_malloc(sizeof(*dsemts));
    if (!dsemts) {
        return -ENOMEM;
    }
    *dsemts = (CDATDsemts) {
        .header = {
            .type = CDAT_TYPE_DSEMTS,
            .length = sizeof(*dsemts),
        },
        .DSMAS_handle = dsmad_handle,
        /*
         * NV: Reserved - the non-volatile flag from the DSMAS is what matters
         * V: EFI_MEMORY_SP
         */
        .EFI_memory_type_attr = is_pmem ? 2 : 1,
        .DPA_offset = 0,
        .DPA_length = memory_region_size(mr),
    };

    /* Header always at start of structure */
    cdat_table[CT3_CDAT_DSMAS] = g_steal_pointer(&dsmas);
    cdat_table[CT3_CDAT_DSLBIS0] = g_steal_pointer(&dslbis0);
    cdat_table[CT3_CDAT_DSLBIS1] = g_steal_pointer(&dslbis1);
    cdat_table[CT3_CDAT_DSLBIS2] = g_steal_pointer(&dslbis2);
    cdat_table[CT3_CDAT_DSLBIS3] = g_steal_pointer(&dslbis3);
    cdat_table[CT3_CDAT_DSEMTS] = g_steal_pointer(&dsemts);

    return 0;
}

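/*
 * Build the full CDAT table for the device: one block of entries for the
 * volatile region (if present, at DPA 0) followed by one for the persistent
 * region (if present, placed after the volatile region). Returns the number
 * of entries, 0 if the device has no memory, or a negative errno on failure.
 */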
static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
{
    g_autofree CDATSubHeader **table = NULL;
    CXLType3Dev *ct3d = priv;
    MemoryRegion *volatile_mr = NULL, *nonvolatile_mr = NULL;
    int dsmad_handle = 0;
    int cur_ent = 0;
    int len = 0;
    int rc, i;

    if (!ct3d->hostpmem && !ct3d->hostvmem) {
        return 0;
    }

    if (ct3d->hostvmem) {
        volatile_mr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (!volatile_mr) {
            return -EINVAL;
        }
        len += CT3_CDAT_NUM_ENTRIES;
    }

    if (ct3d->hostpmem) {
        nonvolatile_mr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (!nonvolatile_mr) {
            return -EINVAL;
        }
        len += CT3_CDAT_NUM_ENTRIES;
    }

    table = g_malloc0(len * sizeof(*table));
    if (!table) {
        return -ENOMEM;
    }

    /* Now fill them in */
    if (volatile_mr) {
        rc = ct3_build_cdat_entries_for_mr(table, dsmad_handle++, volatile_mr,
                                           false, 0);
        if (rc < 0) {
            return rc;
        }
        cur_ent = CT3_CDAT_NUM_ENTRIES;
    }

    if (nonvolatile_mr) {
        rc = ct3_build_cdat_entries_for_mr(&(table[cur_ent]), dsmad_handle++,
                                           nonvolatile_mr, true,
                                           (volatile_mr ?
                                            memory_region_size(volatile_mr) : 0));
        if (rc < 0) {
            goto error_cleanup;
        }
        cur_ent += CT3_CDAT_NUM_ENTRIES;
    }
    assert(len == cur_ent);

    *cdat_table = g_steal_pointer(&table);

    return len;
error_cleanup:
    for (i = 0; i < cur_ent; i++) {
        g_free(table[i]);
    }
    return rc;
}

static void ct3_free_cdat_table(CDATSubHeader **cdat_table, int num, void *priv)
{
    int i;

    for (i = 0; i < num; i++) {
        g_free(cdat_table[i]);
    }
    g_free(cdat_table);
}

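/*
 * DOE CDAT table access handler: each request returns a single CDAT entry,
 * chaining to the next one via entry_handle in the response, with
 * CXL_DOE_TAB_ENT_MAX marking the final entry.
 */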
static bool cxl_doe_cdat_rsp(DOECap *doe_cap)
{
    CDATObject *cdat = &CXL_TYPE3(doe_cap->pdev)->cxl_cstate.cdat;
    uint16_t ent;
    void *base;
    uint32_t len;
    CDATReq *req = pcie_doe_get_write_mbox_ptr(doe_cap);
    CDATRsp rsp;

    assert(cdat->entry_len);

    /* Discard if the request length is mismatched */
    if (pcie_doe_get_obj_len(req) <
        DIV_ROUND_UP(sizeof(CDATReq), DWORD_BYTE)) {
        return false;
    }

    ent = req->entry_handle;
    /* Reject out-of-range entry handles rather than reading off the table */
    if (ent >= cdat->entry_len) {
        return false;
    }
    base = cdat->entry[ent].base;
    len = cdat->entry[ent].length;

    rsp = (CDATRsp) {
        .header = {
            .vendor_id = CXL_VENDOR_ID,
            .data_obj_type = CXL_DOE_TABLE_ACCESS,
            .reserved = 0x0,
            .length = DIV_ROUND_UP((sizeof(rsp) + len), DWORD_BYTE),
        },
        .rsp_code = CXL_DOE_TAB_RSP,
        .table_type = CXL_DOE_TAB_TYPE_CDAT,
        .entry_handle = (ent < cdat->entry_len - 1) ?
                        ent + 1 : CXL_DOE_TAB_ENT_MAX,
    };

    memcpy(doe_cap->read_mbox, &rsp, sizeof(rsp));
    memcpy(doe_cap->read_mbox + DIV_ROUND_UP(sizeof(rsp), DWORD_BYTE),
           base, len);

    doe_cap->read_mbox_len += rsp.header.length;

    return true;
}

static uint32_t ct3d_config_read(PCIDevice *pci_dev, uint32_t addr, int size)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    uint32_t val;

    if (pcie_doe_read_config(&ct3d->doe_cdat, addr, size, &val)) {
        return val;
    }

    return pci_default_read_config(pci_dev, addr, size);
}

static void ct3d_config_write(PCIDevice *pci_dev, uint32_t addr, uint32_t val,
                              int size)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);

    pcie_doe_write_config(&ct3d->doe_cdat, addr, val, size);
    pci_default_write_config(pci_dev, addr, val, size);
    pcie_aer_write_config(pci_dev, addr, val, size);
}

/*
 * Null value of all Fs suggested by IEEE RA guidelines for use of
 * EUI, OUI and CID
 */
#define UI64_NULL ~(0ULL)

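/*
 * Create the PCIe DVSECs advertised by the device: the CXL device DVSEC
 * (including the volatile/persistent range sizes), the register locator
 * DVSEC, the GPF device DVSEC and the flex bus port DVSEC.
 */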
static void build_dvsecs(CXLType3Dev *ct3d)
{
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    uint8_t *dvsec;
    uint32_t range1_size_hi, range1_size_lo,
             range1_base_hi = 0, range1_base_lo = 0,
             range2_size_hi = 0, range2_size_lo = 0,
             range2_base_hi = 0, range2_base_lo = 0;

    /*
     * Volatile memory is mapped at DPA 0x0; persistent memory is mapped
     * after it, at an offset of the volatile region's size.
     */
    if (ct3d->hostvmem) {
        range1_size_hi = ct3d->hostvmem->size >> 32;
        range1_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                         (ct3d->hostvmem->size & 0xF0000000);
        if (ct3d->hostpmem) {
            range2_size_hi = ct3d->hostpmem->size >> 32;
            range2_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                             (ct3d->hostpmem->size & 0xF0000000);
        }
    } else {
        range1_size_hi = ct3d->hostpmem->size >> 32;
        range1_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                         (ct3d->hostpmem->size & 0xF0000000);
    }

    dvsec = (uint8_t *)&(CXLDVSECDevice){
        .cap = 0x1e,
        .ctrl = 0x2,
        .status2 = 0x2,
        .range1_size_hi = range1_size_hi,
        .range1_size_lo = range1_size_lo,
        .range1_base_hi = range1_base_hi,
        .range1_base_lo = range1_base_lo,
        .range2_size_hi = range2_size_hi,
        .range2_size_lo = range2_size_lo,
        .range2_base_hi = range2_base_hi,
        .range2_base_lo = range2_base_lo,
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               PCIE_CXL_DEVICE_DVSEC_LENGTH,
                               PCIE_CXL_DEVICE_DVSEC,
                               PCIE_CXL2_DEVICE_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){
        .rsvd         = 0,
        .reg0_base_lo = RBI_COMPONENT_REG | CXL_COMPONENT_REG_BAR_IDX,
        .reg0_base_hi = 0,
        .reg1_base_lo = RBI_CXL_DEVICE_REG | CXL_DEVICE_REG_BAR_IDX,
        .reg1_base_hi = 0,
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               REG_LOC_DVSEC_LENGTH, REG_LOC_DVSEC,
                               REG_LOC_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECDeviceGPF){
        .phase2_duration = 0x603, /* 3 seconds */
        .phase2_power = 0x33, /* 0x33 milliwatts */
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               GPF_DEVICE_DVSEC_LENGTH, GPF_DEVICE_DVSEC,
                               GPF_DEVICE_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECPortFlexBus){
        .cap                     = 0x26, /* 68B, IO, Mem, non-MLD */
        .ctrl                    = 0x02, /* IO always enabled */
        .status                  = 0x26, /* same as capabilities */
        .rcvd_mod_ts_data_phase1 = 0xef, /* Source of this value is unclear */
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               PCIE_FLEXBUS_PORT_DVSEC_LENGTH_2_0,
                               PCIE_FLEXBUS_PORT_DVSEC,
                               PCIE_FLEXBUS_PORT_DVSEC_REVID_2_0, dvsec);
}

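/*
 * Only HDM decoder 0 is implemented; commit/uncommit mirror the COMMIT
 * request into the COMMITTED status bit and clear any pending error.
 */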
static void hdm_decoder_commit(CXLType3Dev *ct3d, int which)
{
    ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
    uint32_t *cache_mem = cregs->cache_mem_registers;
    uint32_t ctrl;

    assert(which == 0);

    ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL);
    /* TODO: Sanity checks that the decoder is possible */
    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, ERR, 0);
    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);

    stl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL, ctrl);
}

static void hdm_decoder_uncommit(CXLType3Dev *ct3d, int which)
{
    ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
    uint32_t *cache_mem = cregs->cache_mem_registers;
    uint32_t ctrl;

    assert(which == 0);

    ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL);

    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, ERR, 0);
    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 0);

    stl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL, ctrl);
}

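/*
 * Map the QMP RAS error enums onto the bit positions used in the CXL RAS
 * status registers; returns -EINVAL for values with no CXL equivalent.
 */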
static int ct3d_qmp_uncor_err_to_cxl(CxlUncorErrorType qmp_err)
{
    switch (qmp_err) {
    case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_DATA_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_ADDRESS_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_ADDRESS_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_BE_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_BE_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_ECC:
        return CXL_RAS_UNC_ERR_CACHE_DATA_ECC;
    case CXL_UNCOR_ERROR_TYPE_MEM_DATA_PARITY:
        return CXL_RAS_UNC_ERR_MEM_DATA_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_ADDRESS_PARITY:
        return CXL_RAS_UNC_ERR_MEM_ADDRESS_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_BE_PARITY:
        return CXL_RAS_UNC_ERR_MEM_BE_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_DATA_ECC:
        return CXL_RAS_UNC_ERR_MEM_DATA_ECC;
    case CXL_UNCOR_ERROR_TYPE_REINIT_THRESHOLD:
        return CXL_RAS_UNC_ERR_REINIT_THRESHOLD;
    case CXL_UNCOR_ERROR_TYPE_RSVD_ENCODING:
        return CXL_RAS_UNC_ERR_RSVD_ENCODING;
    case CXL_UNCOR_ERROR_TYPE_POISON_RECEIVED:
        return CXL_RAS_UNC_ERR_POISON_RECEIVED;
    case CXL_UNCOR_ERROR_TYPE_RECEIVER_OVERFLOW:
        return CXL_RAS_UNC_ERR_RECEIVER_OVERFLOW;
    case CXL_UNCOR_ERROR_TYPE_INTERNAL:
        return CXL_RAS_UNC_ERR_INTERNAL;
    case CXL_UNCOR_ERROR_TYPE_CXL_IDE_TX:
        return CXL_RAS_UNC_ERR_CXL_IDE_TX;
    case CXL_UNCOR_ERROR_TYPE_CXL_IDE_RX:
        return CXL_RAS_UNC_ERR_CXL_IDE_RX;
    default:
        return -EINVAL;
    }
}

static int ct3d_qmp_cor_err_to_cxl(CxlCorErrorType qmp_err)
{
    switch (qmp_err) {
    case CXL_COR_ERROR_TYPE_CACHE_DATA_ECC:
        return CXL_RAS_COR_ERR_CACHE_DATA_ECC;
    case CXL_COR_ERROR_TYPE_MEM_DATA_ECC:
        return CXL_RAS_COR_ERR_MEM_DATA_ECC;
    case CXL_COR_ERROR_TYPE_CRC_THRESHOLD:
        return CXL_RAS_COR_ERR_CRC_THRESHOLD;
    case CXL_COR_ERROR_TYPE_RETRY_THRESHOLD:
        return CXL_RAS_COR_ERR_RETRY_THRESHOLD;
    case CXL_COR_ERROR_TYPE_CACHE_POISON_RECEIVED:
        return CXL_RAS_COR_ERR_CACHE_POISON_RECEIVED;
    case CXL_COR_ERROR_TYPE_MEM_POISON_RECEIVED:
        return CXL_RAS_COR_ERR_MEM_POISON_RECEIVED;
    case CXL_COR_ERROR_TYPE_PHYSICAL:
        return CXL_RAS_COR_ERR_PHYSICAL;
    default:
        return -EINVAL;
    }
}

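/*
 * MMIO write handler for the cache/mem component registers. Most registers
 * simply store the value, but three are special-cased: HDM decoder 0
 * control (triggers a commit or uncommit), the uncorrectable RAS status
 * register (RW1C with first-error-pointer and header-log maintenance) and
 * the correctable RAS status register (plain RW1C).
 */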
static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value,
                           unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;
    CXLType3Dev *ct3d = container_of(cxl_cstate, CXLType3Dev, cxl_cstate);
    uint32_t *cache_mem = cregs->cache_mem_registers;
    bool should_commit = false;
    bool should_uncommit = false;
    int which_hdm = -1;

    assert(size == 4);
    g_assert(offset < CXL2_COMPONENT_CM_REGION_SIZE);

    switch (offset) {
    case A_CXL_HDM_DECODER0_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        which_hdm = 0;
        break;
    case A_CXL_RAS_UNC_ERR_STATUS:
    {
        uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL);
        uint32_t fe = FIELD_EX32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                 FIRST_ERROR_POINTER);
        CXLError *cxl_err;
        uint32_t unc_err;

        /*
         * If a single bit is written that corresponds to the first error
         * pointer being cleared, update the status and header log.
         */
        if (!QTAILQ_EMPTY(&ct3d->error_list)) {
            if ((1 << fe) ^ value) {
                CXLError *cxl_next;
                /*
                 * Software is using the wrong flow for multiple header
                 * recording. Follow the behavior in PCIe r6.0, assuming
                 * multiple header support. It is an implementation-defined
                 * choice to clear all matching records if more than one bit
                 * is set - which corresponds closest to the behavior of
                 * hardware that is not capable of multiple header recording.
                 */
                QTAILQ_FOREACH_SAFE(cxl_err, &ct3d->error_list, node,
                                    cxl_next) {
                    if ((1 << cxl_err->type) & value) {
                        QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node);
                        g_free(cxl_err);
                    }
                }
            } else {
                /* Done with the previous FE, so drop it from the list */
                cxl_err = QTAILQ_FIRST(&ct3d->error_list);
                QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node);
                g_free(cxl_err);
            }

            /*
             * If there is another FE, then put that in place and update
             * the header log
             */
            if (!QTAILQ_EMPTY(&ct3d->error_list)) {
                uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0];
                int i;

                cxl_err = QTAILQ_FIRST(&ct3d->error_list);
                for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) {
                    stl_le_p(header_log + i, cxl_err->header[i]);
                }
                capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                     FIRST_ERROR_POINTER, cxl_err->type);
            } else {
                /*
                 * If there are no more errors, then follow the
                 * recommendation of PCIe spec r6.0 6.2.4.2 and set the
                 * first error pointer to a status bit that will never
                 * be used.
                 */
                capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                     FIRST_ERROR_POINTER,
                                     CXL_RAS_UNC_ERR_CXL_UNUSED);
            }
            stl_le_p((uint8_t *)cache_mem + A_CXL_RAS_ERR_CAP_CTRL, capctrl);
        }
        unc_err = 0;
        QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) {
            unc_err |= 1 << cxl_err->type;
        }
        stl_le_p((uint8_t *)cache_mem + offset, unc_err);

        return;
    }
    case A_CXL_RAS_COR_ERR_STATUS:
    {
        uint32_t rw1c = value;
        uint32_t temp = ldl_le_p((uint8_t *)cache_mem + offset);
        temp &= ~rw1c;
        stl_le_p((uint8_t *)cache_mem + offset, temp);
        return;
    }
    default:
        break;
    }

    stl_le_p((uint8_t *)cache_mem + offset, value);
    if (should_commit) {
        hdm_decoder_commit(ct3d, which_hdm);
    } else if (should_uncommit) {
        hdm_decoder_uncommit(ct3d, which_hdm);
    }
}

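/*
 * Validate the memdev properties and map the host memory backends: the
 * legacy "memdev" property is treated as persistent memory, and each
 * backend present gets its own DPA address space, with sizes accumulated
 * into cxl_dstate.mem_size.
 */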
static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
{
    DeviceState *ds = DEVICE(ct3d);

    if (!ct3d->hostmem && !ct3d->hostvmem && !ct3d->hostpmem) {
        error_setg(errp, "at least one memdev property must be set");
        return false;
    } else if (ct3d->hostmem && ct3d->hostpmem) {
        error_setg(errp, "[memdev] cannot be used with new "
                         "[persistent-memdev] property");
        return false;
    } else if (ct3d->hostmem) {
        /* Use of hostmem property implies pmem */
        ct3d->hostpmem = ct3d->hostmem;
        ct3d->hostmem = NULL;
    }

    if (ct3d->hostpmem && !ct3d->lsa) {
        error_setg(errp, "lsa property must be set for persistent devices");
        return false;
    }

    if (ct3d->hostvmem) {
        MemoryRegion *vmr;
        char *v_name;

        vmr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (!vmr) {
            error_setg(errp, "volatile memdev must have backing device");
            return false;
        }
        memory_region_set_nonvolatile(vmr, false);
        memory_region_set_enabled(vmr, true);
        host_memory_backend_set_mapped(ct3d->hostvmem, true);
        if (ds->id) {
            v_name = g_strdup_printf("cxl-type3-dpa-vmem-space:%s", ds->id);
        } else {
            v_name = g_strdup("cxl-type3-dpa-vmem-space");
        }
        address_space_init(&ct3d->hostvmem_as, vmr, v_name);
        ct3d->cxl_dstate.vmem_size = memory_region_size(vmr);
        ct3d->cxl_dstate.mem_size += memory_region_size(vmr);
        g_free(v_name);
    }

    if (ct3d->hostpmem) {
        MemoryRegion *pmr;
        char *p_name;

        pmr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (!pmr) {
            error_setg(errp, "persistent memdev must have backing device");
            return false;
        }
        memory_region_set_nonvolatile(pmr, true);
        memory_region_set_enabled(pmr, true);
        host_memory_backend_set_mapped(ct3d->hostpmem, true);
        if (ds->id) {
            p_name = g_strdup_printf("cxl-type3-dpa-pmem-space:%s", ds->id);
        } else {
            p_name = g_strdup("cxl-type3-dpa-pmem-space");
        }
        address_space_init(&ct3d->hostpmem_as, pmr, p_name);
        ct3d->cxl_dstate.pmem_size = memory_region_size(pmr);
        ct3d->cxl_dstate.mem_size += memory_region_size(pmr);
        g_free(p_name);
    }

    return true;
}

static DOEProtocol doe_cdat_prot[] = {
    { CXL_VENDOR_ID, CXL_DOE_TABLE_ACCESS, cxl_doe_cdat_rsp },
    { }
};

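/*
 * Realize: set up the backing memory, PCIe capabilities (serial number,
 * DOE, AER), the component and device register BARs, MSI-X vectors and
 * the CDAT table exposed through DOE.
 */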
static void ct3_realize(PCIDevice *pci_dev, Error **errp)
{
    ERRP_GUARD();
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    ComponentRegisters *regs = &cxl_cstate->crb;
    MemoryRegion *mr = &regs->component_registers;
    uint8_t *pci_conf = pci_dev->config;
    unsigned short msix_num = 6;
    int i, rc;

    QTAILQ_INIT(&ct3d->error_list);

    if (!cxl_setup_memory(ct3d, errp)) {
        return;
    }

    pci_config_set_prog_interface(pci_conf, 0x10);

    pcie_endpoint_cap_init(pci_dev, 0x80);
    if (ct3d->sn != UI64_NULL) {
        pcie_dev_ser_num_init(pci_dev, 0x100, ct3d->sn);
        cxl_cstate->dvsec_offset = 0x100 + 0x0c;
    } else {
        cxl_cstate->dvsec_offset = 0x100;
    }

    ct3d->cxl_cstate.pdev = pci_dev;
    build_dvsecs(ct3d);

    regs->special_ops = g_new0(MemoryRegionOps, 1);
    regs->special_ops->write = ct3d_reg_write;

    cxl_component_register_block_init(OBJECT(pci_dev), cxl_cstate,
                                      TYPE_CXL_TYPE3);

    pci_register_bar(
        pci_dev, CXL_COMPONENT_REG_BAR_IDX,
        PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64, mr);

    cxl_device_register_block_init(OBJECT(pci_dev), &ct3d->cxl_dstate);
    pci_register_bar(pci_dev, CXL_DEVICE_REG_BAR_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                         PCI_BASE_ADDRESS_MEM_TYPE_64,
                     &ct3d->cxl_dstate.device_registers);

    /* MSI(-X) Initialization */
    rc = msix_init_exclusive_bar(pci_dev, msix_num, 4, NULL);
    if (rc) {
        goto err_address_space_free;
    }
    for (i = 0; i < msix_num; i++) {
        msix_vector_use(pci_dev, i);
    }

    /* DOE Initialization */
    pcie_doe_init(pci_dev, &ct3d->doe_cdat, 0x190, doe_cdat_prot, true, 0);

    cxl_cstate->cdat.build_cdat_table = ct3_build_cdat_table;
    cxl_cstate->cdat.free_cdat_table = ct3_free_cdat_table;
    cxl_cstate->cdat.private = ct3d;
    cxl_doe_cdat_init(cxl_cstate, errp);
    if (*errp) {
        goto err_free_special_ops;
    }

    pcie_cap_deverr_init(pci_dev);
    /* Leave a bit of room for expansion */
    rc = pcie_aer_init(pci_dev, PCI_ERR_VER, 0x200, PCI_ERR_SIZEOF, NULL);
    if (rc) {
        goto err_release_cdat;
    }
    cxl_event_init(&ct3d->cxl_dstate, 2);

    return;

err_release_cdat:
    cxl_doe_cdat_release(cxl_cstate);
err_free_special_ops:
    g_free(regs->special_ops);
err_address_space_free:
    if (ct3d->hostpmem) {
        address_space_destroy(&ct3d->hostpmem_as);
    }
    if (ct3d->hostvmem) {
        address_space_destroy(&ct3d->hostvmem_as);
    }
}

static void ct3_exit(PCIDevice *pci_dev)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    ComponentRegisters *regs = &cxl_cstate->crb;

    pcie_aer_exit(pci_dev);
    cxl_doe_cdat_release(cxl_cstate);
    g_free(regs->special_ops);
    if (ct3d->hostpmem) {
        address_space_destroy(&ct3d->hostpmem_as);
    }
    if (ct3d->hostvmem) {
        address_space_destroy(&ct3d->hostvmem_as);
    }
}

/* TODO: Support multiple HDM decoders and DPA skip */
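/*
 * Decode a host physical address into a device physical address by
 * reversing HDM decoder 0's interleave: with an interleave granularity of
 * 2^(ig + 8) bytes and 2^iw ways, the DPA keeps the low (ig + 8) bits of
 * the HPA offset and appends the HPA bits above the way-select field,
 * shifted down by iw:
 *
 *   dpa = hpa_offset[ig+7:0] | (hpa_offset[63:ig+8+iw] >> iw)
 */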
static bool cxl_type3_dpa(CXLType3Dev *ct3d, hwaddr host_addr, uint64_t *dpa)
{
    uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers;
    uint64_t decoder_base, decoder_size, hpa_offset;
    uint32_t hdm0_ctrl;
    int ig, iw;

    decoder_base = (((uint64_t)cache_mem[R_CXL_HDM_DECODER0_BASE_HI] << 32) |
                    cache_mem[R_CXL_HDM_DECODER0_BASE_LO]);
    if ((uint64_t)host_addr < decoder_base) {
        return false;
    }

    hpa_offset = (uint64_t)host_addr - decoder_base;

    decoder_size = ((uint64_t)cache_mem[R_CXL_HDM_DECODER0_SIZE_HI] << 32) |
        cache_mem[R_CXL_HDM_DECODER0_SIZE_LO];
    if (hpa_offset >= decoder_size) {
        return false;
    }

    hdm0_ctrl = cache_mem[R_CXL_HDM_DECODER0_CTRL];
    iw = FIELD_EX32(hdm0_ctrl, CXL_HDM_DECODER0_CTRL, IW);
    ig = FIELD_EX32(hdm0_ctrl, CXL_HDM_DECODER0_CTRL, IG);

    *dpa = (MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) |
        ((MAKE_64BIT_MASK(8 + ig + iw, 64 - 8 - ig - iw) & hpa_offset) >> iw);

    return true;
}

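/*
 * Map a host physical address to the backing address space (volatile or
 * persistent) and the offset within it. The volatile region, when present,
 * occupies DPA [0, vmem_size); the persistent region follows it.
 */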
static int cxl_type3_hpa_to_as_and_dpa(CXLType3Dev *ct3d,
                                       hwaddr host_addr,
                                       unsigned int size,
                                       AddressSpace **as,
                                       uint64_t *dpa_offset)
{
    MemoryRegion *vmr = NULL, *pmr = NULL;

    if (ct3d->hostvmem) {
        vmr = host_memory_backend_get_memory(ct3d->hostvmem);
    }
    if (ct3d->hostpmem) {
        pmr = host_memory_backend_get_memory(ct3d->hostpmem);
    }

    if (!vmr && !pmr) {
        return -ENODEV;
    }

    if (!cxl_type3_dpa(ct3d, host_addr, dpa_offset)) {
        return -EINVAL;
    }

    /* Reject accesses that run past the end of the device memory */
    if (*dpa_offset + size > ct3d->cxl_dstate.mem_size) {
        return -EINVAL;
    }

    if (vmr) {
        if (*dpa_offset < memory_region_size(vmr)) {
            *as = &ct3d->hostvmem_as;
        } else {
            *as = &ct3d->hostpmem_as;
            *dpa_offset -= memory_region_size(vmr);
        }
    } else {
        *as = &ct3d->hostpmem_as;
    }

    return 0;
}

MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data,
                           unsigned size, MemTxAttrs attrs)
{
    uint64_t dpa_offset = 0;
    AddressSpace *as = NULL;
    int res;

    res = cxl_type3_hpa_to_as_and_dpa(CXL_TYPE3(d), host_addr, size,
                                      &as, &dpa_offset);
    if (res) {
        return MEMTX_ERROR;
    }

    return address_space_read(as, dpa_offset, attrs, data, size);
}

MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data,
                            unsigned size, MemTxAttrs attrs)
{
    uint64_t dpa_offset = 0;
    AddressSpace *as = NULL;
    int res;

    res = cxl_type3_hpa_to_as_and_dpa(CXL_TYPE3(d), host_addr, size,
                                      &as, &dpa_offset);
    if (res) {
        return MEMTX_ERROR;
    }

    return address_space_write(as, dpa_offset, attrs, &data, size);
}

static void ct3d_reset(DeviceState *dev)
{
    CXLType3Dev *ct3d = CXL_TYPE3(dev);
    uint32_t *reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    uint32_t *write_msk = ct3d->cxl_cstate.crb.cache_mem_regs_write_mask;

    cxl_component_register_init_common(reg_state, write_msk, CXL2_TYPE3_DEVICE);
    cxl_device_register_init_common(&ct3d->cxl_dstate);
}

static Property ct3_props[] = {
    DEFINE_PROP_LINK("memdev", CXLType3Dev, hostmem, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *), /* for backward compatibility */
    DEFINE_PROP_LINK("persistent-memdev", CXLType3Dev, hostpmem,
                     TYPE_MEMORY_BACKEND, HostMemoryBackend *),
    DEFINE_PROP_LINK("volatile-memdev", CXLType3Dev, hostvmem,
                     TYPE_MEMORY_BACKEND, HostMemoryBackend *),
    DEFINE_PROP_LINK("lsa", CXLType3Dev, lsa, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *),
    DEFINE_PROP_UINT64("sn", CXLType3Dev, sn, UI64_NULL),
    DEFINE_PROP_STRING("cdat", CXLType3Dev, cxl_cstate.cdat.filename),
    DEFINE_PROP_END_OF_LIST(),
};

static uint64_t get_lsa_size(CXLType3Dev *ct3d)
{
    MemoryRegion *mr;

    if (!ct3d->lsa) {
        return 0;
    }

    mr = host_memory_backend_get_memory(ct3d->lsa);
    return memory_region_size(mr);
}

static void validate_lsa_access(MemoryRegion *mr, uint64_t size,
                                uint64_t offset)
{
    assert(offset + size <= memory_region_size(mr));
    assert(offset + size > offset);
}

static uint64_t get_lsa(CXLType3Dev *ct3d, void *buf, uint64_t size,
                        uint64_t offset)
{
    MemoryRegion *mr;
    void *lsa;

    if (!ct3d->lsa) {
        return 0;
    }

    mr = host_memory_backend_get_memory(ct3d->lsa);
    validate_lsa_access(mr, size, offset);

    lsa = memory_region_get_ram_ptr(mr) + offset;
    memcpy(buf, lsa, size);

    return size;
}

static void set_lsa(CXLType3Dev *ct3d, const void *buf, uint64_t size,
                    uint64_t offset)
{
    MemoryRegion *mr;
    void *lsa;

    if (!ct3d->lsa) {
        return;
    }

    mr = host_memory_backend_get_memory(ct3d->lsa);
    validate_lsa_access(mr, size, offset);

    lsa = memory_region_get_ram_ptr(mr) + offset;
    memcpy(lsa, buf, size);
    memory_region_set_dirty(mr, offset, size);

    /*
     * As with the PMEM, label updates will be lost if the guest does not
     * exit gracefully.
     */
}

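/*
 * Overwrite a single cacheline of the device's backing store at the given
 * DPA, routing to the volatile or persistent address space as appropriate.
 */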
static bool set_cacheline(CXLType3Dev *ct3d, uint64_t dpa_offset, uint8_t *data)
{
    MemoryRegion *vmr = NULL, *pmr = NULL;
    AddressSpace *as;

    if (ct3d->hostvmem) {
        vmr = host_memory_backend_get_memory(ct3d->hostvmem);
    }
    if (ct3d->hostpmem) {
        pmr = host_memory_backend_get_memory(ct3d->hostpmem);
    }

    if (!vmr && !pmr) {
        return false;
    }

    if (dpa_offset + CXL_CACHE_LINE_SIZE > ct3d->cxl_dstate.mem_size) {
        return false;
    }

    if (vmr) {
        if (dpa_offset < memory_region_size(vmr)) {
            as = &ct3d->hostvmem_as;
        } else {
            as = &ct3d->hostpmem_as;
            dpa_offset -= memory_region_size(vmr);
        }
    } else {
        as = &ct3d->hostpmem_as;
    }

    /* data already points at the cacheline buffer; do not take its address */
    address_space_write(as, dpa_offset, MEMTXATTRS_UNSPECIFIED, data,
                        CXL_CACHE_LINE_SIZE);
    return true;
}

void cxl_set_poison_list_overflowed(CXLType3Dev *ct3d)
{
    ct3d->poison_list_overflowed = true;
    ct3d->poison_list_overflow_ts =
        cxl_device_get_timestamp(&ct3d->cxl_dstate);
}

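/*
 * QMP interface for injecting a poisoned DPA range. The range must be a
 * 64-byte aligned multiple of 64 bytes and must not overlap an existing
 * poison record; once CXL_POISON_LIST_LIMIT records exist, the device is
 * marked as having an overflowed poison list instead.
 */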
void qmp_cxl_inject_poison(const char *path, uint64_t start, uint64_t length,
                           Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLType3Dev *ct3d;
    CXLPoison *p;

    if (length % 64) {
        error_setg(errp, "Poison injection must be in multiples of 64 bytes");
        return;
    }
    if (start % 64) {
        error_setg(errp, "Poison start address must be 64 byte aligned");
        return;
    }
    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }

    ct3d = CXL_TYPE3(obj);

    QLIST_FOREACH(p, &ct3d->poison_list, node) {
        /* Reject any intersection with an existing poisoned region */
        if (start < p->start + p->length && p->start < start + length) {
            error_setg(errp,
                       "Overlap with existing poisoned region not supported");
            return;
        }
    }

    if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
        cxl_set_poison_list_overflowed(ct3d);
        return;
    }

    p = g_new0(CXLPoison, 1);
    p->length = length;
    p->start = start;
    p->type = CXL_POISON_TYPE_INTERNAL; /* Differs from injection via the mbox */

    QLIST_INSERT_HEAD(&ct3d->poison_list, p, node);
    ct3d->poison_list_cnt++;
}

/* For uncorrectable errors include support for multiple header recording */
void qmp_cxl_inject_uncorrectable_errors(const char *path,
                                         CXLUncorErrorRecordList *errors,
                                         Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    static PCIEAERErr err = {};
    CXLType3Dev *ct3d;
    CXLError *cxl_err;
    uint32_t *reg_state;
    uint32_t unc_err;
    bool first;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }

    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }

    err.status = PCI_ERR_UNC_INTN;
    err.source_id = pci_requester_id(PCI_DEVICE(obj));
    err.flags = 0;

    ct3d = CXL_TYPE3(obj);

    first = QTAILQ_EMPTY(&ct3d->error_list);
    reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    while (errors) {
        uint32List *header = errors->value->header;
        uint8_t header_count = 0;
        int cxl_err_code;

        cxl_err_code = ct3d_qmp_uncor_err_to_cxl(errors->value->type);
        if (cxl_err_code < 0) {
            error_setg(errp, "Unknown error code");
            return;
        }

        /* If the error is masked, nothing to do here */
        if (!((1 << cxl_err_code) &
              ~ldl_le_p(reg_state + R_CXL_RAS_UNC_ERR_MASK))) {
            errors = errors->next;
            continue;
        }

        cxl_err = g_malloc0(sizeof(*cxl_err));

        cxl_err->type = cxl_err_code;
        while (header && header_count < 32) {
            cxl_err->header[header_count++] = header->value;
            header = header->next;
        }
        /* The loop above stops at 32 DWORDs; anything left over is an error */
        if (header) {
            error_setg(errp, "Header must be 32 DWORD or less");
            g_free(cxl_err);
            return;
        }
        QTAILQ_INSERT_TAIL(&ct3d->error_list, cxl_err, node);

        errors = errors->next;
    }

    if (first && !QTAILQ_EMPTY(&ct3d->error_list)) {
        uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers;
        uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL);
        uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0];
        int i;

        cxl_err = QTAILQ_FIRST(&ct3d->error_list);
        for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) {
            stl_le_p(header_log + i, cxl_err->header[i]);
        }

        capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                             FIRST_ERROR_POINTER, cxl_err->type);
        stl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL, capctrl);
    }

    unc_err = 0;
    QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) {
        unc_err |= (1 << cxl_err->type);
    }
    if (!unc_err) {
        return;
    }

    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, unc_err);
    pcie_aer_inject_error(PCI_DEVICE(obj), &err);
}

void qmp_cxl_inject_correctable_error(const char *path, CxlCorErrorType type,
                                      Error **errp)
{
    static PCIEAERErr err = {};
    Object *obj = object_resolve_path(path, NULL);
    CXLType3Dev *ct3d;
    uint32_t *reg_state;
    uint32_t cor_err;
    int cxl_err_type;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }

    err.status = PCI_ERR_COR_INTERNAL;
    err.source_id = pci_requester_id(PCI_DEVICE(obj));
    err.flags = PCIE_AER_ERR_IS_CORRECTABLE;

    ct3d = CXL_TYPE3(obj);
    reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    cor_err = ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS);

    cxl_err_type = ct3d_qmp_cor_err_to_cxl(type);
    if (cxl_err_type < 0) {
        error_setg(errp, "Invalid COR error");
        return;
    }
    /* If the error is masked, nothing to do here */
    if (!((1 << cxl_err_type) &
          ~ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_MASK))) {
        return;
    }

    cor_err |= (1 << cxl_err_type);
    stl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS, cor_err);

    pcie_aer_inject_error(PCI_DEVICE(obj), &err);
}

static void cxl_assign_event_header(CXLEventRecordHdr *hdr,
                                    const QemuUUID *uuid, uint32_t flags,
                                    uint8_t length, uint64_t timestamp)
{
    st24_le_p(&hdr->flags, flags);
    hdr->length = length;
    memcpy(&hdr->id, uuid, sizeof(hdr->id));
    stq_le_p(&hdr->timestamp, timestamp);
}

static const QemuUUID gen_media_uuid = {
    .data = UUID(0xfbcd0a77, 0xc260, 0x417f,
                 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6),
};

static const QemuUUID dram_uuid = {
    .data = UUID(0x601dcbb3, 0x9c06, 0x4eab, 0xb8, 0xaf,
                 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24),
};

static const QemuUUID memory_module_uuid = {
    .data = UUID(0xfe927475, 0xdd59, 0x4339, 0xa5, 0x86,
                 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74),
};

#define CXL_GMER_VALID_CHANNEL                          BIT(0)
#define CXL_GMER_VALID_RANK                             BIT(1)
#define CXL_GMER_VALID_DEVICE                           BIT(2)
#define CXL_GMER_VALID_COMPONENT                        BIT(3)

static int ct3d_qmp_cxl_event_log_enc(CxlEventLog log)
{
    switch (log) {
    case CXL_EVENT_LOG_INFORMATIONAL:
        return CXL_EVENT_TYPE_INFO;
    case CXL_EVENT_LOG_WARNING:
        return CXL_EVENT_TYPE_WARN;
    case CXL_EVENT_LOG_FAILURE:
        return CXL_EVENT_TYPE_FAIL;
    case CXL_EVENT_LOG_FATAL:
        return CXL_EVENT_TYPE_FATAL;
    /* DCD not yet supported */
    default:
        return -EINVAL;
    }
}

/* Component ID is device specific.  Define this as a string. */
void qmp_cxl_inject_general_media_event(const char *path, CxlEventLog log,
                                        uint8_t flags, uint64_t dpa,
                                        uint8_t descriptor, uint8_t type,
                                        uint8_t transaction_type,
                                        bool has_channel, uint8_t channel,
                                        bool has_rank, uint8_t rank,
                                        bool has_device, uint32_t device,
                                        const char *component_id,
                                        Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLEventGenMedia gem;
    CXLEventRecordHdr *hdr = &gem.hdr;
    CXLDeviceState *cxlds;
    CXLType3Dev *ct3d;
    uint16_t valid_flags = 0;
    uint8_t enc_log;
    int rc;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }
    ct3d = CXL_TYPE3(obj);
    cxlds = &ct3d->cxl_dstate;

    rc = ct3d_qmp_cxl_event_log_enc(log);
    if (rc < 0) {
        error_setg(errp, "Unhandled error log type");
        return;
    }
    enc_log = rc;

    memset(&gem, 0, sizeof(gem));
    cxl_assign_event_header(hdr, &gen_media_uuid, flags, sizeof(gem),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));

    stq_le_p(&gem.phys_addr, dpa);
    gem.descriptor = descriptor;
    gem.type = type;
    gem.transaction_type = transaction_type;

    if (has_channel) {
        gem.channel = channel;
        valid_flags |= CXL_GMER_VALID_CHANNEL;
    }

    if (has_rank) {
        gem.rank = rank;
        valid_flags |= CXL_GMER_VALID_RANK;
    }

    if (has_device) {
        st24_le_p(gem.device, device);
        valid_flags |= CXL_GMER_VALID_DEVICE;
    }

    if (component_id) {
        strncpy((char *)gem.component_id, component_id,
                sizeof(gem.component_id) - 1);
        valid_flags |= CXL_GMER_VALID_COMPONENT;
    }

    stw_le_p(&gem.validity_flags, valid_flags);

    if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&gem)) {
        cxl_event_irq_assert(ct3d);
    }
}

#define CXL_DRAM_VALID_CHANNEL                          BIT(0)
#define CXL_DRAM_VALID_RANK                             BIT(1)
#define CXL_DRAM_VALID_NIBBLE_MASK                      BIT(2)
#define CXL_DRAM_VALID_BANK_GROUP                       BIT(3)
#define CXL_DRAM_VALID_BANK                             BIT(4)
#define CXL_DRAM_VALID_ROW                              BIT(5)
#define CXL_DRAM_VALID_COLUMN                           BIT(6)
#define CXL_DRAM_VALID_CORRECTION_MASK                  BIT(7)

void qmp_cxl_inject_dram_event(const char *path, CxlEventLog log, uint8_t flags,
                               uint64_t dpa, uint8_t descriptor,
                               uint8_t type, uint8_t transaction_type,
                               bool has_channel, uint8_t channel,
                               bool has_rank, uint8_t rank,
                               bool has_nibble_mask, uint32_t nibble_mask,
                               bool has_bank_group, uint8_t bank_group,
                               bool has_bank, uint8_t bank,
                               bool has_row, uint32_t row,
                               bool has_column, uint16_t column,
                               bool has_correction_mask,
                               uint64List *correction_mask,
                               Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLEventDram dram;
    CXLEventRecordHdr *hdr = &dram.hdr;
    CXLDeviceState *cxlds;
    CXLType3Dev *ct3d;
    uint16_t valid_flags = 0;
    uint8_t enc_log;
    int rc;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }
    ct3d = CXL_TYPE3(obj);
    cxlds = &ct3d->cxl_dstate;

    rc = ct3d_qmp_cxl_event_log_enc(log);
    if (rc < 0) {
        error_setg(errp, "Unhandled error log type");
        return;
    }
    enc_log = rc;

    memset(&dram, 0, sizeof(dram));
    cxl_assign_event_header(hdr, &dram_uuid, flags, sizeof(dram),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));
    stq_le_p(&dram.phys_addr, dpa);
    dram.descriptor = descriptor;
    dram.type = type;
    dram.transaction_type = transaction_type;

    if (has_channel) {
        dram.channel = channel;
        valid_flags |= CXL_DRAM_VALID_CHANNEL;
    }

    if (has_rank) {
        dram.rank = rank;
        valid_flags |= CXL_DRAM_VALID_RANK;
    }

    if (has_nibble_mask) {
        st24_le_p(dram.nibble_mask, nibble_mask);
        valid_flags |= CXL_DRAM_VALID_NIBBLE_MASK;
    }

    if (has_bank_group) {
        dram.bank_group = bank_group;
        valid_flags |= CXL_DRAM_VALID_BANK_GROUP;
    }

    if (has_bank) {
        dram.bank = bank;
        valid_flags |= CXL_DRAM_VALID_BANK;
    }

    if (has_row) {
        st24_le_p(dram.row, row);
        valid_flags |= CXL_DRAM_VALID_ROW;
    }

    if (has_column) {
        stw_le_p(&dram.column, column);
        valid_flags |= CXL_DRAM_VALID_COLUMN;
    }

    if (has_correction_mask) {
        int count = 0;
        while (correction_mask && count < 4) {
            stq_le_p(&dram.correction_mask[count],
                     correction_mask->value);
            count++;
            correction_mask = correction_mask->next;
        }
        valid_flags |= CXL_DRAM_VALID_CORRECTION_MASK;
    }

    stw_le_p(&dram.validity_flags, valid_flags);

    if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&dram)) {
        cxl_event_irq_assert(ct3d);
    }
}

void qmp_cxl_inject_memory_module_event(const char *path, CxlEventLog log,
                                        uint8_t flags, uint8_t type,
                                        uint8_t health_status,
                                        uint8_t media_status,
                                        uint8_t additional_status,
                                        uint8_t life_used,
                                        int16_t temperature,
                                        uint32_t dirty_shutdown_count,
                                        uint32_t corrected_volatile_error_count,
                                        uint32_t corrected_persistent_error_count,
                                        Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLEventMemoryModule module;
    CXLEventRecordHdr *hdr = &module.hdr;
    CXLDeviceState *cxlds;
    CXLType3Dev *ct3d;
    uint8_t enc_log;
    int rc;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }
    ct3d = CXL_TYPE3(obj);
    cxlds = &ct3d->cxl_dstate;

    rc = ct3d_qmp_cxl_event_log_enc(log);
    if (rc < 0) {
        error_setg(errp, "Unhandled error log type");
        return;
    }
    enc_log = rc;

    memset(&module, 0, sizeof(module));
    cxl_assign_event_header(hdr, &memory_module_uuid, flags, sizeof(module),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));

    module.type = type;
    module.health_status = health_status;
    module.media_status = media_status;
    module.additional_status = additional_status;
    module.life_used = life_used;
    stw_le_p(&module.temperature, temperature);
    stl_le_p(&module.dirty_shutdown_count, dirty_shutdown_count);
    stl_le_p(&module.corrected_volatile_error_count,
             corrected_volatile_error_count);
    stl_le_p(&module.corrected_persistent_error_count,
             corrected_persistent_error_count);

    if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&module)) {
        cxl_event_irq_assert(ct3d);
    }
}

static void ct3_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
    CXLType3Class *cvc = CXL_TYPE3_CLASS(oc);

    pc->realize = ct3_realize;
    pc->exit = ct3_exit;
    pc->class_id = PCI_CLASS_MEMORY_CXL;
    pc->vendor_id = PCI_VENDOR_ID_INTEL;
    pc->device_id = 0xd93; /* LVF for now */
    pc->revision = 1;

    pc->config_write = ct3d_config_write;
    pc->config_read = ct3d_config_read;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "CXL Memory Device (Type 3)";
    dc->reset = ct3d_reset;
    device_class_set_props(dc, ct3_props);

    cvc->get_lsa_size = get_lsa_size;
    cvc->get_lsa = get_lsa;
    cvc->set_lsa = set_lsa;
    cvc->set_cacheline = set_cacheline;
}

static const TypeInfo ct3d_info = {
    .name = TYPE_CXL_TYPE3,
    .parent = TYPE_PCI_DEVICE,
    .class_size = sizeof(struct CXLType3Class),
    .class_init = ct3_class_init,
    .instance_size = sizeof(CXLType3Dev),
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CXL_DEVICE },
        { INTERFACE_PCIE_DEVICE },
        {}
    },
};

static void ct3d_registers(void)
{
    type_register_static(&ct3d_info);
}

type_init(ct3d_registers);