xref: /qemu/hw/mem/cxl_type3.c (revision 4a1babe5)
1 /*
2  * CXL Type 3 (memory expander) device
3  *
4  * Copyright(C) 2020 Intel Corporation.
5  *
6  * This work is licensed under the terms of the GNU GPL, version 2. See the
7  * COPYING file in the top-level directory.
8  *
9  * SPDX-License-Identifier: GPL-2.0-only
10  */
11 
12 #include "qemu/osdep.h"
13 #include "qemu/units.h"
14 #include "qemu/error-report.h"
15 #include "qapi/qapi-commands-cxl.h"
16 #include "hw/mem/memory-device.h"
17 #include "hw/mem/pc-dimm.h"
18 #include "hw/pci/pci.h"
19 #include "hw/qdev-properties.h"
20 #include "qapi/error.h"
21 #include "qemu/log.h"
22 #include "qemu/module.h"
23 #include "qemu/pmem.h"
24 #include "qemu/range.h"
25 #include "qemu/rcu.h"
26 #include "qemu/guest-random.h"
27 #include "sysemu/hostmem.h"
28 #include "sysemu/numa.h"
29 #include "hw/cxl/cxl.h"
30 #include "hw/pci/msix.h"
31 
32 #define DWORD_BYTE 4
33 
34 /* Default CDAT entries for a memory region */
35 enum {
36     CT3_CDAT_DSMAS,
37     CT3_CDAT_DSLBIS0,
38     CT3_CDAT_DSLBIS1,
39     CT3_CDAT_DSLBIS2,
40     CT3_CDAT_DSLBIS3,
41     CT3_CDAT_DSEMTS,
42     CT3_CDAT_NUM_ENTRIES
43 };
44 
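/*
 * Build the six default CDAT entries describing one backing memory
 * region: a DSMAS giving its DPA range, four DSLBIS entries for
 * read/write latency and read/write bandwidth, and a DSEMTS for the EFI
 * memory type.  The latency and bandwidth figures are placeholders, not
 * measurements of a real device.
 */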
45 static void ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
46                                           int dsmad_handle, MemoryRegion *mr,
47                                           bool is_pmem, uint64_t dpa_base)
48 {
49     CDATDsmas *dsmas;
50     CDATDslbis *dslbis0;
51     CDATDslbis *dslbis1;
52     CDATDslbis *dslbis2;
53     CDATDslbis *dslbis3;
54     CDATDsemts *dsemts;
55 
56     dsmas = g_malloc(sizeof(*dsmas));
57     *dsmas = (CDATDsmas) {
58         .header = {
59             .type = CDAT_TYPE_DSMAS,
60             .length = sizeof(*dsmas),
61         },
62         .DSMADhandle = dsmad_handle,
63         .flags = is_pmem ? CDAT_DSMAS_FLAG_NV : 0,
64         .DPA_base = dpa_base,
65         .DPA_length = memory_region_size(mr),
66     };
67 
68     /* For now, no memory side cache; plausible placeholder numbers */
69     dslbis0 = g_malloc(sizeof(*dslbis0));
70     *dslbis0 = (CDATDslbis) {
71         .header = {
72             .type = CDAT_TYPE_DSLBIS,
73             .length = sizeof(*dslbis0),
74         },
75         .handle = dsmad_handle,
76         .flags = HMAT_LB_MEM_MEMORY,
77         .data_type = HMAT_LB_DATA_READ_LATENCY,
78         .entry_base_unit = 10000, /* 10ns base */
79         .entry[0] = 15, /* 150ns */
80     };
81 
82     dslbis1 = g_malloc(sizeof(*dslbis1));
83     *dslbis1 = (CDATDslbis) {
84         .header = {
85             .type = CDAT_TYPE_DSLBIS,
86             .length = sizeof(*dslbis1),
87         },
88         .handle = dsmad_handle,
89         .flags = HMAT_LB_MEM_MEMORY,
90         .data_type = HMAT_LB_DATA_WRITE_LATENCY,
91         .entry_base_unit = 10000,
92         .entry[0] = 25, /* 250ns */
93     };
94 
95     dslbis2 = g_malloc(sizeof(*dslbis2));
96     *dslbis2 = (CDATDslbis) {
97         .header = {
98             .type = CDAT_TYPE_DSLBIS,
99             .length = sizeof(*dslbis2),
100         },
101         .handle = dsmad_handle,
102         .flags = HMAT_LB_MEM_MEMORY,
103         .data_type = HMAT_LB_DATA_READ_BANDWIDTH,
104         .entry_base_unit = 1000, /* GB/s */
105         .entry[0] = 16,
106     };
107 
108     dslbis3 = g_malloc(sizeof(*dslbis3));
109     *dslbis3 = (CDATDslbis) {
110         .header = {
111             .type = CDAT_TYPE_DSLBIS,
112             .length = sizeof(*dslbis3),
113         },
114         .handle = dsmad_handle,
115         .flags = HMAT_LB_MEM_MEMORY,
116         .data_type = HMAT_LB_DATA_WRITE_BANDWIDTH,
117         .entry_base_unit = 1000, /* GB/s */
118         .entry[0] = 16,
119     };
120 
121     dsemts = g_malloc(sizeof(*dsemts));
122     *dsemts = (CDATDsemts) {
123         .header = {
124             .type = CDAT_TYPE_DSEMTS,
125             .length = sizeof(*dsemts),
126         },
127         .DSMAS_handle = dsmad_handle,
128         /*
129      * NV: Reserved - the non-volatile flag from DSMAS is what matters
130          * V: EFI_MEMORY_SP
131          */
132         .EFI_memory_type_attr = is_pmem ? 2 : 1,
133         .DPA_offset = 0,
134         .DPA_length = memory_region_size(mr),
135     };
136 
137     /* Header always at start of structure */
138     cdat_table[CT3_CDAT_DSMAS] = (CDATSubHeader *)dsmas;
139     cdat_table[CT3_CDAT_DSLBIS0] = (CDATSubHeader *)dslbis0;
140     cdat_table[CT3_CDAT_DSLBIS1] = (CDATSubHeader *)dslbis1;
141     cdat_table[CT3_CDAT_DSLBIS2] = (CDATSubHeader *)dslbis2;
142     cdat_table[CT3_CDAT_DSLBIS3] = (CDATSubHeader *)dslbis3;
143     cdat_table[CT3_CDAT_DSEMTS] = (CDATSubHeader *)dsemts;
144 }
145 
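/*
 * Build the device's CDAT table: one block of entries for the volatile
 * region (if present, at DPA 0) followed by one block for the persistent
 * region (if present, placed immediately above it).  Returns the number
 * of entries, 0 when the device has no memory, or -EINVAL on error.
 */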
146 static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
147 {
148     g_autofree CDATSubHeader **table = NULL;
149     CXLType3Dev *ct3d = priv;
150     MemoryRegion *volatile_mr = NULL, *nonvolatile_mr = NULL;
151     int dsmad_handle = 0;
152     int cur_ent = 0;
153     int len = 0;
154 
155     if (!ct3d->hostpmem && !ct3d->hostvmem) {
156         return 0;
157     }
158 
159     if (ct3d->hostvmem) {
160         volatile_mr = host_memory_backend_get_memory(ct3d->hostvmem);
161         if (!volatile_mr) {
162             return -EINVAL;
163         }
164         len += CT3_CDAT_NUM_ENTRIES;
165     }
166 
167     if (ct3d->hostpmem) {
168         nonvolatile_mr = host_memory_backend_get_memory(ct3d->hostpmem);
169         if (!nonvolatile_mr) {
170             return -EINVAL;
171         }
172         len += CT3_CDAT_NUM_ENTRIES;
173     }
174 
175     table = g_malloc0(len * sizeof(*table));
176 
177     /* Now fill them in */
178     if (volatile_mr) {
179         ct3_build_cdat_entries_for_mr(table, dsmad_handle++, volatile_mr,
180                                       false, 0);
181         cur_ent = CT3_CDAT_NUM_ENTRIES;
182     }
183 
184     if (nonvolatile_mr) {
185         uint64_t base = volatile_mr ? memory_region_size(volatile_mr) : 0;
186         ct3_build_cdat_entries_for_mr(&(table[cur_ent]), dsmad_handle++,
187                                       nonvolatile_mr, true, base);
188         cur_ent += CT3_CDAT_NUM_ENTRIES;
189     }
190     assert(len == cur_ent);
191 
192     *cdat_table = g_steal_pointer(&table);
193 
194     return len;
195 }
196 
197 static void ct3_free_cdat_table(CDATSubHeader **cdat_table, int num, void *priv)
198 {
199     int i;
200 
201     for (i = 0; i < num; i++) {
202         g_free(cdat_table[i]);
203     }
204     g_free(cdat_table);
205 }
206 
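/*
 * Handle a DOE CDAT table access request.  The requester reads the table
 * one entry per exchange; each response carries the handle of the next
 * entry, or CXL_DOE_TAB_ENT_MAX once the final entry has been returned.
 * DOE header lengths are expressed in DWORDs, hence the DIV_ROUND_UP
 * conversions from byte sizes.
 */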
207 static bool cxl_doe_cdat_rsp(DOECap *doe_cap)
208 {
209     CDATObject *cdat = &CXL_TYPE3(doe_cap->pdev)->cxl_cstate.cdat;
210     uint16_t ent;
211     void *base;
212     uint32_t len;
213     CDATReq *req = pcie_doe_get_write_mbox_ptr(doe_cap);
214     CDATRsp rsp;
215 
216     assert(cdat->entry_len);
217 
218     /* Discard if request length mismatched */
219     if (pcie_doe_get_obj_len(req) <
220         DIV_ROUND_UP(sizeof(CDATReq), DWORD_BYTE)) {
221         return false;
222     }
223 
224     ent = req->entry_handle;
        if (ent >= cdat->entry_len) {
            /* Reject out-of-range handles rather than reading past the table */
            return false;
        }
225     base = cdat->entry[ent].base;
226     len = cdat->entry[ent].length;
227 
228     rsp = (CDATRsp) {
229         .header = {
230             .vendor_id = CXL_VENDOR_ID,
231             .data_obj_type = CXL_DOE_TABLE_ACCESS,
232             .reserved = 0x0,
233             .length = DIV_ROUND_UP((sizeof(rsp) + len), DWORD_BYTE),
234         },
235         .rsp_code = CXL_DOE_TAB_RSP,
236         .table_type = CXL_DOE_TAB_TYPE_CDAT,
237         .entry_handle = (ent < cdat->entry_len - 1) ?
238                         ent + 1 : CXL_DOE_TAB_ENT_MAX,
239     };
240 
241     memcpy(doe_cap->read_mbox, &rsp, sizeof(rsp));
242     memcpy(doe_cap->read_mbox + DIV_ROUND_UP(sizeof(rsp), DWORD_BYTE),
243            base, len);
244 
245     doe_cap->read_mbox_len += rsp.header.length;
246 
247     return true;
248 }
249 
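/*
 * The DOE mailbox registers live in PCI config space, so config accesses
 * are offered to the DOE layer as well as to the default PCI (and, on
 * writes, AER) handlers.
 */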
250 static uint32_t ct3d_config_read(PCIDevice *pci_dev, uint32_t addr, int size)
251 {
252     CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
253     uint32_t val;
254 
255     if (pcie_doe_read_config(&ct3d->doe_cdat, addr, size, &val)) {
256         return val;
257     }
258 
259     return pci_default_read_config(pci_dev, addr, size);
260 }
261 
262 static void ct3d_config_write(PCIDevice *pci_dev, uint32_t addr, uint32_t val,
263                               int size)
264 {
265     CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
266 
267     pcie_doe_write_config(&ct3d->doe_cdat, addr, val, size);
268     pci_default_write_config(pci_dev, addr, val, size);
269     pcie_aer_write_config(pci_dev, addr, val, size);
270 }
271 
272 /*
273  * Null value of all Fs suggested by IEEE RA guidelines for use of
274  * EUI, OUI and CID
275  */
276 #define UI64_NULL ~(0ULL)
277 
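/*
 * Build the DVSECs advertised in config space: the CXL device DVSEC
 * carrying the memory range registers, the register locator, the GPF
 * (Global Persistent Flush) timings and the flex bus port capability.
 */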
278 static void build_dvsecs(CXLType3Dev *ct3d)
279 {
280     CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
281     uint8_t *dvsec;
282     uint32_t range1_size_hi, range1_size_lo,
283              range1_base_hi = 0, range1_base_lo = 0,
284              range2_size_hi = 0, range2_size_lo = 0,
285              range2_base_hi = 0, range2_base_lo = 0;
286 
287     /*
288      * Volatile memory is mapped at DPA 0x0,
289      * persistent memory at DPA (volatile->size).
290      */
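    /*
     * In the range size low words below, 0x3 sets the Memory_Info_Valid
     * and Memory_Active bits, the shifted constants fill the media type
     * and class fields (encodings per the CXL specification), and the
     * 0xF0000000 mask carries bits 31:28 of the range size.
     */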
291     if (ct3d->hostvmem) {
292         range1_size_hi = ct3d->hostvmem->size >> 32;
293         range1_size_lo = (2 << 5) | (2 << 2) | 0x3 |
294                          (ct3d->hostvmem->size & 0xF0000000);
295         if (ct3d->hostpmem) {
296             range2_size_hi = ct3d->hostpmem->size >> 32;
297             range2_size_lo = (2 << 5) | (2 << 2) | 0x3 |
298                              (ct3d->hostpmem->size & 0xF0000000);
299         }
300     } else {
301         range1_size_hi = ct3d->hostpmem->size >> 32;
302         range1_size_lo = (2 << 5) | (2 << 2) | 0x3 |
303                          (ct3d->hostpmem->size & 0xF0000000);
304     }
305 
306     dvsec = (uint8_t *)&(CXLDVSECDevice){
307         .cap = 0x1e,
308         .ctrl = 0x2,
309         .status2 = 0x2,
310         .range1_size_hi = range1_size_hi,
311         .range1_size_lo = range1_size_lo,
312         .range1_base_hi = range1_base_hi,
313         .range1_base_lo = range1_base_lo,
314         .range2_size_hi = range2_size_hi,
315         .range2_size_lo = range2_size_lo,
316         .range2_base_hi = range2_base_hi,
317         .range2_base_lo = range2_base_lo,
318     };
319     cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
320                                PCIE_CXL_DEVICE_DVSEC_LENGTH,
321                                PCIE_CXL_DEVICE_DVSEC,
322                                PCIE_CXL31_DEVICE_DVSEC_REVID, dvsec);
323 
324     dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){
325         .rsvd         = 0,
326         .reg0_base_lo = RBI_COMPONENT_REG | CXL_COMPONENT_REG_BAR_IDX,
327         .reg0_base_hi = 0,
328         .reg1_base_lo = RBI_CXL_DEVICE_REG | CXL_DEVICE_REG_BAR_IDX,
329         .reg1_base_hi = 0,
330     };
331     cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
332                                REG_LOC_DVSEC_LENGTH, REG_LOC_DVSEC,
333                                REG_LOC_DVSEC_REVID, dvsec);
334     dvsec = (uint8_t *)&(CXLDVSECDeviceGPF){
335         .phase2_duration = 0x603, /* 3 seconds */
336         .phase2_power = 0x33, /* 0x33 milliwatts */
337     };
338     cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
339                                GPF_DEVICE_DVSEC_LENGTH, GPF_DEVICE_DVSEC,
340                                GPF_DEVICE_DVSEC_REVID, dvsec);
341 
342     dvsec = (uint8_t *)&(CXLDVSECPortFlexBus){
343         .cap                     = 0x26, /* 68B, IO, Mem, non-MLD */
344         .ctrl                    = 0x02, /* IO always enabled */
345         .status                  = 0x26, /* same as capabilities */
346         .rcvd_mod_ts_data_phase1 = 0xef, /* Meaning unclear; value needs verification */
347     };
348     cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
349                                PCIE_CXL3_FLEXBUS_PORT_DVSEC_LENGTH,
350                                PCIE_FLEXBUS_PORT_DVSEC,
351                                PCIE_CXL3_FLEXBUS_PORT_DVSEC_REVID, dvsec);
352 }
353 
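/*
 * Commit/uncommit mirror the guest's COMMIT request straight into the
 * COMMITTED status bit; no validation of the programmed decode is
 * performed yet (see the TODO below).
 */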
354 static void hdm_decoder_commit(CXLType3Dev *ct3d, int which)
355 {
356     int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
357     ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
358     uint32_t *cache_mem = cregs->cache_mem_registers;
359     uint32_t ctrl;
360 
361     ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc);
362     /* TODO: Sanity checks that the decoder is possible */
363     ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, ERR, 0);
364     ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);
365 
366     stl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc, ctrl);
367 }
368 
369 static void hdm_decoder_uncommit(CXLType3Dev *ct3d, int which)
370 {
371     int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
372     ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
373     uint32_t *cache_mem = cregs->cache_mem_registers;
374     uint32_t ctrl;
375 
376     ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc);
377 
378     ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, ERR, 0);
379     ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 0);
380 
381     stl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc, ctrl);
382 }
383 
384 static int ct3d_qmp_uncor_err_to_cxl(CxlUncorErrorType qmp_err)
385 {
386     switch (qmp_err) {
387     case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_PARITY:
388         return CXL_RAS_UNC_ERR_CACHE_DATA_PARITY;
389     case CXL_UNCOR_ERROR_TYPE_CACHE_ADDRESS_PARITY:
390         return CXL_RAS_UNC_ERR_CACHE_ADDRESS_PARITY;
391     case CXL_UNCOR_ERROR_TYPE_CACHE_BE_PARITY:
392         return CXL_RAS_UNC_ERR_CACHE_BE_PARITY;
393     case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_ECC:
394         return CXL_RAS_UNC_ERR_CACHE_DATA_ECC;
395     case CXL_UNCOR_ERROR_TYPE_MEM_DATA_PARITY:
396         return CXL_RAS_UNC_ERR_MEM_DATA_PARITY;
397     case CXL_UNCOR_ERROR_TYPE_MEM_ADDRESS_PARITY:
398         return CXL_RAS_UNC_ERR_MEM_ADDRESS_PARITY;
399     case CXL_UNCOR_ERROR_TYPE_MEM_BE_PARITY:
400         return CXL_RAS_UNC_ERR_MEM_BE_PARITY;
401     case CXL_UNCOR_ERROR_TYPE_MEM_DATA_ECC:
402         return CXL_RAS_UNC_ERR_MEM_DATA_ECC;
403     case CXL_UNCOR_ERROR_TYPE_REINIT_THRESHOLD:
404         return CXL_RAS_UNC_ERR_REINIT_THRESHOLD;
405     case CXL_UNCOR_ERROR_TYPE_RSVD_ENCODING:
406         return CXL_RAS_UNC_ERR_RSVD_ENCODING;
407     case CXL_UNCOR_ERROR_TYPE_POISON_RECEIVED:
408         return CXL_RAS_UNC_ERR_POISON_RECEIVED;
409     case CXL_UNCOR_ERROR_TYPE_RECEIVER_OVERFLOW:
410         return CXL_RAS_UNC_ERR_RECEIVER_OVERFLOW;
411     case CXL_UNCOR_ERROR_TYPE_INTERNAL:
412         return CXL_RAS_UNC_ERR_INTERNAL;
413     case CXL_UNCOR_ERROR_TYPE_CXL_IDE_TX:
414         return CXL_RAS_UNC_ERR_CXL_IDE_TX;
415     case CXL_UNCOR_ERROR_TYPE_CXL_IDE_RX:
416         return CXL_RAS_UNC_ERR_CXL_IDE_RX;
417     default:
418         return -EINVAL;
419     }
420 }
421 
422 static int ct3d_qmp_cor_err_to_cxl(CxlCorErrorType qmp_err)
423 {
424     switch (qmp_err) {
425     case CXL_COR_ERROR_TYPE_CACHE_DATA_ECC:
426         return CXL_RAS_COR_ERR_CACHE_DATA_ECC;
427     case CXL_COR_ERROR_TYPE_MEM_DATA_ECC:
428         return CXL_RAS_COR_ERR_MEM_DATA_ECC;
429     case CXL_COR_ERROR_TYPE_CRC_THRESHOLD:
430         return CXL_RAS_COR_ERR_CRC_THRESHOLD;
431     case CXL_COR_ERROR_TYPE_RETRY_THRESHOLD:
432         return CXL_RAS_COR_ERR_RETRY_THRESHOLD;
433     case CXL_COR_ERROR_TYPE_CACHE_POISON_RECEIVED:
434         return CXL_RAS_COR_ERR_CACHE_POISON_RECEIVED;
435     case CXL_COR_ERROR_TYPE_MEM_POISON_RECEIVED:
436         return CXL_RAS_COR_ERR_MEM_POISON_RECEIVED;
437     case CXL_COR_ERROR_TYPE_PHYSICAL:
438         return CXL_RAS_COR_ERR_PHYSICAL;
439     default:
440         return -EINVAL;
441     }
442 }
443 
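/*
 * Write handler for the component (cache_mem) registers.  HDM decoder
 * control writes trigger the (un)commit handling above; the RAS status
 * registers implement RW1C semantics and the First Error Pointer flow
 * described in the inline comments.
 */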
444 static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value,
445                            unsigned size)
446 {
447     CXLComponentState *cxl_cstate = opaque;
448     ComponentRegisters *cregs = &cxl_cstate->crb;
449     CXLType3Dev *ct3d = container_of(cxl_cstate, CXLType3Dev, cxl_cstate);
450     uint32_t *cache_mem = cregs->cache_mem_registers;
451     bool should_commit = false;
452     bool should_uncommit = false;
453     int which_hdm = -1;
454 
455     assert(size == 4);
456     g_assert(offset < CXL2_COMPONENT_CM_REGION_SIZE);
457 
458     switch (offset) {
459     case A_CXL_HDM_DECODER0_CTRL:
460         should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
461         should_uncommit = !should_commit;
462         which_hdm = 0;
463         break;
464     case A_CXL_HDM_DECODER1_CTRL:
465         should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
466         should_uncommit = !should_commit;
467         which_hdm = 1;
468         break;
469     case A_CXL_HDM_DECODER2_CTRL:
470         should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
471         should_uncommit = !should_commit;
472         which_hdm = 2;
473         break;
474     case A_CXL_HDM_DECODER3_CTRL:
475         should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
476         should_uncommit = !should_commit;
477         which_hdm = 3;
478         break;
479     case A_CXL_RAS_UNC_ERR_STATUS:
480     {
481         uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL);
482         uint32_t fe = FIELD_EX32(capctrl, CXL_RAS_ERR_CAP_CTRL,
483                                  FIRST_ERROR_POINTER);
484         CXLError *cxl_err;
485         uint32_t unc_err;
486 
487         /*
488          * If single bit written that corresponds to the first error
489          * pointer being cleared, update the status and header log.
490          */
491         if (!QTAILQ_EMPTY(&ct3d->error_list)) {
492             if ((1 << fe) ^ value) {
493                 CXLError *cxl_next;
494                 /*
495                  * Software is using wrong flow for multiple header recording
496                  * Following behavior in PCIe r6.0 and assuming multiple
497                  * header support. Implementation defined choice to clear all
498                  * matching records if more than one bit set - which corresponds
499                  * closest to behavior of hardware not capable of multiple
500                  * header recording.
501                  */
502                 QTAILQ_FOREACH_SAFE(cxl_err, &ct3d->error_list, node,
503                                     cxl_next) {
504                     if ((1 << cxl_err->type) & value) {
505                         QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node);
506                         g_free(cxl_err);
507                     }
508                 }
509             } else {
510                 /* Done with previous FE, so drop from list */
511                 cxl_err = QTAILQ_FIRST(&ct3d->error_list);
512                 QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node);
513                 g_free(cxl_err);
514             }
515 
516             /*
517              * If there is another FE, then put that in place and update
518              * the header log
519              */
520             if (!QTAILQ_EMPTY(&ct3d->error_list)) {
521                 uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0];
522                 int i;
523 
524                 cxl_err = QTAILQ_FIRST(&ct3d->error_list);
525                 for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) {
526                     stl_le_p(header_log + i, cxl_err->header[i]);
527                 }
528                 capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
529                                      FIRST_ERROR_POINTER, cxl_err->type);
530             } else {
531                 /*
532                  * If no more errors, then follow recommendation of PCI spec
533                  * r6.0 6.2.4.2 to set the first error pointer to a status
534                  * bit that will never be used.
535                  */
536                 capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
537                                      FIRST_ERROR_POINTER,
538                                      CXL_RAS_UNC_ERR_CXL_UNUSED);
539             }
540             stl_le_p((uint8_t *)cache_mem + A_CXL_RAS_ERR_CAP_CTRL, capctrl);
541         }
542         unc_err = 0;
543         QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) {
544             unc_err |= 1 << cxl_err->type;
545         }
546         stl_le_p((uint8_t *)cache_mem + offset, unc_err);
547 
548         return;
549     }
550     case A_CXL_RAS_COR_ERR_STATUS:
551     {
552         uint32_t rw1c = value;
553         uint32_t temp = ldl_le_p((uint8_t *)cache_mem + offset);
554         temp &= ~rw1c;
555         stl_le_p((uint8_t *)cache_mem + offset, temp);
556         return;
557     }
558     default:
559         break;
560     }
561 
562     stl_le_p((uint8_t *)cache_mem + offset, value);
563     if (should_commit) {
564         hdm_decoder_commit(ct3d, which_hdm);
565     } else if (should_uncommit) {
566         hdm_decoder_uncommit(ct3d, which_hdm);
567     }
568 }
569 
570 static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
571 {
572     DeviceState *ds = DEVICE(ct3d);
573 
574     if (!ct3d->hostmem && !ct3d->hostvmem && !ct3d->hostpmem) {
575         error_setg(errp, "at least one memdev property must be set");
576         return false;
577     } else if (ct3d->hostmem && ct3d->hostpmem) {
578         error_setg(errp, "[memdev] cannot be used with new "
579                          "[persistent-memdev] property");
580         return false;
581     } else if (ct3d->hostmem) {
582         /* Use of hostmem property implies pmem */
583         ct3d->hostpmem = ct3d->hostmem;
584         ct3d->hostmem = NULL;
585     }
586 
587     if (ct3d->hostpmem && !ct3d->lsa) {
588         error_setg(errp, "lsa property must be set for persistent devices");
589         return false;
590     }
591 
592     if (ct3d->hostvmem) {
593         MemoryRegion *vmr;
594         char *v_name;
595 
596         vmr = host_memory_backend_get_memory(ct3d->hostvmem);
597         if (!vmr) {
598             error_setg(errp, "volatile memdev must have backing device");
599             return false;
600         }
601         memory_region_set_nonvolatile(vmr, false);
602         memory_region_set_enabled(vmr, true);
603         host_memory_backend_set_mapped(ct3d->hostvmem, true);
604         if (ds->id) {
605             v_name = g_strdup_printf("cxl-type3-dpa-vmem-space:%s", ds->id);
606         } else {
607             v_name = g_strdup("cxl-type3-dpa-vmem-space");
608         }
609         address_space_init(&ct3d->hostvmem_as, vmr, v_name);
610         ct3d->cxl_dstate.vmem_size = memory_region_size(vmr);
611         ct3d->cxl_dstate.mem_size += memory_region_size(vmr);
612         g_free(v_name);
613     }
614 
615     if (ct3d->hostpmem) {
616         MemoryRegion *pmr;
617         char *p_name;
618 
619         pmr = host_memory_backend_get_memory(ct3d->hostpmem);
620         if (!pmr) {
621             error_setg(errp, "persistent memdev must have backing device");
622             return false;
623         }
624         memory_region_set_nonvolatile(pmr, true);
625         memory_region_set_enabled(pmr, true);
626         host_memory_backend_set_mapped(ct3d->hostpmem, true);
627         if (ds->id) {
628             p_name = g_strdup_printf("cxl-type3-dpa-pmem-space:%s", ds->id);
629         } else {
630             p_name = g_strdup("cxl-type3-dpa-pmem-space");
631         }
632         address_space_init(&ct3d->hostpmem_as, pmr, p_name);
633         ct3d->cxl_dstate.pmem_size = memory_region_size(pmr);
634         ct3d->cxl_dstate.mem_size += memory_region_size(pmr);
635         g_free(p_name);
636     }
637 
638     return true;
639 }
640 
641 static DOEProtocol doe_cdat_prot[] = {
642     { CXL_VENDOR_ID, CXL_DOE_TABLE_ACCESS, cxl_doe_cdat_rsp },
643     { }
644 };
645 
646 static void ct3_realize(PCIDevice *pci_dev, Error **errp)
647 {
648     ERRP_GUARD();
649     CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
650     CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
651     ComponentRegisters *regs = &cxl_cstate->crb;
652     MemoryRegion *mr = &regs->component_registers;
653     uint8_t *pci_conf = pci_dev->config;
654     unsigned short msix_num = 6;
655     int i, rc;
656 
657     QTAILQ_INIT(&ct3d->error_list);
658 
659     if (!cxl_setup_memory(ct3d, errp)) {
660         return;
661     }
662 
663     pci_config_set_prog_interface(pci_conf, 0x10);
664 
665     pcie_endpoint_cap_init(pci_dev, 0x80);
666     if (ct3d->sn != UI64_NULL) {
667         pcie_dev_ser_num_init(pci_dev, 0x100, ct3d->sn);
668         cxl_cstate->dvsec_offset = 0x100 + 0x0c;
669     } else {
670         cxl_cstate->dvsec_offset = 0x100;
671     }
672 
673     ct3d->cxl_cstate.pdev = pci_dev;
674     build_dvsecs(ct3d);
675 
676     regs->special_ops = g_new0(MemoryRegionOps, 1);
677     regs->special_ops->write = ct3d_reg_write;
678 
679     cxl_component_register_block_init(OBJECT(pci_dev), cxl_cstate,
680                                       TYPE_CXL_TYPE3);
681 
682     pci_register_bar(
683         pci_dev, CXL_COMPONENT_REG_BAR_IDX,
684         PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64, mr);
685 
686     cxl_device_register_block_init(OBJECT(pci_dev), &ct3d->cxl_dstate,
687                                    &ct3d->cci);
688     pci_register_bar(pci_dev, CXL_DEVICE_REG_BAR_IDX,
689                      PCI_BASE_ADDRESS_SPACE_MEMORY |
690                          PCI_BASE_ADDRESS_MEM_TYPE_64,
691                      &ct3d->cxl_dstate.device_registers);
692 
693     /* MSI(-X) Initialization */
694     rc = msix_init_exclusive_bar(pci_dev, msix_num, 4, NULL);
695     if (rc) {
696         goto err_address_space_free;
697     }
698     for (i = 0; i < msix_num; i++) {
699         msix_vector_use(pci_dev, i);
700     }
701 
702     /* DOE Initialization */
703     pcie_doe_init(pci_dev, &ct3d->doe_cdat, 0x190, doe_cdat_prot, true, 0);
704 
705     cxl_cstate->cdat.build_cdat_table = ct3_build_cdat_table;
706     cxl_cstate->cdat.free_cdat_table = ct3_free_cdat_table;
707     cxl_cstate->cdat.private = ct3d;
708     cxl_doe_cdat_init(cxl_cstate, errp);
709     if (*errp) {
710         goto err_free_special_ops;
711     }
712 
713     pcie_cap_deverr_init(pci_dev);
714     /* Leave a bit of room for expansion */
715     rc = pcie_aer_init(pci_dev, PCI_ERR_VER, 0x200, PCI_ERR_SIZEOF, NULL);
716     if (rc) {
717         goto err_release_cdat;
718     }
719     cxl_event_init(&ct3d->cxl_dstate, 2);
720 
721     return;
722 
723 err_release_cdat:
724     cxl_doe_cdat_release(cxl_cstate);
725 err_free_special_ops:
726     g_free(regs->special_ops);
727 err_address_space_free:
728     if (ct3d->hostpmem) {
729         address_space_destroy(&ct3d->hostpmem_as);
730     }
731     if (ct3d->hostvmem) {
732         address_space_destroy(&ct3d->hostvmem_as);
733     }
734     return;
735 }
736 
737 static void ct3_exit(PCIDevice *pci_dev)
738 {
739     CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
740     CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
741     ComponentRegisters *regs = &cxl_cstate->crb;
742 
743     pcie_aer_exit(pci_dev);
744     cxl_doe_cdat_release(cxl_cstate);
745     g_free(regs->special_ops);
746     if (ct3d->hostpmem) {
747         address_space_destroy(&ct3d->hostpmem_as);
748     }
749     if (ct3d->hostvmem) {
750         address_space_destroy(&ct3d->hostvmem_as);
751     }
752 }
753 
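/*
 * Translate a host physical address (HPA) to a device physical address
 * (DPA) by walking the committed HDM decoders.  For a decoder with
 * 2^IW interleave ways and 2^(8 + IG) byte granularity, HPA offset bits
 * below (8 + IG) pass through unchanged and bits at or above
 * (8 + IG + IW) shift down by IW, removing the share of the address
 * space taken by the other interleave targets; the accumulated DPA skip
 * of the decoders is added on top.  For example, with IW = 1 (2 ways)
 * and IG = 0 (256 byte granularity), HPA offset 0x300 decodes to DPA
 * offset 0x100 on the device owning that line.
 */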
754 static bool cxl_type3_dpa(CXLType3Dev *ct3d, hwaddr host_addr, uint64_t *dpa)
755 {
756     int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
757     uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers;
758     unsigned int hdm_count;
759     uint32_t cap;
760     uint64_t dpa_base = 0;
761     int i;
762 
763     cap = ldl_le_p(cache_mem + R_CXL_HDM_DECODER_CAPABILITY);
764     hdm_count = cxl_decoder_count_dec(FIELD_EX32(cap,
765                                                  CXL_HDM_DECODER_CAPABILITY,
766                                                  DECODER_COUNT));
767 
768     for (i = 0; i < hdm_count; i++) {
769         uint64_t decoder_base, decoder_size, hpa_offset, skip;
770         uint32_t hdm_ctrl, low, high;
771         int ig, iw;
772 
773         low = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_BASE_LO + i * hdm_inc);
774         high = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_BASE_HI + i * hdm_inc);
775         decoder_base = ((uint64_t)high << 32) | (low & 0xf0000000);
776 
777         low = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_SIZE_LO + i * hdm_inc);
778         high = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_SIZE_HI + i * hdm_inc);
779         decoder_size = ((uint64_t)high << 32) | (low & 0xf0000000);
780 
781         low = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_DPA_SKIP_LO +
782                        i * hdm_inc);
783         high = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_DPA_SKIP_HI +
784                         i * hdm_inc);
785         skip = ((uint64_t)high << 32) | (low & 0xf0000000);
786         dpa_base += skip;
787 
788         hpa_offset = (uint64_t)host_addr - decoder_base;
789 
790         hdm_ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + i * hdm_inc);
791         iw = FIELD_EX32(hdm_ctrl, CXL_HDM_DECODER0_CTRL, IW);
792         ig = FIELD_EX32(hdm_ctrl, CXL_HDM_DECODER0_CTRL, IG);
793         if (!FIELD_EX32(hdm_ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED)) {
794             return false;
795         }
796         if (((uint64_t)host_addr < decoder_base) ||
797             (hpa_offset >= decoder_size)) {
798             int decoded_iw = cxl_interleave_ways_dec(iw, &error_fatal);
799 
800             if (decoded_iw == 0) {
801                 return false;
802             }
803 
804             dpa_base += decoder_size / decoded_iw;
805             continue;
806         }
807 
808         *dpa = dpa_base +
809             ((MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) |
810              ((MAKE_64BIT_MASK(8 + ig + iw, 64 - 8 - ig - iw) & hpa_offset)
811               >> iw));
812 
813         return true;
814     }
815     return false;
816 }
817 
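/*
 * Resolve an HPA to the backing address space plus an offset within it:
 * DPAs below the size of the volatile region map to the volatile
 * backend, everything above maps into the persistent backend.
 */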
818 static int cxl_type3_hpa_to_as_and_dpa(CXLType3Dev *ct3d,
819                                        hwaddr host_addr,
820                                        unsigned int size,
821                                        AddressSpace **as,
822                                        uint64_t *dpa_offset)
823 {
824     MemoryRegion *vmr = NULL, *pmr = NULL;
825 
826     if (ct3d->hostvmem) {
827         vmr = host_memory_backend_get_memory(ct3d->hostvmem);
828     }
829     if (ct3d->hostpmem) {
830         pmr = host_memory_backend_get_memory(ct3d->hostpmem);
831     }
832 
833     if (!vmr && !pmr) {
834         return -ENODEV;
835     }
836 
837     if (!cxl_type3_dpa(ct3d, host_addr, dpa_offset)) {
838         return -EINVAL;
839     }
840 
841     if (*dpa_offset + size > ct3d->cxl_dstate.mem_size) {
842         return -EINVAL;
843     }
844 
845     if (vmr) {
846         if (*dpa_offset < memory_region_size(vmr)) {
847             *as = &ct3d->hostvmem_as;
848         } else {
849             *as = &ct3d->hostpmem_as;
850             *dpa_offset -= memory_region_size(vmr);
851         }
852     } else {
853         *as = &ct3d->hostpmem_as;
854     }
855 
856     return 0;
857 }
858 
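/*
 * While a sanitize command is running the media contents must not be
 * exposed: reads return random data and writes are silently dropped.
 */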
859 MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data,
860                            unsigned size, MemTxAttrs attrs)
861 {
862     CXLType3Dev *ct3d = CXL_TYPE3(d);
863     uint64_t dpa_offset = 0;
864     AddressSpace *as = NULL;
865     int res;
866 
867     res = cxl_type3_hpa_to_as_and_dpa(ct3d, host_addr, size,
868                                       &as, &dpa_offset);
869     if (res) {
870         return MEMTX_ERROR;
871     }
872 
873     if (sanitize_running(&ct3d->cci)) {
874         qemu_guest_getrandom_nofail(data, size);
875         return MEMTX_OK;
876     }
877 
878     return address_space_read(as, dpa_offset, attrs, data, size);
879 }
880 
881 MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data,
882                             unsigned size, MemTxAttrs attrs)
883 {
884     CXLType3Dev *ct3d = CXL_TYPE3(d);
885     uint64_t dpa_offset = 0;
886     AddressSpace *as = NULL;
887     int res;
888 
889     res = cxl_type3_hpa_to_as_and_dpa(ct3d, host_addr, size,
890                                       &as, &dpa_offset);
891     if (res) {
892         return MEMTX_ERROR;
893     }
894 
895     if (sanitize_running(&ct3d->cci)) {
896         return MEMTX_OK;
897     }
898 
899     return address_space_write(as, dpa_offset, attrs, &data, size);
900 }
901 
902 static void ct3d_reset(DeviceState *dev)
903 {
904     CXLType3Dev *ct3d = CXL_TYPE3(dev);
905     uint32_t *reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
906     uint32_t *write_msk = ct3d->cxl_cstate.crb.cache_mem_regs_write_mask;
907 
908     cxl_component_register_init_common(reg_state, write_msk, CXL2_TYPE3_DEVICE);
909     cxl_device_register_init_t3(ct3d);
910 
911     /*
912      * Bring up an endpoint to target with MCTP over VDM.
913      * This device is emulating an MLD with single LD for now.
914      */
915     cxl_initialize_t3_fm_owned_ld_mctpcci(&ct3d->vdm_fm_owned_ld_mctp_cci,
916                                           DEVICE(ct3d), DEVICE(ct3d),
917                                           512); /* Max payload made up */
918     cxl_initialize_t3_ld_cci(&ct3d->ld0_cci, DEVICE(ct3d), DEVICE(ct3d),
919                              512); /* Max payload made up */
921 }
922 
923 static Property ct3_props[] = {
924     DEFINE_PROP_LINK("memdev", CXLType3Dev, hostmem, TYPE_MEMORY_BACKEND,
925                      HostMemoryBackend *), /* for backward compatibility */
926     DEFINE_PROP_LINK("persistent-memdev", CXLType3Dev, hostpmem,
927                      TYPE_MEMORY_BACKEND, HostMemoryBackend *),
928     DEFINE_PROP_LINK("volatile-memdev", CXLType3Dev, hostvmem,
929                      TYPE_MEMORY_BACKEND, HostMemoryBackend *),
930     DEFINE_PROP_LINK("lsa", CXLType3Dev, lsa, TYPE_MEMORY_BACKEND,
931                      HostMemoryBackend *),
932     DEFINE_PROP_UINT64("sn", CXLType3Dev, sn, UI64_NULL),
933     DEFINE_PROP_STRING("cdat", CXLType3Dev, cxl_cstate.cdat.filename),
934     DEFINE_PROP_END_OF_LIST(),
935 };
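/*
 * Illustrative command line fragment for wiring up a volatile instance
 * (IDs and bus name are examples only):
 *   -object memory-backend-ram,id=vmem0,share=on,size=256M
 *   -device cxl-type3,bus=root_port13,volatile-memdev=vmem0,id=cxl-vmem0
 */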
936 
937 static uint64_t get_lsa_size(CXLType3Dev *ct3d)
938 {
939     MemoryRegion *mr;
940 
941     if (!ct3d->lsa) {
942         return 0;
943     }
944 
945     mr = host_memory_backend_get_memory(ct3d->lsa);
946     return memory_region_size(mr);
947 }
948 
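/*
 * The Label Storage Area (LSA) is backed by its own host memory backend
 * and read/written at byte granularity through the get_lsa/set_lsa class
 * hooks (used by the LSA mailbox commands); every access is bounds
 * checked against the backend's size.
 */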
949 static void validate_lsa_access(MemoryRegion *mr, uint64_t size,
950                                 uint64_t offset)
951 {
952     assert(offset + size <= memory_region_size(mr));
953     assert(offset + size > offset);
954 }
955 
956 static uint64_t get_lsa(CXLType3Dev *ct3d, void *buf, uint64_t size,
957                     uint64_t offset)
958 {
959     MemoryRegion *mr;
960     void *lsa;
961 
962     if (!ct3d->lsa) {
963         return 0;
964     }
965 
966     mr = host_memory_backend_get_memory(ct3d->lsa);
967     validate_lsa_access(mr, size, offset);
968 
969     lsa = memory_region_get_ram_ptr(mr) + offset;
970     memcpy(buf, lsa, size);
971 
972     return size;
973 }
974 
975 static void set_lsa(CXLType3Dev *ct3d, const void *buf, uint64_t size,
976                     uint64_t offset)
977 {
978     MemoryRegion *mr;
979     void *lsa;
980 
981     if (!ct3d->lsa) {
982         return;
983     }
984 
985     mr = host_memory_backend_get_memory(ct3d->lsa);
986     validate_lsa_access(mr, size, offset);
987 
988     lsa = memory_region_get_ram_ptr(mr) + offset;
989     memcpy(lsa, buf, size);
990     memory_region_set_dirty(mr, offset, size);
991 
992     /*
993      * Just like the PMEM, if the guest is not allowed to exit gracefully, label
994      * updates will get lost.
995      */
996 }
997 
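/*
 * Write a single cache line of data at the given DPA, bypassing the HDM
 * decode done for normal accesses.  Exposed through the class's
 * set_cacheline hook, presumably for test and injection paths.
 */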
998 static bool set_cacheline(CXLType3Dev *ct3d, uint64_t dpa_offset, uint8_t *data)
999 {
1000     MemoryRegion *vmr = NULL, *pmr = NULL;
1001     AddressSpace *as;
1002 
1003     if (ct3d->hostvmem) {
1004         vmr = host_memory_backend_get_memory(ct3d->hostvmem);
1005     }
1006     if (ct3d->hostpmem) {
1007         pmr = host_memory_backend_get_memory(ct3d->hostpmem);
1008     }
1009 
1010     if (!vmr && !pmr) {
1011         return false;
1012     }
1013 
1014     if (dpa_offset + CXL_CACHE_LINE_SIZE > ct3d->cxl_dstate.mem_size) {
1015         return false;
1016     }
1017 
1018     if (vmr) {
1019         if (dpa_offset < memory_region_size(vmr)) {
1020             as = &ct3d->hostvmem_as;
1021         } else {
1022             as = &ct3d->hostpmem_as;
1023             dpa_offset -= memory_region_size(vmr);
1024         }
1025     } else {
1026         as = &ct3d->hostpmem_as;
1027     }
1028 
1029     address_space_write(as, dpa_offset, MEMTXATTRS_UNSPECIFIED, data,
1030                         CXL_CACHE_LINE_SIZE);
1031     return true;
1032 }
1033 
1034 void cxl_set_poison_list_overflowed(CXLType3Dev *ct3d)
1035 {
1036     ct3d->poison_list_overflowed = true;
1037     ct3d->poison_list_overflow_ts =
1038         cxl_device_get_timestamp(&ct3d->cxl_dstate);
1039 }
1040 
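/*
 * QMP poison injection: record a poisoned DPA range so it is reported by
 * the poison list mailbox commands.  Ranges must be 64-byte aligned, a
 * multiple of 64 bytes long, and must not overlap an existing entry;
 * once the list limit is hit the overflow state is recorded instead.
 */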
1041 void qmp_cxl_inject_poison(const char *path, uint64_t start, uint64_t length,
1042                            Error **errp)
1043 {
1044     Object *obj = object_resolve_path(path, NULL);
1045     CXLType3Dev *ct3d;
1046     CXLPoison *p;
1047 
1048     if (length % 64) {
1049         error_setg(errp, "Poison injection must be in multiples of 64 bytes");
1050         return;
1051     }
1052     if (start % 64) {
1053         error_setg(errp, "Poison start address must be 64 byte aligned");
1054         return;
1055     }
1056     if (!obj) {
1057         error_setg(errp, "Unable to resolve path");
1058         return;
1059     }
1060     if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
1061         error_setg(errp, "Path does not point to a CXL type 3 device");
1062         return;
1063     }
1064 
1065     ct3d = CXL_TYPE3(obj);
1066 
1067     QLIST_FOREACH(p, &ct3d->poison_list, node) {
1068         if (((start >= p->start) && (start < p->start + p->length)) ||
1069             ((start + length > p->start) &&
1070              (start + length <= p->start + p->length))) {
1071             error_setg(errp,
1072                        "Overlap with existing poisoned region not supported");
1073             return;
1074         }
1075     }
1076 
1077     if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
1078         cxl_set_poison_list_overflowed(ct3d);
1079         return;
1080     }
1081 
1082     p = g_new0(CXLPoison, 1);
1083     p->length = length;
1084     p->start = start;
1085     /* Different from injected via the mbox */
1086     p->type = CXL_POISON_TYPE_INTERNAL;
1087 
1088     QLIST_INSERT_HEAD(&ct3d->poison_list, p, node);
1089     ct3d->poison_list_cnt++;
1090 }
1091 
1092 /* For uncorrectable errors include support for multiple header recording */
1093 void qmp_cxl_inject_uncorrectable_errors(const char *path,
1094                                          CXLUncorErrorRecordList *errors,
1095                                          Error **errp)
1096 {
1097     Object *obj = object_resolve_path(path, NULL);
1098     static PCIEAERErr err = {};
1099     CXLType3Dev *ct3d;
1100     CXLError *cxl_err;
1101     uint32_t *reg_state;
1102     uint32_t unc_err;
1103     bool first;
1104 
1105     if (!obj) {
1106         error_setg(errp, "Unable to resolve path");
1107         return;
1108     }
1109 
1110     if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
1111         error_setg(errp, "Path does not point to a CXL type 3 device");
1112         return;
1113     }
1114 
1115     err.status = PCI_ERR_UNC_INTN;
1116     err.source_id = pci_requester_id(PCI_DEVICE(obj));
1117     err.flags = 0;
1118 
1119     ct3d = CXL_TYPE3(obj);
1120 
1121     first = QTAILQ_EMPTY(&ct3d->error_list);
1122     reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
1123     while (errors) {
1124         uint32List *header = errors->value->header;
1125         uint8_t header_count = 0;
1126         int cxl_err_code;
1127 
1128         cxl_err_code = ct3d_qmp_uncor_err_to_cxl(errors->value->type);
1129         if (cxl_err_code < 0) {
1130             error_setg(errp, "Unknown error code");
1131             return;
1132         }
1133 
1134         /* If the error is masked, nothing to do here */
1135         if (!((1 << cxl_err_code) &
1136               ~ldl_le_p(reg_state + R_CXL_RAS_UNC_ERR_MASK))) {
1137             errors = errors->next;
1138             continue;
1139         }
1140 
1141         cxl_err = g_malloc0(sizeof(*cxl_err));
1142 
1143         cxl_err->type = cxl_err_code;
1144         while (header && header_count < 32) {
1145             cxl_err->header[header_count++] = header->value;
1146             header = header->next;
1147         }
1148         if (header) {
1149             /* List not exhausted: more than 32 DWORDs were supplied */
1150             error_setg(errp, "Header must be 32 DWORD or less");
                 g_free(cxl_err);
1151             return;
1152         }
1152         QTAILQ_INSERT_TAIL(&ct3d->error_list, cxl_err, node);
1153 
1154         errors = errors->next;
1155     }
1156 
1157     if (first && !QTAILQ_EMPTY(&ct3d->error_list)) {
1158         uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers;
1159         uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL);
1160         uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0];
1161         int i;
1162 
1163         cxl_err = QTAILQ_FIRST(&ct3d->error_list);
1164         for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) {
1165             stl_le_p(header_log + i, cxl_err->header[i]);
1166         }
1167 
1168         capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
1169                              FIRST_ERROR_POINTER, cxl_err->type);
1170         stl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL, capctrl);
1171     }
1172 
1173     unc_err = 0;
1174     QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) {
1175         unc_err |= (1 << cxl_err->type);
1176     }
1177     if (!unc_err) {
1178         return;
1179     }
1180 
1181     stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, unc_err);
1182     pcie_aer_inject_error(PCI_DEVICE(obj), &err);
1183 
1184     return;
1185 }
1186 
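/*
 * Correctable errors carry no header log: injection simply sets the
 * status bit (if unmasked) and raises a correctable AER internal error.
 */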
1187 void qmp_cxl_inject_correctable_error(const char *path, CxlCorErrorType type,
1188                                       Error **errp)
1189 {
1190     static PCIEAERErr err = {};
1191     Object *obj = object_resolve_path(path, NULL);
1192     CXLType3Dev *ct3d;
1193     uint32_t *reg_state;
1194     uint32_t cor_err;
1195     int cxl_err_type;
1196 
1197     if (!obj) {
1198         error_setg(errp, "Unable to resolve path");
1199         return;
1200     }
1201     if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
1202         error_setg(errp, "Path does not point to a CXL type 3 device");
1203         return;
1204     }
1205 
1206     err.status = PCI_ERR_COR_INTERNAL;
1207     err.source_id = pci_requester_id(PCI_DEVICE(obj));
1208     err.flags = PCIE_AER_ERR_IS_CORRECTABLE;
1209 
1210     ct3d = CXL_TYPE3(obj);
1211     reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
1212     cor_err = ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS);
1213 
1214     cxl_err_type = ct3d_qmp_cor_err_to_cxl(type);
1215     if (cxl_err_type < 0) {
1216         error_setg(errp, "Invalid COR error");
1217         return;
1218     }
1219     /* If the error is masked, nothing to do here */
1220     if (!((1 << cxl_err_type) &
1221           ~ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_MASK))) {
1222         return;
1223     }
1224 
1225     cor_err |= (1 << cxl_err_type);
1226     stl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS, cor_err);
1227 
1228     pcie_aer_inject_error(PCI_DEVICE(obj), &err);
1229 }
1230 
1231 static void cxl_assign_event_header(CXLEventRecordHdr *hdr,
1232                                     const QemuUUID *uuid, uint32_t flags,
1233                                     uint8_t length, uint64_t timestamp)
1234 {
1235     st24_le_p(&hdr->flags, flags);
1236     hdr->length = length;
1237     memcpy(&hdr->id, uuid, sizeof(hdr->id));
1238     stq_le_p(&hdr->timestamp, timestamp);
1239 }
1240 
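/*
 * Event record identity UUIDs for General Media, DRAM and Memory Module
 * events, as defined by the CXL specification.
 */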
1241 static const QemuUUID gen_media_uuid = {
1242     .data = UUID(0xfbcd0a77, 0xc260, 0x417f,
1243                  0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6),
1244 };
1245 
1246 static const QemuUUID dram_uuid = {
1247     .data = UUID(0x601dcbb3, 0x9c06, 0x4eab, 0xb8, 0xaf,
1248                  0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24),
1249 };
1250 
1251 static const QemuUUID memory_module_uuid = {
1252     .data = UUID(0xfe927475, 0xdd59, 0x4339, 0xa5, 0x86,
1253                  0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74),
1254 };
1255 
1256 #define CXL_GMER_VALID_CHANNEL                          BIT(0)
1257 #define CXL_GMER_VALID_RANK                             BIT(1)
1258 #define CXL_GMER_VALID_DEVICE                           BIT(2)
1259 #define CXL_GMER_VALID_COMPONENT                        BIT(3)
1260 
1261 static int ct3d_qmp_cxl_event_log_enc(CxlEventLog log)
1262 {
1263     switch (log) {
1264     case CXL_EVENT_LOG_INFORMATIONAL:
1265         return CXL_EVENT_TYPE_INFO;
1266     case CXL_EVENT_LOG_WARNING:
1267         return CXL_EVENT_TYPE_WARN;
1268     case CXL_EVENT_LOG_FAILURE:
1269         return CXL_EVENT_TYPE_FAIL;
1270     case CXL_EVENT_LOG_FATAL:
1271         return CXL_EVENT_TYPE_FATAL;
1272     /* DCD not yet supported */
1273     default:
1274         return -EINVAL;
1275     }
1276 }
1277 /* Component ID is device specific.  Define this as a string. */
1278 void qmp_cxl_inject_general_media_event(const char *path, CxlEventLog log,
1279                                         uint8_t flags, uint64_t dpa,
1280                                         uint8_t descriptor, uint8_t type,
1281                                         uint8_t transaction_type,
1282                                         bool has_channel, uint8_t channel,
1283                                         bool has_rank, uint8_t rank,
1284                                         bool has_device, uint32_t device,
1285                                         const char *component_id,
1286                                         Error **errp)
1287 {
1288     Object *obj = object_resolve_path(path, NULL);
1289     CXLEventGenMedia gem;
1290     CXLEventRecordHdr *hdr = &gem.hdr;
1291     CXLDeviceState *cxlds;
1292     CXLType3Dev *ct3d;
1293     uint16_t valid_flags = 0;
1294     uint8_t enc_log;
1295     int rc;
1296 
1297     if (!obj) {
1298         error_setg(errp, "Unable to resolve path");
1299         return;
1300     }
1301     if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
1302         error_setg(errp, "Path does not point to a CXL type 3 device");
1303         return;
1304     }
1305     ct3d = CXL_TYPE3(obj);
1306     cxlds = &ct3d->cxl_dstate;
1307 
1308     rc = ct3d_qmp_cxl_event_log_enc(log);
1309     if (rc < 0) {
1310         error_setg(errp, "Unhandled error log type");
1311         return;
1312     }
1313     enc_log = rc;
1314 
1315     memset(&gem, 0, sizeof(gem));
1316     cxl_assign_event_header(hdr, &gen_media_uuid, flags, sizeof(gem),
1317                             cxl_device_get_timestamp(&ct3d->cxl_dstate));
1318 
1319     stq_le_p(&gem.phys_addr, dpa);
1320     gem.descriptor = descriptor;
1321     gem.type = type;
1322     gem.transaction_type = transaction_type;
1323 
1324     if (has_channel) {
1325         gem.channel = channel;
1326         valid_flags |= CXL_GMER_VALID_CHANNEL;
1327     }
1328 
1329     if (has_rank) {
1330         gem.rank = rank;
1331         valid_flags |= CXL_GMER_VALID_RANK;
1332     }
1333 
1334     if (has_device) {
1335         st24_le_p(gem.device, device);
1336         valid_flags |= CXL_GMER_VALID_DEVICE;
1337     }
1338 
1339     if (component_id) {
1340         strncpy((char *)gem.component_id, component_id,
1341                 sizeof(gem.component_id) - 1);
1342         valid_flags |= CXL_GMER_VALID_COMPONENT;
1343     }
1344 
1345     stw_le_p(&gem.validity_flags, valid_flags);
1346 
1347     if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&gem)) {
1348         cxl_event_irq_assert(ct3d);
1349     }
1350 }
1351 
1352 #define CXL_DRAM_VALID_CHANNEL                          BIT(0)
1353 #define CXL_DRAM_VALID_RANK                             BIT(1)
1354 #define CXL_DRAM_VALID_NIBBLE_MASK                      BIT(2)
1355 #define CXL_DRAM_VALID_BANK_GROUP                       BIT(3)
1356 #define CXL_DRAM_VALID_BANK                             BIT(4)
1357 #define CXL_DRAM_VALID_ROW                              BIT(5)
1358 #define CXL_DRAM_VALID_COLUMN                           BIT(6)
1359 #define CXL_DRAM_VALID_CORRECTION_MASK                  BIT(7)
1360 
1361 void qmp_cxl_inject_dram_event(const char *path, CxlEventLog log, uint8_t flags,
1362                                uint64_t dpa, uint8_t descriptor,
1363                                uint8_t type, uint8_t transaction_type,
1364                                bool has_channel, uint8_t channel,
1365                                bool has_rank, uint8_t rank,
1366                                bool has_nibble_mask, uint32_t nibble_mask,
1367                                bool has_bank_group, uint8_t bank_group,
1368                                bool has_bank, uint8_t bank,
1369                                bool has_row, uint32_t row,
1370                                bool has_column, uint16_t column,
1371                                bool has_correction_mask,
1372                                uint64List *correction_mask,
1373                                Error **errp)
1374 {
1375     Object *obj = object_resolve_path(path, NULL);
1376     CXLEventDram dram;
1377     CXLEventRecordHdr *hdr = &dram.hdr;
1378     CXLDeviceState *cxlds;
1379     CXLType3Dev *ct3d;
1380     uint16_t valid_flags = 0;
1381     uint8_t enc_log;
1382     int rc;
1383 
1384     if (!obj) {
1385         error_setg(errp, "Unable to resolve path");
1386         return;
1387     }
1388     if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
1389         error_setg(errp, "Path does not point to a CXL type 3 device");
1390         return;
1391     }
1392     ct3d = CXL_TYPE3(obj);
1393     cxlds = &ct3d->cxl_dstate;
1394 
1395     rc = ct3d_qmp_cxl_event_log_enc(log);
1396     if (rc < 0) {
1397         error_setg(errp, "Unhandled error log type");
1398         return;
1399     }
1400     enc_log = rc;
1401 
1402     memset(&dram, 0, sizeof(dram));
1403     cxl_assign_event_header(hdr, &dram_uuid, flags, sizeof(dram),
1404                             cxl_device_get_timestamp(&ct3d->cxl_dstate));
1405     stq_le_p(&dram.phys_addr, dpa);
1406     dram.descriptor = descriptor;
1407     dram.type = type;
1408     dram.transaction_type = transaction_type;
1409 
1410     if (has_channel) {
1411         dram.channel = channel;
1412         valid_flags |= CXL_DRAM_VALID_CHANNEL;
1413     }
1414 
1415     if (has_rank) {
1416         dram.rank = rank;
1417         valid_flags |= CXL_DRAM_VALID_RANK;
1418     }
1419 
1420     if (has_nibble_mask) {
1421         st24_le_p(dram.nibble_mask, nibble_mask);
1422         valid_flags |= CXL_DRAM_VALID_NIBBLE_MASK;
1423     }
1424 
1425     if (has_bank_group) {
1426         dram.bank_group = bank_group;
1427         valid_flags |= CXL_DRAM_VALID_BANK_GROUP;
1428     }
1429 
1430     if (has_bank) {
1431         dram.bank = bank;
1432         valid_flags |= CXL_DRAM_VALID_BANK;
1433     }
1434 
1435     if (has_row) {
1436         st24_le_p(dram.row, row);
1437         valid_flags |= CXL_DRAM_VALID_ROW;
1438     }
1439 
1440     if (has_column) {
1441         stw_le_p(&dram.column, column);
1442         valid_flags |= CXL_DRAM_VALID_COLUMN;
1443     }
1444 
1445     if (has_correction_mask) {
1446         int count = 0;
1447         while (correction_mask && count < 4) {
1448             stq_le_p(&dram.correction_mask[count],
1449                      correction_mask->value);
1450             count++;
1451             correction_mask = correction_mask->next;
1452         }
1453         valid_flags |= CXL_DRAM_VALID_CORRECTION_MASK;
1454     }
1455 
1456     stw_le_p(&dram.validity_flags, valid_flags);
1457 
1458     if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&dram)) {
1459         cxl_event_irq_assert(ct3d);
1460     }
1461     return;
1462 }
1463 
1464 void qmp_cxl_inject_memory_module_event(const char *path, CxlEventLog log,
1465                                         uint8_t flags, uint8_t type,
1466                                         uint8_t health_status,
1467                                         uint8_t media_status,
1468                                         uint8_t additional_status,
1469                                         uint8_t life_used,
1470                                         int16_t temperature,
1471                                         uint32_t dirty_shutdown_count,
1472                                         uint32_t corrected_volatile_error_count,
1473                                         uint32_t corrected_persist_error_count,
1474                                         Error **errp)
1475 {
1476     Object *obj = object_resolve_path(path, NULL);
1477     CXLEventMemoryModule module;
1478     CXLEventRecordHdr *hdr = &module.hdr;
1479     CXLDeviceState *cxlds;
1480     CXLType3Dev *ct3d;
1481     uint8_t enc_log;
1482     int rc;
1483 
1484     if (!obj) {
1485         error_setg(errp, "Unable to resolve path");
1486         return;
1487     }
1488     if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
1489         error_setg(errp, "Path does not point to a CXL type 3 device");
1490         return;
1491     }
1492     ct3d = CXL_TYPE3(obj);
1493     cxlds = &ct3d->cxl_dstate;
1494 
1495     rc = ct3d_qmp_cxl_event_log_enc(log);
1496     if (rc < 0) {
1497         error_setg(errp, "Unhandled error log type");
1498         return;
1499     }
1500     enc_log = rc;
1501 
1502     memset(&module, 0, sizeof(module));
1503     cxl_assign_event_header(hdr, &memory_module_uuid, flags, sizeof(module),
1504                             cxl_device_get_timestamp(&ct3d->cxl_dstate));
1505 
1506     module.type = type;
1507     module.health_status = health_status;
1508     module.media_status = media_status;
1509     module.additional_status = additional_status;
1510     module.life_used = life_used;
1511     stw_le_p(&module.temperature, temperature);
1512     stl_le_p(&module.dirty_shutdown_count, dirty_shutdown_count);
1513     stl_le_p(&module.corrected_volatile_error_count,
1514              corrected_volatile_error_count);
1515     stl_le_p(&module.corrected_persistent_error_count,
1516              corrected_persist_error_count);
1517 
1518     if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&module)) {
1519         cxl_event_irq_assert(ct3d);
1520     }
1521 }
1522 
1523 static void ct3_class_init(ObjectClass *oc, void *data)
1524 {
1525     DeviceClass *dc = DEVICE_CLASS(oc);
1526     PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
1527     CXLType3Class *cvc = CXL_TYPE3_CLASS(oc);
1528 
1529     pc->realize = ct3_realize;
1530     pc->exit = ct3_exit;
1531     pc->class_id = PCI_CLASS_MEMORY_CXL;
1532     pc->vendor_id = PCI_VENDOR_ID_INTEL;
1533     pc->device_id = 0xd93; /* LVF for now */
1534     pc->revision = 1;
1535 
1536     pc->config_write = ct3d_config_write;
1537     pc->config_read = ct3d_config_read;
1538 
1539     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1540     dc->desc = "CXL Memory Device (Type 3)";
1541     dc->reset = ct3d_reset;
1542     device_class_set_props(dc, ct3_props);
1543 
1544     cvc->get_lsa_size = get_lsa_size;
1545     cvc->get_lsa = get_lsa;
1546     cvc->set_lsa = set_lsa;
1547     cvc->set_cacheline = set_cacheline;
1548 }
1549 
1550 static const TypeInfo ct3d_info = {
1551     .name = TYPE_CXL_TYPE3,
1552     .parent = TYPE_PCI_DEVICE,
1553     .class_size = sizeof(struct CXLType3Class),
1554     .class_init = ct3_class_init,
1555     .instance_size = sizeof(CXLType3Dev),
1556     .interfaces = (InterfaceInfo[]) {
1557         { INTERFACE_CXL_DEVICE },
1558         { INTERFACE_PCIE_DEVICE },
1559         {}
1560     },
1561 };
1562 
1563 static void ct3d_registers(void)
1564 {
1565     type_register_static(&ct3d_info);
1566 }
1567 
1568 type_init(ct3d_registers);
1569