xref: /qemu/hw/cxl/cxl-device-utils.c (revision b342489a)
/*
 * CXL Utility library for devices
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/cxl/cxl.h"

/*
 * Device registers have no restrictions per the spec, and so fall back to the
 * default memory mapped register rules in 8.2:
 *   Software shall use CXL.io Memory Read and Write to access memory mapped
 *   registers defined in this section. Unless otherwise specified, software
 *   shall restrict the access width based on the following:
 *   • A 32 bit register shall be accessed as a 1 Byte, 2 Bytes or 4 Bytes
 *     quantity.
 *   • A 64 bit register shall be accessed as a 1 Byte, 2 Bytes, 4 Bytes or 8
 *     Bytes quantity.
 *   • The address shall be a multiple of the access width, e.g. when
 *     accessing a register as a 4 Byte quantity, the address shall be a
 *     multiple of 4.
 *   • The accesses shall map to contiguous bytes. If these rules are not
 *     followed, the behavior is undefined.
 */
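
/*
 * Worked example of the rules above (illustrative only, using Linux-style
 * MMIO accessors; base and the 0x10 offset are hypothetical): for a 32-bit
 * register at offset 0x10,
 *
 *     readl(base + 0x10);   // OK: 4-byte access, 4-byte aligned
 *     readw(base + 0x12);   // OK: 2-byte access, 2-byte aligned
 *     readq(base + 0x10);   // undefined: 8-byte access to a 32-bit register
 *     readl(base + 0x12);   // undefined: 4-byte access, not 4-byte aligned
 */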

/*
 * The register backing stores are differently sized views aliasing the same
 * bytes, so a read of width N simply indexes the N-byte-element view at
 * offset / size.
 */
static uint64_t caps_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    switch (size) {
    case 4:
        return cxl_dstate->caps_reg_state32[offset / size];
    case 8:
        return cxl_dstate->caps_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}

static uint64_t dev_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    switch (size) {
    case 1:
        return cxl_dstate->dev_reg_state[offset];
    case 2:
        return cxl_dstate->dev_reg_state16[offset / size];
    case 4:
        return cxl_dstate->dev_reg_state32[offset / size];
    case 8:
        return cxl_dstate->dev_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}

static uint64_t mailbox_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    switch (size) {
    case 1:
        return cxl_dstate->mbox_reg_state[offset];
    case 2:
        return cxl_dstate->mbox_reg_state16[offset / size];
    case 4:
        return cxl_dstate->mbox_reg_state32[offset / size];
    case 8:
        return cxl_dstate->mbox_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}
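
/*
 * Worked indexing example (illustrative): a 2-byte read at offset 6 of the
 * device status block is serviced as dev_reg_state16[6 / 2], i.e. element 3
 * of the 16-bit view, which covers bytes 6-7 of the underlying state.
 */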

static void mailbox_mem_writel(uint32_t *reg_state, hwaddr offset,
                               uint64_t value)
{
    switch (offset) {
    case A_CXL_DEV_MAILBOX_CTRL:
        break;
    case A_CXL_DEV_MAILBOX_CAP:
        /* RO register, drop the write as mailbox_mem_writeq() does for STS */
        return;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%" PRIx64 " (WI)\n",
                      __func__, offset);
        return;
    }

    reg_state[offset / sizeof(*reg_state)] = value;
}

static void mailbox_mem_writeq(uint64_t *reg_state, hwaddr offset,
                               uint64_t value)
{
    switch (offset) {
    case A_CXL_DEV_MAILBOX_CMD:
        break;
    case A_CXL_DEV_BG_CMD_STS:
        /* Background commands not supported */
        /* fallthrough */
    case A_CXL_DEV_MAILBOX_STS:
        /* Read only register, will get updated by the state machine */
        return;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%" PRIx64 " (WI)\n",
                      __func__, offset);
        return;
    }

    reg_state[offset / sizeof(*reg_state)] = value;
}

static void mailbox_reg_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    if (offset >= A_CXL_DEV_CMD_PAYLOAD) {
        /*
         * Payload writes bypass the register checks and land directly in
         * the backing store; the command handler interprets them later.
         */
        memcpy(cxl_dstate->mbox_reg_state + offset, &value, size);
        return;
    }

    switch (size) {
    case 4:
        mailbox_mem_writel(cxl_dstate->mbox_reg_state32, offset, value);
        break;
    case 8:
        mailbox_mem_writeq(cxl_dstate->mbox_reg_state64, offset, value);
        break;
    default:
        g_assert_not_reached();
    }

    /* Process the command as soon as the doorbell is rung */
    if (ARRAY_FIELD_EX32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
                         DOORBELL)) {
        cxl_process_mailbox(cxl_dstate);
    }
}
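
/*
 * Guest-side view of the protocol this implements (a minimal sketch using
 * hypothetical Linux-style accessors; offsets are the A_CXL_DEV_* values
 * above, and CTRL_DOORBELL stands in for the CXL_DEV_MAILBOX_CTRL DOORBELL
 * field):
 *
 *     memcpy_toio(base + A_CXL_DEV_CMD_PAYLOAD, input, in_len);
 *     writeq(cmd, base + A_CXL_DEV_MAILBOX_CMD);    // opcode + payload length
 *     writel(CTRL_DOORBELL, base + A_CXL_DEV_MAILBOX_CTRL);
 *     // cxl_process_mailbox() runs on the doorbell write and clears the bit
 *     while (readl(base + A_CXL_DEV_MAILBOX_CTRL) & CTRL_DOORBELL)
 *         cpu_relax();
 *     status = readq(base + A_CXL_DEV_MAILBOX_STS); // return code
 */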

/*
 * The memory device status register is synthesized afresh on every read
 * rather than kept in backing store: media is always reported ready and
 * the mailbox interface available.
 */
static uint64_t mdev_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    uint64_t retval = 0;

    retval = FIELD_DP64(retval, CXL_MEM_DEV_STS, MEDIA_STATUS, 1);
    retval = FIELD_DP64(retval, CXL_MEM_DEV_STS, MBOX_READY, 1);

    return retval;
}

static void ro_reg_write(void *opaque, hwaddr offset, uint64_t value,
                         unsigned size)
{
    /* Many register sets are read only */
}

static const MemoryRegionOps mdev_ops = {
    .read = mdev_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps mailbox_ops = {
    .read = mailbox_reg_read,
    .write = mailbox_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps dev_ops = {
    .read = dev_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps caps_ops = {
    .read = caps_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};
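
/*
 * Note on .valid vs .impl (standard QEMU memory API behavior): .valid is
 * what the guest may issue, .impl is what the callbacks actually handle,
 * and the memory core bridges the difference. For example, mdev_ops only
 * implements 8-byte accesses, so a guest byte read of the memory device
 * status register is synthesized by the core from a full 64-bit
 * mdev_reg_read().
 */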

void cxl_device_register_block_init(Object *obj, CXLDeviceState *cxl_dstate)
{
    /*
     * This will be a BAR, so it needs to be rounded up to a power of 2
     * for the PCI spec.
     */
    memory_region_init(&cxl_dstate->device_registers, obj, "device-registers",
                       pow2ceil(CXL_MMIO_SIZE));

    memory_region_init_io(&cxl_dstate->caps, obj, &caps_ops, cxl_dstate,
                          "cap-array", CXL_CAPS_SIZE);
    memory_region_init_io(&cxl_dstate->device, obj, &dev_ops, cxl_dstate,
                          "device-status", CXL_DEVICE_STATUS_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->mailbox, obj, &mailbox_ops, cxl_dstate,
                          "mailbox", CXL_MAILBOX_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->memory_device, obj, &mdev_ops,
                          cxl_dstate, "memory device caps",
                          CXL_MEMORY_DEVICE_REGISTERS_LENGTH);

    memory_region_add_subregion(&cxl_dstate->device_registers, 0,
                                &cxl_dstate->caps);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_DEVICE_STATUS_REGISTERS_OFFSET,
                                &cxl_dstate->device);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MAILBOX_REGISTERS_OFFSET,
                                &cxl_dstate->mailbox);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MEMORY_DEVICE_REGISTERS_OFFSET,
                                &cxl_dstate->memory_device);
}
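
/*
 * Usage sketch for a caller exposing this container as a BAR (the BAR
 * index, flags, and the ct3d device state name are illustrative, not
 * mandated by this file):
 *
 *     cxl_device_register_block_init(OBJECT(pci_dev), &ct3d->cxl_dstate);
 *     pci_register_bar(pci_dev, 0,
 *                      PCI_BASE_ADDRESS_SPACE_MEMORY |
 *                      PCI_BASE_ADDRESS_MEM_TYPE_64,
 *                      &ct3d->cxl_dstate.device_registers);
 */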

void cxl_event_set_status(CXLDeviceState *cxl_dstate, CXLEventLogType log_type,
                          bool available)
{
    if (available) {
        cxl_dstate->event_status |= (1 << log_type);
    } else {
        cxl_dstate->event_status &= ~(1 << log_type);
    }

    ARRAY_FIELD_DP64(cxl_dstate->dev_reg_state64, CXL_DEV_EVENT_STATUS,
                     EVENT_STATUS, cxl_dstate->event_status);
}

static void device_reg_init_common(CXLDeviceState *cxl_dstate)
{
    CXLEventLogType log;

    for (log = 0; log < CXL_EVENT_TYPE_MAX; log++) {
        cxl_event_set_status(cxl_dstate, log, false);
    }
}

static void mailbox_reg_init_common(CXLDeviceState *cxl_dstate)
{
    /*
     * 2048 byte payload (the capability field encodes log2 of the size),
     * with no interrupt or background command support.
     */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     PAYLOAD_SIZE, CXL_MAILBOX_PAYLOAD_SHIFT);
    cxl_dstate->payload_size = CXL_MAILBOX_MAX_PAYLOAD_SIZE;
}

static void memdev_reg_init_common(CXLDeviceState *cxl_dstate) { }

void cxl_device_register_init_common(CXLDeviceState *cxl_dstate)
{
    uint64_t *cap_h = cxl_dstate->caps_reg_state64;
    const int cap_count = 3;

    /* CXL Device Capabilities Array Register */
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_ID, 0);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_VERSION, 1);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_COUNT, cap_count);

    cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1, 2);
    device_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MAILBOX, 2, 1);
    mailbox_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000, 1);
    memdev_reg_init_common(cxl_dstate);

    cxl_initialize_mailbox(cxl_dstate);
}
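
/*
 * The (id, version) pairs passed to cxl_device_cap_init() above follow the
 * CXL specification's capability ID assignments: 0x0001 Device Status,
 * 0x0002 Primary Mailbox, and 0x4000 Memory Device Status, which is why the
 * capabilities array advertises cap_count = 3 entries.
 */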

uint64_t cxl_device_get_timestamp(CXLDeviceState *cxl_dstate)
{
    uint64_t time, delta;
    uint64_t final_time = 0;

    if (cxl_dstate->timestamp.set) {
        /* Find the delta from the last time the host set the time. */
        time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        delta = time - cxl_dstate->timestamp.last_set;
        final_time = cxl_dstate->timestamp.host_set + delta;
    }

    return final_time;
}
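
/*
 * Worked example (illustrative numbers): if the host's Set Timestamp
 * command stored host_set = 1000000000 ns when QEMU_CLOCK_VIRTUAL read
 * last_set = 5000 ns, a later call made when the virtual clock reads
 * 2000005000 ns returns 1000000000 + (2000005000 - 5000) = 3000000000 ns.
 * Before the host ever sets the time, 0 is returned.
 */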