xref: /qemu/hw/remote/proxy.c (revision d7a84021)
/*
 * Copyright © 2018, 2021 Oracle and/or its affiliates.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu-common.h"

#include "hw/remote/proxy.h"
#include "hw/pci/pci.h"
#include "qapi/error.h"
#include "io/channel-util.h"
#include "hw/qdev-properties.h"
#include "monitor/monitor.h"
#include "migration/blocker.h"
#include "qemu/sockets.h"
#include "hw/remote/mpqemu-link.h"
#include "qemu/error-report.h"
#include "hw/remote/proxy-memory-listener.h"
#include "qom/object.h"
#include "qemu/event_notifier.h"
#include "sysemu/kvm.h"
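/*
 * Pulls in the POSIX EventNotifier implementation that backs the irqfd
 * notifiers created in setup_irqfd() below.
 */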
#include "util/event_notifier-posix.c"

static void probe_pci_info(PCIDevice *dev, Error **errp);
static void proxy_device_reset(DeviceState *dev);

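/*
 * Recompute the INTx-to-GSI route for this device and re-register the
 * interrupt/resample irqfd notifiers with KVM, so the remote process can
 * trigger the interrupt directly through the eventfd.
 */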
static void proxy_intx_update(PCIDevice *pci_dev)
{
    PCIProxyDev *dev = PCI_PROXY_DEV(pci_dev);
    PCIINTxRoute route;
    int pin = pci_get_byte(pci_dev->config + PCI_INTERRUPT_PIN) - 1;

    if (dev->virq != -1) {
        kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &dev->intr, dev->virq);
        dev->virq = -1;
    }

    route = pci_device_route_intx_to_irq(pci_dev, pin);

    dev->virq = route.irq;

    if (dev->virq != -1) {
        kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &dev->intr,
                                           &dev->resample, dev->virq);
    }
}

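/*
 * Allocate the interrupt and resample event notifiers, hand their file
 * descriptors to the remote process with MPQEMU_CMD_SET_IRQFD, and register
 * for INTx routing updates.
 */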
static void setup_irqfd(PCIProxyDev *dev)
{
    PCIDevice *pci_dev = PCI_DEVICE(dev);
    MPQemuMsg msg;
    Error *local_err = NULL;

    event_notifier_init(&dev->intr, 0);
    event_notifier_init(&dev->resample, 0);

    memset(&msg, 0, sizeof(MPQemuMsg));
    msg.cmd = MPQEMU_CMD_SET_IRQFD;
    msg.num_fds = 2;
    msg.fds[0] = event_notifier_get_fd(&dev->intr);
    msg.fds[1] = event_notifier_get_fd(&dev->resample);
    msg.size = 0;

    if (!mpqemu_msg_send(&msg, dev->ioc, &local_err)) {
        error_report_err(local_err);
    }

    dev->virq = -1;

    proxy_intx_update(pci_dev);

    pci_device_set_intx_routing_notifier(pci_dev, proxy_intx_update);
}

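/*
 * Realize the proxy: turn the "fd" property into a socket QIOChannel to the
 * remote process, block migration, set up the proxy memory listener and the
 * irqfds, and probe the remote device's PCI configuration.
 */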
static void pci_proxy_dev_realize(PCIDevice *device, Error **errp)
{
    ERRP_GUARD();
    PCIProxyDev *dev = PCI_PROXY_DEV(device);
    uint8_t *pci_conf = device->config;
    int fd;

    if (!dev->fd) {
        error_setg(errp, "fd parameter not specified for %s",
                   DEVICE(device)->id);
        return;
    }

    fd = monitor_fd_param(monitor_cur(), dev->fd, errp);
    if (fd == -1) {
        error_prepend(errp, "proxy: unable to parse fd %s: ", dev->fd);
        return;
    }

    if (!fd_is_socket(fd)) {
        error_setg(errp, "proxy: fd %d is not a socket", fd);
        close(fd);
        return;
    }

    dev->ioc = qio_channel_new_fd(fd, errp);
    if (!dev->ioc) {
        close(fd);
        return;
    }

    error_setg(&dev->migration_blocker, "%s does not support migration",
               TYPE_PCI_PROXY_DEV);
    migrate_add_blocker(dev->migration_blocker, errp);

    qemu_mutex_init(&dev->io_mutex);
    qio_channel_set_blocking(dev->ioc, true, NULL);

    pci_conf[PCI_LATENCY_TIMER] = 0xff;
    pci_conf[PCI_INTERRUPT_PIN] = 0x01;

    proxy_memory_listener_configure(&dev->proxy_listener, dev->ioc);

    setup_irqfd(dev);

    probe_pci_info(PCI_DEVICE(dev), errp);
}

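/*
 * Unrealize: close the channel to the remote process, remove the migration
 * blocker, tear down the proxy memory listener and release the irqfd event
 * notifiers.
 */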
static void pci_proxy_dev_exit(PCIDevice *pdev)
{
    PCIProxyDev *dev = PCI_PROXY_DEV(pdev);

    if (dev->ioc) {
        qio_channel_close(dev->ioc, NULL);
    }

    migrate_del_blocker(dev->migration_blocker);

    error_free(dev->migration_blocker);

    proxy_memory_listener_deconfigure(&dev->proxy_listener);

    event_notifier_cleanup(&dev->intr);
    event_notifier_cleanup(&dev->resample);
}

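/*
 * Forward a PCI config space access to the remote process.  'op' is either
 * MPQEMU_CMD_PCI_CFGREAD or MPQEMU_CMD_PCI_CFGWRITE; for reads, *val is
 * updated with the value returned by the remote device.
 */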
static void config_op_send(PCIProxyDev *pdev, uint32_t addr, uint32_t *val,
                           int len, unsigned int op)
{
    MPQemuMsg msg = { 0 };
    uint64_t ret = -EINVAL;
    Error *local_err = NULL;

    msg.cmd = op;
    msg.data.pci_conf_data.addr = addr;
    msg.data.pci_conf_data.val = (op == MPQEMU_CMD_PCI_CFGWRITE) ? *val : 0;
    msg.data.pci_conf_data.len = len;
    msg.size = sizeof(PciConfDataMsg);

    ret = mpqemu_msg_send_and_await_reply(&msg, pdev, &local_err);
    if (local_err) {
        error_report_err(local_err);
    }

    if (ret == UINT64_MAX) {
        error_report("Failed to perform PCI config %s operation",
                     (op == MPQEMU_CMD_PCI_CFGREAD) ? "READ" : "WRITE");
    }

    if (op == MPQEMU_CMD_PCI_CFGREAD) {
        *val = (uint32_t)ret;
    }
}

static uint32_t pci_proxy_read_config(PCIDevice *d, uint32_t addr, int len)
{
    uint32_t val;

    config_op_send(PCI_PROXY_DEV(d), addr, &val, len, MPQEMU_CMD_PCI_CFGREAD);

    return val;
}

static void pci_proxy_write_config(PCIDevice *d, uint32_t addr, uint32_t val,
                                   int len)
{
    /*
     * Some functions access the copy of the remote device's PCI config
     * space that is cached in the proxy device, so keep that copy up to
     * date as well.
     */
    pci_default_write_config(d, addr, val, len);

    config_op_send(PCI_PROXY_DEV(d), addr, &val, len, MPQEMU_CMD_PCI_CFGWRITE);
}

static Property proxy_properties[] = {
    DEFINE_PROP_STRING("fd", PCIProxyDev, fd),
    DEFINE_PROP_END_OF_LIST(),
};

static void pci_proxy_dev_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = pci_proxy_dev_realize;
    k->exit = pci_proxy_dev_exit;
    k->config_read = pci_proxy_read_config;
    k->config_write = pci_proxy_write_config;

    dc->reset = proxy_device_reset;

    device_class_set_props(dc, proxy_properties);
}

static const TypeInfo pci_proxy_dev_type_info = {
    .name          = TYPE_PCI_PROXY_DEV,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(PCIProxyDev),
    .class_init    = pci_proxy_dev_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { },
    },
};

static void pci_proxy_dev_register_types(void)
{
    type_register_static(&pci_proxy_dev_type_info);
}

type_init(pci_proxy_dev_register_types)

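/*
 * Forward a BAR access to the remote process.  For writes, *val holds the
 * value to store; for reads, it receives the value returned by the remote
 * device.  'memory' distinguishes MMIO from I/O port space.
 */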
static void send_bar_access_msg(PCIProxyDev *pdev, MemoryRegion *mr,
                                bool write, hwaddr addr, uint64_t *val,
                                unsigned size, bool memory)
{
    MPQemuMsg msg = { 0 };
    long ret = -EINVAL;
    Error *local_err = NULL;

    msg.size = sizeof(BarAccessMsg);
    msg.data.bar_access.addr = mr->addr + addr;
    msg.data.bar_access.size = size;
    msg.data.bar_access.memory = memory;

    if (write) {
        msg.cmd = MPQEMU_CMD_BAR_WRITE;
        msg.data.bar_access.val = *val;
    } else {
        msg.cmd = MPQEMU_CMD_BAR_READ;
    }

    ret = mpqemu_msg_send_and_await_reply(&msg, pdev, &local_err);
    if (local_err) {
        error_report_err(local_err);
    }

    if (!write) {
        *val = ret;
    }
}

static void proxy_bar_write(void *opaque, hwaddr addr, uint64_t val,
                            unsigned size)
{
    ProxyMemoryRegion *pmr = opaque;

    send_bar_access_msg(pmr->dev, &pmr->mr, true, addr, &val, size,
                        pmr->memory);
}

static uint64_t proxy_bar_read(void *opaque, hwaddr addr, unsigned size)
{
    ProxyMemoryRegion *pmr = opaque;
    uint64_t val;

    send_bar_access_msg(pmr->dev, &pmr->mr, false, addr, &val, size,
                        pmr->memory);

    return val;
}

const MemoryRegionOps proxy_mr_ops = {
    .read = proxy_bar_read,
    .write = proxy_bar_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

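/*
 * Probe the remote device: read its vendor, device, class and subsystem IDs
 * into the proxy's PCIDeviceClass, and size each BAR so a matching proxy
 * memory region can be registered with the PCI core.
 */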
static void probe_pci_info(PCIDevice *dev, Error **errp)
{
    PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(dev);
    uint32_t orig_val, new_val, base_class, val;
    PCIProxyDev *pdev = PCI_PROXY_DEV(dev);
    DeviceClass *dc = DEVICE_CLASS(pc);
    uint8_t type;
    int i, size;

    config_op_send(pdev, PCI_VENDOR_ID, &val, 2, MPQEMU_CMD_PCI_CFGREAD);
    pc->vendor_id = (uint16_t)val;

    config_op_send(pdev, PCI_DEVICE_ID, &val, 2, MPQEMU_CMD_PCI_CFGREAD);
    pc->device_id = (uint16_t)val;

    config_op_send(pdev, PCI_CLASS_DEVICE, &val, 2, MPQEMU_CMD_PCI_CFGREAD);
    pc->class_id = (uint16_t)val;

    config_op_send(pdev, PCI_SUBSYSTEM_ID, &val, 2, MPQEMU_CMD_PCI_CFGREAD);
    pc->subsystem_id = (uint16_t)val;

    /* The base class is the upper byte of the 16-bit class code. */
    base_class = pc->class_id >> 8;
    switch (base_class) {
    case PCI_BASE_CLASS_BRIDGE:
        set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
        break;
    case PCI_BASE_CLASS_STORAGE:
        set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
        break;
    case PCI_BASE_CLASS_NETWORK:
        set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
        break;
    case PCI_BASE_CLASS_INPUT:
        set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
        break;
    case PCI_BASE_CLASS_DISPLAY:
        set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
        break;
    case PCI_BASE_CLASS_PROCESSOR:
        set_bit(DEVICE_CATEGORY_CPU, dc->categories);
        break;
    default:
        set_bit(DEVICE_CATEGORY_MISC, dc->categories);
        break;
    }

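    /*
     * Size each BAR the standard way: save the original value, write all
     * ones, read back the resulting size mask and restore the original value.
     */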
    for (i = 0; i < PCI_NUM_REGIONS; i++) {
        config_op_send(pdev, PCI_BASE_ADDRESS_0 + (4 * i), &orig_val, 4,
                       MPQEMU_CMD_PCI_CFGREAD);
        new_val = 0xffffffff;
        config_op_send(pdev, PCI_BASE_ADDRESS_0 + (4 * i), &new_val, 4,
                       MPQEMU_CMD_PCI_CFGWRITE);
        config_op_send(pdev, PCI_BASE_ADDRESS_0 + (4 * i), &new_val, 4,
                       MPQEMU_CMD_PCI_CFGREAD);
        size = (~(new_val & 0xFFFFFFF0)) + 1;
        config_op_send(pdev, PCI_BASE_ADDRESS_0 + (4 * i), &orig_val, 4,
                       MPQEMU_CMD_PCI_CFGWRITE);
        type = (new_val & 0x1) ?
                   PCI_BASE_ADDRESS_SPACE_IO : PCI_BASE_ADDRESS_SPACE_MEMORY;

        if (size) {
            g_autofree char *name = g_strdup_printf("bar-region-%d", i);

            pdev->region[i].dev = pdev;
            pdev->region[i].present = true;
            if (type == PCI_BASE_ADDRESS_SPACE_MEMORY) {
                pdev->region[i].memory = true;
            }
            memory_region_init_io(&pdev->region[i].mr, OBJECT(pdev),
                                  &proxy_mr_ops, &pdev->region[i],
                                  name, size);
            pci_register_bar(dev, i, type, &pdev->region[i].mr);
        }
    }
}

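/*
 * Ask the remote process to reset the device when the proxy is reset.
 */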
static void proxy_device_reset(DeviceState *dev)
{
    PCIProxyDev *pdev = PCI_PROXY_DEV(dev);
    MPQemuMsg msg = { 0 };
    Error *local_err = NULL;

    msg.cmd = MPQEMU_CMD_DEVICE_RESET;
    msg.size = 0;

    mpqemu_msg_send_and_await_reply(&msg, pdev, &local_err);
    if (local_err) {
        error_report_err(local_err);
    }
}