/* virtio-pci.c - pci interface for virtio interface
 *
 * (c) Copyright 2008 Bull S.A.S.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * some parts from Linux Virtio PCI driver
 *
 * Copyright IBM Corp. 2007
 * Authors: Anthony Liguori <aliguori@us.ibm.com>
 *
 * Adopted for Seabios: Gleb Natapov <gleb@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPLv3
 * See the COPYING file in the top-level directory.
 */
17
18 #include "config.h" // CONFIG_DEBUG_LEVEL
19 #include "malloc.h" // free
20 #include "output.h" // dprintf
21 #include "pci.h" // pci_config_readl
22 #include "pcidevice.h" // struct pci_device
23 #include "pci_regs.h" // PCI_BASE_ADDRESS_0
24 #include "string.h" // memset
25 #include "virtio-pci.h"
26 #include "virtio-ring.h"
27
_vp_read(struct vp_cap * cap,u32 offset,u8 size)28 u64 _vp_read(struct vp_cap *cap, u32 offset, u8 size)
29 {
30 u64 var = 0;
31
32 switch (cap->mode) {
33 case VP_ACCESS_IO:
34 {
35 u32 addr = cap->ioaddr + offset;
36 switch (size) {
37 case 8:
38 var = inl(addr);
39 var |= (u64)inl(addr+4) << 32;
40 break;
41 case 4:
42 var = inl(addr);
43 break;
44 case 2:
45 var = inw(addr);
46 break;
47 case 1:
48 var = inb(addr);
49 break;
50 }
51 break;
52 }
53
54 case VP_ACCESS_MMIO:
55 {
56 void *addr = cap->memaddr + offset;
57 switch (size) {
58 case 8:
59 var = le32_to_cpu(readl(addr));
60 var |= (u64)le32_to_cpu(readl(addr+4)) << 32;
61 break;
62 case 4:
63 var = le32_to_cpu(readl(addr));
64 break;
65 case 2:
66 var = le16_to_cpu(readw(addr));
67 break;
68 case 1:
69 var = readb(addr);
70 break;
71 }
72 break;
73 }
74
75 case VP_ACCESS_PCICFG:
76 {
77 u32 addr = cap->baroff + offset;
78 pci_config_writeb(cap->bdf, cap->cfg +
79 offsetof(struct virtio_pci_cfg_cap, cap.bar),
80 cap->bar);
81 pci_config_writel(cap->bdf, cap->cfg +
82 offsetof(struct virtio_pci_cfg_cap, cap.offset),
83 addr);
84 pci_config_writel(cap->bdf, cap->cfg +
85 offsetof(struct virtio_pci_cfg_cap, cap.length),
86 (size > 4) ? 4 : size);
87 switch (size) {
88 case 8:
89 var = pci_config_readl(cap->bdf, cap->cfg +
90 offsetof(struct virtio_pci_cfg_cap, pci_cfg_data));
91 pci_config_writel(cap->bdf, cap->cfg +
92 offsetof(struct virtio_pci_cfg_cap, cap.offset),
93 addr + 4);
94 var |= (u64)pci_config_readl(cap->bdf, cap->cfg +
95 offsetof(struct virtio_pci_cfg_cap, pci_cfg_data)) << 32;
96 break;
97 case 4:
98 var = pci_config_readl(cap->bdf, cap->cfg +
99 offsetof(struct virtio_pci_cfg_cap, pci_cfg_data));
100 break;
101 case 2:
102 var = pci_config_readw(cap->bdf, cap->cfg +
103 offsetof(struct virtio_pci_cfg_cap, pci_cfg_data));
104 break;
105 case 1:
106 var = pci_config_readb(cap->bdf, cap->cfg +
107 offsetof(struct virtio_pci_cfg_cap, pci_cfg_data));
108 break;
109 }
110 }
111 }
112 dprintf(9, "vp read %x (%d) -> 0x%llx\n", cap->ioaddr + offset, size, var);
113 return var;
114 }
115
/* Write "var" ("size" bytes: 1/2/4) to the virtio register at "offset"
 * within the region described by "cap", using the capability's recorded
 * access method (port I/O, MMIO, or the PCI cfg access window).
 *
 * Unlike _vp_read() there is no 8-byte case; 64-bit registers are written
 * by callers as two separate 32-bit writes (e.g. queue_desc_lo/hi).
 */
void _vp_write(struct vp_cap *cap, u32 offset, u8 size, u64 var)
{
    dprintf(9, "vp write %x (%d) <- 0x%llx mode=%d\n", cap->ioaddr + offset, size, var, cap->mode);

    switch (cap->mode) {
    case VP_ACCESS_IO:
    {
        u32 addr = cap->ioaddr + offset;
        switch (size) {
        case 4:
            outl(var, addr);
            break;
        case 2:
            outw(var, addr);
            break;
        case 1:
            outb(var, addr);
            break;
        }
        break;
    }

    case VP_ACCESS_MMIO:
    {
        void *addr = cap->memaddr + offset;
        switch (size) {
        case 4:
            writel(addr, cpu_to_le32(var));
            break;
        case 2:
            writew(addr, cpu_to_le16(var));
            break;
        case 1:
            writeb(addr, var);
            break;
        }
        break;
    }

    case VP_ACCESS_PCICFG:
    {
        /* Program the VIRTIO_PCI_CAP_PCI_CFG window, then store the
         * value through pci_cfg_data. */
        u32 addr = cap->baroff + offset;
        pci_config_writeb(cap->bdf, cap->cfg +
                          offsetof(struct virtio_pci_cfg_cap, cap.bar),
                          cap->bar);
        pci_config_writel(cap->bdf, cap->cfg +
                          offsetof(struct virtio_pci_cfg_cap, cap.offset),
                          addr);
        pci_config_writel(cap->bdf, cap->cfg +
                          offsetof(struct virtio_pci_cfg_cap, cap.length),
                          size);
        switch (size) {
        case 4:
            pci_config_writel(cap->bdf, cap->cfg +
                              offsetof(struct virtio_pci_cfg_cap, pci_cfg_data),
                              var);
            break;
        case 2:
            pci_config_writew(cap->bdf, cap->cfg +
                              offsetof(struct virtio_pci_cfg_cap, pci_cfg_data),
                              var);
            break;
        case 1:
            pci_config_writeb(cap->bdf, cap->cfg +
                              offsetof(struct virtio_pci_cfg_cap, pci_cfg_data),
                              var);
            break;
        }
    }
    }
}
187
vp_get_features(struct vp_device * vp)188 u64 vp_get_features(struct vp_device *vp)
189 {
190 u32 f0, f1;
191
192 if (vp->use_modern) {
193 vp_write(&vp->common, virtio_pci_common_cfg, device_feature_select, 0);
194 f0 = vp_read(&vp->common, virtio_pci_common_cfg, device_feature);
195 vp_write(&vp->common, virtio_pci_common_cfg, device_feature_select, 1);
196 f1 = vp_read(&vp->common, virtio_pci_common_cfg, device_feature);
197 } else {
198 f0 = vp_read(&vp->legacy, virtio_pci_legacy, host_features);
199 f1 = 0;
200 }
201 return ((u64)f1 << 32) | f0;
202 }
203
/* Acknowledge the driver's chosen feature bits to the device.
 *
 * Modern devices take both 32-bit halves via guest_feature_select;
 * legacy devices only accept the low 32 bits (the high half is
 * silently dropped there).
 */
void vp_set_features(struct vp_device *vp, u64 features)
{
    u32 f0, f1;

    f0 = features;        /* low 32 bits */
    f1 = features >> 32;  /* high 32 bits */

    if (vp->use_modern) {
        vp_write(&vp->common, virtio_pci_common_cfg, guest_feature_select, 0);
        vp_write(&vp->common, virtio_pci_common_cfg, guest_feature, f0);
        vp_write(&vp->common, virtio_pci_common_cfg, guest_feature_select, 1);
        vp_write(&vp->common, virtio_pci_common_cfg, guest_feature, f1);
    } else {
        vp_write(&vp->legacy, virtio_pci_legacy, guest_features, f0);
    }
}
220
vp_get_status(struct vp_device * vp)221 u8 vp_get_status(struct vp_device *vp)
222 {
223 if (vp->use_modern) {
224 return vp_read(&vp->common, virtio_pci_common_cfg, device_status);
225 } else {
226 return vp_read(&vp->legacy, virtio_pci_legacy, status);
227 }
228 }
229
/* Write the device status byte.
 *
 * A status of 0 means "reset" in the virtio protocol; this function
 * deliberately refuses it so resets always go through vp_reset(),
 * which also performs the follow-up ISR read.
 */
void vp_set_status(struct vp_device *vp, u8 status)
{
    if (status == 0) /* reset */
        return;
    if (vp->use_modern) {
        vp_write(&vp->common, virtio_pci_common_cfg, device_status, status);
    } else {
        vp_write(&vp->legacy, virtio_pci_legacy, status, status);
    }
}
240
vp_get_isr(struct vp_device * vp)241 u8 vp_get_isr(struct vp_device *vp)
242 {
243 if (vp->use_modern) {
244 return vp_read(&vp->isr, virtio_pci_isr, isr);
245 } else {
246 return vp_read(&vp->legacy, virtio_pci_legacy, isr);
247 }
248 }
249
vp_reset(struct vp_device * vp)250 void vp_reset(struct vp_device *vp)
251 {
252 if (vp->use_modern) {
253 vp_write(&vp->common, virtio_pci_common_cfg, device_status, 0);
254 vp_read(&vp->isr, virtio_pci_isr, isr);
255 } else {
256 vp_write(&vp->legacy, virtio_pci_legacy, status, 0);
257 vp_read(&vp->legacy, virtio_pci_legacy, isr);
258 }
259 }
260
vp_notify(struct vp_device * vp,struct vring_virtqueue * vq)261 void vp_notify(struct vp_device *vp, struct vring_virtqueue *vq)
262 {
263 if (vp->use_modern) {
264 u32 offset = vq->queue_notify_off * vp->notify_off_multiplier;
265 switch (vp->notify.mode) {
266 case VP_ACCESS_IO:
267 outw(vq->queue_index, vp->notify.ioaddr + offset);
268 break;
269 case VP_ACCESS_MMIO:
270 writew(vp->notify.memaddr + offset, vq->queue_index);
271 break;
272 case VP_ACCESS_PCICFG:
273 pci_config_writeb(vp->notify.bdf, vp->notify.cfg +
274 offsetof(struct virtio_pci_cfg_cap, cap.bar),
275 vp->notify.bar);
276 pci_config_writel(vp->notify.bdf, vp->notify.cfg +
277 offsetof(struct virtio_pci_cfg_cap, cap.offset),
278 vp->notify.baroff + offset);
279 pci_config_writel(vp->notify.bdf, vp->notify.cfg +
280 offsetof(struct virtio_pci_cfg_cap, cap.length),
281 2);
282 pci_config_writew(vp->notify.bdf, vp->notify.cfg +
283 offsetof(struct virtio_pci_cfg_cap, pci_cfg_data),
284 vq->queue_index);
285 }
286 dprintf(9, "vp notify %x (%d) -- 0x%x\n",
287 vp->notify.ioaddr, 2, vq->queue_index);
288 } else {
289 vp_write(&vp->legacy, virtio_pci_legacy, queue_notify, vq->queue_index);
290 }
291 }
292
vp_find_vq(struct vp_device * vp,int queue_index,struct vring_virtqueue ** p_vq)293 int vp_find_vq(struct vp_device *vp, int queue_index,
294 struct vring_virtqueue **p_vq)
295 {
296 u16 num;
297
298 ASSERT32FLAT();
299 struct vring_virtqueue *vq = *p_vq = memalign_high(PAGE_SIZE, sizeof(*vq));
300 if (!vq) {
301 warn_noalloc();
302 goto fail;
303 }
304 memset(vq, 0, sizeof(*vq));
305
306
307 /* select the queue */
308 if (vp->use_modern) {
309 vp_write(&vp->common, virtio_pci_common_cfg, queue_select, queue_index);
310 } else {
311 vp_write(&vp->legacy, virtio_pci_legacy, queue_sel, queue_index);
312 }
313
314 /* check if the queue is available */
315 if (vp->use_modern) {
316 num = vp_read(&vp->common, virtio_pci_common_cfg, queue_size);
317 if (num > MAX_QUEUE_NUM) {
318 vp_write(&vp->common, virtio_pci_common_cfg, queue_size,
319 MAX_QUEUE_NUM);
320 num = vp_read(&vp->common, virtio_pci_common_cfg, queue_size);
321 }
322 } else {
323 num = vp_read(&vp->legacy, virtio_pci_legacy, queue_num);
324 }
325 if (!num) {
326 dprintf(1, "ERROR: queue size is 0\n");
327 goto fail;
328 }
329 if (num > MAX_QUEUE_NUM) {
330 dprintf(1, "ERROR: queue size %d > %d\n", num, MAX_QUEUE_NUM);
331 goto fail;
332 }
333
334 /* check if the queue is already active */
335 if (vp->use_modern) {
336 if (vp_read(&vp->common, virtio_pci_common_cfg, queue_enable)) {
337 dprintf(1, "ERROR: queue already active\n");
338 goto fail;
339 }
340 } else {
341 if (vp_read(&vp->legacy, virtio_pci_legacy, queue_pfn)) {
342 dprintf(1, "ERROR: queue already active\n");
343 goto fail;
344 }
345 }
346 vq->queue_index = queue_index;
347
348 /* initialize the queue */
349 struct vring * vr = &vq->vring;
350 vring_init(vr, num, (unsigned char*)&vq->queue);
351
352 /* activate the queue
353 *
354 * NOTE: vr->desc is initialized by vring_init()
355 */
356
357 if (vp->use_modern) {
358 vp_write(&vp->common, virtio_pci_common_cfg, queue_desc_lo,
359 (unsigned long)virt_to_phys(vr->desc));
360 vp_write(&vp->common, virtio_pci_common_cfg, queue_desc_hi, 0);
361 vp_write(&vp->common, virtio_pci_common_cfg, queue_avail_lo,
362 (unsigned long)virt_to_phys(vr->avail));
363 vp_write(&vp->common, virtio_pci_common_cfg, queue_avail_hi, 0);
364 vp_write(&vp->common, virtio_pci_common_cfg, queue_used_lo,
365 (unsigned long)virt_to_phys(vr->used));
366 vp_write(&vp->common, virtio_pci_common_cfg, queue_used_hi, 0);
367 vp_write(&vp->common, virtio_pci_common_cfg, queue_enable, 1);
368 vq->queue_notify_off = vp_read(&vp->common, virtio_pci_common_cfg,
369 queue_notify_off);
370 } else {
371 vp_write(&vp->legacy, virtio_pci_legacy, queue_pfn,
372 (unsigned long)virt_to_phys(vr->desc) >> PAGE_SHIFT);
373 }
374 return num;
375
376 fail:
377 free(vq);
378 *p_vq = NULL;
379 return -1;
380 }
381
vp_init_simple(struct vp_device * vp,struct pci_device * pci)382 void vp_init_simple(struct vp_device *vp, struct pci_device *pci)
383 {
384 u8 cap = pci_find_capability(pci->bdf, PCI_CAP_ID_VNDR, 0);
385 struct vp_cap *vp_cap;
386 const char *mode;
387 u32 offset, base, mul;
388 u64 addr;
389 u8 type;
390
391 memset(vp, 0, sizeof(*vp));
392 while (cap != 0) {
393 type = pci_config_readb(pci->bdf, cap +
394 offsetof(struct virtio_pci_cap, cfg_type));
395 switch (type) {
396 case VIRTIO_PCI_CAP_COMMON_CFG:
397 vp_cap = &vp->common;
398 break;
399 case VIRTIO_PCI_CAP_NOTIFY_CFG:
400 vp_cap = &vp->notify;
401 mul = offsetof(struct virtio_pci_notify_cap, notify_off_multiplier);
402 vp->notify_off_multiplier = pci_config_readl(pci->bdf, cap + mul);
403 break;
404 case VIRTIO_PCI_CAP_ISR_CFG:
405 vp_cap = &vp->isr;
406 break;
407 case VIRTIO_PCI_CAP_DEVICE_CFG:
408 vp_cap = &vp->device;
409 break;
410 case VIRTIO_PCI_CAP_PCI_CFG:
411 vp->common.cfg = cap;
412 vp->common.bdf = pci->bdf;
413 vp->notify.cfg = cap;
414 vp->notify.bdf = pci->bdf;
415 vp->isr.cfg = cap;
416 vp->isr.bdf = pci->bdf;
417 vp->device.cfg = cap;
418 vp->device.bdf = pci->bdf;
419 vp_cap = NULL;
420 dprintf(1, "pci dev %x:%x virtio cap at 0x%x type %d [pci cfg access]\n",
421 pci_bdf_to_bus(pci->bdf), pci_bdf_to_dev(pci->bdf),
422 cap, type);
423 break;
424 default:
425 vp_cap = NULL;
426 break;
427 }
428 if (vp_cap && !vp_cap->cap) {
429 vp_cap->cap = cap;
430 vp_cap->bar = pci_config_readb(pci->bdf, cap +
431 offsetof(struct virtio_pci_cap, bar));
432 offset = pci_config_readl(pci->bdf, cap +
433 offsetof(struct virtio_pci_cap, offset));
434 base = PCI_BASE_ADDRESS_0 + 4 * vp_cap->bar;
435 addr = pci_config_readl(pci->bdf, base);
436 if (addr & PCI_BASE_ADDRESS_SPACE_IO) {
437 addr &= PCI_BASE_ADDRESS_IO_MASK;
438 vp_cap->mode = VP_ACCESS_IO;
439 } else if ((addr & PCI_BASE_ADDRESS_MEM_TYPE_MASK) ==
440 PCI_BASE_ADDRESS_MEM_TYPE_64) {
441 addr &= PCI_BASE_ADDRESS_MEM_MASK;
442 addr |= (u64)pci_config_readl(pci->bdf, base + 4) << 32;
443 vp_cap->mode = (addr > 0xffffffffll) ?
444 VP_ACCESS_PCICFG : VP_ACCESS_MMIO;
445 } else {
446 addr &= PCI_BASE_ADDRESS_MEM_MASK;
447 vp_cap->mode = VP_ACCESS_MMIO;
448 }
449 switch (vp_cap->mode) {
450 case VP_ACCESS_IO:
451 {
452 u32 addr = pci_enable_iobar(pci, base);
453 if (!addr)
454 return;
455 vp_cap->ioaddr = addr + offset;
456 mode = "io";
457 break;
458 }
459 case VP_ACCESS_MMIO:
460 {
461 void *addr = pci_enable_membar(pci, base);
462 if (!addr)
463 return;
464 vp_cap->memaddr = addr + offset;
465 mode = "mmio";
466 break;
467 }
468 case VP_ACCESS_PCICFG:
469 mode = "pcicfg";
470 vp_cap->baroff = offset;
471 break;
472 default:
473 mode = "Huh?";
474 break;
475 }
476 dprintf(1, "pci dev %x:%x virtio cap at 0x%x type %d "
477 "bar %d at 0x%08llx off +0x%04x [%s]\n",
478 pci_bdf_to_bus(pci->bdf), pci_bdf_to_dev(pci->bdf),
479 vp_cap->cap, type, vp_cap->bar, addr, offset, mode);
480 }
481
482 cap = pci_find_capability(pci->bdf, PCI_CAP_ID_VNDR, cap);
483 }
484
485 if (vp->common.cap && vp->notify.cap && vp->isr.cap && vp->device.cap) {
486 dprintf(1, "pci dev %pP using modern (1.0) virtio mode\n", pci);
487 vp->use_modern = 1;
488 } else {
489 dprintf(1, "pci dev %pP using legacy (0.9.5) virtio mode\n", pci);
490 vp->legacy.bar = 0;
491 vp->legacy.ioaddr = pci_enable_iobar(pci, PCI_BASE_ADDRESS_0);
492 if (!vp->legacy.ioaddr)
493 return;
494 vp->legacy.mode = VP_ACCESS_IO;
495 }
496
497 vp_reset(vp);
498 pci_enable_busmaster(pci);
499 vp_set_status(vp, VIRTIO_CONFIG_S_ACKNOWLEDGE |
500 VIRTIO_CONFIG_S_DRIVER );
501 }
502