/*
 * Virtio PCI driver - common functionality for all device versions
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of their contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "osdep.h"
#include "virtio_pci.h"
#include "VirtIO.h"
#include "kdebugprint.h"
#include <stddef.h>

#include "virtio_pci_common.h"

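/*
 * Initializes the common VirtIODevice state: the modern (virtio 1.x) transport
 * is probed first and, if it reports STATUS_DEVICE_NOT_CONNECTED, the legacy
 * transport is used instead. On success the device has been reset and the
 * ACKNOWLEDGE and DRIVER status bits are set, per the virtio initialization
 * sequence.
 */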
NTSTATUS virtio_device_initialize(VirtIODevice *vdev,
                                  const VirtIOSystemOps *pSystemOps,
                                  PVOID DeviceContext,
                                  bool msix_used)
{
    NTSTATUS status;

    RtlZeroMemory(vdev, sizeof(VirtIODevice));
    vdev->DeviceContext = DeviceContext;
    vdev->system = pSystemOps;
    vdev->msix_used = msix_used;
    vdev->info = vdev->inline_info;
    vdev->maxQueues = ARRAYSIZE(vdev->inline_info);

    status = vio_modern_initialize(vdev);
    if (status == STATUS_DEVICE_NOT_CONNECTED) {
        /* fall back to legacy virtio device */
        status = vio_legacy_initialize(vdev);
    }
    if (NT_SUCCESS(status)) {
        /* Always start by resetting the device */
        virtio_device_reset(vdev);

        /* Acknowledge that we've seen the device. */
        virtio_add_status(vdev, VIRTIO_CONFIG_S_ACKNOWLEDGE);

        /* If we are here, we must have found a driver for the device */
        virtio_add_status(vdev, VIRTIO_CONFIG_S_DRIVER);
    }

    return status;
}

void virtio_device_shutdown(VirtIODevice *vdev)
{
    if (vdev->info &&
        vdev->info != vdev->inline_info) {
        mem_free_nonpaged_block(vdev, vdev->info);
        vdev->info = NULL;
    }
}

u8 virtio_get_status(VirtIODevice *vdev)
{
    return vdev->device->get_status(vdev);
}

void virtio_set_status(VirtIODevice *vdev, u8 status)
{
    vdev->device->set_status(vdev, status);
}

void virtio_add_status(VirtIODevice *vdev, u8 status)
{
    vdev->device->set_status(vdev, (u8)(vdev->device->get_status(vdev) | status));
}

void virtio_device_reset(VirtIODevice *vdev)
{
    vdev->device->reset(vdev);
}

void virtio_device_ready(VirtIODevice *vdev)
{
    unsigned status = vdev->device->get_status(vdev);

    ASSERT(!(status & VIRTIO_CONFIG_S_DRIVER_OK));
    vdev->device->set_status(vdev, (u8)(status | VIRTIO_CONFIG_S_DRIVER_OK));
}

u64 virtio_get_features(VirtIODevice *vdev)
{
    return vdev->device->get_features(vdev);
}

NTSTATUS virtio_set_features(VirtIODevice *vdev, u64 features)
{
    unsigned char dev_status;
    NTSTATUS status;

    vdev->event_suppression_enabled = virtio_is_feature_enabled(features, VIRTIO_RING_F_EVENT_IDX);
    vdev->packed_ring = virtio_is_feature_enabled(features, VIRTIO_F_RING_PACKED);

    status = vdev->device->set_features(vdev, features);
    if (!NT_SUCCESS(status)) {
        return status;
    }

    if (!virtio_is_feature_enabled(features, VIRTIO_F_VERSION_1)) {
        return status;
    }

    virtio_add_status(vdev, VIRTIO_CONFIG_S_FEATURES_OK);
    dev_status = vdev->device->get_status(vdev);
    if (!(dev_status & VIRTIO_CONFIG_S_FEATURES_OK)) {
        DPrintf(0, "virtio: device refuses features: %x\n", dev_status);
        status = STATUS_INVALID_PARAMETER;
    }
    return status;
}
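
/*
 * Illustrative negotiation sketch (names like supported_features are
 * hypothetical, not part of this library): a driver typically masks the
 * device's feature offer before writing it back, e.g.
 *
 *     u64 features = virtio_get_features(vdev);
 *     features &= supported_features;
 *     status = virtio_set_features(vdev, features);
 *
 * For VIRTIO_F_VERSION_1 devices the FEATURES_OK handshake performed above
 * fails with STATUS_INVALID_PARAMETER if the device rejects the selection.
 */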

/* Read @count fields, @bytes each. */
static void virtio_cread_many(VirtIODevice *vdev,
                              unsigned int offset,
                              void *buf, size_t count, size_t bytes)
{
    u32 old, gen = vdev->device->get_config_generation ?
        vdev->device->get_config_generation(vdev) : 0;
    size_t i;

    do {
        old = gen;

        for (i = 0; i < count; i++) {
            vdev->device->get_config(vdev, (unsigned)(offset + bytes * i),
                (char *)buf + i * bytes, (unsigned)bytes);
        }

        gen = vdev->device->get_config_generation ?
            vdev->device->get_config_generation(vdev) : 0;
    } while (gen != old);
}

void virtio_get_config(VirtIODevice *vdev, unsigned offset,
                       void *buf, unsigned len)
{
    switch (len) {
    case 1:
    case 2:
    case 4:
        vdev->device->get_config(vdev, offset, buf, len);
        break;
    case 8:
        virtio_cread_many(vdev, offset, buf, 2, sizeof(u32));
        break;
    default:
        virtio_cread_many(vdev, offset, buf, len, 1);
        break;
    }
}
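
/*
 * Illustrative read (the 6-byte field at offset 0 is a hypothetical layout):
 * multi-byte reads go through the config-generation loop above so the caller
 * sees a consistent snapshot, e.g.
 *
 *     u8 mac[6];
 *     virtio_get_config(vdev, 0, mac, sizeof(mac));
 */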

/* Write @count fields, @bytes each. */
static void virtio_cwrite_many(VirtIODevice *vdev,
                               unsigned int offset,
                               void *buf, size_t count, size_t bytes)
{
    size_t i;
    for (i = 0; i < count; i++) {
        vdev->device->set_config(vdev, (unsigned)(offset + bytes * i),
            (char *)buf + i * bytes, (unsigned)bytes);
    }
}

void virtio_set_config(VirtIODevice *vdev, unsigned offset,
                       void *buf, unsigned len)
{
    switch (len) {
    case 1:
    case 2:
    case 4:
        vdev->device->set_config(vdev, offset, buf, len);
        break;
    case 8:
        virtio_cwrite_many(vdev, offset, buf, 2, sizeof(u32));
        break;
    default:
        virtio_cwrite_many(vdev, offset, buf, len, 1);
        break;
    }
}

NTSTATUS virtio_query_queue_allocation(VirtIODevice *vdev,
                                       unsigned index,
                                       unsigned short *pNumEntries,
                                       unsigned long *pRingSize,
                                       unsigned long *pHeapSize)
{
    return vdev->device->query_queue_alloc(vdev, index, pNumEntries, pRingSize, pHeapSize);
}

NTSTATUS virtio_reserve_queue_memory(VirtIODevice *vdev, unsigned nvqs)
{
    if (nvqs > vdev->maxQueues) {
        /* allocate new space for queue infos */
        void *new_info = mem_alloc_nonpaged_block(vdev, nvqs * virtio_get_queue_descriptor_size());
        if (!new_info) {
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        if (vdev->info && vdev->info != vdev->inline_info) {
            mem_free_nonpaged_block(vdev, vdev->info);
        }
        vdev->info = new_info;
        vdev->maxQueues = nvqs;
    }
    return STATUS_SUCCESS;
}
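
/*
 * Note: virtio_find_queues() reserves queue memory itself, but a driver that
 * discovers queues one at a time with virtio_find_queue() and needs more than
 * the inline_info slots must reserve first, e.g. (sketch, error paths omitted):
 *
 *     status = virtio_reserve_queue_memory(vdev, nvqs);
 *     if (NT_SUCCESS(status)) {
 *         status = virtio_find_queue(vdev, 0, &vq);
 *     }
 */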

static NTSTATUS vp_setup_vq(struct virtqueue **queue,
                            VirtIODevice *vdev, unsigned index,
                            u16 msix_vec)
{
    VirtIOQueueInfo *info = &vdev->info[index];

    NTSTATUS status = vdev->device->setup_queue(queue, vdev, info, index, msix_vec);
    if (NT_SUCCESS(status)) {
        info->vq = *queue;
    }

    return status;
}

NTSTATUS virtio_find_queue(VirtIODevice *vdev, unsigned index,
                           struct virtqueue **vq)
{
    u16 msix_vec = vdev_get_msix_vector(vdev, index);
    return vp_setup_vq(
        vq,
        vdev,
        index,
        msix_vec);
}

NTSTATUS virtio_find_queues(VirtIODevice *vdev,
                            unsigned nvqs,
                            struct virtqueue *vqs[])
{
    unsigned i;
    NTSTATUS status;
    u16 msix_vec;

    status = virtio_reserve_queue_memory(vdev, nvqs);
    if (!NT_SUCCESS(status)) {
        return status;
    }

    /* set up the device config interrupt */
    msix_vec = vdev_get_msix_vector(vdev, -1);

    if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
        msix_vec = vdev->device->set_config_vector(vdev, msix_vec);
        /* Verify we had enough resources to assign the vector */
        if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
            status = STATUS_DEVICE_BUSY;
            goto error_find;
        }
    }

    /* set up queue interrupts */
    for (i = 0; i < nvqs; i++) {
        msix_vec = vdev_get_msix_vector(vdev, i);
        status = vp_setup_vq(
            &vqs[i],
            vdev,
            i,
            msix_vec);
        if (!NT_SUCCESS(status)) {
            goto error_find;
        }
    }
    return STATUS_SUCCESS;

error_find:
    virtio_delete_queues(vdev);
    return status;
}
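
/*
 * Illustrative bring-up sketch (assumes a two-queue device, error handling
 * omitted): once all queues are found, the driver completes initialization
 * by setting DRIVER_OK:
 *
 *     struct virtqueue *vqs[2];
 *     status = virtio_find_queues(vdev, 2, vqs);
 *     if (NT_SUCCESS(status)) {
 *         virtio_device_ready(vdev);
 *     }
 */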

void virtio_delete_queue(struct virtqueue *vq)
{
    VirtIODevice *vdev = vq->vdev;
    unsigned i = vq->index;

    vdev->device->delete_queue(&vdev->info[i]);
    vdev->info[i].vq = NULL;
}

void virtio_delete_queues(VirtIODevice *vdev)
{
    struct virtqueue *vq;
    unsigned i;

    if (vdev->info == NULL)
        return;

    for (i = 0; i < vdev->maxQueues; i++) {
        vq = vdev->info[i].vq;
        if (vq != NULL) {
            vdev->device->delete_queue(&vdev->info[i]);
            vdev->info[i].vq = NULL;
        }
    }
}

u32 virtio_get_queue_size(struct virtqueue *vq)
{
    return vq->vdev->info[vq->index].num;
}

u16 virtio_set_config_vector(VirtIODevice *vdev, u16 vector)
{
    return vdev->device->set_config_vector(vdev, vector);
}

u16 virtio_set_queue_vector(struct virtqueue *vq, u16 vector)
{
    return vq->vdev->device->set_queue_vector(vq, vector);
}

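/*
 * Reads the ISR status byte; per the virtio specification this read also
 * resets the register to zero, acknowledging the interrupt on the device side.
 */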
u8 virtio_read_isr_status(VirtIODevice *vdev)
{
    return ioread8(vdev, vdev->isr);
}

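/*
 * Walks the type 0 PCI BARs and returns the index of the BAR whose decoded
 * base address equals BasePA, handling I/O, 32-bit memory and 64-bit memory
 * BARs; returns -1 if no BAR matches.
 */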
int virtio_get_bar_index(PPCI_COMMON_HEADER pPCIHeader, PHYSICAL_ADDRESS BasePA)
{
    int iBar, i;

    /* no point in supporting PCI and CardBus bridges */
    ASSERT((pPCIHeader->HeaderType & ~PCI_MULTIFUNCTION) == PCI_DEVICE_TYPE);

    for (i = 0; i < PCI_TYPE0_ADDRESSES; i++) {
        PHYSICAL_ADDRESS BAR;
        BAR.LowPart = pPCIHeader->u.type0.BaseAddresses[i];

        iBar = i;
        if (BAR.LowPart & PCI_ADDRESS_IO_SPACE) {
            /* I/O space */
            BAR.LowPart &= PCI_ADDRESS_IO_ADDRESS_MASK;
            BAR.HighPart = 0;
        } else if ((BAR.LowPart & PCI_ADDRESS_MEMORY_TYPE_MASK) == PCI_TYPE_64BIT) {
            /* memory space 64-bit */
            BAR.LowPart &= PCI_ADDRESS_MEMORY_ADDRESS_MASK;
            BAR.HighPart = pPCIHeader->u.type0.BaseAddresses[++i];
        } else {
            /* memory space 32-bit */
            BAR.LowPart &= PCI_ADDRESS_MEMORY_ADDRESS_MASK;
            BAR.HighPart = 0;
        }

        if (BAR.QuadPart == BasePA.QuadPart) {
            return iBar;
        }
    }
    return -1;
}

/* The notify function used when creating a virt queue, common to both modern
 * and legacy (the difference is in how vq->notification_addr is set up).
 */
void vp_notify(struct virtqueue *vq)
{
    /* we write the queue's selector into the notification register to
     * signal the other end */
    iowrite16(vq->vdev, (unsigned short)vq->index, vq->notification_addr);
    DPrintf(6, "virtio: vp_notify vq->index = %x\n", vq->index);
}

void virtqueue_notify(struct virtqueue *vq)
{
    vq->notification_cb(vq);
}

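/*
 * Kicks the device only when a notification is actually needed:
 * virtqueue_kick_prepare() consults the ring's suppression state (avail/used
 * flags or the event index, when VIRTIO_RING_F_EVENT_IDX was negotiated) and
 * skips the write to the notification register otherwise.
 */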
void virtqueue_kick(struct virtqueue *vq)
{
    if (virtqueue_kick_prepare(vq)) {
        virtqueue_notify(vq);
    }
}