/*
 * Virtio PCI driver - legacy (virtio 0.9) device support
 *
 * Copyright IBM Corp. 2007
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Windows porting - Yan Vugenfirer <yvugenfi@redhat.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of their contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "osdep.h"
#include "virtio_pci.h"
#include "VirtIO.h"
#include "kdebugprint.h"
#include "virtio_ring.h"
#include "virtio_pci_common.h"
#include "windows/virtio_ring_allocation.h"

#ifdef WPP_EVENT_TRACING
#include "VirtIOPCILegacy.tmh"
#endif

/////////////////////////////////////////////////////////////////////////////////////
//
// vio_legacy_dump_registers - Dump HW registers of the device
//
/////////////////////////////////////////////////////////////////////////////////////
void vio_legacy_dump_registers(VirtIODevice *vdev)
{
    DPrintf(5, "%s\n", __FUNCTION__);

    DPrintf(0, "[VIRTIO_PCI_HOST_FEATURES] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_HOST_FEATURES));
    DPrintf(0, "[VIRTIO_PCI_GUEST_FEATURES] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_GUEST_FEATURES));
    DPrintf(0, "[VIRTIO_PCI_QUEUE_PFN] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_QUEUE_PFN));
    DPrintf(0, "[VIRTIO_PCI_QUEUE_NUM] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_QUEUE_NUM));
    DPrintf(0, "[VIRTIO_PCI_QUEUE_SEL] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_QUEUE_SEL));
    DPrintf(0, "[VIRTIO_PCI_QUEUE_NOTIFY] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_QUEUE_NOTIFY));
    DPrintf(0, "[VIRTIO_PCI_STATUS] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_STATUS));
    DPrintf(0, "[VIRTIO_PCI_ISR] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_ISR));
}
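
/*
 * Read @len bytes of device-specific configuration space, starting at
 * @offset, byte by byte through the legacy I/O window. The config area
 * follows the common header and is shifted when the MSI-X vector
 * registers are present, which is what VIRTIO_PCI_CONFIG(vdev->msix_used)
 * accounts for.
 */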
static void vio_legacy_get_config(VirtIODevice *vdev,
                                  unsigned offset,
                                  void *buf,
                                  unsigned len)
{
    ULONG_PTR ioaddr = vdev->addr + VIRTIO_PCI_CONFIG(vdev->msix_used) + offset;
    u8 *ptr = buf;
    unsigned i;

    DPrintf(5, "%s\n", __FUNCTION__);

    for (i = 0; i < len; i++) {
        ptr[i] = ioread8(vdev, ioaddr + i);
    }
}
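
/*
 * Write @len bytes of device-specific configuration space starting at
 * @offset, mirroring vio_legacy_get_config() above.
 */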
static void vio_legacy_set_config(VirtIODevice *vdev,
                                  unsigned offset,
                                  const void *buf,
                                  unsigned len)
{
    ULONG_PTR ioaddr = vdev->addr + VIRTIO_PCI_CONFIG(vdev->msix_used) + offset;
    const u8 *ptr = buf;
    unsigned i;

    DPrintf(5, "%s\n", __FUNCTION__);

    for (i = 0; i < len; i++) {
        iowrite8(vdev, ptr[i], ioaddr + i);
    }
}
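
/* Read the current device status byte from VIRTIO_PCI_STATUS. */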
static u8 vio_legacy_get_status(VirtIODevice *vdev)
{
    DPrintf(6, "%s\n", __FUNCTION__);
    return ioread8(vdev, vdev->addr + VIRTIO_PCI_STATUS);
}

static void vio_legacy_set_status(VirtIODevice *vdev, u8 status)
{
    DPrintf(6, "%s>>> %x\n", __FUNCTION__, status);
    iowrite8(vdev, status, vdev->addr + VIRTIO_PCI_STATUS);
}

static void vio_legacy_reset(VirtIODevice *vdev)
{
    /* 0 status means a reset. */
    iowrite8(vdev, 0, vdev->addr + VIRTIO_PCI_STATUS);
}
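
/*
 * The legacy interface exposes only 32 host feature bits, so the upper
 * half of the returned 64-bit mask is always zero.
 */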
static u64 vio_legacy_get_features(VirtIODevice *vdev)
{
    return ioread32(vdev, vdev->addr + VIRTIO_PCI_HOST_FEATURES);
}
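
/*
 * Acknowledge the features the driver will use. vring_transport_features()
 * is given a chance to adjust the transport feature bits first, and only
 * the low 32 bits can be written back to a legacy device.
 */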
static NTSTATUS vio_legacy_set_features(VirtIODevice *vdev, u64 features)
{
    /* Give virtio_ring a chance to accept features. */
    vring_transport_features(vdev, &features);

    /* Make sure we don't have any features > 32 bits! */
    ASSERT((u32)features == features);
    iowrite32(vdev, (u32)features, vdev->addr + VIRTIO_PCI_GUEST_FEATURES);

    return STATUS_SUCCESS;
}
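
/*
 * Route configuration change interrupts to the given MSI-X vector. The
 * register is read back so the caller can detect VIRTIO_MSI_NO_VECTOR,
 * which the device reports when it could not allocate the vector.
 */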
static u16 vio_legacy_set_config_vector(VirtIODevice *vdev, u16 vector)
{
    /* Setup the vector used for configuration events */
    iowrite16(vdev, vector, vdev->addr + VIRTIO_MSI_CONFIG_VECTOR);
    /* Verify we had enough resources to assign the vector */
    /* Will also flush the write out to device */
    return ioread16(vdev, vdev->addr + VIRTIO_MSI_CONFIG_VECTOR);
}
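
/*
 * Select queue vq->index and route its interrupts to the given MSI-X
 * vector; as above, the read-back value is VIRTIO_MSI_NO_VECTOR if the
 * device could not make the assignment.
 */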
static u16 vio_legacy_set_queue_vector(struct virtqueue *vq, u16 vector)
{
    VirtIODevice *vdev = vq->vdev;

    iowrite16(vdev, (u16)vq->index, vdev->addr + VIRTIO_PCI_QUEUE_SEL);
    iowrite16(vdev, vector, vdev->addr + VIRTIO_MSI_QUEUE_VECTOR);
    return ioread16(vdev, vdev->addr + VIRTIO_MSI_QUEUE_VECTOR);
}
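
/*
 * Query how much contiguous memory queue @index needs: the ring itself
 * plus the driver's per-queue control block, both rounded up to whole
 * pages. Returns STATUS_NOT_FOUND if the queue does not exist or has
 * already been activated.
 */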
static NTSTATUS vio_legacy_query_vq_alloc(VirtIODevice *vdev,
                                          unsigned index,
                                          unsigned short *pNumEntries,
                                          unsigned long *pRingSize,
                                          unsigned long *pHeapSize)
{
    unsigned long ring_size, data_size;
    u16 num;

    /* Select the queue we're interested in */
    iowrite16(vdev, (u16)index, vdev->addr + VIRTIO_PCI_QUEUE_SEL);

    /* Check if queue is either not available or already active. */
    num = ioread16(vdev, vdev->addr + VIRTIO_PCI_QUEUE_NUM);
    if (!num || ioread32(vdev, vdev->addr + VIRTIO_PCI_QUEUE_PFN)) {
        return STATUS_NOT_FOUND;
    }

    ring_size = ROUND_TO_PAGES(vring_size(num, VIRTIO_PCI_VRING_ALIGN, false));
    data_size = ROUND_TO_PAGES(vring_control_block_size(num, false));

    *pNumEntries = num;
    *pRingSize = ring_size + data_size;
    *pHeapSize = 0;

    return STATUS_SUCCESS;
}
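
/*
 * Allocate and activate queue @index: reserve physically contiguous pages
 * for the ring, program its page frame number into VIRTIO_PCI_QUEUE_PFN,
 * build the split-ring virtqueue on top of that memory and, if requested,
 * bind the queue to an MSI-X vector. On failure the queue is deactivated
 * and the memory released.
 */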
static NTSTATUS vio_legacy_setup_vq(struct virtqueue **queue,
                                    VirtIODevice *vdev,
                                    VirtIOQueueInfo *info,
                                    unsigned index,
                                    u16 msix_vec)
{
    struct virtqueue *vq;
    unsigned long ring_size, heap_size;
    NTSTATUS status;

    /* Select the queue and query allocation parameters */
    status = vio_legacy_query_vq_alloc(vdev, index, &info->num, &ring_size, &heap_size);
    if (!NT_SUCCESS(status)) {
        return status;
    }

    info->queue = mem_alloc_contiguous_pages(vdev, ring_size);
    if (info->queue == NULL) {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    /* activate the queue */
    iowrite32(vdev, (u32)(mem_get_physical_address(vdev, info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT),
              vdev->addr + VIRTIO_PCI_QUEUE_PFN);

    /* create the vring */
    vq = vring_new_virtqueue_split(index, info->num,
                                   VIRTIO_PCI_VRING_ALIGN, vdev,
                                   info->queue, vp_notify,
                                   (u8 *)info->queue + ROUND_TO_PAGES(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN, false)));
    if (!vq) {
        status = STATUS_INSUFFICIENT_RESOURCES;
        goto err_activate_queue;
    }

    vq->notification_addr = (void *)(vdev->addr + VIRTIO_PCI_QUEUE_NOTIFY);

    if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
        msix_vec = vdev->device->set_queue_vector(vq, msix_vec);
        if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
            status = STATUS_DEVICE_BUSY;
            goto err_assign;
        }
    }

    *queue = vq;
    return STATUS_SUCCESS;

err_assign:
err_activate_queue:
    iowrite32(vdev, 0, vdev->addr + VIRTIO_PCI_QUEUE_PFN);
    mem_free_contiguous_pages(vdev, info->queue);
    return status;
}
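
/*
 * Tear down a queue created by vio_legacy_setup_vq(): detach its MSI-X
 * vector if one was assigned, clear VIRTIO_PCI_QUEUE_PFN to deactivate
 * the queue and free the ring memory.
 */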
static void vio_legacy_del_vq(VirtIOQueueInfo *info)
{
    struct virtqueue *vq = info->vq;
    VirtIODevice *vdev = vq->vdev;

    iowrite16(vdev, (u16)vq->index, vdev->addr + VIRTIO_PCI_QUEUE_SEL);

    if (vdev->msix_used) {
        iowrite16(vdev, VIRTIO_MSI_NO_VECTOR,
                  vdev->addr + VIRTIO_MSI_QUEUE_VECTOR);
        /* Flush the write out to device */
        ioread8(vdev, vdev->addr + VIRTIO_PCI_ISR);
    }

    /* Select and deactivate the queue */
    iowrite32(vdev, 0, vdev->addr + VIRTIO_PCI_QUEUE_PFN);

    mem_free_contiguous_pages(vdev, info->queue);
}
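
/*
 * Transport operations for legacy (virtio 0.9.x) devices, bound to the
 * device in vio_legacy_initialize() below.
 */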
static const struct virtio_device_ops virtio_pci_device_ops = {
    /* .get_config = */ vio_legacy_get_config,
    /* .set_config = */ vio_legacy_set_config,
    /* .get_config_generation = */ NULL,
    /* .get_status = */ vio_legacy_get_status,
    /* .set_status = */ vio_legacy_set_status,
    /* .reset = */ vio_legacy_reset,
    /* .get_features = */ vio_legacy_get_features,
    /* .set_features = */ vio_legacy_set_features,
    /* .set_config_vector = */ vio_legacy_set_config_vector,
    /* .set_queue_vector = */ vio_legacy_set_queue_vector,
    /* .query_queue_alloc = */ vio_legacy_query_vq_alloc,
    /* .setup_queue = */ vio_legacy_setup_vq,
    /* .delete_queue = */ vio_legacy_del_vq,
};

/* Legacy device initialization: map the device's first PCI resource (the
 * legacy I/O register window), record the ISR register address and hook
 * up the legacy transport operations. */
NTSTATUS vio_legacy_initialize(VirtIODevice *vdev)
{
    size_t length = pci_get_resource_len(vdev, 0);
    vdev->addr = (ULONG_PTR)pci_map_address_range(vdev, 0, 0, length);

    if (!vdev->addr) {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    vdev->isr = (u8 *)vdev->addr + VIRTIO_PCI_ISR;

    vdev->device = &virtio_pci_device_ops;

    return STATUS_SUCCESS;
}