#ifndef _VIRTIO_PCI_H_
#define _VIRTIO_PCI_H_

/* A 32-bit r/o bitmask of the features supported by the host */
#define VIRTIO_PCI_HOST_FEATURES        0

/* A 32-bit r/w bitmask of features activated by the guest */
#define VIRTIO_PCI_GUEST_FEATURES       4

/* A 32-bit r/w PFN for the currently selected queue */
#define VIRTIO_PCI_QUEUE_PFN            8

/* A 16-bit r/o queue size for the currently selected queue */
#define VIRTIO_PCI_QUEUE_NUM            12

/* A 16-bit r/w queue selector */
#define VIRTIO_PCI_QUEUE_SEL            14

/* A 16-bit r/w queue notifier */
#define VIRTIO_PCI_QUEUE_NOTIFY         16

/* An 8-bit device status register. */
#define VIRTIO_PCI_STATUS               18

/* An 8-bit r/o interrupt status register.  Reading the value will return the
 * current contents of the ISR and will also clear it.  This is effectively
 * a read-and-acknowledge. */
#define VIRTIO_PCI_ISR                  19

/* The bit of the ISR which indicates a device configuration change. */
#define VIRTIO_PCI_ISR_CONFIG           0x2

/* The remaining space is defined by each driver as the per-driver
 * configuration space */
#define VIRTIO_PCI_CONFIG               20

/* Virtio ABI version; this must match exactly */
#define VIRTIO_PCI_ABI_VERSION          0

/* PCI capability types: */
#define VIRTIO_PCI_CAP_COMMON_CFG       1  /* Common configuration */
#define VIRTIO_PCI_CAP_NOTIFY_CFG       2  /* Notifications */
#define VIRTIO_PCI_CAP_ISR_CFG          3  /* ISR access */
#define VIRTIO_PCI_CAP_DEVICE_CFG       4  /* Device specific configuration */
#define VIRTIO_PCI_CAP_PCI_CFG          5  /* PCI configuration access */

#define __u8       uint8_t
#define __le16     uint16_t
#define __le32     uint32_t
#define __le64     uint64_t
/* This is the PCI capability header: */
struct virtio_pci_cap {
    __u8 cap_vndr;    /* Generic PCI field: PCI_CAP_ID_VNDR */
    __u8 cap_next;    /* Generic PCI field: next ptr. */
    __u8 cap_len;     /* Generic PCI field: capability length */
    __u8 cfg_type;    /* Identifies the structure. */
    __u8 bar;         /* Where to find it. */
    __u8 padding[3];  /* Pad to full dword. */
    __le32 offset;    /* Offset within bar. */
    __le32 length;    /* Length of the structure, in bytes. */
};

struct virtio_pci_notify_cap {
    struct virtio_pci_cap cap;
    __le32 notify_off_multiplier; /* Multiplier for queue_notify_off. */
};

struct virtio_pci_cfg_cap {
    struct virtio_pci_cap cap;
    __u8 pci_cfg_data[4]; /* Data for BAR access. */
};

/* Fields in VIRTIO_PCI_CAP_COMMON_CFG: */
struct virtio_pci_common_cfg {
    /* About the whole device. */
    __le32 device_feature_select; /* read-write */
    __le32 device_feature;        /* read-only */
    __le32 guest_feature_select;  /* read-write */
    __le32 guest_feature;         /* read-write */
    __le16 msix_config;           /* read-write */
    __le16 num_queues;            /* read-only */
    __u8 device_status;           /* read-write */
    __u8 config_generation;       /* read-only */

    /* About a specific virtqueue. */
    __le16 queue_select;          /* read-write */
    __le16 queue_size;            /* read-write, power of 2. */
    __le16 queue_msix_vector;     /* read-write */
    __le16 queue_enable;          /* read-write */
    __le16 queue_notify_off;      /* read-only */
    __le32 queue_desc_lo;         /* read-write */
    __le32 queue_desc_hi;         /* read-write */
    __le32 queue_avail_lo;        /* read-write */
    __le32 queue_avail_hi;        /* read-write */
    __le32 queue_used_lo;         /* read-write */
    __le32 queue_used_hi;         /* read-write */
};

/* Virtio 1.0 PCI region descriptor. We support memory mapped I/O, port I/O,
 * and PCI config space access via the cfg PCI capability as a fallback. */
struct virtio_pci_region {
    void *base;
    size_t length;
    u8 bar;

/* How to interpret the base field */
#define VIRTIO_PCI_REGION_TYPE_MASK  0x00000003
/* The base field is a memory address */
#define VIRTIO_PCI_REGION_MEMORY     0x00000001
/* The base field is a port address */
#define VIRTIO_PCI_REGION_PORT       0x00000002
/* The base field is an offset within the PCI bar */
#define VIRTIO_PCI_REGION_PCI_CONFIG 0x00000003
    unsigned flags;
};

/* Virtio 1.0 device state */
struct virtio_pci_modern_device {
    struct pci_device *pci;

    /* VIRTIO_PCI_CAP_PCI_CFG position */
    int cfg_cap_pos;

    /* VIRTIO_PCI_CAP_COMMON_CFG data */
    struct virtio_pci_region common;

    /* VIRTIO_PCI_CAP_DEVICE_CFG data */
    struct virtio_pci_region device;

    /* VIRTIO_PCI_CAP_ISR_CFG data */
    struct virtio_pci_region isr;

    /* VIRTIO_PCI_CAP_NOTIFY_CFG data */
    int notify_cap_pos;
};
static inline u32 vp_get_features(unsigned int ioaddr)
{
    return inl(ioaddr + VIRTIO_PCI_HOST_FEATURES);
}

static inline void vp_set_features(unsigned int ioaddr, u32 features)
{
    outl(features, ioaddr + VIRTIO_PCI_GUEST_FEATURES);
}

static inline void vp_get(unsigned int ioaddr, unsigned offset,
                          void *buf, unsigned len)
{
    u8 *ptr = buf;
    unsigned i;

    for (i = 0; i < len; i++)
        ptr[i] = inb(ioaddr + VIRTIO_PCI_CONFIG + offset + i);
}
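
/* Illustrative only (not part of the original interface): a minimal sketch of
 * reading a multi-byte field from the device-specific configuration space via
 * vp_get().  The assumption that a 64-bit field (e.g. a virtio-blk capacity)
 * sits at offset 0 is made up for the example. */
static inline u64 vp_example_get_config_u64(unsigned int ioaddr)
{
    u64 value;

    vp_get(ioaddr, 0, &value, sizeof(value));
    return value;
}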

static inline u8 vp_get_status(unsigned int ioaddr)
{
    return inb(ioaddr + VIRTIO_PCI_STATUS);
}

static inline void vp_set_status(unsigned int ioaddr, u8 status)
{
    if (status == 0)        /* status 0 means reset; use vp_reset() for that */
        return;
    outb(status, ioaddr + VIRTIO_PCI_STATUS);
}

static inline u8 vp_get_isr(unsigned int ioaddr)
{
    return inb(ioaddr + VIRTIO_PCI_ISR);
}
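
/* Illustrative only (not part of the original interface): reading the ISR
 * acknowledges it, so a caller typically reads it once and then tests the
 * bits.  This helper is just an example of checking the configuration-change
 * bit defined above. */
static inline int vp_example_config_changed(unsigned int ioaddr)
{
    return vp_get_isr(ioaddr) & VIRTIO_PCI_ISR_CONFIG;
}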

static inline void vp_reset(unsigned int ioaddr)
{
    outb(0, ioaddr + VIRTIO_PCI_STATUS);
    /* Reading the ISR flushes the status write and clears any pending
     * interrupt (reads are read-and-acknowledge, see VIRTIO_PCI_ISR). */
    (void)inb(ioaddr + VIRTIO_PCI_ISR);
}

static inline void vp_notify(unsigned int ioaddr, int queue_index)
{
    outw(queue_index, ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
}

static inline void vp_del_vq(unsigned int ioaddr, int queue_index)
{
    /* select the queue */
    outw(queue_index, ioaddr + VIRTIO_PCI_QUEUE_SEL);

    /* deactivate the queue */
    outl(0, ioaddr + VIRTIO_PCI_QUEUE_PFN);
}
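
/* Illustrative only (not part of the original interface): a minimal sketch of
 * legacy virtio device bring-up using the helpers above.  The status values
 * 1, 2 and 4 are ACKNOWLEDGE, DRIVER and DRIVER_OK from the virtio
 * specification; the feature mask passed in is whatever subset the caller's
 * driver actually supports.  A real driver would also set up its queues
 * (vp_find_vq()) before signalling DRIVER_OK. */
static inline void vp_example_init(unsigned int ioaddr, u32 driver_features)
{
    vp_reset(ioaddr);
    vp_set_status(ioaddr, 1 | 2);                  /* ACKNOWLEDGE | DRIVER */
    vp_set_features(ioaddr, vp_get_features(ioaddr) & driver_features);
    vp_set_status(ioaddr, 1 | 2 | 4);              /* ... | DRIVER_OK */
}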

struct vring_virtqueue;

void vp_free_vq(struct vring_virtqueue *vq);
int vp_find_vq(unsigned int ioaddr, int queue_index,
               struct vring_virtqueue *vq);

/* Virtio 1.0 I/O routines abstract away the three possible HW access
 * mechanisms - memory, port I/O, and PCI cfg space access. Also built-in
 * are endianness conversions - to LE on write and from LE on read. */

void vpm_iowrite8(struct virtio_pci_modern_device *vdev,
                  struct virtio_pci_region *region, u8 data, size_t offset);

void vpm_iowrite16(struct virtio_pci_modern_device *vdev,
                   struct virtio_pci_region *region, u16 data, size_t offset);

void vpm_iowrite32(struct virtio_pci_modern_device *vdev,
                   struct virtio_pci_region *region, u32 data, size_t offset);

static inline void vpm_iowrite64(struct virtio_pci_modern_device *vdev,
                                 struct virtio_pci_region *region,
                                 u64 data, size_t offset_lo, size_t offset_hi)
{
    vpm_iowrite32(vdev, region, (u32)data, offset_lo);
    vpm_iowrite32(vdev, region, data >> 32, offset_hi);
}

u8 vpm_ioread8(struct virtio_pci_modern_device *vdev,
               struct virtio_pci_region *region, size_t offset);

u16 vpm_ioread16(struct virtio_pci_modern_device *vdev,
                 struct virtio_pci_region *region, size_t offset);

u32 vpm_ioread32(struct virtio_pci_modern_device *vdev,
                 struct virtio_pci_region *region, size_t offset);

/* Virtio 1.0 device manipulation routines */

#define COMMON_OFFSET(field) offsetof(struct virtio_pci_common_cfg, field)

static inline void vpm_reset(struct virtio_pci_modern_device *vdev)
{
    vpm_iowrite8(vdev, &vdev->common, 0, COMMON_OFFSET(device_status));
    /* The device signals completion of the reset by reading back as 0. */
    while (vpm_ioread8(vdev, &vdev->common, COMMON_OFFSET(device_status)))
        mdelay(1);
}
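
/* Illustrative only (not part of the original interface): programming the
 * descriptor ring address of one queue through the common configuration
 * region.  vpm_iowrite64() splits the 64-bit bus address supplied by the
 * caller into little-endian _lo/_hi halves. */
static inline void vpm_example_set_desc_addr(struct virtio_pci_modern_device *vdev,
                                             u16 queue_index, u64 desc_addr)
{
    vpm_iowrite16(vdev, &vdev->common, queue_index, COMMON_OFFSET(queue_select));
    vpm_iowrite64(vdev, &vdev->common, desc_addr,
                  COMMON_OFFSET(queue_desc_lo), COMMON_OFFSET(queue_desc_hi));
}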

static inline u8 vpm_get_status(struct virtio_pci_modern_device *vdev)
{
    return vpm_ioread8(vdev, &vdev->common, COMMON_OFFSET(device_status));
}

static inline void vpm_add_status(struct virtio_pci_modern_device *vdev,
                                  u8 status)
{
    u8 curr_status = vpm_ioread8(vdev, &vdev->common,
                                 COMMON_OFFSET(device_status));
    vpm_iowrite8(vdev, &vdev->common,
                 curr_status | status, COMMON_OFFSET(device_status));
}

static inline u64 vpm_get_features(struct virtio_pci_modern_device *vdev)
{
    u32 features_lo, features_hi;

    vpm_iowrite32(vdev, &vdev->common, 0, COMMON_OFFSET(device_feature_select));
    features_lo = vpm_ioread32(vdev, &vdev->common, COMMON_OFFSET(device_feature));
    vpm_iowrite32(vdev, &vdev->common, 1, COMMON_OFFSET(device_feature_select));
    features_hi = vpm_ioread32(vdev, &vdev->common, COMMON_OFFSET(device_feature));

    return ((u64)features_hi << 32) | features_lo;
}

static inline void vpm_set_features(struct virtio_pci_modern_device *vdev,
                                    u64 features)
{
    u32 features_lo = (u32)features;
    u32 features_hi = features >> 32;

    vpm_iowrite32(vdev, &vdev->common, 0, COMMON_OFFSET(guest_feature_select));
    vpm_iowrite32(vdev, &vdev->common, features_lo, COMMON_OFFSET(guest_feature));
    vpm_iowrite32(vdev, &vdev->common, 1, COMMON_OFFSET(guest_feature_select));
    vpm_iowrite32(vdev, &vdev->common, features_hi, COMMON_OFFSET(guest_feature));
}
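
/* Illustrative only (not part of the original interface): a minimal sketch of
 * virtio 1.0 feature negotiation with the accessors above.  Bit 32 is
 * VIRTIO_F_VERSION_1 and status bit 8 is FEATURES_OK in the virtio
 * specification; the caller's driver_features mask is expected to include
 * bit 32. */
static inline int vpm_example_negotiate(struct virtio_pci_modern_device *vdev,
                                        u64 driver_features)
{
    u64 features = vpm_get_features(vdev) & driver_features;

    if (!(features & ((u64)1 << 32)))   /* device must offer VIRTIO_F_VERSION_1 */
        return -1;

    vpm_set_features(vdev, features);
    vpm_add_status(vdev, 8);            /* FEATURES_OK */
    /* The device may clear FEATURES_OK to reject the negotiated set. */
    return (vpm_get_status(vdev) & 8) ? 0 : -1;
}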

static inline void vpm_get(struct virtio_pci_modern_device *vdev,
                           unsigned offset, void *buf, unsigned len)
{
    u8 *ptr = buf;
    unsigned i;

    for (i = 0; i < len; i++)
        ptr[i] = vpm_ioread8(vdev, &vdev->device, offset + i);
}

static inline u8 vpm_get_isr(struct virtio_pci_modern_device *vdev)
{
    return vpm_ioread8(vdev, &vdev->isr, 0);
}

void vpm_notify(struct virtio_pci_modern_device *vdev,
                struct vring_virtqueue *vq);

int vpm_find_vqs(struct virtio_pci_modern_device *vdev,
                 unsigned nvqs, struct vring_virtqueue *vqs);

int virtio_pci_find_capability(struct pci_device *pci, uint8_t cfg_type);

int virtio_pci_map_capability(struct pci_device *pci, int cap, size_t minlen,
                              u32 align, u32 start, u32 size,
                              struct virtio_pci_region *region);

void virtio_pci_unmap_capability(struct virtio_pci_region *region);
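
/* Illustrative only (not part of the original interface): a minimal sketch of
 * locating and mapping the common configuration region with the capability
 * helpers declared above.  The alignment of 4, the start/size choice of
 * exactly sizeof(struct virtio_pci_common_cfg), and the return-value
 * conventions assumed here (0 for "capability not found", 0 for success from
 * the mapping call) are assumptions made for the example. */
static inline int vpm_example_map_common(struct virtio_pci_modern_device *vdev)
{
    int cap = virtio_pci_find_capability(vdev->pci, VIRTIO_PCI_CAP_COMMON_CFG);

    if (!cap)
        return -1;
    return virtio_pci_map_capability(vdev->pci, cap,
                                     sizeof(struct virtio_pci_common_cfg), 4,
                                     0, sizeof(struct virtio_pci_common_cfg),
                                     &vdev->common);
}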
#endif /* _VIRTIO_PCI_H_ */