xref: /qemu/pc-bios/s390-ccw/virtio.c (revision 85129891)
1 /*
2  * Virtio driver bits
3  *
4  * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
5  *
6  * This work is licensed under the terms of the GNU GPL, version 2 or (at
7  * your option) any later version. See the COPYING file in the top-level
8  * directory.
9  */
10 
11 #include "s390-ccw.h"
12 #include "virtio.h"
13 
/* One driver-side ring state per virtqueue */
static VRing block[VIRTIO_MAX_VQS];
/* Backing storage for all virtqueue rings; page alignment is required */
static char ring_area[VIRTIO_RING_SIZE * VIRTIO_MAX_VQS]
                     __attribute__((__aligned__(PAGE_SIZE)));
/* Number of virtqueues in use; overwritten by virtio_setup_ccw() */
static int nr_vqs = 1;

/* Scratch page for CHSC requests (see enable_mss_facility()) */
static char chsc_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
20 
/* virtio spec v1.0 para 4.3.3.2 */
static long kvm_hypercall(unsigned long nr, unsigned long param1,
                          unsigned long param2, unsigned long param3)
{
    /*
     * Issue a KVM hypercall via diagnose 0x500: the call number goes
     * in r1 and the parameters in r2-r4.  The result comes back in
     * r2, which is why retval is also bound to register 2 and tied to
     * r_param1 through the "0" matching constraint below.
     */
    register ulong r_nr asm("1") = nr;
    register ulong r_param1 asm("2") = param1;
    register ulong r_param2 asm("3") = param2;
    register ulong r_param3 asm("4") = param3;
    register long retval asm("2");

    /* "memory" keeps the compiler from caching data the host may touch */
    asm volatile ("diag 2,4,0x500"
                  : "=d" (retval)
                  : "d" (r_nr), "0" (r_param1), "r"(r_param2), "d"(r_param3)
                  : "memory", "cc");

    return retval;
}
38 
39 static long virtio_notify(SubChannelId schid, int vq_idx, long cookie)
40 {
41     return kvm_hypercall(KVM_S390_VIRTIO_CCW_NOTIFY, *(u32 *)&schid,
42                          vq_idx, cookie);
43 }
44 
45 /***********************************************
46  *             Virtio functions                *
47  ***********************************************/
48 
/*
 * Spin on tsch() for the given subchannel and inspect the stored IRB.
 * Returns 0 when the interrupt status looks clean, -EIO when any
 * subchannel status is set or the device status is anything other
 * than channel-end + device-end (0x0c).
 *
 * NOTE(review): the IRB is examined in the branch where tsch()
 * returned non-zero — confirm this matches the tsch() wrapper's
 * return convention (condition code vs. success flag).
 */
static int drain_irqs(SubChannelId schid)
{
    Irb irb = {};
    int r = 0;

    while (1) {
        /* FIXME: make use of TPI, for that enable subchannel and isc */
        if (tsch(schid, &irb)) {
            /* Might want to differentiate error codes later on. */
            if (irb.scsw.cstat) {
                r = -EIO;
            } else if (irb.scsw.dstat != 0xc) {
                r = -EIO;
            }
            return r;
        }
    }
}
67 
/*
 * Build and execute a single-CCW channel program on the subchannel.
 * cmd is the CCW command code, ptr/len describe its data area.
 * Returns 0 on success or the error from ssch()/drain_irqs().
 */
static int run_ccw(SubChannelId schid, int cmd, void *ptr, int len)
{
    Ccw1 ccw = {};
    CmdOrb orb = {};
    Schib schib;
    int r;

    /* start command processing */
    stsch_err(schid, &schib);
    schib.scsw.ctrl = SCSW_FCTL_START_FUNC;
    msch(schid, &schib);

    /* start subchannel command */
    orb.fmt = 1; /* format-1 CCW */
    orb.cpa = (u32)(long)&ccw; /* channel program address */
    orb.lpm = 0x80; /* logical path mask: use the first path only */

    ccw.cmd_code = cmd;
    ccw.cda = (long)ptr;
    ccw.count = len;

    r = ssch(schid, &orb);
    /*
     * XXX Wait until device is done processing the CCW. For now we can
     *     assume that a simple tsch will have finished the CCW processing,
     *     but the architecture allows for asynchronous operation
     */
    if (!r) {
        r = drain_irqs(schid);
    }
    return r;
}
100 
101 static void virtio_set_status(SubChannelId schid,
102                               unsigned long dev_addr)
103 {
104     unsigned char status = dev_addr;
105     if (run_ccw(schid, CCW_CMD_WRITE_STATUS, &status, sizeof(status))) {
106         panic("Could not write status to host!\n");
107     }
108 }
109 
110 static void virtio_reset(SubChannelId schid)
111 {
112     run_ccw(schid, CCW_CMD_VDEV_RESET, NULL, 0);
113 }
114 
115 static void vring_init(VRing *vr, VqInfo *info)
116 {
117     void *p = (void *) info->queue;
118 
119     debug_print_addr("init p", p);
120     vr->id = info->index;
121     vr->num = info->num;
122     vr->desc = p;
123     vr->avail = p + info->num * sizeof(VRingDesc);
124     vr->used = (void *)(((unsigned long)&vr->avail->ring[info->num]
125                + info->align - 1) & ~(info->align - 1));
126 
127     /* Zero out all relevant field */
128     vr->avail->flags = 0;
129     vr->avail->idx = 0;
130 
131     /* We're running with interrupts off anyways, so don't bother */
132     vr->used->flags = VRING_USED_F_NO_NOTIFY;
133     vr->used->idx = 0;
134     vr->used_idx = 0;
135     vr->next_idx = 0;
136     vr->cookie = 0;
137 
138     debug_print_addr("init vr", vr);
139 }
140 
141 static bool vring_notify(VRing *vr)
142 {
143     vr->cookie = virtio_notify(vr->schid, vr->id, vr->cookie);
144     return vr->cookie >= 0;
145 }
146 
147 static void vring_send_buf(VRing *vr, void *p, int len, int flags)
148 {
149     /* For follow-up chains we need to keep the first entry point */
150     if (!(flags & VRING_HIDDEN_IS_CHAIN)) {
151         vr->avail->ring[vr->avail->idx % vr->num] = vr->next_idx;
152     }
153 
154     vr->desc[vr->next_idx].addr = (ulong)p;
155     vr->desc[vr->next_idx].len = len;
156     vr->desc[vr->next_idx].flags = flags & ~VRING_HIDDEN_IS_CHAIN;
157     vr->desc[vr->next_idx].next = vr->next_idx;
158     vr->desc[vr->next_idx].next++;
159     vr->next_idx++;
160 
161     /* Chains only have a single ID */
162     if (!(flags & VRING_DESC_F_NEXT)) {
163         vr->avail->idx++;
164     }
165 }
166 
/* Read the 64-bit TOD clock with the STCK instruction. */
static u64 get_clock(void)
{
    u64 r;

    asm volatile("stck %0" : "=Q" (r) : : "cc");
    return r;
}
174 
175 ulong get_second(void)
176 {
177     return (get_clock() >> 12) / 1000000;
178 }
179 
180 static int vr_poll(VRing *vr)
181 {
182     if (vr->used->idx == vr->used_idx) {
183         vring_notify(vr);
184         yield();
185         return 0;
186     }
187 
188     vr->used_idx = vr->used->idx;
189     vr->next_idx = 0;
190     vr->desc[0].len = 0;
191     vr->desc[0].flags = 0;
192     return 1; /* vr has been updated */
193 }
194 
195 /*
196  * Wait for the host to reply.
197  *
198  * timeout is in seconds if > 0.
199  *
200  * Returns 0 on success, 1 on timeout.
201  */
202 static int vring_wait_reply(int timeout)
203 {
204     ulong target_second = get_second() + timeout;
205 
206     /* Wait for any queue to be updated by the host */
207     do {
208         int i, r = 0;
209 
210         for (i = 0; i < nr_vqs; i++) {
211             r += vr_poll(&block[i]);
212         }
213         yield();
214         if (r) {
215             return 0;
216         }
217     } while (!timeout || (get_second() < target_second));
218 
219     return 1;
220 }
221 
222 /***********************************************
223  *               Virtio block                  *
224  ***********************************************/
225 
/*
 * Read sec_num sectors starting at 'sector' into load_addr via
 * virtqueue 0.  Returns 0 on success; non-zero otherwise (the status
 * byte written by the device, or 1 if draining interrupts failed).
 */
int virtio_read_many(ulong sector, void *load_addr, int sec_num)
{
    VirtioBlkOuthdr out_hdr;
    u8 status;
    int r;

    /* Tell the host we want to read */
    out_hdr.type = VIRTIO_BLK_T_IN;
    out_hdr.ioprio = 99;
    out_hdr.sector = virtio_sector_adjust(sector);

    /* Descriptor chain: request header first... */
    vring_send_buf(&block[0], &out_hdr, sizeof(out_hdr), VRING_DESC_F_NEXT);

    /* This is where we want to receive data */
    vring_send_buf(&block[0], load_addr, virtio_get_block_size() * sec_num,
                   VRING_DESC_F_WRITE | VRING_HIDDEN_IS_CHAIN |
                   VRING_DESC_F_NEXT);

    /* status field */
    vring_send_buf(&block[0], &status, sizeof(u8), VRING_DESC_F_WRITE |
                   VRING_HIDDEN_IS_CHAIN);

    /* Now we can tell the host to read (timeout 0 = wait forever) */
    vring_wait_reply(0);

    r = drain_irqs(block[0].schid);
    if (r) {
        /* Well, whatever status is supposed to contain... */
        status = 1;
    }
    return status;
}
258 
259 unsigned long virtio_load_direct(ulong rec_list1, ulong rec_list2,
260                                  ulong subchan_id, void *load_addr)
261 {
262     u8 status;
263     int sec = rec_list1;
264     int sec_num = ((rec_list2 >> 32) & 0xffff) + 1;
265     int sec_len = rec_list2 >> 48;
266     ulong addr = (ulong)load_addr;
267 
268     if (sec_len != virtio_get_block_size()) {
269         return -1;
270     }
271 
272     sclp_print(".");
273     status = virtio_read_many(sec, (void *)addr, sec_num);
274     if (status) {
275         panic("I/O Error");
276     }
277     addr += sec_num * virtio_get_block_size();
278 
279     return addr;
280 }
281 
/* Read a single sector; see virtio_read_many() for the return value. */
int virtio_read(ulong sector, void *load_addr)
{
    return virtio_read_many(sector, load_addr, 1);
}
286 
/* Device configuration; filled in by virtio_setup_ccw() or assume_* */
static VirtioBlkConfig blk_cfg = {};
/* true when blk_cfg was guessed (virtio_assume_*) instead of device-read */
static bool guessed_disk_nature;
289 
/* Was the disk geometry guessed rather than reported by the device? */
bool virtio_guessed_disk_nature(void)
{
    return guessed_disk_nature;
}
294 
295 void virtio_assume_scsi(void)
296 {
297     guessed_disk_nature = true;
298     blk_cfg.blk_size = 512;
299     blk_cfg.physical_block_exp = 0;
300 }
301 
302 void virtio_assume_iso9660(void)
303 {
304     guessed_disk_nature = true;
305     blk_cfg.blk_size = 2048;
306     blk_cfg.physical_block_exp = 0;
307 }
308 
309 void virtio_assume_eckd(void)
310 {
311     guessed_disk_nature = true;
312     blk_cfg.blk_size = 4096;
313     blk_cfg.physical_block_exp = 0;
314 
315     /* this must be here to calculate code segment position */
316     blk_cfg.geometry.heads = 15;
317     blk_cfg.geometry.sectors = 12;
318 }
319 
320 bool virtio_disk_is_scsi(void)
321 {
322     if (guessed_disk_nature) {
323         return (virtio_get_block_size()  == 512);
324     }
325     return (blk_cfg.geometry.heads == 255)
326         && (blk_cfg.geometry.sectors == 63)
327         && (virtio_get_block_size()  == 512);
328 }
329 
330 /*
331  * Other supported value pairs, if any, would need to be added here.
332  * Note: head count is always 15.
333  */
334 static inline u8 virtio_eckd_sectors_for_block_size(int size)
335 {
336     switch (size) {
337     case 512:
338         return 49;
339     case 1024:
340         return 33;
341     case 2048:
342         return 21;
343     case 4096:
344         return 12;
345     }
346     return 0;
347 }
348 
349 bool virtio_disk_is_eckd(void)
350 {
351     const int block_size = virtio_get_block_size();
352 
353     if (guessed_disk_nature) {
354         return (block_size  == 4096);
355     }
356     return (blk_cfg.geometry.heads == 15)
357         && (blk_cfg.geometry.sectors ==
358             virtio_eckd_sectors_for_block_size(block_size));
359 }
360 
/* An IPL disk must look like either a SCSI or an ECKD disk. */
bool virtio_ipl_disk_is_valid(void)
{
    return virtio_disk_is_scsi() || virtio_disk_is_eckd();
}
365 
/* Effective block size; 0 while setup is in progress (see setup_ccw). */
int virtio_get_block_size(void)
{
    return blk_cfg.blk_size << blk_cfg.physical_block_exp;
}
370 
/* Head count from the (read or guessed) disk geometry. */
uint8_t virtio_get_heads(void)
{
    return blk_cfg.geometry.heads;
}
375 
/* Sectors-per-track from the (read or guessed) disk geometry. */
uint8_t virtio_get_sectors(void)
{
    return blk_cfg.geometry.sectors;
}
380 
/*
 * Disk capacity in device blocks: capacity is counted in
 * VIRTIO_SECTOR_SIZE units and converted to the effective block size.
 */
uint64_t virtio_get_blocks(void)
{
    return blk_cfg.capacity /
           (virtio_get_block_size() / VIRTIO_SECTOR_SIZE);
}
386 
/*
 * Common virtio-ccw setup: reset the device, read its config area into
 * cfg, discover and register nvr virtqueues, then announce DRIVER_OK.
 * Any failure along the way stops the boot via IPL_assert.
 */
static void virtio_setup_ccw(SubChannelId schid,
                             int nvr, void *cfg, int cfg_size)
{
    int i;

    blk_cfg.blk_size = 0; /* mark "illegal" - setup started... */
    nr_vqs = nvr;
    guessed_disk_nature = false;

    virtio_reset(schid);
    IPL_assert(run_ccw(schid, CCW_CMD_READ_CONF, cfg, cfg_size) == 0,
               "Could not get block device configuration");

    /*
     * Skipping CCW_CMD_READ_FEAT. We're not doing anything fancy, and
     * we'll just stop dead anyway if anything does not work like we
     * expect it.
     */

    for (i = 0; i < nr_vqs; i++) {
        /* Each queue gets its own VIRTIO_RING_SIZE slice of ring_area. */
        VqInfo info = {
            .queue = (unsigned long long) ring_area + (i * VIRTIO_RING_SIZE),
            .align = KVM_S390_VIRTIO_RING_ALIGN,
            .index = i,
            .num = 0,
        };
        VqConfig config = {
            .index = i,
            .num = 0,
        };

        /* Ask the host for the queue size... */
        IPL_assert(
            run_ccw(schid, CCW_CMD_READ_VQ_CONF, &config, sizeof(config)) == 0,
            "Could not get block device VQ configuration");
        info.num = config.num;
        /* ...lay out our side of the ring, then tell the host where it is. */
        vring_init(&block[i], &info);
        block[i].schid = schid;
        IPL_assert(run_ccw(schid, CCW_CMD_SET_VQ, &info, sizeof(info)) == 0,
                   "Cannot set VQ info");
    }
    virtio_set_status(schid, VIRTIO_CONFIG_S_DRIVER_OK);
}
429 
430 void virtio_setup_block(SubChannelId schid)
431 {
432     virtio_setup_ccw(schid, 1, &blk_cfg, sizeof(blk_cfg));
433 
434     if (!virtio_ipl_disk_is_valid()) {
435         /* make sure all getters but blocksize return 0 for invalid IPL disk */
436         memset(&blk_cfg, 0, sizeof(blk_cfg));
437         virtio_assume_scsi();
438     }
439 }
440 
441 bool virtio_is_blk(SubChannelId schid)
442 {
443     int r;
444     SenseId senseid = {};
445 
446     /* run sense id command */
447     r = run_ccw(schid, CCW_CMD_SENSE_ID, &senseid, sizeof(senseid));
448     if (r) {
449         return false;
450     }
451     if ((senseid.cu_type != 0x3832) || (senseid.cu_model != VIRTIO_ID_BLOCK)) {
452         return false;
453     }
454 
455     return true;
456 }
457 
458 int enable_mss_facility(void)
459 {
460     int ret;
461     ChscAreaSda *sda_area = (ChscAreaSda *) chsc_page;
462 
463     memset(sda_area, 0, PAGE_SIZE);
464     sda_area->request.length = 0x0400;
465     sda_area->request.code = 0x0031;
466     sda_area->operation_code = 0x2;
467 
468     ret = chsc(sda_area);
469     if ((ret == 0) && (sda_area->response.code == 0x0001)) {
470         return 0;
471     }
472     return -EIO;
473 }
474