xref: /qemu/hw/s390x/virtio-ccw.c (revision c4b8ffcb)
1 /*
2  * virtio ccw target implementation
3  *
4  * Copyright 2012,2015 IBM Corp.
5  * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6  *            Pierre Morel <pmorel@linux.vnet.ibm.com>
7  *
8  * This work is licensed under the terms of the GNU GPL, version 2 or (at
9  * your option) any later version. See the COPYING file in the top-level
10  * directory.
11  */
12 
13 #include "qemu/osdep.h"
14 #include "qapi/error.h"
15 #include "exec/address-spaces.h"
16 #include "sysemu/kvm.h"
17 #include "net/net.h"
18 #include "hw/virtio/virtio.h"
19 #include "migration/qemu-file-types.h"
20 #include "hw/virtio/virtio-net.h"
21 #include "qemu/bitops.h"
22 #include "qemu/error-report.h"
23 #include "qemu/log.h"
24 #include "qemu/module.h"
25 #include "hw/virtio/virtio-access.h"
26 #include "hw/virtio/virtio-bus.h"
27 #include "hw/s390x/adapter.h"
28 #include "hw/s390x/s390_flic.h"
29 
30 #include "hw/s390x/ioinst.h"
31 #include "hw/s390x/css.h"
32 #include "virtio-ccw.h"
33 #include "trace.h"
34 #include "hw/s390x/css-bridge.h"
35 #include "hw/s390x/s390-virtio-ccw.h"
36 #include "sysemu/replay.h"
37 
/*
 * Classic (non-thinint) interrupts use a single 64-bit indicator field,
 * one bit per virtqueue; see the CCW_CMD_SET_IND handler and
 * virtio_ccw_notify() below.
 */
#define NR_CLASSIC_INDICATOR_BITS 64

/*
 * NOTE(review): not read within this file; presumably advertises
 * virtio-ccw support to other parts of the s390x machine code --
 * confirm at the consumers.
 */
bool have_virtio_ccw = true;
41 
/*
 * vmstate post_load hook: re-establish state that is derived rather than
 * migrated -- the subchannel's back-pointer to the device, the adapter id
 * for thin interrupts, and the cached subchannel ids.
 */
static int virtio_ccw_dev_post_load(void *opaque, int version_id)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(opaque);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev);

    /* Relink the subchannel to its device; pointers are not migrated. */
    ccw_dev->sch->driver_data = dev;
    if (ccw_dev->sch->thinint_active) {
        /* Recompute the adapter id from the migrated thinint ISC. */
        dev->routes.adapter.adapter_id = css_get_adapter_id(
                                         CSS_IO_ADAPTER_VIRTIO,
                                         dev->thinint_isc);
    }
    /* Re-fill subch_id after loading the subchannel states.*/
    if (ck->refill_ids) {
        ck->refill_ids(ccw_dev);
    }
    return 0;
}
60 
/*
 * Scratch structure used by VMSTATE_WITH_TMP to migrate the
 * VirtIODevice's config_vector alongside the ccw device state
 * (see vmstate_virtio_ccw_dev_tmp below).
 */
typedef struct VirtioCcwDeviceTmp {
    VirtioCcwDevice *parent;    /* filled in by VMSTATE_WITH_TMP machinery */
    uint16_t config_vector;     /* copy of vdev->config_vector */
} VirtioCcwDeviceTmp;
65 
66 static int virtio_ccw_dev_tmp_pre_save(void *opaque)
67 {
68     VirtioCcwDeviceTmp *tmp = opaque;
69     VirtioCcwDevice *dev = tmp->parent;
70     VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
71 
72     tmp->config_vector = vdev->config_vector;
73 
74     return 0;
75 }
76 
77 static int virtio_ccw_dev_tmp_post_load(void *opaque, int version_id)
78 {
79     VirtioCcwDeviceTmp *tmp = opaque;
80     VirtioCcwDevice *dev = tmp->parent;
81     VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
82 
83     vdev->config_vector = tmp->config_vector;
84     return 0;
85 }
86 
/*
 * VMState for the temporary wrapper above: migrates only config_vector.
 * Do not change the field layout -- it is part of the migration stream.
 */
const VMStateDescription vmstate_virtio_ccw_dev_tmp = {
    .name = "s390_virtio_ccw_dev_tmp",
    .pre_save = virtio_ccw_dev_tmp_pre_save,
    .post_load = virtio_ccw_dev_tmp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(config_vector, VirtioCcwDeviceTmp),
        VMSTATE_END_OF_LIST()
    }
};
96 
/*
 * Migration state for a virtio-ccw proxy device: the ccw device proper,
 * the three indicator pointers, the embedded VirtIODevice config_vector
 * (via the tmp wrapper), adapter routes, thinint ISC and the negotiated
 * revision. Field order is migration ABI -- do not reorder.
 */
const VMStateDescription vmstate_virtio_ccw_dev = {
    .name = "s390_virtio_ccw_dev",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = virtio_ccw_dev_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_CCW_DEVICE(parent_obj, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(indicators, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(indicators2, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(summary_indicator, VirtioCcwDevice),
        /*
         * Ugly hack because VirtIODevice does not migrate itself.
         * This also makes legacy via vmstate_save_state possible.
         */
        VMSTATE_WITH_TMP(VirtioCcwDevice, VirtioCcwDeviceTmp,
                         vmstate_virtio_ccw_dev_tmp),
        VMSTATE_STRUCT(routes, VirtioCcwDevice, 1, vmstate_adapter_routes,
                       AdapterRoutes),
        VMSTATE_UINT8(thinint_isc, VirtioCcwDevice),
        VMSTATE_INT32(revision, VirtioCcwDevice),
        VMSTATE_END_OF_LIST()
    }
};
120 
/* Forward declaration; definition and callers are later in this file. */
static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtioCcwDevice *dev);
123 
124 VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch)
125 {
126     VirtIODevice *vdev = NULL;
127     VirtioCcwDevice *dev = sch->driver_data;
128 
129     if (dev) {
130         vdev = virtio_bus_get_device(&dev->bus);
131     }
132     return vdev;
133 }
134 
/* Thin wrapper: start ioeventfd handling on the device's virtio bus. */
static void virtio_ccw_start_ioeventfd(VirtioCcwDevice *dev)
{
    virtio_bus_start_ioeventfd(&dev->bus);
}
139 
/* Thin wrapper: stop ioeventfd handling on the device's virtio bus. */
static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev)
{
    virtio_bus_stop_ioeventfd(&dev->bus);
}
144 
145 static bool virtio_ccw_ioeventfd_enabled(DeviceState *d)
146 {
147     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
148 
149     return (dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD) != 0;
150 }
151 
152 static int virtio_ccw_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
153                                        int n, bool assign)
154 {
155     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
156     CcwDevice *ccw_dev = CCW_DEVICE(dev);
157     SubchDev *sch = ccw_dev->sch;
158     uint32_t sch_id = (css_build_subchannel_id(sch) << 16) | sch->schid;
159 
160     return s390_assign_subch_ioeventfd(notifier, sch_id, n, assign);
161 }
162 
/*
 * Communication blocks used by several channel commands.
 * These are guest ABI: packed, and multi-byte fields are big-endian on
 * the wire (converted with be*_to_cpu in the command handlers below).
 */

/* Payload of CCW_CMD_SET_VQ for legacy (pre-virtio-1) drivers. */
typedef struct VqInfoBlockLegacy {
    uint64_t queue;     /* guest address of the combined ring, 0 = disable */
    uint32_t align;     /* ring alignment; only 4096 is accepted */
    uint16_t index;     /* virtqueue index */
    uint16_t num;       /* requested ring size */
} QEMU_PACKED VqInfoBlockLegacy;

/* Payload of CCW_CMD_SET_VQ for virtio-1 (split ring addresses). */
typedef struct VqInfoBlock {
    uint64_t desc;      /* descriptor table address, 0 = disable queue */
    uint32_t res0;      /* reserved */
    uint16_t index;     /* virtqueue index */
    uint16_t num;       /* requested ring size */
    uint64_t avail;     /* avail ring address */
    uint64_t used;      /* used ring address */
} QEMU_PACKED VqInfoBlock;

/* Payload of CCW_CMD_READ_VQ_CONF: index in, max ring size out. */
typedef struct VqConfigBlock {
    uint16_t index;
    uint16_t num_max;
} QEMU_PACKED VqConfigBlock;

/* Payload of CCW_CMD_READ_FEAT/WRITE_FEAT: one 32-bit feature window. */
typedef struct VirtioFeatDesc {
    uint32_t features;
    uint8_t index;      /* 0 = bits 0-31, 1 = bits 32-63 */
} QEMU_PACKED VirtioFeatDesc;

/* Payload of CCW_CMD_SET_IND_ADAPTER: thin-interrupt indicator setup. */
typedef struct VirtioThinintInfo {
    hwaddr summary_indicator;
    hwaddr device_indicator;
    uint64_t ind_bit;   /* first indicator bit for this device */
    uint8_t isc;        /* interruption subclass */
} QEMU_PACKED VirtioThinintInfo;

/* Payload of CCW_CMD_SET_VIRTIO_REV: revision negotiation header. */
typedef struct VirtioRevInfo {
    uint16_t revision;
    uint16_t length;    /* length of additional revision-specific data */
    uint8_t data[];
} QEMU_PACKED VirtioRevInfo;
202 
203 /* Specify where the virtqueues for the subchannel are in guest memory. */
/*
 * Specify where the virtqueues for the subchannel are in guest memory.
 *
 * Exactly one of @info (virtio-1) or @linfo (legacy) is non-NULL; the
 * caller has already byteswapped the fields. A zero ring address
 * disables the queue. Returns 0 on success or a negative errno that the
 * caller maps to a channel-command error.
 */
static int virtio_ccw_set_vqs(SubchDev *sch, VqInfoBlock *info,
                              VqInfoBlockLegacy *linfo)
{
    VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
    uint16_t index = info ? info->index : linfo->index;
    uint16_t num = info ? info->num : linfo->num;
    uint64_t desc = info ? info->desc : linfo->queue;

    if (index >= VIRTIO_QUEUE_MAX) {
        return -EINVAL;
    }

    /* Current code in virtio.c relies on 4K alignment. */
    if (linfo && desc && (linfo->align != 4096)) {
        return -EINVAL;
    }

    if (!vdev) {
        return -EINVAL;
    }

    if (info) {
        /* virtio-1: the three ring parts have independent addresses. */
        virtio_queue_set_rings(vdev, index, desc, info->avail, info->used);
    } else {
        /* Legacy: one contiguous ring at a single base address. */
        virtio_queue_set_addr(vdev, index, desc);
    }
    if (!desc) {
        /* Queue disabled: detach its notification vector. */
        virtio_queue_set_vector(vdev, index, VIRTIO_NO_VECTOR);
    } else {
        if (info) {
            /* virtio-1 allows changing the ring size. */
            if (virtio_queue_get_max_num(vdev, index) < num) {
                /* Fail if we exceed the maximum number. */
                return -EINVAL;
            }
            virtio_queue_set_num(vdev, index, num);
        } else if (virtio_queue_get_num(vdev, index) > num) {
            /* Fail if we don't have a big enough queue. */
            return -EINVAL;
        }
        /* We ignore possible increased num for legacy for compatibility. */
        virtio_queue_set_vector(vdev, index, index);
    }
    /* tell notify handler in case of config change */
    vdev->config_vector = VIRTIO_QUEUE_MAX;
    return 0;
}
251 
/*
 * Reset the virtio side of the device: stop ioeventfd handling first,
 * reset the VirtIODevice, then drop all indicator mappings and leave
 * thin-interrupt mode.
 */
static void virtio_ccw_reset_virtio(VirtioCcwDevice *dev, VirtIODevice *vdev)
{
    CcwDevice *ccw_dev = CCW_DEVICE(dev);

    virtio_ccw_stop_ioeventfd(dev);
    virtio_reset(vdev);
    if (dev->indicators) {
        release_indicator(&dev->routes.adapter, dev->indicators);
        dev->indicators = NULL;
    }
    if (dev->indicators2) {
        release_indicator(&dev->routes.adapter, dev->indicators2);
        dev->indicators2 = NULL;
    }
    if (dev->summary_indicator) {
        release_indicator(&dev->routes.adapter, dev->summary_indicator);
        dev->summary_indicator = NULL;
    }
    ccw_dev->sch->thinint_active = false;
}
272 
/*
 * Handle CCW_CMD_SET_VQ: read the (legacy or virtio-1) VqInfoBlock from
 * the channel data stream, byteswap it and hand it to
 * virtio_ccw_set_vqs(). @check_len requests an exact-length match
 * (no suppress-length-indication); otherwise the count only needs to be
 * large enough.
 */
static int virtio_ccw_handle_set_vq(SubchDev *sch, CCW1 ccw, bool check_len,
                                    bool is_legacy)
{
    int ret;
    VqInfoBlock info;
    VqInfoBlockLegacy linfo;
    size_t info_len = is_legacy ? sizeof(linfo) : sizeof(info);

    if (check_len) {
        if (ccw.count != info_len) {
            return -EINVAL;
        }
    } else if (ccw.count < info_len) {
        /* Can't execute command. */
        return -EINVAL;
    }
    if (!ccw.cda) {
        return -EFAULT;
    }
    if (is_legacy) {
        ret = ccw_dstream_read(&sch->cds, linfo);
        if (ret) {
            return ret;
        }
        /* Guest data is big-endian. */
        linfo.queue = be64_to_cpu(linfo.queue);
        linfo.align = be32_to_cpu(linfo.align);
        linfo.index = be16_to_cpu(linfo.index);
        linfo.num = be16_to_cpu(linfo.num);
        ret = virtio_ccw_set_vqs(sch, NULL, &linfo);
    } else {
        ret = ccw_dstream_read(&sch->cds, info);
        if (ret) {
            return ret;
        }
        info.desc = be64_to_cpu(info.desc);
        info.index = be16_to_cpu(info.index);
        info.num = be16_to_cpu(info.num);
        info.avail = be64_to_cpu(info.avail);
        info.used = be64_to_cpu(info.used);
        ret = virtio_ccw_set_vqs(sch, &info, NULL);
    }
    /* The whole block was consumed: no residual count. */
    sch->curr_status.scsw.count = 0;
    return ret;
}
317 
/*
 * Main channel-command interpreter for virtio-ccw subchannels.
 *
 * Dispatches on ccw.cmd_code and implements the virtio-over-ccw control
 * operations (queue setup, feature/config/status access, indicator
 * registration, revision negotiation). Returns 0 on success or a
 * negative errno; -ENOSYS triggers a command reject towards the guest.
 */
static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
{
    int ret;
    VirtioRevInfo revinfo;
    uint8_t status;
    VirtioFeatDesc features;
    hwaddr indicators;
    VqConfigBlock vq_config;
    VirtioCcwDevice *dev = sch->driver_data;
    VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
    bool check_len;
    int len;
    VirtioThinintInfo thinint;

    if (!dev) {
        return -EINVAL;
    }

    trace_virtio_ccw_interpret_ccw(sch->cssid, sch->ssid, sch->schid,
                                   ccw.cmd_code);
    /*
     * Enforce an exact length match unless the CCW suppresses length
     * indication (SLI set without data chaining).
     */
    check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));

    if (dev->revision < 0 && ccw.cmd_code != CCW_CMD_SET_VIRTIO_REV) {
        if (dev->force_revision_1) {
            /*
             * virtio-1 drivers must start with negotiating to a revision >= 1,
             * so post a command reject for all other commands
             */
            return -ENOSYS;
        } else {
            /*
             * If the driver issues any command that is not SET_VIRTIO_REV,
             * we'll have to operate the device in legacy mode.
             */
            dev->revision = 0;
        }
    }

    /* Look at the command. */
    switch (ccw.cmd_code) {
    case CCW_CMD_SET_VQ:
        ret = virtio_ccw_handle_set_vq(sch, ccw, check_len, dev->revision < 1);
        break;
    case CCW_CMD_VDEV_RESET:
        virtio_ccw_reset_virtio(dev, vdev);
        ret = 0;
        break;
    case CCW_CMD_READ_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

            /*
             * The guest-supplied index follows the features field in the
             * block: skip over features, read index, then rewind so the
             * features value can be written back in place.
             */
            ccw_dstream_advance(&sch->cds, sizeof(features.features));
            ret = ccw_dstream_read(&sch->cds, features.index);
            if (ret) {
                break;
            }
            if (features.index == 0) {
                if (dev->revision >= 1) {
                    /* Don't offer legacy features for modern devices. */
                    features.features = (uint32_t)
                        (vdev->host_features & ~vdc->legacy_features);
                } else {
                    features.features = (uint32_t)vdev->host_features;
                }
            } else if ((features.index == 1) && (dev->revision >= 1)) {
                /*
                 * Only offer feature bits beyond 31 if the guest has
                 * negotiated at least revision 1.
                 */
                features.features = (uint32_t)(vdev->host_features >> 32);
            } else {
                /* Return zeroes if the guest supports more feature bits. */
                features.features = 0;
            }
            ccw_dstream_rewind(&sch->cds);
            features.features = cpu_to_le32(features.features);
            ret = ccw_dstream_write(&sch->cds, features.features);
            if (!ret) {
                sch->curr_status.scsw.count = ccw.count - sizeof(features);
            }
        }
        break;
    case CCW_CMD_WRITE_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ret = ccw_dstream_read(&sch->cds, features);
            if (ret) {
                break;
            }
            /* Note: the features field is little-endian, per spec. */
            features.features = le32_to_cpu(features.features);
            if (features.index == 0) {
                virtio_set_features(vdev,
                                    (vdev->guest_features & 0xffffffff00000000ULL) |
                                    features.features);
            } else if ((features.index == 1) && (dev->revision >= 1)) {
                /*
                 * If the guest did not negotiate at least revision 1,
                 * we did not offer it any feature bits beyond 31. Such a
                 * guest passing us any bit here is therefore buggy.
                 */
                virtio_set_features(vdev,
                                    (vdev->guest_features & 0x00000000ffffffffULL) |
                                    ((uint64_t)features.features << 32));
            } else {
                /*
                 * If the guest supports more feature bits, assert that it
                 * passes us zeroes for those we don't support.
                 */
                if (features.features) {
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "Guest bug: features[%i]=%x (expected 0)",
                                  features.index, features.features);
                    /* XXX: do a unit check here? */
                }
            }
            sch->curr_status.scsw.count = ccw.count - sizeof(features);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_CONF:
        if (check_len) {
            if (ccw.count > vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, vdev->config_len);
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            virtio_bus_get_vdev_config(&dev->bus, vdev->config);
            ret = ccw_dstream_write_buf(&sch->cds, vdev->config, len);
            /*
             * NOTE(review): the residual count is updated only when the
             * write failed (ret != 0), unlike WRITE_CONF/READ_FEAT which
             * update it on success -- confirm whether this asymmetry is
             * intentional.
             */
            if (ret) {
                sch->curr_status.scsw.count = ccw.count - len;
            }
        }
        break;
    case CCW_CMD_WRITE_CONF:
        if (check_len) {
            if (ccw.count > vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, vdev->config_len);
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ret = ccw_dstream_read_buf(&sch->cds, vdev->config, len);
            if (!ret) {
                virtio_bus_set_vdev_config(&dev->bus, vdev->config);
                sch->curr_status.scsw.count = ccw.count - len;
            }
        }
        break;
    case CCW_CMD_READ_STATUS:
        if (check_len) {
            if (ccw.count != sizeof(status)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(status)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            /*
             * Single byte written straight to ccw.cda rather than via the
             * data stream helpers used by the other commands.
             */
            address_space_stb(&address_space_memory, ccw.cda, vdev->status,
                                        MEMTXATTRS_UNSPECIFIED, NULL);
            sch->curr_status.scsw.count = ccw.count - sizeof(vdev->status);
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_STATUS:
        if (check_len) {
            if (ccw.count != sizeof(status)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(status)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ret = ccw_dstream_read(&sch->cds, status);
            if (ret) {
                break;
            }
            if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
                /* Leaving DRIVER_OK: host must stop servicing queues. */
                virtio_ccw_stop_ioeventfd(dev);
            }
            if (virtio_set_status(vdev, status) == 0) {
                if (vdev->status == 0) {
                    /* Status 0 means a full device reset. */
                    virtio_ccw_reset_virtio(dev, vdev);
                }
                if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
                    virtio_ccw_start_ioeventfd(dev);
                }
                sch->curr_status.scsw.count = ccw.count - sizeof(status);
                ret = 0;
            } else {
                /* Trigger a command reject. */
                ret = -ENOSYS;
            }
        }
        break;
    case CCW_CMD_SET_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (sch->thinint_active) {
            /* Trigger a command reject. */
            ret = -ENOSYS;
            break;
        }
        if (virtio_get_num_queues(vdev) > NR_CLASSIC_INDICATOR_BITS) {
            /* More queues than indicator bits --> trigger a reject */
            ret = -ENOSYS;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ret = ccw_dstream_read(&sch->cds, indicators);
            if (ret) {
                break;
            }
            indicators = be64_to_cpu(indicators);
            dev->indicators = get_indicator(indicators, sizeof(uint64_t));
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_SET_CONF_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ret = ccw_dstream_read(&sch->cds, indicators);
            if (ret) {
                break;
            }
            indicators = be64_to_cpu(indicators);
            dev->indicators2 = get_indicator(indicators, sizeof(uint64_t));
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_VQ_CONF:
        if (check_len) {
            if (ccw.count != sizeof(vq_config)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(vq_config)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ret = ccw_dstream_read(&sch->cds, vq_config.index);
            if (ret) {
                break;
            }
            vq_config.index = be16_to_cpu(vq_config.index);
            if (vq_config.index >= VIRTIO_QUEUE_MAX) {
                ret = -EINVAL;
                break;
            }
            vq_config.num_max = virtio_queue_get_num(vdev,
                                                     vq_config.index);
            vq_config.num_max = cpu_to_be16(vq_config.num_max);
            /* Write num_max back right after the index we just read. */
            ret = ccw_dstream_write(&sch->cds, vq_config.num_max);
            if (!ret) {
                sch->curr_status.scsw.count = ccw.count - sizeof(vq_config);
            }
        }
        break;
    case CCW_CMD_SET_IND_ADAPTER:
        if (check_len) {
            if (ccw.count != sizeof(thinint)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(thinint)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else if (dev->indicators && !sch->thinint_active) {
            /* Trigger a command reject. */
            ret = -ENOSYS;
        } else {
            if (ccw_dstream_read(&sch->cds, thinint)) {
                ret = -EFAULT;
            } else {
                thinint.ind_bit = be64_to_cpu(thinint.ind_bit);
                thinint.summary_indicator =
                    be64_to_cpu(thinint.summary_indicator);
                thinint.device_indicator =
                    be64_to_cpu(thinint.device_indicator);

                dev->summary_indicator =
                    get_indicator(thinint.summary_indicator, sizeof(uint8_t));
                /* Map enough bytes to cover ind_bit + 1 indicator bits. */
                dev->indicators =
                    get_indicator(thinint.device_indicator,
                                  thinint.ind_bit / 8 + 1);
                dev->thinint_isc = thinint.isc;
                dev->routes.adapter.ind_offset = thinint.ind_bit;
                dev->routes.adapter.summary_offset = 7;
                dev->routes.adapter.adapter_id = css_get_adapter_id(
                                                 CSS_IO_ADAPTER_VIRTIO,
                                                 dev->thinint_isc);
                /* Thinint only active once both indicators are mapped. */
                sch->thinint_active = ((dev->indicators != NULL) &&
                                       (dev->summary_indicator != NULL));
                sch->curr_status.scsw.count = ccw.count - sizeof(thinint);
                ret = 0;
            }
        }
        break;
    case CCW_CMD_SET_VIRTIO_REV:
        len = sizeof(revinfo);
        if (ccw.count < len) {
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
            break;
        }
        /* Read only the fixed header (revision + length), 4 bytes. */
        ret = ccw_dstream_read_buf(&sch->cds, &revinfo, 4);
        if (ret < 0) {
            break;
        }
        revinfo.revision = be16_to_cpu(revinfo.revision);
        revinfo.length = be16_to_cpu(revinfo.length);
        if (ccw.count < len + revinfo.length ||
            (check_len && ccw.count > len + revinfo.length)) {
            ret = -EINVAL;
            break;
        }
        /*
         * Once we start to support revisions with additional data, we'll
         * need to fetch it here. Nothing to do for now, though.
         */
        if (dev->revision >= 0 ||
            revinfo.revision > virtio_ccw_rev_max(dev) ||
            (dev->force_revision_1 && !revinfo.revision)) {
            /* Revision may only be set once, and must be supported. */
            ret = -ENOSYS;
            break;
        }
        ret = 0;
        dev->revision = revinfo.revision;
        break;
    default:
        ret = -ENOSYS;
        break;
    }
    return ret;
}
726 
/*
 * Subchannel disable callback: force the guest to renegotiate the
 * virtio revision (-1 = not yet negotiated) on re-enable.
 */
static void virtio_sch_disable_cb(SubchDev *sch)
{
    VirtioCcwDevice *dev = sch->driver_data;

    dev->revision = -1;
}
733 
/*
 * Realize a virtio-ccw proxy device: create and wire up its subchannel,
 * initialize device state, then chain to the subclass and parent-class
 * realize hooks. On any failure the subchannel is unassigned and freed.
 */
static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp)
{
    VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev);
    SubchDev *sch;
    Error *err = NULL;
    int i;

    sch = css_create_sch(ccw_dev->devno, errp);
    if (!sch) {
        return;
    }
    /* force_revision_1 requires a max_rev of at least 1. */
    if (!virtio_ccw_rev_max(dev) && dev->force_revision_1) {
        error_setg(&err, "Invalid value of property max_rev "
                   "(is %d expected >= 1)", virtio_ccw_rev_max(dev));
        goto out_err;
    }

    /* Hook this device into the subchannel's callbacks. */
    sch->driver_data = dev;
    sch->ccw_cb = virtio_ccw_cb;
    sch->disable_cb = virtio_sch_disable_cb;
    sch->id.reserved = 0xff;
    sch->id.cu_type = VIRTIO_CCW_CU_TYPE;
    sch->do_subchannel_work = do_subchannel_work_virtual;
    sch->irb_cb = build_irb_virtual;
    ccw_dev->sch = sch;
    dev->indicators = NULL;
    /* Revision not negotiated yet. */
    dev->revision = -1;
    for (i = 0; i < ADAPTER_ROUTES_MAX_GSI; i++) {
        dev->routes.gsi[i] = -1;
    }
    css_sch_build_virtual_schib(sch, 0, VIRTIO_CCW_CHPID_TYPE);

    trace_virtio_ccw_new_device(
        sch->cssid, sch->ssid, sch->schid, sch->devno,
        ccw_dev->devno.valid ? "user-configured" : "auto-configured");

    if (kvm_enabled() && !kvm_eventfds_enabled()) {
        dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
    }

    /* fd-based ioevents can't be synchronized in record/replay */
    if (replay_mode != REPLAY_MODE_NONE) {
        dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
    }

    /* Subclass (virtio-net-ccw, virtio-blk-ccw, ...) realize. */
    if (k->realize) {
        k->realize(dev, &err);
        if (err) {
            goto out_err;
        }
    }

    /* Parent ccw-device realize. */
    ck->realize(ccw_dev, &err);
    if (err) {
        goto out_err;
    }

    return;

out_err:
    error_propagate(errp, err);
    css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL);
    ccw_dev->sch = NULL;
    g_free(sch);
}
801 
/*
 * Unrealize: chain to the subclass hook, unassign and free the
 * subchannel, and release the classic indicators.
 *
 * NOTE(review): indicators2 and summary_indicator are not released
 * here, only indicators -- confirm whether the reset path is always
 * guaranteed to have dropped them first.
 */
static void virtio_ccw_device_unrealize(VirtioCcwDevice *dev)
{
    VirtIOCCWDeviceClass *dc = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    SubchDev *sch = ccw_dev->sch;

    if (dc->unrealize) {
        dc->unrealize(dev);
    }

    if (sch) {
        css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL);
        g_free(sch);
        ccw_dev->sch = NULL;
    }
    if (dev->indicators) {
        release_indicator(&dev->routes.adapter, dev->indicators);
        dev->indicators = NULL;
    }
}
822 
/* DeviceState to VirtioCcwDevice. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtioCcwDevice *to_virtio_ccw_dev_fast(DeviceState *d)
{
    /* Plain container_of chain instead of QOM casts: avoids the
     * dynamic type check on the hot notification path. */
    CcwDevice *ccw_dev = to_ccw_dev_fast(d);

    return container_of(ccw_dev, VirtioCcwDevice, parent_obj);
}
832 
/*
 * Atomically OR @to_be_set into the guest indicator byte at @ind_loc.
 *
 * Returns the previous value of the byte, or -1 (0xff, given the
 * uint8_t return type) if the guest page could not be mapped.
 */
static uint8_t virtio_set_ind_atomic(SubchDev *sch, uint64_t ind_loc,
                                     uint8_t to_be_set)
{
    uint8_t expected, actual;
    hwaddr len = 1;
    /* avoid  multiple fetches */
    uint8_t volatile *ind_addr;

    ind_addr = cpu_physical_memory_map(ind_loc, &len, true);
    if (!ind_addr) {
        error_report("%s(%x.%x.%04x): unable to access indicator",
                     __func__, sch->cssid, sch->ssid, sch->schid);
        return -1;
    }
    actual = *ind_addr;
    /* Classic CAS loop: retry until no other writer raced with us. */
    do {
        expected = actual;
        actual = qatomic_cmpxchg(ind_addr, expected, expected | to_be_set);
    } while (actual != expected);
    trace_virtio_ccw_set_ind(ind_loc, actual, actual | to_be_set);
    cpu_physical_memory_unmap((void *)ind_addr, len, 1, len);

    return actual;
}
857 
/*
 * Guest notification callback (datapath): raise the indicator bit for
 * @vector and inject the appropriate interrupt -- adapter interrupt in
 * thinint mode, classic I/O interrupt otherwise. vector ==
 * VIRTIO_QUEUE_MAX signals a configuration change via indicators2.
 */
static void virtio_ccw_notify(DeviceState *d, uint16_t vector)
{
    VirtioCcwDevice *dev = to_virtio_ccw_dev_fast(d);
    CcwDevice *ccw_dev = to_ccw_dev_fast(d);
    SubchDev *sch = ccw_dev->sch;
    uint64_t indicators;

    if (vector == VIRTIO_NO_VECTOR) {
        return;
    }
    /*
     * vector < VIRTIO_QUEUE_MAX: notification for a virtqueue
     * vector == VIRTIO_QUEUE_MAX: configuration change notification
     * bits beyond that are unused and should never be notified for
     */
    assert(vector <= VIRTIO_QUEUE_MAX);

    if (vector < VIRTIO_QUEUE_MAX) {
        if (!dev->indicators) {
            return;
        }
        if (sch->thinint_active) {
            /*
             * In the adapter interrupt case, indicators points to a
             * memory area that may be (way) larger than 64 bit and
             * ind_bit indicates the start of the indicators in a big
             * endian notation.
             */
            uint64_t ind_bit = dev->routes.adapter.ind_offset;

            virtio_set_ind_atomic(sch, dev->indicators->addr +
                                  (ind_bit + vector) / 8,
                                  0x80 >> ((ind_bit + vector) % 8));
            /*
             * Only inject the adapter interrupt if the summary bit was
             * previously clear (i.e. the guest has consumed the last one).
             */
            if (!virtio_set_ind_atomic(sch, dev->summary_indicator->addr,
                                       0x01)) {
                css_adapter_interrupt(CSS_IO_ADAPTER_VIRTIO, dev->thinint_isc);
            }
        } else {
            assert(vector < NR_CLASSIC_INDICATOR_BITS);
            /* Classic mode: set bit @vector in the 64-bit indicator. */
            indicators = address_space_ldq(&address_space_memory,
                                           dev->indicators->addr,
                                           MEMTXATTRS_UNSPECIFIED,
                                           NULL);
            indicators |= 1ULL << vector;
            address_space_stq(&address_space_memory, dev->indicators->addr,
                              indicators, MEMTXATTRS_UNSPECIFIED, NULL);
            css_conditional_io_interrupt(sch);
        }
    } else {
        /* Configuration change: bit 0 of the secondary indicators. */
        if (!dev->indicators2) {
            return;
        }
        indicators = address_space_ldq(&address_space_memory,
                                       dev->indicators2->addr,
                                       MEMTXATTRS_UNSPECIFIED,
                                       NULL);
        indicators |= 1ULL;
        address_space_stq(&address_space_memory, dev->indicators2->addr,
                          indicators, MEMTXATTRS_UNSPECIFIED, NULL);
        css_conditional_io_interrupt(sch);
    }
}
920 
921 static void virtio_ccw_reset(DeviceState *d)
922 {
923     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
924     VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
925     VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
926 
927     virtio_ccw_reset_virtio(dev, vdev);
928     if (vdc->parent_reset) {
929         vdc->parent_reset(d);
930     }
931 }
932 
933 static void virtio_ccw_vmstate_change(DeviceState *d, bool running)
934 {
935     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
936 
937     if (running) {
938         virtio_ccw_start_ioeventfd(dev);
939     } else {
940         virtio_ccw_stop_ioeventfd(dev);
941     }
942 }
943 
944 static bool virtio_ccw_query_guest_notifiers(DeviceState *d)
945 {
946     CcwDevice *dev = CCW_DEVICE(d);
947 
948     return !!(dev->sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA);
949 }
950 
951 static int virtio_ccw_get_mappings(VirtioCcwDevice *dev)
952 {
953     int r;
954     CcwDevice *ccw_dev = CCW_DEVICE(dev);
955 
956     if (!ccw_dev->sch->thinint_active) {
957         return -EINVAL;
958     }
959 
960     r = map_indicator(&dev->routes.adapter, dev->summary_indicator);
961     if (r) {
962         return r;
963     }
964     r = map_indicator(&dev->routes.adapter, dev->indicators);
965     if (r) {
966         return r;
967     }
968     dev->routes.adapter.summary_addr = dev->summary_indicator->map;
969     dev->routes.adapter.ind_addr = dev->indicators->map;
970 
971     return 0;
972 }
973 
974 static int virtio_ccw_setup_irqroutes(VirtioCcwDevice *dev, int nvqs)
975 {
976     int i;
977     VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
978     int ret;
979     S390FLICState *fs = s390_get_flic();
980     S390FLICStateClass *fsc = s390_get_flic_class(fs);
981 
982     ret = virtio_ccw_get_mappings(dev);
983     if (ret) {
984         return ret;
985     }
986     for (i = 0; i < nvqs; i++) {
987         if (!virtio_queue_get_num(vdev, i)) {
988             break;
989         }
990     }
991     dev->routes.num_routes = i;
992     return fsc->add_adapter_routes(fs, &dev->routes);
993 }
994 
995 static void virtio_ccw_release_irqroutes(VirtioCcwDevice *dev, int nvqs)
996 {
997     S390FLICState *fs = s390_get_flic();
998     S390FLICStateClass *fsc = s390_get_flic_class(fs);
999 
1000     fsc->release_adapter_routes(fs, &dev->routes);
1001 }
1002 
1003 static int virtio_ccw_add_irqfd(VirtioCcwDevice *dev, int n)
1004 {
1005     VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1006     VirtQueue *vq = virtio_get_queue(vdev, n);
1007     EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
1008 
1009     return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, notifier, NULL,
1010                                               dev->routes.gsi[n]);
1011 }
1012 
1013 static void virtio_ccw_remove_irqfd(VirtioCcwDevice *dev, int n)
1014 {
1015     VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1016     VirtQueue *vq = virtio_get_queue(vdev, n);
1017     EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
1018     int ret;
1019 
1020     ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, notifier,
1021                                                 dev->routes.gsi[n]);
1022     assert(ret == 0);
1023 }
1024 
/*
 * Assign or deassign the guest notifier for virtqueue @n.  When
 * @with_irqfd is set, the notifier is additionally connected to a KVM
 * irqfd so interrupts bypass QEMU.  The assign and deassign paths mirror
 * each other in reverse order.  Returns 0 on success, negative on error.
 */
static int virtio_ccw_set_guest_notifier(VirtioCcwDevice *dev, int n,
                                         bool assign, bool with_irqfd)
{
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (assign) {
        int r = event_notifier_init(notifier, 0);

        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
        if (with_irqfd) {
            r = virtio_ccw_add_irqfd(dev, n);
            if (r) {
                /* Undo the fd handler assignment before bailing out. */
                virtio_queue_set_guest_notifier_fd_handler(vq, false,
                                                           with_irqfd);
                return r;
            }
        }
        /*
         * We do not support individual masking for channel devices, so we
         * need to manually trigger any guest masking callbacks here.
         */
        if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
            k->guest_notifier_mask(vdev, n, false);
        }
        /* get lost events and re-inject */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, n)) {
            event_notifier_set(notifier);
        }
    } else {
        /* Mask again before tearing down the notifier. */
        if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
            k->guest_notifier_mask(vdev, n, true);
        }
        if (with_irqfd) {
            virtio_ccw_remove_irqfd(dev, n);
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }
    return 0;
}
1072 
/*
 * (De)assign guest notifiers for the first @nvqs virtqueues.  With irqfds
 * in use, irq routes must exist before any irqfd is assigned and may only
 * be released after all irqfds are gone; the two branches below enforce
 * that ordering.  On partial failure, already-assigned notifiers are
 * rolled back.  Returns 0 on success, negative on error.
 */
static int virtio_ccw_set_guest_notifiers(DeviceState *d, int nvqs,
                                          bool assigned)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    CcwDevice *ccw_dev = CCW_DEVICE(d);
    bool with_irqfd = ccw_dev->sch->thinint_active && kvm_irqfds_enabled();
    int r, n;

    if (with_irqfd && assigned) {
        /* irq routes need to be set up before assigning irqfds */
        r = virtio_ccw_setup_irqroutes(dev, nvqs);
        if (r < 0) {
            goto irqroute_error;
        }
    }
    for (n = 0; n < nvqs; n++) {
        /* Stop at the first unconfigured queue. */
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }
        r = virtio_ccw_set_guest_notifier(dev, n, assigned, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }
    if (with_irqfd && !assigned) {
        /* release irq routes after irqfds have been released */
        virtio_ccw_release_irqroutes(dev, nvqs);
    }
    return 0;

assign_error:
    /* Roll back queues already processed; irqfds were not set up for them. */
    while (--n >= 0) {
        virtio_ccw_set_guest_notifier(dev, n, !assigned, false);
    }
irqroute_error:
    if (with_irqfd && assigned) {
        virtio_ccw_release_irqroutes(dev, nvqs);
    }
    return r;
}
1114 
1115 static void virtio_ccw_save_queue(DeviceState *d, int n, QEMUFile *f)
1116 {
1117     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1118     VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1119 
1120     qemu_put_be16(f, virtio_queue_vector(vdev, n));
1121 }
1122 
1123 static int virtio_ccw_load_queue(DeviceState *d, int n, QEMUFile *f)
1124 {
1125     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1126     VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1127     uint16_t vector;
1128 
1129     qemu_get_be16s(f, &vector);
1130     virtio_queue_set_vector(vdev, n , vector);
1131 
1132     return 0;
1133 }
1134 
1135 static void virtio_ccw_save_config(DeviceState *d, QEMUFile *f)
1136 {
1137     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1138     vmstate_save_state(f, &vmstate_virtio_ccw_dev, dev, NULL);
1139 }
1140 
1141 static int virtio_ccw_load_config(DeviceState *d, QEMUFile *f)
1142 {
1143     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1144     return vmstate_load_state(f, &vmstate_virtio_ccw_dev, dev, 1);
1145 }
1146 
1147 static void virtio_ccw_pre_plugged(DeviceState *d, Error **errp)
1148 {
1149    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1150    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1151 
1152     if (dev->max_rev >= 1) {
1153         virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
1154     }
1155 }
1156 
1157 /* This is called by virtio-bus just after the device is plugged. */
1158 static void virtio_ccw_device_plugged(DeviceState *d, Error **errp)
1159 {
1160     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1161     VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1162     CcwDevice *ccw_dev = CCW_DEVICE(d);
1163     SubchDev *sch = ccw_dev->sch;
1164     int n = virtio_get_num_queues(vdev);
1165     S390FLICState *flic = s390_get_flic();
1166 
1167     if (!virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
1168         dev->max_rev = 0;
1169     }
1170 
1171     if (!virtio_ccw_rev_max(dev) && !virtio_legacy_allowed(vdev)) {
1172         /*
1173          * To avoid migration issues, we allow legacy mode when legacy
1174          * check is disabled in the old machine types (< 5.1).
1175          */
1176         if (virtio_legacy_check_disabled(vdev)) {
1177             warn_report("device requires revision >= 1, but for backward "
1178                         "compatibility max_revision=0 is allowed");
1179         } else {
1180             error_setg(errp, "Invalid value of property max_rev "
1181                        "(is %d expected >= 1)", virtio_ccw_rev_max(dev));
1182             return;
1183         }
1184     }
1185 
1186     if (virtio_get_num_queues(vdev) > VIRTIO_QUEUE_MAX) {
1187         error_setg(errp, "The number of virtqueues %d "
1188                    "exceeds virtio limit %d", n,
1189                    VIRTIO_QUEUE_MAX);
1190         return;
1191     }
1192     if (virtio_get_num_queues(vdev) > flic->adapter_routes_max_batch) {
1193         error_setg(errp, "The number of virtqueues %d "
1194                    "exceeds flic adapter route limit %d", n,
1195                    flic->adapter_routes_max_batch);
1196         return;
1197     }
1198 
1199     sch->id.cu_model = virtio_bus_get_vdev_id(&dev->bus);
1200 
1201 
1202     css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid,
1203                           d->hotplugged, 1);
1204 }
1205 
1206 static void virtio_ccw_device_unplugged(DeviceState *d)
1207 {
1208     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1209 
1210     virtio_ccw_stop_ioeventfd(dev);
1211 }
1212 /**************** Virtio-ccw Bus Device Descriptions *******************/
1213 
1214 static void virtio_ccw_busdev_realize(DeviceState *dev, Error **errp)
1215 {
1216     VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;
1217 
1218     virtio_ccw_bus_new(&_dev->bus, sizeof(_dev->bus), _dev);
1219     virtio_ccw_device_realize(_dev, errp);
1220 }
1221 
1222 static void virtio_ccw_busdev_unrealize(DeviceState *dev)
1223 {
1224     VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;
1225 
1226     virtio_ccw_device_unrealize(_dev);
1227 }
1228 
1229 static void virtio_ccw_busdev_unplug(HotplugHandler *hotplug_dev,
1230                                      DeviceState *dev, Error **errp)
1231 {
1232     VirtioCcwDevice *_dev = to_virtio_ccw_dev_fast(dev);
1233 
1234     virtio_ccw_stop_ioeventfd(_dev);
1235 }
1236 
1237 static void virtio_ccw_device_class_init(ObjectClass *klass, void *data)
1238 {
1239     DeviceClass *dc = DEVICE_CLASS(klass);
1240     CCWDeviceClass *k = CCW_DEVICE_CLASS(dc);
1241     VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_CLASS(klass);
1242 
1243     k->unplug = virtio_ccw_busdev_unplug;
1244     dc->realize = virtio_ccw_busdev_realize;
1245     dc->unrealize = virtio_ccw_busdev_unrealize;
1246     device_class_set_parent_reset(dc, virtio_ccw_reset, &vdc->parent_reset);
1247 }
1248 
1249 static const TypeInfo virtio_ccw_device_info = {
1250     .name = TYPE_VIRTIO_CCW_DEVICE,
1251     .parent = TYPE_CCW_DEVICE,
1252     .instance_size = sizeof(VirtioCcwDevice),
1253     .class_init = virtio_ccw_device_class_init,
1254     .class_size = sizeof(VirtIOCCWDeviceClass),
1255     .abstract = true,
1256 };
1257 
1258 /* virtio-ccw-bus */
1259 
1260 static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
1261                                VirtioCcwDevice *dev)
1262 {
1263     DeviceState *qdev = DEVICE(dev);
1264     char virtio_bus_name[] = "virtio-bus";
1265 
1266     qbus_init(bus, bus_size, TYPE_VIRTIO_CCW_BUS, qdev, virtio_bus_name);
1267 }
1268 
1269 static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
1270 {
1271     VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
1272     BusClass *bus_class = BUS_CLASS(klass);
1273 
1274     bus_class->max_dev = 1;
1275     k->notify = virtio_ccw_notify;
1276     k->vmstate_change = virtio_ccw_vmstate_change;
1277     k->query_guest_notifiers = virtio_ccw_query_guest_notifiers;
1278     k->set_guest_notifiers = virtio_ccw_set_guest_notifiers;
1279     k->save_queue = virtio_ccw_save_queue;
1280     k->load_queue = virtio_ccw_load_queue;
1281     k->save_config = virtio_ccw_save_config;
1282     k->load_config = virtio_ccw_load_config;
1283     k->pre_plugged = virtio_ccw_pre_plugged;
1284     k->device_plugged = virtio_ccw_device_plugged;
1285     k->device_unplugged = virtio_ccw_device_unplugged;
1286     k->ioeventfd_enabled = virtio_ccw_ioeventfd_enabled;
1287     k->ioeventfd_assign = virtio_ccw_ioeventfd_assign;
1288 }
1289 
1290 static const TypeInfo virtio_ccw_bus_info = {
1291     .name = TYPE_VIRTIO_CCW_BUS,
1292     .parent = TYPE_VIRTIO_BUS,
1293     .instance_size = sizeof(VirtioCcwBusState),
1294     .class_size = sizeof(VirtioCcwBusClass),
1295     .class_init = virtio_ccw_bus_class_init,
1296 };
1297 
1298 static void virtio_ccw_register(void)
1299 {
1300     type_register_static(&virtio_ccw_bus_info);
1301     type_register_static(&virtio_ccw_device_info);
1302 }
1303 
1304 type_init(virtio_ccw_register)
1305