xref: /qemu/hw/s390x/css.c (revision 85aad98a)
1 /*
2  * Channel subsystem base support.
3  *
4  * Copyright 2012 IBM Corp.
5  * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6  *
7  * This work is licensed under the terms of the GNU GPL, version 2 or (at
8  * your option) any later version. See the COPYING file in the top-level
9  * directory.
10  */
11 
12 #include "qemu/osdep.h"
13 #include "qapi/error.h"
14 #include "qapi/visitor.h"
15 #include "hw/qdev.h"
16 #include "qemu/bitops.h"
17 #include "exec/address-spaces.h"
18 #include "cpu.h"
19 #include "hw/s390x/ioinst.h"
20 #include "hw/s390x/css.h"
21 #include "trace.h"
22 #include "hw/s390x/s390_flic.h"
23 
24 typedef struct CrwContainer {
25     CRW crw;
26     QTAILQ_ENTRY(CrwContainer) sibling;
27 } CrwContainer;
28 
29 typedef struct ChpInfo {
30     uint8_t in_use;
31     uint8_t type;
32     uint8_t is_virtual;
33 } ChpInfo;
34 
35 typedef struct SubchSet {
36     SubchDev *sch[MAX_SCHID + 1];
37     unsigned long schids_used[BITS_TO_LONGS(MAX_SCHID + 1)];
38     unsigned long devnos_used[BITS_TO_LONGS(MAX_SCHID + 1)];
39 } SubchSet;
40 
41 typedef struct CssImage {
42     SubchSet *sch_set[MAX_SSID + 1];
43     ChpInfo chpids[MAX_CHPID + 1];
44 } CssImage;
45 
46 typedef struct IoAdapter {
47     uint32_t id;
48     uint8_t type;
49     uint8_t isc;
50     QTAILQ_ENTRY(IoAdapter) sibling;
51 } IoAdapter;
52 
53 typedef struct ChannelSubSys {
54     QTAILQ_HEAD(, CrwContainer) pending_crws;
55     bool sei_pending;
56     bool do_crw_mchk;
57     bool crws_lost;
58     uint8_t max_cssid;
59     uint8_t max_ssid;
60     bool chnmon_active;
61     uint64_t chnmon_area;
62     CssImage *css[MAX_CSSID + 1];
63     uint8_t default_cssid;
64     QTAILQ_HEAD(, IoAdapter) io_adapters;
65     QTAILQ_HEAD(, IndAddr) indicator_addresses;
66 } ChannelSubSys;
67 
68 static ChannelSubSys channel_subsys = {
69     .pending_crws = QTAILQ_HEAD_INITIALIZER(channel_subsys.pending_crws),
71     .sei_pending = false,
72     .do_crw_mchk = true,
73     .crws_lost = false,
74     .chnmon_active = false,
75     .io_adapters = QTAILQ_HEAD_INITIALIZER(channel_subsys.io_adapters),
76     .indicator_addresses =
77         QTAILQ_HEAD_INITIALIZER(channel_subsys.indicator_addresses),
78 };
79 
80 IndAddr *get_indicator(hwaddr ind_addr, int len)
81 {
82     IndAddr *indicator;
83 
84     QTAILQ_FOREACH(indicator, &channel_subsys.indicator_addresses, sibling) {
85         if (indicator->addr == ind_addr) {
86             indicator->refcnt++;
87             return indicator;
88         }
89     }
90     indicator = g_new0(IndAddr, 1);
91     indicator->addr = ind_addr;
92     indicator->len = len;
93     indicator->refcnt = 1;
94     QTAILQ_INSERT_TAIL(&channel_subsys.indicator_addresses,
95                        indicator, sibling);
96     return indicator;
97 }
98 
99 static int s390_io_adapter_map(AdapterInfo *adapter, uint64_t map_addr,
100                                bool do_map)
101 {
102     S390FLICState *fs = s390_get_flic();
103     S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
104 
105     return fsc->io_adapter_map(fs, adapter->adapter_id, map_addr, do_map);
106 }
107 
108 void release_indicator(AdapterInfo *adapter, IndAddr *indicator)
109 {
110     assert(indicator->refcnt > 0);
111     indicator->refcnt--;
112     if (indicator->refcnt > 0) {
113         return;
114     }
115     QTAILQ_REMOVE(&channel_subsys.indicator_addresses, indicator, sibling);
116     if (indicator->map) {
117         s390_io_adapter_map(adapter, indicator->map, false);
118     }
119     g_free(indicator);
120 }
121 
122 int map_indicator(AdapterInfo *adapter, IndAddr *indicator)
123 {
124     int ret;
125 
126     if (indicator->map) {
127         return 0; /* already mapped is not an error */
128     }
129     indicator->map = indicator->addr;
130     ret = s390_io_adapter_map(adapter, indicator->map, true);
131     if ((ret != 0) && (ret != -ENOSYS)) {
132         goto out_err;
133     }
134     return 0;
135 
136 out_err:
137     indicator->map = 0;
138     return ret;
139 }
140 
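/*
 * Editor's note: illustrative sketch, not part of css.c. It shows the
 * intended pairing of the indicator helpers above: get_indicator() returns a
 * refcounted entry, map_indicator() is a no-op if the address is already
 * mapped, and release_indicator() unmaps and frees on the last reference.
 * The function name and the 8-byte length are made up for the example.
 */
static int example_use_indicator(AdapterInfo *adapter, hwaddr guest_addr)
{
    IndAddr *ind = get_indicator(guest_addr, 8);
    int ret = map_indicator(adapter, ind);

    if (ret) {
        release_indicator(adapter, ind);
        return ret;
    }
    /* ... deliver adapter interrupts through the indicator ... */
    release_indicator(adapter, ind);
    return 0;
}
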
141 int css_create_css_image(uint8_t cssid, bool default_image)
142 {
143     trace_css_new_image(cssid, default_image ? "(default)" : "");
144     if (cssid > MAX_CSSID) {
145         return -EINVAL;
146     }
147     if (channel_subsys.css[cssid]) {
148         return -EBUSY;
149     }
150     channel_subsys.css[cssid] = g_malloc0(sizeof(CssImage));
151     if (default_image) {
152         channel_subsys.default_cssid = cssid;
153     }
154     return 0;
155 }
156 
157 int css_register_io_adapter(uint8_t type, uint8_t isc, bool swap,
158                             bool maskable, uint32_t *id)
159 {
160     IoAdapter *adapter;
161     bool found = false;
162     int ret;
163     S390FLICState *fs = s390_get_flic();
164     S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
165 
166     *id = 0;
167     QTAILQ_FOREACH(adapter, &channel_subsys.io_adapters, sibling) {
168         if ((adapter->type == type) && (adapter->isc == isc)) {
169             *id = adapter->id;
170             found = true;
171             ret = 0;
172             break;
173         }
174         if (adapter->id >= *id) {
175             *id = adapter->id + 1;
176         }
177     }
178     if (found) {
179         goto out;
180     }
181     adapter = g_new0(IoAdapter, 1);
182     ret = fsc->register_io_adapter(fs, *id, isc, swap, maskable);
183     if (ret == 0) {
184         adapter->id = *id;
185         adapter->isc = isc;
186         adapter->type = type;
187         QTAILQ_INSERT_TAIL(&channel_subsys.io_adapters, adapter, sibling);
188     } else {
189         g_free(adapter);
190         fprintf(stderr, "Unexpected error %d when registering adapter %d\n",
191                 ret, *id);
192     }
193 out:
194     return ret;
195 }
196 
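/*
 * Editor's note: illustrative sketch, not part of css.c. A caller asks for an
 * adapter once per (type, isc) pair and gets a stable id back; repeated calls
 * with the same pair return the id that was allocated first. The function
 * name and the type value 0x03 are made up for the example.
 */
static int example_get_adapter_id(uint8_t isc, uint32_t *id)
{
    /* swap=false, maskable=false; forwarded to the FLIC backend above */
    return css_register_io_adapter(0x03, isc, false, false, id);
}
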
197 static void css_clear_io_interrupt(uint16_t subchannel_id,
198                                    uint16_t subchannel_nr)
199 {
200     Error *err = NULL;
201     static bool no_clear_irq;
202     S390FLICState *fs = s390_get_flic();
203     S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
204     int r;
205 
206     if (unlikely(no_clear_irq)) {
207         return;
208     }
209     r = fsc->clear_io_irq(fs, subchannel_id, subchannel_nr);
210     switch (r) {
211     case 0:
212         break;
213     case -ENOSYS:
214         no_clear_irq = true;
215         /*
216          * Ignore unavailability, as the user can't do anything
217          * about it anyway.
218          */
219         break;
220     default:
221         error_setg_errno(&err, -r, "unexpected error condition");
222         error_propagate(&error_abort, err);
223     }
224 }
225 
226 static inline uint16_t css_do_build_subchannel_id(uint8_t cssid, uint8_t ssid)
227 {
228     if (channel_subsys.max_cssid > 0) {
229         return (cssid << 8) | (1 << 3) | (ssid << 1) | 1;
230     }
231     return (ssid << 1) | 1;
232 }
233 
234 uint16_t css_build_subchannel_id(SubchDev *sch)
235 {
236     return css_do_build_subchannel_id(sch->cssid, sch->ssid);
237 }
238 
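/*
 * Editor's note: worked example, not part of css.c. With MCSS-E enabled
 * (max_cssid > 0), cssid 0xfe and ssid 1 pack into the subchannel id built
 * above as
 *
 *     (0xfe << 8) | (1 << 3) | (1 << 1) | 1  ==  0xfe0b
 *
 * while without MCSS-E only the ssid and the low "one" bit are reported:
 * (1 << 1) | 1 == 0x0003.
 */
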
239 static void css_inject_io_interrupt(SubchDev *sch)
240 {
241     uint8_t isc = (sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ISC) >> 11;
242 
243     trace_css_io_interrupt(sch->cssid, sch->ssid, sch->schid,
244                            sch->curr_status.pmcw.intparm, isc, "");
245     s390_io_interrupt(css_build_subchannel_id(sch),
246                       sch->schid,
247                       sch->curr_status.pmcw.intparm,
248                       isc << 27);
249 }
250 
251 void css_conditional_io_interrupt(SubchDev *sch)
252 {
253     /*
254      * If the subchannel is not currently status pending, make it pending
255      * with alert status.
256      */
257     if (!(sch->curr_status.scsw.ctrl & SCSW_STCTL_STATUS_PEND)) {
258         uint8_t isc = (sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ISC) >> 11;
259 
260         trace_css_io_interrupt(sch->cssid, sch->ssid, sch->schid,
261                                sch->curr_status.pmcw.intparm, isc,
262                                "(unsolicited)");
263         sch->curr_status.scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
264         sch->curr_status.scsw.ctrl |=
265             SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
266         /* Inject an I/O interrupt. */
267         s390_io_interrupt(css_build_subchannel_id(sch),
268                           sch->schid,
269                           sch->curr_status.pmcw.intparm,
270                           isc << 27);
271     }
272 }
273 
274 void css_adapter_interrupt(uint8_t isc)
275 {
276     uint32_t io_int_word = (isc << 27) | IO_INT_WORD_AI;
277 
278     trace_css_adapter_interrupt(isc);
279     s390_io_interrupt(0, 0, 0, io_int_word);
280 }
281 
282 static void sch_handle_clear_func(SubchDev *sch)
283 {
284     PMCW *p = &sch->curr_status.pmcw;
285     SCSW *s = &sch->curr_status.scsw;
286     int path;
287 
288     /* Path management: In our simple css, we always choose the only path. */
289     path = 0x80;
290 
291     /* Reset values prior to 'issuing the clear signal'. */
292     p->lpum = 0;
293     p->pom = 0xff;
294     s->flags &= ~SCSW_FLAGS_MASK_PNO;
295 
296     /* We always 'attempt to issue the clear signal', and we always succeed. */
297     sch->channel_prog = 0x0;
298     sch->last_cmd_valid = false;
299     s->ctrl &= ~SCSW_ACTL_CLEAR_PEND;
300     s->ctrl |= SCSW_STCTL_STATUS_PEND;
301 
302     s->dstat = 0;
303     s->cstat = 0;
304     p->lpum = path;
305 
306 }
307 
308 static void sch_handle_halt_func(SubchDev *sch)
309 {
310 
311     PMCW *p = &sch->curr_status.pmcw;
312     SCSW *s = &sch->curr_status.scsw;
313     hwaddr curr_ccw = sch->channel_prog;
314     int path;
315 
316     /* Path management: In our simple css, we always choose the only path. */
317     path = 0x80;
318 
319     /* We always 'attempt to issue the halt signal', and we always succeed. */
320     sch->channel_prog = 0x0;
321     sch->last_cmd_valid = false;
322     s->ctrl &= ~SCSW_ACTL_HALT_PEND;
323     s->ctrl |= SCSW_STCTL_STATUS_PEND;
324 
325     if ((s->ctrl & (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) ||
326         !((s->ctrl & SCSW_ACTL_START_PEND) ||
327           (s->ctrl & SCSW_ACTL_SUSP))) {
328         s->dstat = SCSW_DSTAT_DEVICE_END;
329     }
330     if ((s->ctrl & (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) ||
331         (s->ctrl & SCSW_ACTL_SUSP)) {
332         s->cpa = curr_ccw + 8;
333     }
334     s->cstat = 0;
335     p->lpum = path;
336 
337 }
338 
339 static void copy_sense_id_to_guest(SenseId *dest, SenseId *src)
340 {
341     int i;
342 
343     dest->reserved = src->reserved;
344     dest->cu_type = cpu_to_be16(src->cu_type);
345     dest->cu_model = src->cu_model;
346     dest->dev_type = cpu_to_be16(src->dev_type);
347     dest->dev_model = src->dev_model;
348     dest->unused = src->unused;
349     for (i = 0; i < ARRAY_SIZE(dest->ciw); i++) {
350         dest->ciw[i].type = src->ciw[i].type;
351         dest->ciw[i].command = src->ciw[i].command;
352         dest->ciw[i].count = cpu_to_be16(src->ciw[i].count);
353     }
354 }
355 
356 static CCW1 copy_ccw_from_guest(hwaddr addr, bool fmt1)
357 {
358     CCW0 tmp0;
359     CCW1 tmp1;
360     CCW1 ret;
361 
362     if (fmt1) {
363         cpu_physical_memory_read(addr, &tmp1, sizeof(tmp1));
364         ret.cmd_code = tmp1.cmd_code;
365         ret.flags = tmp1.flags;
366         ret.count = be16_to_cpu(tmp1.count);
367         ret.cda = be32_to_cpu(tmp1.cda);
368     } else {
369         cpu_physical_memory_read(addr, &tmp0, sizeof(tmp0));
370         ret.cmd_code = tmp0.cmd_code;
371         ret.flags = tmp0.flags;
372         ret.count = be16_to_cpu(tmp0.count);
373         ret.cda = be16_to_cpu(tmp0.cda1) | (tmp0.cda0 << 16);
374         if ((ret.cmd_code & 0x0f) == CCW_CMD_TIC) {
375             ret.cmd_code &= 0x0f;
376         }
377     }
378     return ret;
379 }
380 
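/*
 * Editor's note: worked example, not part of css.c. A format-0 CCW with
 * cmd_code 0x02, the 24-bit data address 0x012345 and count 0x0010 comes out
 * of copy_ccw_from_guest() as the equivalent format-1 view: cmd_code 0x02,
 * cda 0x00012345, count 0x0010, flags copied unchanged. A format-0 command
 * whose low nibble is CCW_CMD_TIC (0x08) additionally has its high nibble
 * masked off, matching the code above.
 */
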
381 static int css_interpret_ccw(SubchDev *sch, hwaddr ccw_addr,
382                              bool suspend_allowed)
383 {
384     int ret;
385     bool check_len;
386     int len;
387     CCW1 ccw;
388 
389     if (!ccw_addr) {
390         return -EIO;
391     }
392 
393     /* Translate everything to format-1 ccws - the information is the same. */
394     ccw = copy_ccw_from_guest(ccw_addr, sch->ccw_fmt_1);
395 
396     /* Check for invalid command codes. */
397     if ((ccw.cmd_code & 0x0f) == 0) {
398         return -EINVAL;
399     }
400     if (((ccw.cmd_code & 0x0f) == CCW_CMD_TIC) &&
401         ((ccw.cmd_code & 0xf0) != 0)) {
402         return -EINVAL;
403     }
404     if (!sch->ccw_fmt_1 && (ccw.count == 0) &&
405         (ccw.cmd_code != CCW_CMD_TIC)) {
406         return -EINVAL;
407     }
408 
409     if (ccw.flags & CCW_FLAG_SUSPEND) {
410         return suspend_allowed ? -EINPROGRESS : -EINVAL;
411     }
412 
413     check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));
414 
415     if (!ccw.cda) {
416         if (sch->ccw_no_data_cnt == 255) {
417             return -EINVAL;
418         }
419         sch->ccw_no_data_cnt++;
420     }
421 
422     /* Look at the command. */
423     switch (ccw.cmd_code) {
424     case CCW_CMD_NOOP:
425         /* Nothing to do. */
426         ret = 0;
427         break;
428     case CCW_CMD_BASIC_SENSE:
429         if (check_len) {
430             if (ccw.count != sizeof(sch->sense_data)) {
431                 ret = -EINVAL;
432                 break;
433             }
434         }
435         len = MIN(ccw.count, sizeof(sch->sense_data));
436         cpu_physical_memory_write(ccw.cda, sch->sense_data, len);
437         sch->curr_status.scsw.count = ccw.count - len;
438         memset(sch->sense_data, 0, sizeof(sch->sense_data));
439         ret = 0;
440         break;
441     case CCW_CMD_SENSE_ID:
442     {
443         SenseId sense_id;
444 
445         copy_sense_id_to_guest(&sense_id, &sch->id);
446         /* Sense ID information is device specific. */
447         if (check_len) {
448             if (ccw.count != sizeof(sense_id)) {
449                 ret = -EINVAL;
450                 break;
451             }
452         }
453         len = MIN(ccw.count, sizeof(sense_id));
454         /*
455          * Only indicate 0xff in the first sense byte if we actually
456          * have enough room to store at least bytes 0-3.
457          */
458         if (len >= 4) {
459             sense_id.reserved = 0xff;
460         } else {
461             sense_id.reserved = 0;
462         }
463         cpu_physical_memory_write(ccw.cda, &sense_id, len);
464         sch->curr_status.scsw.count = ccw.count - len;
465         ret = 0;
466         break;
467     }
468     case CCW_CMD_TIC:
469         if (sch->last_cmd_valid && (sch->last_cmd.cmd_code == CCW_CMD_TIC)) {
470             ret = -EINVAL;
471             break;
472         }
473         if (ccw.flags & (CCW_FLAG_CC | CCW_FLAG_DC)) {
474             ret = -EINVAL;
475             break;
476         }
477         sch->channel_prog = ccw.cda;
478         ret = -EAGAIN;
479         break;
480     default:
481         if (sch->ccw_cb) {
482             /* Handle device specific commands. */
483             ret = sch->ccw_cb(sch, ccw);
484         } else {
485             ret = -ENOSYS;
486         }
487         break;
488     }
489     sch->last_cmd = ccw;
490     sch->last_cmd_valid = true;
491     if (ret == 0) {
492         if (ccw.flags & CCW_FLAG_CC) {
493             sch->channel_prog += 8;
494             ret = -EAGAIN;
495         }
496     }
497 
498     return ret;
499 }
500 
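/*
 * Editor's note: illustrative sketch, not part of css.c. Device models hook
 * into the default branch of css_interpret_ccw() via sch->ccw_cb. The command
 * code 0x27 and the 4-byte payload are made up; the return values map to the
 * status conditions generated by sch_handle_start_func() below (0: channel
 * end/device end, -ENOSYS: unit check/command reject, -EINVAL: channel
 * program check, -EFAULT: channel data check).
 */
static int example_ccw_cb(SubchDev *sch, CCW1 ccw)
{
    uint8_t buf[4];

    switch (ccw.cmd_code) {
    case 0x27: /* hypothetical device-specific command */
        if (!ccw.cda || ccw.count < sizeof(buf)) {
            return -EINVAL;
        }
        cpu_physical_memory_read(ccw.cda, buf, sizeof(buf));
        /* ... act on the payload, then report the residual count ... */
        sch->curr_status.scsw.count = ccw.count - sizeof(buf);
        return 0;
    default:
        return -ENOSYS;
    }
}
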
501 static void sch_handle_start_func(SubchDev *sch, ORB *orb)
502 {
503 
504     PMCW *p = &sch->curr_status.pmcw;
505     SCSW *s = &sch->curr_status.scsw;
506     int path;
507     int ret;
508     bool suspend_allowed;
509 
510     /* Path management: In our simple css, we always choose the only path. */
511     path = 0x80;
512 
513     if (!(s->ctrl & SCSW_ACTL_SUSP)) {
514         s->cstat = 0;
515         s->dstat = 0;
516         /* Look at the orb and try to execute the channel program. */
517         assert(orb != NULL); /* resume does not pass an orb */
518         p->intparm = orb->intparm;
519         if (!(orb->lpm & path)) {
520             /* Generate a deferred cc 3 condition. */
521             s->flags |= SCSW_FLAGS_MASK_CC;
522             s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
523             s->ctrl |= (SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND);
524             return;
525         }
526         sch->ccw_fmt_1 = !!(orb->ctrl0 & ORB_CTRL0_MASK_FMT);
527         sch->ccw_no_data_cnt = 0;
528         suspend_allowed = !!(orb->ctrl0 & ORB_CTRL0_MASK_SPND);
529     } else {
530         s->ctrl &= ~(SCSW_ACTL_SUSP | SCSW_ACTL_RESUME_PEND);
531         /* The channel program had been suspended before. */
532         suspend_allowed = true;
533     }
534     sch->last_cmd_valid = false;
535     do {
536         ret = css_interpret_ccw(sch, sch->channel_prog, suspend_allowed);
537         switch (ret) {
538         case -EAGAIN:
539             /* ccw chain, continue processing */
540             break;
541         case 0:
542             /* success */
543             s->ctrl &= ~SCSW_ACTL_START_PEND;
544             s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
545             s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
546                     SCSW_STCTL_STATUS_PEND;
547             s->dstat = SCSW_DSTAT_CHANNEL_END | SCSW_DSTAT_DEVICE_END;
548             s->cpa = sch->channel_prog + 8;
549             break;
550         case -ENOSYS:
551             /* unsupported command, generate unit check (command reject) */
552             s->ctrl &= ~SCSW_ACTL_START_PEND;
553             s->dstat = SCSW_DSTAT_UNIT_CHECK;
554             /* Set sense bit 0 in ecw0. */
555             sch->sense_data[0] = 0x80;
556             s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
557             s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
558                     SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
559             s->cpa = sch->channel_prog + 8;
560             break;
561         case -EFAULT:
562             /* memory problem, generate channel data check */
563             s->ctrl &= ~SCSW_ACTL_START_PEND;
564             s->cstat = SCSW_CSTAT_DATA_CHECK;
565             s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
566             s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
567                     SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
568             s->cpa = sch->channel_prog + 8;
569             break;
570         case -EBUSY:
571             /* subchannel busy, generate deferred cc 1 */
572             s->flags &= ~SCSW_FLAGS_MASK_CC;
573             s->flags |= (1 << 8);
574             s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
575             s->ctrl |= SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
576             break;
577         case -EINPROGRESS:
578             /* channel program has been suspended */
579             s->ctrl &= ~SCSW_ACTL_START_PEND;
580             s->ctrl |= SCSW_ACTL_SUSP;
581             break;
582         default:
583             /* error, generate channel program check */
584             s->ctrl &= ~SCSW_ACTL_START_PEND;
585             s->cstat = SCSW_CSTAT_PROG_CHECK;
586             s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
587             s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
588                     SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
589             s->cpa = sch->channel_prog + 8;
590             break;
591         }
592     } while (ret == -EAGAIN);
593 
594 }
595 
596 /*
597  * On real machines, this would run asynchronously to the main vcpus.
598  * We might want to make some parts of the ssch handling (interpreting
599  * read/writes) asynchronous later on if we start supporting more than
600  * our current very simple devices.
601  */
602 static void do_subchannel_work(SubchDev *sch, ORB *orb)
603 {
604 
605     SCSW *s = &sch->curr_status.scsw;
606 
607     if (s->ctrl & SCSW_FCTL_CLEAR_FUNC) {
608         sch_handle_clear_func(sch);
609     } else if (s->ctrl & SCSW_FCTL_HALT_FUNC) {
610         sch_handle_halt_func(sch);
611     } else if (s->ctrl & SCSW_FCTL_START_FUNC) {
612         sch_handle_start_func(sch, orb);
613     } else {
614         /* Cannot happen. */
615         return;
616     }
617     css_inject_io_interrupt(sch);
618 }
619 
620 static void copy_pmcw_to_guest(PMCW *dest, const PMCW *src)
621 {
622     int i;
623 
624     dest->intparm = cpu_to_be32(src->intparm);
625     dest->flags = cpu_to_be16(src->flags);
626     dest->devno = cpu_to_be16(src->devno);
627     dest->lpm = src->lpm;
628     dest->pnom = src->pnom;
629     dest->lpum = src->lpum;
630     dest->pim = src->pim;
631     dest->mbi = cpu_to_be16(src->mbi);
632     dest->pom = src->pom;
633     dest->pam = src->pam;
634     for (i = 0; i < ARRAY_SIZE(dest->chpid); i++) {
635         dest->chpid[i] = src->chpid[i];
636     }
637     dest->chars = cpu_to_be32(src->chars);
638 }
639 
640 static void copy_scsw_to_guest(SCSW *dest, const SCSW *src)
641 {
642     dest->flags = cpu_to_be16(src->flags);
643     dest->ctrl = cpu_to_be16(src->ctrl);
644     dest->cpa = cpu_to_be32(src->cpa);
645     dest->dstat = src->dstat;
646     dest->cstat = src->cstat;
647     dest->count = cpu_to_be16(src->count);
648 }
649 
650 static void copy_schib_to_guest(SCHIB *dest, const SCHIB *src)
651 {
652     int i;
653 
654     copy_pmcw_to_guest(&dest->pmcw, &src->pmcw);
655     copy_scsw_to_guest(&dest->scsw, &src->scsw);
656     dest->mba = cpu_to_be64(src->mba);
657     for (i = 0; i < ARRAY_SIZE(dest->mda); i++) {
658         dest->mda[i] = src->mda[i];
659     }
660 }
661 
662 int css_do_stsch(SubchDev *sch, SCHIB *schib)
663 {
664     /* Use current status. */
665     copy_schib_to_guest(schib, &sch->curr_status);
666     return 0;
667 }
668 
669 static void copy_pmcw_from_guest(PMCW *dest, const PMCW *src)
670 {
671     int i;
672 
673     dest->intparm = be32_to_cpu(src->intparm);
674     dest->flags = be16_to_cpu(src->flags);
675     dest->devno = be16_to_cpu(src->devno);
676     dest->lpm = src->lpm;
677     dest->pnom = src->pnom;
678     dest->lpum = src->lpum;
679     dest->pim = src->pim;
680     dest->mbi = be16_to_cpu(src->mbi);
681     dest->pom = src->pom;
682     dest->pam = src->pam;
683     for (i = 0; i < ARRAY_SIZE(dest->chpid); i++) {
684         dest->chpid[i] = src->chpid[i];
685     }
686     dest->chars = be32_to_cpu(src->chars);
687 }
688 
689 static void copy_scsw_from_guest(SCSW *dest, const SCSW *src)
690 {
691     dest->flags = be16_to_cpu(src->flags);
692     dest->ctrl = be16_to_cpu(src->ctrl);
693     dest->cpa = be32_to_cpu(src->cpa);
694     dest->dstat = src->dstat;
695     dest->cstat = src->cstat;
696     dest->count = be16_to_cpu(src->count);
697 }
698 
699 static void copy_schib_from_guest(SCHIB *dest, const SCHIB *src)
700 {
701     int i;
702 
703     copy_pmcw_from_guest(&dest->pmcw, &src->pmcw);
704     copy_scsw_from_guest(&dest->scsw, &src->scsw);
705     dest->mba = be64_to_cpu(src->mba);
706     for (i = 0; i < ARRAY_SIZE(dest->mda); i++) {
707         dest->mda[i] = src->mda[i];
708     }
709 }
710 
711 int css_do_msch(SubchDev *sch, const SCHIB *orig_schib)
712 {
713     SCSW *s = &sch->curr_status.scsw;
714     PMCW *p = &sch->curr_status.pmcw;
715     uint16_t oldflags;
716     int ret;
717     SCHIB schib;
718 
719     if (!(sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_DNV)) {
720         ret = 0;
721         goto out;
722     }
723 
724     if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
725         ret = -EINPROGRESS;
726         goto out;
727     }
728 
729     if (s->ctrl &
730         (SCSW_FCTL_START_FUNC|SCSW_FCTL_HALT_FUNC|SCSW_FCTL_CLEAR_FUNC)) {
731         ret = -EBUSY;
732         goto out;
733     }
734 
735     copy_schib_from_guest(&schib, orig_schib);
736     /* Only update the program-modifiable fields. */
737     p->intparm = schib.pmcw.intparm;
738     oldflags = p->flags;
739     p->flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
740                   PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
741                   PMCW_FLAGS_MASK_MP);
742     p->flags |= schib.pmcw.flags &
743             (PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
744              PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
745              PMCW_FLAGS_MASK_MP);
746     p->lpm = schib.pmcw.lpm;
747     p->mbi = schib.pmcw.mbi;
748     p->pom = schib.pmcw.pom;
749     p->chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE);
750     p->chars |= schib.pmcw.chars &
751             (PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE);
752     sch->curr_status.mba = schib.mba;
753 
754     /* Has the channel been disabled? */
755     if (sch->disable_cb && (oldflags & PMCW_FLAGS_MASK_ENA) != 0
756         && (p->flags & PMCW_FLAGS_MASK_ENA) == 0) {
757         sch->disable_cb(sch);
758     }
759 
760     ret = 0;
761 
762 out:
763     return ret;
764 }
765 
766 int css_do_xsch(SubchDev *sch)
767 {
768     SCSW *s = &sch->curr_status.scsw;
769     PMCW *p = &sch->curr_status.pmcw;
770     int ret;
771 
772     if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
773         ret = -ENODEV;
774         goto out;
775     }
776 
777     if (!(s->ctrl & SCSW_CTRL_MASK_FCTL) ||
778         ((s->ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) ||
779         (!(s->ctrl &
780            (SCSW_ACTL_RESUME_PEND | SCSW_ACTL_START_PEND | SCSW_ACTL_SUSP))) ||
781         (s->ctrl & SCSW_ACTL_SUBCH_ACTIVE)) {
782         ret = -EINPROGRESS;
783         goto out;
784     }
785 
786     if (s->ctrl & SCSW_CTRL_MASK_STCTL) {
787         ret = -EBUSY;
788         goto out;
789     }
790 
791     /* Cancel the current operation. */
792     s->ctrl &= ~(SCSW_FCTL_START_FUNC |
793                  SCSW_ACTL_RESUME_PEND |
794                  SCSW_ACTL_START_PEND |
795                  SCSW_ACTL_SUSP);
796     sch->channel_prog = 0x0;
797     sch->last_cmd_valid = false;
798     s->dstat = 0;
799     s->cstat = 0;
800     ret = 0;
801 
802 out:
803     return ret;
804 }
805 
806 int css_do_csch(SubchDev *sch)
807 {
808     SCSW *s = &sch->curr_status.scsw;
809     PMCW *p = &sch->curr_status.pmcw;
810     int ret;
811 
812     if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
813         ret = -ENODEV;
814         goto out;
815     }
816 
817     /* Trigger the clear function. */
818     s->ctrl &= ~(SCSW_CTRL_MASK_FCTL | SCSW_CTRL_MASK_ACTL);
819     s->ctrl |= SCSW_FCTL_CLEAR_FUNC | SCSW_ACTL_CLEAR_PEND;
820 
821     do_subchannel_work(sch, NULL);
822     ret = 0;
823 
824 out:
825     return ret;
826 }
827 
828 int css_do_hsch(SubchDev *sch)
829 {
830     SCSW *s = &sch->curr_status.scsw;
831     PMCW *p = &sch->curr_status.pmcw;
832     int ret;
833 
834     if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
835         ret = -ENODEV;
836         goto out;
837     }
838 
839     if (((s->ctrl & SCSW_CTRL_MASK_STCTL) == SCSW_STCTL_STATUS_PEND) ||
840         (s->ctrl & (SCSW_STCTL_PRIMARY |
841                     SCSW_STCTL_SECONDARY |
842                     SCSW_STCTL_ALERT))) {
843         ret = -EINPROGRESS;
844         goto out;
845     }
846 
847     if (s->ctrl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
848         ret = -EBUSY;
849         goto out;
850     }
851 
852     /* Trigger the halt function. */
853     s->ctrl |= SCSW_FCTL_HALT_FUNC;
854     s->ctrl &= ~SCSW_FCTL_START_FUNC;
855     if (((s->ctrl & SCSW_CTRL_MASK_ACTL) ==
856          (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) &&
857         ((s->ctrl & SCSW_CTRL_MASK_STCTL) == SCSW_STCTL_INTERMEDIATE)) {
858         s->ctrl &= ~SCSW_STCTL_STATUS_PEND;
859     }
860     s->ctrl |= SCSW_ACTL_HALT_PEND;
861 
862     do_subchannel_work(sch, NULL);
863     ret = 0;
864 
865 out:
866     return ret;
867 }
868 
869 static void css_update_chnmon(SubchDev *sch)
870 {
871     if (!(sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_MME)) {
872         /* Not active. */
873         return;
874     }
875     /* The counter is conveniently located at the beginning of the struct. */
876     if (sch->curr_status.pmcw.chars & PMCW_CHARS_MASK_MBFC) {
877         /* Format 1, per-subchannel area. */
878         uint32_t count;
879 
880         count = address_space_ldl(&address_space_memory,
881                                   sch->curr_status.mba,
882                                   MEMTXATTRS_UNSPECIFIED,
883                                   NULL);
884         count++;
885         address_space_stl(&address_space_memory, sch->curr_status.mba, count,
886                           MEMTXATTRS_UNSPECIFIED, NULL);
887     } else {
888         /* Format 0, global area. */
889         uint32_t offset;
890         uint16_t count;
891 
892         offset = sch->curr_status.pmcw.mbi << 5;
893         count = address_space_lduw(&address_space_memory,
894                                    channel_subsys.chnmon_area + offset,
895                                    MEMTXATTRS_UNSPECIFIED,
896                                    NULL);
897         count++;
898         address_space_stw(&address_space_memory,
899                           channel_subsys.chnmon_area + offset, count,
900                           MEMTXATTRS_UNSPECIFIED, NULL);
901     }
902 }
903 
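/*
 * Editor's note: illustrative helper, not part of css.c. For the format-0
 * (global area) case above every subchannel owns a 32-byte measurement block,
 * so e.g. chnmon_area 0x10000 with pmcw.mbi 3 puts the counter at
 * 0x10000 + (3 << 5) == 0x10060; format 1 uses the per-subchannel schib.mba
 * directly. The helper name is made up.
 */
static inline hwaddr example_chnmon_block(uint64_t chnmon_area, uint16_t mbi)
{
    return chnmon_area + ((uint32_t)mbi << 5);
}
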
904 int css_do_ssch(SubchDev *sch, ORB *orb)
905 {
906     SCSW *s = &sch->curr_status.scsw;
907     PMCW *p = &sch->curr_status.pmcw;
908     int ret;
909 
910     if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
911         ret = -ENODEV;
912         goto out;
913     }
914 
915     if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
916         ret = -EINPROGRESS;
917         goto out;
918     }
919 
920     if (s->ctrl & (SCSW_FCTL_START_FUNC |
921                    SCSW_FCTL_HALT_FUNC |
922                    SCSW_FCTL_CLEAR_FUNC)) {
923         ret = -EBUSY;
924         goto out;
925     }
926 
927     /* If monitoring is active, update counter. */
928     if (channel_subsys.chnmon_active) {
929         css_update_chnmon(sch);
930     }
931     sch->channel_prog = orb->cpa;
932     /* Trigger the start function. */
933     s->ctrl |= (SCSW_FCTL_START_FUNC | SCSW_ACTL_START_PEND);
934     s->flags &= ~SCSW_FLAGS_MASK_PNO;
935 
936     do_subchannel_work(sch, orb);
937     ret = 0;
938 
939 out:
940     return ret;
941 }
942 
943 static void copy_irb_to_guest(IRB *dest, const IRB *src, PMCW *pmcw,
944                               int *irb_len)
945 {
946     int i;
947     uint16_t stctl = src->scsw.ctrl & SCSW_CTRL_MASK_STCTL;
948     uint16_t actl = src->scsw.ctrl & SCSW_CTRL_MASK_ACTL;
949 
950     copy_scsw_to_guest(&dest->scsw, &src->scsw);
951 
952     for (i = 0; i < ARRAY_SIZE(dest->esw); i++) {
953         dest->esw[i] = cpu_to_be32(src->esw[i]);
954     }
955     for (i = 0; i < ARRAY_SIZE(dest->ecw); i++) {
956         dest->ecw[i] = cpu_to_be32(src->ecw[i]);
957     }
958     *irb_len = sizeof(*dest) - sizeof(dest->emw);
959 
960     /* extended measurements enabled? */
961     if ((src->scsw.flags & SCSW_FLAGS_MASK_ESWF) ||
962         !(pmcw->flags & PMCW_FLAGS_MASK_TF) ||
963         !(pmcw->chars & PMCW_CHARS_MASK_XMWME)) {
964         return;
965     }
966     /* extended measurements pending? */
967     if (!(stctl & SCSW_STCTL_STATUS_PEND)) {
968         return;
969     }
970     if ((stctl & SCSW_STCTL_PRIMARY) ||
971         (stctl == SCSW_STCTL_SECONDARY) ||
972         ((stctl & SCSW_STCTL_INTERMEDIATE) && (actl & SCSW_ACTL_SUSP))) {
973         for (i = 0; i < ARRAY_SIZE(dest->emw); i++) {
974             dest->emw[i] = cpu_to_be32(src->emw[i]);
975         }
976     }
977     *irb_len = sizeof(*dest);
978 }
979 
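/*
 * Editor's note: worked example, not part of css.c, assuming the IRB layout
 * of SCSW (12 bytes) plus esw[5], ecw[8] and emw[8] words: copy_irb_to_guest()
 * reports sizeof(IRB) - sizeof(emw) = 64 bytes when extended measurements do
 * not apply, and the full 96 bytes when the emw part is copied as well.
 */
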
980 int css_do_tsch_get_irb(SubchDev *sch, IRB *target_irb, int *irb_len)
981 {
982     SCSW *s = &sch->curr_status.scsw;
983     PMCW *p = &sch->curr_status.pmcw;
984     uint16_t stctl;
985     IRB irb;
986 
987     if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
988         return 3;
989     }
990 
991     stctl = s->ctrl & SCSW_CTRL_MASK_STCTL;
992 
993     /* Prepare the irb for the guest. */
994     memset(&irb, 0, sizeof(IRB));
995 
996     /* Copy scsw from current status. */
997     memcpy(&irb.scsw, s, sizeof(SCSW));
998     if (stctl & SCSW_STCTL_STATUS_PEND) {
999         if (s->cstat & (SCSW_CSTAT_DATA_CHECK |
1000                         SCSW_CSTAT_CHN_CTRL_CHK |
1001                         SCSW_CSTAT_INTF_CTRL_CHK)) {
1002             irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF;
1003             irb.esw[0] = 0x04804000;
1004         } else {
1005             irb.esw[0] = 0x00800000;
1006         }
1007         /* If a unit check is pending, copy sense data. */
1008         if ((s->dstat & SCSW_DSTAT_UNIT_CHECK) &&
1009             (p->chars & PMCW_CHARS_MASK_CSENSE)) {
1010             int i;
1011 
1012             irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF | SCSW_FLAGS_MASK_ECTL;
1013             /* Attention: sense_data is already BE! */
1014             memcpy(irb.ecw, sch->sense_data, sizeof(sch->sense_data));
1015             for (i = 0; i < ARRAY_SIZE(irb.ecw); i++) {
1016                 irb.ecw[i] = be32_to_cpu(irb.ecw[i]);
1017             }
1018             irb.esw[1] = 0x01000000 | (sizeof(sch->sense_data) << 8);
1019         }
1020     }
1021     /* Store the irb to the guest. */
1022     copy_irb_to_guest(target_irb, &irb, p, irb_len);
1023 
1024     return ((stctl & SCSW_STCTL_STATUS_PEND) == 0);
1025 }
1026 
1027 void css_do_tsch_update_subch(SubchDev *sch)
1028 {
1029     SCSW *s = &sch->curr_status.scsw;
1030     PMCW *p = &sch->curr_status.pmcw;
1031     uint16_t stctl;
1032     uint16_t fctl;
1033     uint16_t actl;
1034 
1035     stctl = s->ctrl & SCSW_CTRL_MASK_STCTL;
1036     fctl = s->ctrl & SCSW_CTRL_MASK_FCTL;
1037     actl = s->ctrl & SCSW_CTRL_MASK_ACTL;
1038 
1039     /* Clear conditions on subchannel, if applicable. */
1040     if (stctl & SCSW_STCTL_STATUS_PEND) {
1041         s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
1042         if ((stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) ||
1043             ((fctl & SCSW_FCTL_HALT_FUNC) &&
1044              (actl & SCSW_ACTL_SUSP))) {
1045             s->ctrl &= ~SCSW_CTRL_MASK_FCTL;
1046         }
1047         if (stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) {
1048             s->flags &= ~SCSW_FLAGS_MASK_PNO;
1049             s->ctrl &= ~(SCSW_ACTL_RESUME_PEND |
1050                          SCSW_ACTL_START_PEND |
1051                          SCSW_ACTL_HALT_PEND |
1052                          SCSW_ACTL_CLEAR_PEND |
1053                          SCSW_ACTL_SUSP);
1054         } else {
1055             if ((actl & SCSW_ACTL_SUSP) &&
1056                 (fctl & SCSW_FCTL_START_FUNC)) {
1057                 s->flags &= ~SCSW_FLAGS_MASK_PNO;
1058                 if (fctl & SCSW_FCTL_HALT_FUNC) {
1059                     s->ctrl &= ~(SCSW_ACTL_RESUME_PEND |
1060                                  SCSW_ACTL_START_PEND |
1061                                  SCSW_ACTL_HALT_PEND |
1062                                  SCSW_ACTL_CLEAR_PEND |
1063                                  SCSW_ACTL_SUSP);
1064                 } else {
1065                     s->ctrl &= ~SCSW_ACTL_RESUME_PEND;
1066                 }
1067             }
1068         }
1069         /* Clear pending sense data. */
1070         if (p->chars & PMCW_CHARS_MASK_CSENSE) {
1071             memset(sch->sense_data, 0, sizeof(sch->sense_data));
1072         }
1073     }
1074 }
1075 
1076 static void copy_crw_to_guest(CRW *dest, const CRW *src)
1077 {
1078     dest->flags = cpu_to_be16(src->flags);
1079     dest->rsid = cpu_to_be16(src->rsid);
1080 }
1081 
1082 int css_do_stcrw(CRW *crw)
1083 {
1084     CrwContainer *crw_cont;
1085     int ret;
1086 
1087     crw_cont = QTAILQ_FIRST(&channel_subsys.pending_crws);
1088     if (crw_cont) {
1089         QTAILQ_REMOVE(&channel_subsys.pending_crws, crw_cont, sibling);
1090         copy_crw_to_guest(crw, &crw_cont->crw);
1091         g_free(crw_cont);
1092         ret = 0;
1093     } else {
1094         /* List was empty, turn crw machine checks on again. */
1095         memset(crw, 0, sizeof(*crw));
1096         channel_subsys.do_crw_mchk = true;
1097         ret = 1;
1098     }
1099 
1100     return ret;
1101 }
1102 
1103 static void copy_crw_from_guest(CRW *dest, const CRW *src)
1104 {
1105     dest->flags = be16_to_cpu(src->flags);
1106     dest->rsid = be16_to_cpu(src->rsid);
1107 }
1108 
1109 void css_undo_stcrw(CRW *crw)
1110 {
1111     CrwContainer *crw_cont;
1112 
1113     crw_cont = g_try_malloc0(sizeof(CrwContainer));
1114     if (!crw_cont) {
1115         channel_subsys.crws_lost = true;
1116         return;
1117     }
1118     copy_crw_from_guest(&crw_cont->crw, crw);
1119 
1120     QTAILQ_INSERT_HEAD(&channel_subsys.pending_crws, crw_cont, sibling);
1121 }
1122 
1123 int css_do_tpi(IOIntCode *int_code, int lowcore)
1124 {
1125     /* No pending interrupts for !KVM. */
1126     return 0;
1127 }
1128 
1129 int css_collect_chp_desc(int m, uint8_t cssid, uint8_t f_chpid, uint8_t l_chpid,
1130                          int rfmt, void *buf)
1131 {
1132     int i, desc_size;
1133     uint32_t words[8];
1134     uint32_t chpid_type_word;
1135     CssImage *css;
1136 
1137     if (!m && !cssid) {
1138         css = channel_subsys.css[channel_subsys.default_cssid];
1139     } else {
1140         css = channel_subsys.css[cssid];
1141     }
1142     if (!css) {
1143         return 0;
1144     }
1145     desc_size = 0;
1146     for (i = f_chpid; i <= l_chpid; i++) {
1147         if (css->chpids[i].in_use) {
1148             chpid_type_word = 0x80000000 | (css->chpids[i].type << 8) | i;
1149             if (rfmt == 0) {
1150                 words[0] = cpu_to_be32(chpid_type_word);
1151                 words[1] = 0;
1152                 memcpy(buf + desc_size, words, 8);
1153                 desc_size += 8;
1154             } else if (rfmt == 1) {
1155                 words[0] = cpu_to_be32(chpid_type_word);
1156                 words[1] = 0;
1157                 words[2] = 0;
1158                 words[3] = 0;
1159                 words[4] = 0;
1160                 words[5] = 0;
1161                 words[6] = 0;
1162                 words[7] = 0;
1163                 memcpy(buf + desc_size, words, 32);
1164                 desc_size += 32;
1165             }
1166         }
1167     }
1168     return desc_size;
1169 }
1170 
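/*
 * Editor's note: worked example, not part of css.c. For a request covering
 * chpids 0x00..0x02 with all three in use, css_collect_chp_desc() stores
 * 3 * 8 = 24 bytes of descriptors for rfmt 0 and 3 * 32 = 96 bytes for
 * rfmt 1; chpids that are not in use contribute nothing.
 */
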
1171 void css_do_schm(uint8_t mbk, int update, int dct, uint64_t mbo)
1172 {
1173     /* dct is currently ignored (not really meaningful for our devices) */
1174     /* TODO: Don't ignore mbk. */
1175     if (update && !channel_subsys.chnmon_active) {
1176         /* Enable measuring. */
1177         channel_subsys.chnmon_area = mbo;
1178         channel_subsys.chnmon_active = true;
1179     }
1180     if (!update && channel_subsys.chnmon_active) {
1181         /* Disable measuring. */
1182         channel_subsys.chnmon_area = 0;
1183         channel_subsys.chnmon_active = false;
1184     }
1185 }
1186 
1187 int css_do_rsch(SubchDev *sch)
1188 {
1189     SCSW *s = &sch->curr_status.scsw;
1190     PMCW *p = &sch->curr_status.pmcw;
1191     int ret;
1192 
1193     if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
1194         ret = -ENODEV;
1195         goto out;
1196     }
1197 
1198     if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
1199         ret = -EINPROGRESS;
1200         goto out;
1201     }
1202 
1203     if (((s->ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) ||
1204         (s->ctrl & SCSW_ACTL_RESUME_PEND) ||
1205         (!(s->ctrl & SCSW_ACTL_SUSP))) {
1206         ret = -EINVAL;
1207         goto out;
1208     }
1209 
1210     /* If monitoring is active, update counter. */
1211     if (channel_subsys.chnmon_active) {
1212         css_update_chnmon(sch);
1213     }
1214 
1215     s->ctrl |= SCSW_ACTL_RESUME_PEND;
1216     do_subchannel_work(sch, NULL);
1217     ret = 0;
1218 
1219 out:
1220     return ret;
1221 }
1222 
1223 int css_do_rchp(uint8_t cssid, uint8_t chpid)
1224 {
1225     uint8_t real_cssid;
1226 
1227     if (cssid > channel_subsys.max_cssid) {
1228         return -EINVAL;
1229     }
1230     if (channel_subsys.max_cssid == 0) {
1231         real_cssid = channel_subsys.default_cssid;
1232     } else {
1233         real_cssid = cssid;
1234     }
1235     if (!channel_subsys.css[real_cssid]) {
1236         return -EINVAL;
1237     }
1238 
1239     if (!channel_subsys.css[real_cssid]->chpids[chpid].in_use) {
1240         return -ENODEV;
1241     }
1242 
1243     if (!channel_subsys.css[real_cssid]->chpids[chpid].is_virtual) {
1244         fprintf(stderr,
1245                 "rchp unsupported for non-virtual chpid %x.%02x!\n",
1246                 real_cssid, chpid);
1247         return -ENODEV;
1248     }
1249 
1250     /* We don't really use a channel path, so we're done here. */
1251     css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT,
1252                   channel_subsys.max_cssid > 0 ? 1 : 0, chpid);
1253     if (channel_subsys.max_cssid > 0) {
1254         css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT, 0, real_cssid << 8);
1255     }
1256     return 0;
1257 }
1258 
1259 bool css_schid_final(int m, uint8_t cssid, uint8_t ssid, uint16_t schid)
1260 {
1261     SubchSet *set;
1262     uint8_t real_cssid;
1263 
1264     real_cssid = (!m && (cssid == 0)) ? channel_subsys.default_cssid : cssid;
1265     if (real_cssid > MAX_CSSID || ssid > MAX_SSID ||
1266         !channel_subsys.css[real_cssid] ||
1267         !channel_subsys.css[real_cssid]->sch_set[ssid]) {
1268         return true;
1269     }
1270     set = channel_subsys.css[real_cssid]->sch_set[ssid];
1271     /* find_last_bit() expects the bitmap size in bits. */
1272     return schid > find_last_bit(set->schids_used, MAX_SCHID + 1);
1273 }
1274 
1275 static int css_add_virtual_chpid(uint8_t cssid, uint8_t chpid, uint8_t type)
1276 {
1277     CssImage *css;
1278 
1279     trace_css_chpid_add(cssid, chpid, type);
1280     if (cssid > MAX_CSSID) {
1281         return -EINVAL;
1282     }
1283     css = channel_subsys.css[cssid];
1284     if (!css) {
1285         return -EINVAL;
1286     }
1287     if (css->chpids[chpid].in_use) {
1288         return -EEXIST;
1289     }
1290     css->chpids[chpid].in_use = 1;
1291     css->chpids[chpid].type = type;
1292     css->chpids[chpid].is_virtual = 1;
1293 
1294     css_generate_chp_crws(cssid, chpid);
1295 
1296     return 0;
1297 }
1298 
1299 void css_sch_build_virtual_schib(SubchDev *sch, uint8_t chpid, uint8_t type)
1300 {
1301     PMCW *p = &sch->curr_status.pmcw;
1302     SCSW *s = &sch->curr_status.scsw;
1303     int i;
1304     CssImage *css = channel_subsys.css[sch->cssid];
1305 
1306     assert(css != NULL);
1307     memset(p, 0, sizeof(PMCW));
1308     p->flags |= PMCW_FLAGS_MASK_DNV;
1309     p->devno = sch->devno;
1310     /* single path */
1311     p->pim = 0x80;
1312     p->pom = 0xff;
1313     p->pam = 0x80;
1314     p->chpid[0] = chpid;
1315     if (!css->chpids[chpid].in_use) {
1316         css_add_virtual_chpid(sch->cssid, chpid, type);
1317     }
1318 
1319     memset(s, 0, sizeof(SCSW));
1320     sch->curr_status.mba = 0;
1321     for (i = 0; i < ARRAY_SIZE(sch->curr_status.mda); i++) {
1322         sch->curr_status.mda[i] = 0;
1323     }
1324 }
1325 
1326 SubchDev *css_find_subch(uint8_t m, uint8_t cssid, uint8_t ssid, uint16_t schid)
1327 {
1328     uint8_t real_cssid;
1329 
1330     real_cssid = (!m && (cssid == 0)) ? channel_subsys.default_cssid : cssid;
1331 
1332     if (!channel_subsys.css[real_cssid]) {
1333         return NULL;
1334     }
1335 
1336     if (!channel_subsys.css[real_cssid]->sch_set[ssid]) {
1337         return NULL;
1338     }
1339 
1340     return channel_subsys.css[real_cssid]->sch_set[ssid]->sch[schid];
1341 }
1342 
1343 /**
1344  * Return free device number in subchannel set.
1345  *
1346  * Return index of the first free device number in the subchannel set
1347  * identified by @p cssid and @p ssid, beginning the search at @p
1348  * start and wrapping around at MAX_DEVNO. Return a value exceeding
1349  * MAX_DEVNO if there are no free device numbers in the subchannel
1350  * set.
1351  */
1352 static uint32_t css_find_free_devno(uint8_t cssid, uint8_t ssid,
1353                                     uint16_t start)
1354 {
1355     uint32_t round;
1356 
1357     for (round = 0; round <= MAX_DEVNO; round++) {
1358         uint16_t devno = (start + round) % (MAX_DEVNO + 1);
1359 
1360         if (!css_devno_used(cssid, ssid, devno)) {
1361             return devno;
1362         }
1363     }
1364     return MAX_DEVNO + 1;
1365 }
1366 
1367 /**
1368  * Return first free subchannel (id) in subchannel set.
1369  *
1370  * Return index of the first free subchannel in the subchannel set
1371  * identified by @p cssid and @p ssid, if there is any. Return a value
1372  * exceeding MAX_SCHID if there are no free subchannels in the
1373  * subchannel set.
1374  */
1375 static uint32_t css_find_free_subch(uint8_t cssid, uint8_t ssid)
1376 {
1377     uint32_t schid;
1378 
1379     for (schid = 0; schid <= MAX_SCHID; schid++) {
1380         if (!css_find_subch(1, cssid, ssid, schid)) {
1381             return schid;
1382         }
1383     }
1384     return MAX_SCHID + 1;
1385 }
1386 
1387 /**
1388  * Return first free subchannel (id) in subchannel set for a device number
1389  *
1390  * Verify the device number @p devno is not used yet in the subchannel
1391  * set identified by @p cssid and @p ssid. Set @p schid to the index
1392  * of the first free subchannel in the subchannel set, if there is
1393  * any. Return true if everything succeeded and false otherwise.
1394  */
1395 static bool css_find_free_subch_for_devno(uint8_t cssid, uint8_t ssid,
1396                                           uint16_t devno, uint16_t *schid,
1397                                           Error **errp)
1398 {
1399     uint32_t free_schid;
1400 
1401     assert(schid);
1402     if (css_devno_used(cssid, ssid, devno)) {
1403         error_setg(errp, "Device %x.%x.%04x already exists",
1404                    cssid, ssid, devno);
1405         return false;
1406     }
1407     free_schid = css_find_free_subch(cssid, ssid);
1408     if (free_schid > MAX_SCHID) {
1409         error_setg(errp, "No free subchannel found for %x.%x.%04x",
1410                    cssid, ssid, devno);
1411         return false;
1412     }
1413     *schid = free_schid;
1414     return true;
1415 }
1416 
1417 /**
1418  * Return first free subchannel (id) and device number
1419  *
1420  * Locate the first free subchannel and first free device number in
1421  * any of the subchannel sets of the channel subsystem identified by
1422  * @p cssid. Return false if no free subchannel / device number could
1423  * be found. Otherwise set @p ssid, @p devno and @p schid to identify
1424  * the available subchannel and device number and return true.
1425  *
1426  * May modify @p ssid, @p devno and / or @p schid even if no free
1427  * subchannel / device number could be found.
1428  */
1429 static bool css_find_free_subch_and_devno(uint8_t cssid, uint8_t *ssid,
1430                                           uint16_t *devno, uint16_t *schid,
1431                                           Error **errp)
1432 {
1433     uint32_t free_schid, free_devno;
1434 
1435     assert(ssid && devno && schid);
1436     for (*ssid = 0; *ssid <= MAX_SSID; (*ssid)++) {
1437         free_schid = css_find_free_subch(cssid, *ssid);
1438         if (free_schid > MAX_SCHID) {
1439             continue;
1440         }
1441         free_devno = css_find_free_devno(cssid, *ssid, free_schid);
1442         if (free_devno > MAX_DEVNO) {
1443             continue;
1444         }
1445         *schid = free_schid;
1446         *devno = free_devno;
1447         return true;
1448     }
1449     error_setg(errp, "Virtual channel subsystem is full!");
1450     return false;
1451 }
1452 
1453 bool css_subch_visible(SubchDev *sch)
1454 {
1455     if (sch->ssid > channel_subsys.max_ssid) {
1456         return false;
1457     }
1458 
1459     if (sch->cssid != channel_subsys.default_cssid) {
1460         return (channel_subsys.max_cssid > 0);
1461     }
1462 
1463     return true;
1464 }
1465 
1466 bool css_present(uint8_t cssid)
1467 {
1468     return (channel_subsys.css[cssid] != NULL);
1469 }
1470 
1471 bool css_devno_used(uint8_t cssid, uint8_t ssid, uint16_t devno)
1472 {
1473     if (!channel_subsys.css[cssid]) {
1474         return false;
1475     }
1476     if (!channel_subsys.css[cssid]->sch_set[ssid]) {
1477         return false;
1478     }
1479 
1480     return !!test_bit(devno,
1481                       channel_subsys.css[cssid]->sch_set[ssid]->devnos_used);
1482 }
1483 
1484 void css_subch_assign(uint8_t cssid, uint8_t ssid, uint16_t schid,
1485                       uint16_t devno, SubchDev *sch)
1486 {
1487     CssImage *css;
1488     SubchSet *s_set;
1489 
1490     trace_css_assign_subch(sch ? "assign" : "deassign", cssid, ssid, schid,
1491                            devno);
1492     if (!channel_subsys.css[cssid]) {
1493         fprintf(stderr,
1494                 "Suspicious call to %s (%x.%x.%04x) for non-existing css!\n",
1495                 __func__, cssid, ssid, schid);
1496         return;
1497     }
1498     css = channel_subsys.css[cssid];
1499 
1500     if (!css->sch_set[ssid]) {
1501         css->sch_set[ssid] = g_malloc0(sizeof(SubchSet));
1502     }
1503     s_set = css->sch_set[ssid];
1504 
1505     s_set->sch[schid] = sch;
1506     if (sch) {
1507         set_bit(schid, s_set->schids_used);
1508         set_bit(devno, s_set->devnos_used);
1509     } else {
1510         clear_bit(schid, s_set->schids_used);
1511         clear_bit(devno, s_set->devnos_used);
1512     }
1513 }
1514 
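/*
 * Editor's note: illustrative sketch, not part of css.c. Rough shape of how a
 * virtual device could obtain a free address in the default css and register
 * its SubchDev with the helpers above; the function name is made up.
 */
static bool example_register_virtual_sch(SubchDev *sch, Error **errp)
{
    uint8_t cssid = channel_subsys.default_cssid;
    uint8_t ssid;
    uint16_t devno, schid;

    if (!css_find_free_subch_and_devno(cssid, &ssid, &devno, &schid, errp)) {
        return false;
    }
    sch->cssid = cssid;
    sch->ssid = ssid;
    sch->devno = devno;
    sch->schid = schid;
    css_subch_assign(cssid, ssid, schid, devno, sch); /* marks schid/devno used */
    return true;
}
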
1515 void css_queue_crw(uint8_t rsc, uint8_t erc, int chain, uint16_t rsid)
1516 {
1517     CrwContainer *crw_cont;
1518 
1519     trace_css_crw(rsc, erc, rsid, chain ? "(chained)" : "");
1520     /* TODO: Maybe use a static crw pool? */
1521     crw_cont = g_try_malloc0(sizeof(CrwContainer));
1522     if (!crw_cont) {
1523         channel_subsys.crws_lost = true;
1524         return;
1525     }
1526     crw_cont->crw.flags = (rsc << 8) | erc;
1527     if (chain) {
1528         crw_cont->crw.flags |= CRW_FLAGS_MASK_C;
1529     }
1530     crw_cont->crw.rsid = rsid;
1531     if (channel_subsys.crws_lost) {
1532         crw_cont->crw.flags |= CRW_FLAGS_MASK_R;
1533         channel_subsys.crws_lost = false;
1534     }
1535 
1536     QTAILQ_INSERT_TAIL(&channel_subsys.pending_crws, crw_cont, sibling);
1537 
1538     if (channel_subsys.do_crw_mchk) {
1539         channel_subsys.do_crw_mchk = false;
1540         /* Inject crw pending machine check. */
1541         s390_crw_mchk();
1542     }
1543 }
1544 
1545 void css_generate_sch_crws(uint8_t cssid, uint8_t ssid, uint16_t schid,
1546                            int hotplugged, int add)
1547 {
1548     uint8_t guest_cssid;
1549     bool chain_crw;
1550 
1551     if (add && !hotplugged) {
1552         return;
1553     }
1554     if (channel_subsys.max_cssid == 0) {
1555         /* Default cssid shows up as 0. */
1556         guest_cssid = (cssid == channel_subsys.default_cssid) ? 0 : cssid;
1557     } else {
1558         /* Show real cssid to the guest. */
1559         guest_cssid = cssid;
1560     }
1561     /*
1562      * Only notify for higher subchannel sets/channel subsystems if the
1563      * guest has enabled it.
1564      */
1565     if ((ssid > channel_subsys.max_ssid) ||
1566         (guest_cssid > channel_subsys.max_cssid) ||
1567         ((channel_subsys.max_cssid == 0) &&
1568          (cssid != channel_subsys.default_cssid))) {
1569         return;
1570     }
1571     chain_crw = (channel_subsys.max_ssid > 0) ||
1572             (channel_subsys.max_cssid > 0);
1573     css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, chain_crw ? 1 : 0, schid);
1574     if (chain_crw) {
1575         css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, 0,
1576                       (guest_cssid << 8) | (ssid << 4));
1577     }
1578     /* CRW_ERC_IPI --> clear pending interrupts */
1579     css_clear_io_interrupt(css_do_build_subchannel_id(cssid, ssid), schid);
1580 }
1581 
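/*
 * Editor's note: worked example, not part of css.c. With mss enabled by the
 * guest (max_ssid > 0) in the default css, hotplugging a device on subchannel
 * 0.1.0003 queues a chained pair of subchannel CRWs in the function above:
 * the first carries rsid 0x0003 with the chaining flag set, the second
 * carries rsid == (guest_cssid << 8) | (ssid << 4) == 0x0010.
 */
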
1582 void css_generate_chp_crws(uint8_t cssid, uint8_t chpid)
1583 {
1584     /* TODO */
1585 }
1586 
1587 void css_generate_css_crws(uint8_t cssid)
1588 {
1589     if (!channel_subsys.sei_pending) {
1590         css_queue_crw(CRW_RSC_CSS, 0, 0, cssid);
1591     }
1592     channel_subsys.sei_pending = true;
1593 }
1594 
1595 void css_clear_sei_pending(void)
1596 {
1597     channel_subsys.sei_pending = false;
1598 }
1599 
1600 int css_enable_mcsse(void)
1601 {
1602     trace_css_enable_facility("mcsse");
1603     channel_subsys.max_cssid = MAX_CSSID;
1604     return 0;
1605 }
1606 
1607 int css_enable_mss(void)
1608 {
1609     trace_css_enable_facility("mss");
1610     channel_subsys.max_ssid = MAX_SSID;
1611     return 0;
1612 }
1613 
1614 void subch_device_save(SubchDev *s, QEMUFile *f)
1615 {
1616     int i;
1617 
1618     qemu_put_byte(f, s->cssid);
1619     qemu_put_byte(f, s->ssid);
1620     qemu_put_be16(f, s->schid);
1621     qemu_put_be16(f, s->devno);
1622     qemu_put_byte(f, s->thinint_active);
1623     /* SCHIB */
1624     /*     PMCW */
1625     qemu_put_be32(f, s->curr_status.pmcw.intparm);
1626     qemu_put_be16(f, s->curr_status.pmcw.flags);
1627     qemu_put_be16(f, s->curr_status.pmcw.devno);
1628     qemu_put_byte(f, s->curr_status.pmcw.lpm);
1629     qemu_put_byte(f, s->curr_status.pmcw.pnom);
1630     qemu_put_byte(f, s->curr_status.pmcw.lpum);
1631     qemu_put_byte(f, s->curr_status.pmcw.pim);
1632     qemu_put_be16(f, s->curr_status.pmcw.mbi);
1633     qemu_put_byte(f, s->curr_status.pmcw.pom);
1634     qemu_put_byte(f, s->curr_status.pmcw.pam);
1635     qemu_put_buffer(f, s->curr_status.pmcw.chpid, 8);
1636     qemu_put_be32(f, s->curr_status.pmcw.chars);
1637     /*     SCSW */
1638     qemu_put_be16(f, s->curr_status.scsw.flags);
1639     qemu_put_be16(f, s->curr_status.scsw.ctrl);
1640     qemu_put_be32(f, s->curr_status.scsw.cpa);
1641     qemu_put_byte(f, s->curr_status.scsw.dstat);
1642     qemu_put_byte(f, s->curr_status.scsw.cstat);
1643     qemu_put_be16(f, s->curr_status.scsw.count);
1644     qemu_put_be64(f, s->curr_status.mba);
1645     qemu_put_buffer(f, s->curr_status.mda, 4);
1646     /* end SCHIB */
1647     qemu_put_buffer(f, s->sense_data, 32);
1648     qemu_put_be64(f, s->channel_prog);
1649     /* last cmd */
1650     qemu_put_byte(f, s->last_cmd.cmd_code);
1651     qemu_put_byte(f, s->last_cmd.flags);
1652     qemu_put_be16(f, s->last_cmd.count);
1653     qemu_put_be32(f, s->last_cmd.cda);
1654     qemu_put_byte(f, s->last_cmd_valid);
1655     qemu_put_byte(f, s->id.reserved);
1656     qemu_put_be16(f, s->id.cu_type);
1657     qemu_put_byte(f, s->id.cu_model);
1658     qemu_put_be16(f, s->id.dev_type);
1659     qemu_put_byte(f, s->id.dev_model);
1660     qemu_put_byte(f, s->id.unused);
1661     for (i = 0; i < ARRAY_SIZE(s->id.ciw); i++) {
1662         qemu_put_byte(f, s->id.ciw[i].type);
1663         qemu_put_byte(f, s->id.ciw[i].command);
1664         qemu_put_be16(f, s->id.ciw[i].count);
1665     }
1666     qemu_put_byte(f, s->ccw_fmt_1);
1667     qemu_put_byte(f, s->ccw_no_data_cnt);
1668 }
1669 
1670 int subch_device_load(SubchDev *s, QEMUFile *f)
1671 {
1672     int i;
1673 
1674     s->cssid = qemu_get_byte(f);
1675     s->ssid = qemu_get_byte(f);
1676     s->schid = qemu_get_be16(f);
1677     s->devno = qemu_get_be16(f);
1678     s->thinint_active = qemu_get_byte(f);
1679     /* SCHIB */
1680     /*     PMCW */
1681     s->curr_status.pmcw.intparm = qemu_get_be32(f);
1682     s->curr_status.pmcw.flags = qemu_get_be16(f);
1683     s->curr_status.pmcw.devno = qemu_get_be16(f);
1684     s->curr_status.pmcw.lpm = qemu_get_byte(f);
1685     s->curr_status.pmcw.pnom  = qemu_get_byte(f);
1686     s->curr_status.pmcw.lpum = qemu_get_byte(f);
1687     s->curr_status.pmcw.pim = qemu_get_byte(f);
1688     s->curr_status.pmcw.mbi = qemu_get_be16(f);
1689     s->curr_status.pmcw.pom = qemu_get_byte(f);
1690     s->curr_status.pmcw.pam = qemu_get_byte(f);
1691     qemu_get_buffer(f, s->curr_status.pmcw.chpid, 8);
1692     s->curr_status.pmcw.chars = qemu_get_be32(f);
1693     /*     SCSW */
1694     s->curr_status.scsw.flags = qemu_get_be16(f);
1695     s->curr_status.scsw.ctrl = qemu_get_be16(f);
1696     s->curr_status.scsw.cpa = qemu_get_be32(f);
1697     s->curr_status.scsw.dstat = qemu_get_byte(f);
1698     s->curr_status.scsw.cstat = qemu_get_byte(f);
1699     s->curr_status.scsw.count = qemu_get_be16(f);
1700     s->curr_status.mba = qemu_get_be64(f);
1701     qemu_get_buffer(f, s->curr_status.mda, 4);
1702     /* end SCHIB */
1703     qemu_get_buffer(f, s->sense_data, 32);
1704     s->channel_prog = qemu_get_be64(f);
1705     /* last cmd */
1706     s->last_cmd.cmd_code = qemu_get_byte(f);
1707     s->last_cmd.flags = qemu_get_byte(f);
1708     s->last_cmd.count = qemu_get_be16(f);
1709     s->last_cmd.cda = qemu_get_be32(f);
1710     s->last_cmd_valid = qemu_get_byte(f);
1711     s->id.reserved = qemu_get_byte(f);
1712     s->id.cu_type = qemu_get_be16(f);
1713     s->id.cu_model = qemu_get_byte(f);
1714     s->id.dev_type = qemu_get_be16(f);
1715     s->id.dev_model = qemu_get_byte(f);
1716     s->id.unused = qemu_get_byte(f);
1717     for (i = 0; i < ARRAY_SIZE(s->id.ciw); i++) {
1718         s->id.ciw[i].type = qemu_get_byte(f);
1719         s->id.ciw[i].command = qemu_get_byte(f);
1720         s->id.ciw[i].count = qemu_get_be16(f);
1721     }
1722     s->ccw_fmt_1 = qemu_get_byte(f);
1723     s->ccw_no_data_cnt = qemu_get_byte(f);
1724     /*
1725      * Hack alert. We don't migrate the channel subsystem status (there is
1726      * no device for it), but we need to find out whether the guest enabled
1727      * mss/mcss-e. If this subchannel is enabled, the guest was certainly
1728      * able to access it, so adjust max_ssid/max_cssid according to its
1729      * ssid/cssid. This is not watertight, but better than nothing.
1730      */
1731     if (s->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA) {
1732         if (s->ssid) {
1733             channel_subsys.max_ssid = MAX_SSID;
1734         }
1735         if (s->cssid != channel_subsys.default_cssid) {
1736             channel_subsys.max_cssid = MAX_CSSID;
1737         }
1738     }
1739     return 0;
1740 }
1741 
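/*
 * Reset a single subchannel: invoke the device's disable callback if the
 * subchannel was enabled, then put the PMCW/SCSW back into their initial
 * (disabled, device-number-valid) state and drop any pending channel
 * program state.
 */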
1742 void css_reset_sch(SubchDev *sch)
1743 {
1744     PMCW *p = &sch->curr_status.pmcw;
1745 
1746     if ((p->flags & PMCW_FLAGS_MASK_ENA) != 0 && sch->disable_cb) {
1747         sch->disable_cb(sch);
1748     }
1749 
1750     p->intparm = 0;
1751     p->flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
1752                   PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
1753                   PMCW_FLAGS_MASK_MP | PMCW_FLAGS_MASK_TF);
1754     p->flags |= PMCW_FLAGS_MASK_DNV;
1755     p->devno = sch->devno;
1756     p->pim = 0x80;
1757     p->lpm = p->pim;
1758     p->pnom = 0;
1759     p->lpum = 0;
1760     p->mbi = 0;
1761     p->pom = 0xff;
1762     p->pam = 0x80;
1763     p->chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_XMWME |
1764                   PMCW_CHARS_MASK_CSENSE);
1765 
1766     memset(&sch->curr_status.scsw, 0, sizeof(sch->curr_status.scsw));
1767     sch->curr_status.mba = 0;
1768 
1769     sch->channel_prog = 0x0;
1770     sch->last_cmd_valid = false;
1771     sch->thinint_active = false;
1772 }
1773 
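/*
 * Reset the common channel subsystem state: stop channel monitoring,
 * discard all pending CRWs and fall back to the default max_cssid/max_ssid.
 */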
1774 void css_reset(void)
1775 {
1776     CrwContainer *crw_cont;
1777 
1778     /* Clean up monitoring. */
1779     channel_subsys.chnmon_active = false;
1780     channel_subsys.chnmon_area = 0;
1781 
1782     /* Clear pending CRWs. */
1783     while ((crw_cont = QTAILQ_FIRST(&channel_subsys.pending_crws))) {
1784         QTAILQ_REMOVE(&channel_subsys.pending_crws, crw_cont, sibling);
1785         g_free(crw_cont);
1786     }
1787     channel_subsys.sei_pending = false;
1788     channel_subsys.do_crw_mchk = true;
1789     channel_subsys.crws_lost = false;
1790 
1791     /* Reset maximum ids. */
1792     channel_subsys.max_cssid = 0;
1793     channel_subsys.max_ssid = 0;
1794 }
1795 
1796 static void get_css_devid(Object *obj, Visitor *v, const char *name,
1797                           void *opaque, Error **errp)
1798 {
1799     DeviceState *dev = DEVICE(obj);
1800     Property *prop = opaque;
1801     CssDevId *dev_id = qdev_get_prop_ptr(dev, prop);
1802     char buffer[] = "xx.x.xxxx";
1803     char *p = buffer;
1804     int r;
1805 
1806     if (dev_id->valid) {
1807 
1808         r = snprintf(buffer, sizeof(buffer), "%02x.%1x.%04x", dev_id->cssid,
1809                      dev_id->ssid, dev_id->devid);
1810         assert(r == sizeof(buffer) - 1);
1811 
1812         /* drop leading zero */
1813         if (dev_id->cssid <= 0xf) {
1814             p++;
1815         }
1816     } else {
1817         snprintf(buffer, sizeof(buffer), "<unset>");
1818     }
1819 
1820     visit_type_str(v, name, &p, errp);
1821 }
1822 
1823 /*
1824  * Parse <cssid>.<ssid>.<devid> and reject out-of-range cssid/ssid values.
1825  */
1826 static void set_css_devid(Object *obj, Visitor *v, const char *name,
1827                           void *opaque, Error **errp)
1828 {
1829     DeviceState *dev = DEVICE(obj);
1830     Property *prop = opaque;
1831     CssDevId *dev_id = qdev_get_prop_ptr(dev, prop);
1832     Error *local_err = NULL;
1833     char *str;
1834     int num, n1, n2;
1835     unsigned int cssid, ssid, devid;
1836 
1837     if (dev->realized) {
1838         qdev_prop_set_after_realize(dev, name, errp);
1839         return;
1840     }
1841 
1842     visit_type_str(v, name, &str, &local_err);
1843     if (local_err) {
1844         error_propagate(errp, local_err);
1845         return;
1846     }
1847 
1848     num = sscanf(str, "%2x.%1x%n.%4x%n", &cssid, &ssid, &n1, &devid, &n2);
1849     if (num != 3 || (n2 - n1) != 5 || strlen(str) != n2) {
1850         error_set_from_qdev_prop_error(errp, EINVAL, dev, prop, str);
1851         goto out;
1852     }
1853     if ((cssid > MAX_CSSID) || (ssid > MAX_SSID)) {
1854         error_setg(errp, "Invalid cssid or ssid: cssid %x, ssid %x",
1855                    cssid, ssid);
1856         goto out;
1857     }
1858 
1859     dev_id->cssid = cssid;
1860     dev_id->ssid = ssid;
1861     dev_id->devid = devid;
1862     dev_id->valid = true;
1863 
1864 out:
1865     g_free(str);
1866 }
1867 
1868 PropertyInfo css_devid_propinfo = {
1869     .name = "str",
1870     .description = "Identifier of an I/O device in the channel "
1871                    "subsystem, example: fe.1.23ab",
1872     .get = get_css_devid,
1873     .set = set_css_devid,
1874 };
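/*
 * A minimal usage sketch (assuming a virtio-ccw device that exposes this
 * property under the name "devno"; device and drive names are illustrative):
 *
 *   -device virtio-blk-ccw,drive=disk0,devno=fe.1.23ab
 *
 * With the parser above, "fe.1.23ab" yields cssid 0xfe, ssid 0x1 and
 * devid 0x23ab, while a string such as "fe.123ab" (missing separator) is
 * rejected with EINVAL.
 */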
1875 
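/*
 * Create a subchannel for a virtual device. A caller-provided bus id must
 * use VIRTUAL_CSSID and only needs a free subchannel number; without one,
 * a free devno/subchannel pair in the virtual css is picked automatically.
 */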
1876 SubchDev *css_create_virtual_sch(CssDevId bus_id, Error **errp)
1877 {
1878     uint16_t schid = 0;
1879     SubchDev *sch;
1880 
1881     if (bus_id.valid) {
1882         /* Enforce use of virtual cssid. */
1883         if (bus_id.cssid != VIRTUAL_CSSID) {
1884             error_setg(errp, "cssid %hhx not valid for virtual devices",
1885                        bus_id.cssid);
1886             return NULL;
1887         }
1888         if (!css_find_free_subch_for_devno(bus_id.cssid, bus_id.ssid,
1889                                            bus_id.devid, &schid, errp)) {
1890             return NULL;
1891         }
1892     } else {
1893         bus_id.cssid = VIRTUAL_CSSID;
1894         if (!css_find_free_subch_and_devno(bus_id.cssid, &bus_id.ssid,
1895                                            &bus_id.devid, &schid, errp)) {
1896             return NULL;
1897         }
1898     }
1899 
1900     sch = g_malloc0(sizeof(*sch));
1901     sch->cssid = bus_id.cssid;
1902     sch->ssid = bus_id.ssid;
1903     sch->devno = bus_id.devid;
1904     sch->schid = schid;
1905     css_subch_assign(sch->cssid, sch->ssid, schid, sch->devno, sch);
1906     return sch;
1907 }
1908
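/*
 * A minimal caller sketch (hypothetical; "dev->devno" and the surrounding
 * realize code are illustrative, only css_create_virtual_sch() is real):
 *
 *   SubchDev *sch = css_create_virtual_sch(dev->devno, errp);
 *   if (!sch) {
 *       return;    // errp has been set by css_create_virtual_sch()
 *   }
 *   // sch is already registered with a free schid via css_subch_assign()
 */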