xref: /qemu/hw/s390x/css.c (revision ac06724a)
1 /*
2  * Channel subsystem base support.
3  *
4  * Copyright 2012 IBM Corp.
5  * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6  *
7  * This work is licensed under the terms of the GNU GPL, version 2 or (at
8  * your option) any later version. See the COPYING file in the top-level
9  * directory.
10  */
11 
12 #include "qemu/osdep.h"
13 #include "qapi/error.h"
14 #include "qapi/visitor.h"
15 #include "hw/qdev.h"
16 #include "qemu/error-report.h"
17 #include "qemu/bitops.h"
18 #include "exec/address-spaces.h"
19 #include "cpu.h"
20 #include "hw/s390x/ioinst.h"
21 #include "hw/s390x/css.h"
22 #include "trace.h"
23 #include "hw/s390x/s390_flic.h"
24 
/* A channel report word (CRW) queued for later delivery to the guest. */
typedef struct CrwContainer {
    CRW crw;                            /* the pending channel report word */
    QTAILQ_ENTRY(CrwContainer) sibling; /* link in channel_subsys.pending_crws */
} CrwContainer;
29 
/* Per-channel-path (CHPID) state. */
typedef struct ChpInfo {
    uint8_t in_use;     /* non-zero while this chpid is in use */
    uint8_t type;       /* channel path type */
    uint8_t is_virtual; /* set for purely virtual channel paths */
} ChpInfo;
35 
/* One subchannel set: subchannels indexed by schid plus usage bitmaps. */
typedef struct SubchSet {
    SubchDev *sch[MAX_SCHID + 1];                            /* indexed by schid */
    unsigned long schids_used[BITS_TO_LONGS(MAX_SCHID + 1)]; /* allocated schids */
    unsigned long devnos_used[BITS_TO_LONGS(MAX_SCHID + 1)]; /* allocated devnos */
} SubchSet;
41 
/* A channel subsystem image: its subchannel sets and channel paths. */
typedef struct CssImage {
    SubchSet *sch_set[MAX_SSID + 1]; /* subchannel sets, indexed by ssid */
    ChpInfo chpids[MAX_CHPID + 1];   /* channel path info, indexed by chpid */
} CssImage;
46 
/* An I/O adapter registered with the flic for one (type, isc) pair. */
typedef struct IoAdapter {
    uint32_t id;  /* adapter id handed to the flic: (type << 3) | isc */
    uint8_t type; /* CssIoAdapterType this adapter belongs to */
    uint8_t isc;  /* interruption subclass */
} IoAdapter;
52 
/* Global state of the emulated channel subsystem. */
typedef struct ChannelSubSys {
    QTAILQ_HEAD(, CrwContainer) pending_crws; /* CRWs awaiting delivery */
    bool sei_pending;
    bool do_crw_mchk;
    bool crws_lost;
    uint8_t max_cssid;      /* > 0 selects the extended subchannel-id format */
    uint8_t max_ssid;
    bool chnmon_active;     /* channel monitoring enabled */
    uint64_t chnmon_area;   /* guest address of the format-0 measurement area */
    CssImage *css[MAX_CSSID + 1];
    uint8_t default_cssid;
    IoAdapter *io_adapters[CSS_IO_ADAPTER_TYPE_NUMS][MAX_ISC + 1];
    QTAILQ_HEAD(, IndAddr) indicator_addresses; /* tracked guest indicators */
} ChannelSubSys;
67 
68 static ChannelSubSys channel_subsys = {
69     .pending_crws = QTAILQ_HEAD_INITIALIZER(channel_subsys.pending_crws),
70     .do_crw_mchk = true,
71     .sei_pending = false,
72     .do_crw_mchk = true,
73     .crws_lost = false,
74     .chnmon_active = false,
75     .indicator_addresses =
76         QTAILQ_HEAD_INITIALIZER(channel_subsys.indicator_addresses),
77 };
78 
79 IndAddr *get_indicator(hwaddr ind_addr, int len)
80 {
81     IndAddr *indicator;
82 
83     QTAILQ_FOREACH(indicator, &channel_subsys.indicator_addresses, sibling) {
84         if (indicator->addr == ind_addr) {
85             indicator->refcnt++;
86             return indicator;
87         }
88     }
89     indicator = g_new0(IndAddr, 1);
90     indicator->addr = ind_addr;
91     indicator->len = len;
92     indicator->refcnt = 1;
93     QTAILQ_INSERT_TAIL(&channel_subsys.indicator_addresses,
94                        indicator, sibling);
95     return indicator;
96 }
97 
98 static int s390_io_adapter_map(AdapterInfo *adapter, uint64_t map_addr,
99                                bool do_map)
100 {
101     S390FLICState *fs = s390_get_flic();
102     S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
103 
104     return fsc->io_adapter_map(fs, adapter->adapter_id, map_addr, do_map);
105 }
106 
107 void release_indicator(AdapterInfo *adapter, IndAddr *indicator)
108 {
109     assert(indicator->refcnt > 0);
110     indicator->refcnt--;
111     if (indicator->refcnt > 0) {
112         return;
113     }
114     QTAILQ_REMOVE(&channel_subsys.indicator_addresses, indicator, sibling);
115     if (indicator->map) {
116         s390_io_adapter_map(adapter, indicator->map, false);
117     }
118     g_free(indicator);
119 }
120 
121 int map_indicator(AdapterInfo *adapter, IndAddr *indicator)
122 {
123     int ret;
124 
125     if (indicator->map) {
126         return 0; /* already mapped is not an error */
127     }
128     indicator->map = indicator->addr;
129     ret = s390_io_adapter_map(adapter, indicator->map, true);
130     if ((ret != 0) && (ret != -ENOSYS)) {
131         goto out_err;
132     }
133     return 0;
134 
135 out_err:
136     indicator->map = 0;
137     return ret;
138 }
139 
140 int css_create_css_image(uint8_t cssid, bool default_image)
141 {
142     trace_css_new_image(cssid, default_image ? "(default)" : "");
143     /* 255 is reserved */
144     if (cssid == 255) {
145         return -EINVAL;
146     }
147     if (channel_subsys.css[cssid]) {
148         return -EBUSY;
149     }
150     channel_subsys.css[cssid] = g_malloc0(sizeof(CssImage));
151     if (default_image) {
152         channel_subsys.default_cssid = cssid;
153     }
154     return 0;
155 }
156 
157 uint32_t css_get_adapter_id(CssIoAdapterType type, uint8_t isc)
158 {
159     if (type >= CSS_IO_ADAPTER_TYPE_NUMS || isc > MAX_ISC ||
160         !channel_subsys.io_adapters[type][isc]) {
161         return -1;
162     }
163 
164     return channel_subsys.io_adapters[type][isc]->id;
165 }
166 
167 /**
168  * css_register_io_adapters: Register I/O adapters per ISC during init
169  *
170  * @swap: an indication if byte swap is needed.
171  * @maskable: an indication if the adapter is subject to the mask operation.
172  * @errp: location to store error information.
173  */
174 void css_register_io_adapters(CssIoAdapterType type, bool swap, bool maskable,
175                               Error **errp)
176 {
177     uint32_t id;
178     int ret, isc;
179     IoAdapter *adapter;
180     S390FLICState *fs = s390_get_flic();
181     S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
182 
183     /*
184      * Disallow multiple registrations for the same device type.
185      * Report an error if registering for an already registered type.
186      */
187     if (channel_subsys.io_adapters[type][0]) {
188         error_setg(errp, "Adapters for type %d already registered", type);
189     }
190 
191     for (isc = 0; isc <= MAX_ISC; isc++) {
192         id = (type << 3) | isc;
193         ret = fsc->register_io_adapter(fs, id, isc, swap, maskable);
194         if (ret == 0) {
195             adapter = g_new0(IoAdapter, 1);
196             adapter->id = id;
197             adapter->isc = isc;
198             adapter->type = type;
199             channel_subsys.io_adapters[type][isc] = adapter;
200         } else {
201             error_setg_errno(errp, -ret, "Unexpected error %d when "
202                              "registering adapter %d", ret, id);
203             break;
204         }
205     }
206 
207     /*
208      * No need to free registered adapters in kvm: kvm will clean up
209      * when the machine goes away.
210      */
211     if (ret) {
212         for (isc--; isc >= 0; isc--) {
213             g_free(channel_subsys.io_adapters[type][isc]);
214             channel_subsys.io_adapters[type][isc] = NULL;
215         }
216     }
217 
218 }
219 
220 static void css_clear_io_interrupt(uint16_t subchannel_id,
221                                    uint16_t subchannel_nr)
222 {
223     Error *err = NULL;
224     static bool no_clear_irq;
225     S390FLICState *fs = s390_get_flic();
226     S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
227     int r;
228 
229     if (unlikely(no_clear_irq)) {
230         return;
231     }
232     r = fsc->clear_io_irq(fs, subchannel_id, subchannel_nr);
233     switch (r) {
234     case 0:
235         break;
236     case -ENOSYS:
237         no_clear_irq = true;
238         /*
239         * Ignore unavailability, as the user can't do anything
240         * about it anyway.
241         */
242         break;
243     default:
244         error_setg_errno(&err, -r, "unexpected error condition");
245         error_propagate(&error_abort, err);
246     }
247 }
248 
249 static inline uint16_t css_do_build_subchannel_id(uint8_t cssid, uint8_t ssid)
250 {
251     if (channel_subsys.max_cssid > 0) {
252         return (cssid << 8) | (1 << 3) | (ssid << 1) | 1;
253     }
254     return (ssid << 1) | 1;
255 }
256 
/* Build the guest-visible subchannel id for @sch from its cssid/ssid. */
uint16_t css_build_subchannel_id(SubchDev *sch)
{
    return css_do_build_subchannel_id(sch->cssid, sch->ssid);
}
261 
262 void css_inject_io_interrupt(SubchDev *sch)
263 {
264     uint8_t isc = (sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ISC) >> 11;
265 
266     trace_css_io_interrupt(sch->cssid, sch->ssid, sch->schid,
267                            sch->curr_status.pmcw.intparm, isc, "");
268     s390_io_interrupt(css_build_subchannel_id(sch),
269                       sch->schid,
270                       sch->curr_status.pmcw.intparm,
271                       isc << 27);
272 }
273 
/*
 * Make @sch status pending with alert status and inject an I/O
 * interrupt - but only if it is not already status pending.  Used for
 * unsolicited notifications.
 */
void css_conditional_io_interrupt(SubchDev *sch)
{
    /*
     * If the subchannel is not currently status pending, make it pending
     * with alert status.
     */
    if (!(sch->curr_status.scsw.ctrl & SCSW_STCTL_STATUS_PEND)) {
        /* Extract the interruption subclass from the PMCW flags. */
        uint8_t isc = (sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ISC) >> 11;

        trace_css_io_interrupt(sch->cssid, sch->ssid, sch->schid,
                               sch->curr_status.pmcw.intparm, isc,
                               "(unsolicited)");
        /* Replace the status-control bits with alert + status pending. */
        sch->curr_status.scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
        sch->curr_status.scsw.ctrl |=
            SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
        /* Inject an I/O interrupt. */
        s390_io_interrupt(css_build_subchannel_id(sch),
                          sch->schid,
                          sch->curr_status.pmcw.intparm,
                          isc << 27);
    }
}
296 
297 void css_adapter_interrupt(uint8_t isc)
298 {
299     uint32_t io_int_word = (isc << 27) | IO_INT_WORD_AI;
300 
301     trace_css_adapter_interrupt(isc);
302     s390_io_interrupt(0, 0, 0, io_int_word);
303 }
304 
/*
 * Perform the clear function on @sch: reset path-management fields,
 * drop channel program state and make the subchannel status pending,
 * as if the clear signal had been issued and accepted.
 */
static void sch_handle_clear_func(SubchDev *sch)
{
    PMCW *p = &sch->curr_status.pmcw;
    SCSW *s = &sch->curr_status.scsw;
    int path;

    /* Path management: In our simple css, we always choose the only path. */
    path = 0x80;

    /* Reset values prior to 'issuing the clear signal'. */
    p->lpum = 0;
    p->pom = 0xff;
    s->flags &= ~SCSW_FLAGS_MASK_PNO;

    /* We always 'attempt to issue the clear signal', and we always succeed. */
    sch->channel_prog = 0x0;
    sch->last_cmd_valid = false;
    s->ctrl &= ~SCSW_ACTL_CLEAR_PEND;
    s->ctrl |= SCSW_STCTL_STATUS_PEND;

    /* No device or channel status; record the path that was used. */
    s->dstat = 0;
    s->cstat = 0;
    p->lpum = path;

}
330 
/*
 * Perform the halt function on @sch: drop channel program state and
 * make the subchannel status pending, synthesizing the ending status
 * ('issuing the halt signal' always succeeds in our simple css).
 */
static void sch_handle_halt_func(SubchDev *sch)
{

    PMCW *p = &sch->curr_status.pmcw;
    SCSW *s = &sch->curr_status.scsw;
    hwaddr curr_ccw = sch->channel_prog;
    int path;

    /* Path management: In our simple css, we always choose the only path. */
    path = 0x80;

    /* We always 'attempt to issue the halt signal', and we always succeed. */
    sch->channel_prog = 0x0;
    sch->last_cmd_valid = false;
    s->ctrl &= ~SCSW_ACTL_HALT_PEND;
    s->ctrl |= SCSW_STCTL_STATUS_PEND;

    /*
     * Report device end if the subchannel/device was active, or if
     * neither a start was pending nor the program suspended.
     */
    if ((s->ctrl & (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) ||
        !((s->ctrl & SCSW_ACTL_START_PEND) ||
          (s->ctrl & SCSW_ACTL_SUSP))) {
        s->dstat = SCSW_DSTAT_DEVICE_END;
    }
    /* For an interrupted program, point the cpa past the current CCW. */
    if ((s->ctrl & (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) ||
        (s->ctrl & SCSW_ACTL_SUSP)) {
        s->cpa = curr_ccw + 8;
    }
    s->cstat = 0;
    p->lpum = path;

}
361 
362 static void copy_sense_id_to_guest(SenseId *dest, SenseId *src)
363 {
364     int i;
365 
366     dest->reserved = src->reserved;
367     dest->cu_type = cpu_to_be16(src->cu_type);
368     dest->cu_model = src->cu_model;
369     dest->dev_type = cpu_to_be16(src->dev_type);
370     dest->dev_model = src->dev_model;
371     dest->unused = src->unused;
372     for (i = 0; i < ARRAY_SIZE(dest->ciw); i++) {
373         dest->ciw[i].type = src->ciw[i].type;
374         dest->ciw[i].command = src->ciw[i].command;
375         dest->ciw[i].count = cpu_to_be16(src->ciw[i].count);
376     }
377 }
378 
/*
 * Read one CCW from guest memory at @addr and return it normalized to
 * format-1, with multi-byte fields converted from guest big-endian
 * order.  For format-0 CCWs, the 24-bit data address is reassembled
 * from its split fields; TICs keep only the command code and address.
 */
static CCW1 copy_ccw_from_guest(hwaddr addr, bool fmt1)
{
    CCW0 tmp0;
    CCW1 tmp1;
    CCW1 ret;

    if (fmt1) {
        cpu_physical_memory_read(addr, &tmp1, sizeof(tmp1));
        ret.cmd_code = tmp1.cmd_code;
        ret.flags = tmp1.flags;
        ret.count = be16_to_cpu(tmp1.count);
        ret.cda = be32_to_cpu(tmp1.cda);
    } else {
        cpu_physical_memory_read(addr, &tmp0, sizeof(tmp0));
        if ((tmp0.cmd_code & 0x0f) == CCW_CMD_TIC) {
            /* TICs carry no flags or count. */
            ret.cmd_code = CCW_CMD_TIC;
            ret.flags = 0;
            ret.count = 0;
        } else {
            ret.cmd_code = tmp0.cmd_code;
            ret.flags = tmp0.flags;
            ret.count = be16_to_cpu(tmp0.count);
        }
        /* Reassemble the address: cda0 holds the high byte. */
        ret.cda = be16_to_cpu(tmp0.cda1) | (tmp0.cda0 << 16);
    }
    return ret;
}
406 
/*
 * Interpret one CCW of the channel program at @ccw_addr.
 *
 * Returns:
 *   0            - command completed, program ends here
 *   -EAGAIN      - continue with the CCW now in sch->channel_prog
 *                  (TIC or command chaining)
 *   -EINPROGRESS - the CCW requested (allowed) suspension
 *   -EINVAL      - program-check condition
 *   -EIO         - no CCW address
 *   -ENOSYS or device-specific codes from sch->ccw_cb otherwise
 */
static int css_interpret_ccw(SubchDev *sch, hwaddr ccw_addr,
                             bool suspend_allowed)
{
    int ret;
    bool check_len;
    int len;
    CCW1 ccw;

    if (!ccw_addr) {
        return -EIO;
    }

    /* Translate everything to format-1 ccws - the information is the same. */
    ccw = copy_ccw_from_guest(ccw_addr, sch->ccw_fmt_1);

    /* Check for invalid command codes. */
    if ((ccw.cmd_code & 0x0f) == 0) {
        return -EINVAL;
    }
    if (((ccw.cmd_code & 0x0f) == CCW_CMD_TIC) &&
        ((ccw.cmd_code & 0xf0) != 0)) {
        return -EINVAL;
    }
    /* Format-0 CCWs other than TIC must have a non-zero count. */
    if (!sch->ccw_fmt_1 && (ccw.count == 0) &&
        (ccw.cmd_code != CCW_CMD_TIC)) {
        return -EINVAL;
    }

    if (ccw.flags & CCW_FLAG_SUSPEND) {
        return suspend_allowed ? -EINPROGRESS : -EINVAL;
    }

    /* Length checking applies unless SLI is set without data chaining. */
    check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));

    if (!ccw.cda) {
        /* Cap the number of data-less CCWs per channel program at 255. */
        if (sch->ccw_no_data_cnt == 255) {
            return -EINVAL;
        }
        sch->ccw_no_data_cnt++;
    }

    /* Look at the command. */
    switch (ccw.cmd_code) {
    case CCW_CMD_NOOP:
        /* Nothing to do. */
        ret = 0;
        break;
    case CCW_CMD_BASIC_SENSE:
        if (check_len) {
            if (ccw.count != sizeof(sch->sense_data)) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, sizeof(sch->sense_data));
        cpu_physical_memory_write(ccw.cda, sch->sense_data, len);
        sch->curr_status.scsw.count = ccw.count - len;
        /* Sense data is consumed by reading it. */
        memset(sch->sense_data, 0, sizeof(sch->sense_data));
        ret = 0;
        break;
    case CCW_CMD_SENSE_ID:
    {
        SenseId sense_id;

        copy_sense_id_to_guest(&sense_id, &sch->id);
        /* Sense ID information is device specific. */
        if (check_len) {
            if (ccw.count != sizeof(sense_id)) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, sizeof(sense_id));
        /*
         * Only indicate 0xff in the first sense byte if we actually
         * have enough place to store at least bytes 0-3.
         */
        if (len >= 4) {
            sense_id.reserved = 0xff;
        } else {
            sense_id.reserved = 0;
        }
        cpu_physical_memory_write(ccw.cda, &sense_id, len);
        sch->curr_status.scsw.count = ccw.count - len;
        ret = 0;
        break;
    }
    case CCW_CMD_TIC:
        /* A TIC must not follow a TIC, and must not chain. */
        if (sch->last_cmd_valid && (sch->last_cmd.cmd_code == CCW_CMD_TIC)) {
            ret = -EINVAL;
            break;
        }
        if (ccw.flags & (CCW_FLAG_CC | CCW_FLAG_DC)) {
            ret = -EINVAL;
            break;
        }
        /* Continue the program at the transfer target. */
        sch->channel_prog = ccw.cda;
        ret = -EAGAIN;
        break;
    default:
        if (sch->ccw_cb) {
            /* Handle device specific commands. */
            ret = sch->ccw_cb(sch, ccw);
        } else {
            ret = -ENOSYS;
        }
        break;
    }
    sch->last_cmd = ccw;
    sch->last_cmd_valid = true;
    if (ret == 0) {
        if (ccw.flags & CCW_FLAG_CC) {
            /* Command chaining: advance to the next 8-byte CCW. */
            sch->channel_prog += 8;
            ret = -EAGAIN;
        }
    }

    return ret;
}
526 
/*
 * Execute the start function for a virtual subchannel: either begin the
 * channel program described by @orb (ssch) or resume a previously
 * suspended program (rsch, @orb is NULL), interpreting CCWs until the
 * program ends, then record the resulting status in the SCSW.
 */
static void sch_handle_start_func_virtual(SubchDev *sch, ORB *orb)
{

    PMCW *p = &sch->curr_status.pmcw;
    SCSW *s = &sch->curr_status.scsw;
    int path;
    int ret;
    bool suspend_allowed;

    /* Path management: In our simple css, we always choose the only path. */
    path = 0x80;

    if (!(s->ctrl & SCSW_ACTL_SUSP)) {
        /* Start Function triggered via ssch, i.e. we have an ORB */
        s->cstat = 0;
        s->dstat = 0;
        /* Look at the orb and try to execute the channel program. */
        assert(orb != NULL); /* resume does not pass an orb */
        p->intparm = orb->intparm;
        if (!(orb->lpm & path)) {
            /* Generate a deferred cc 3 condition. */
            s->flags |= SCSW_FLAGS_MASK_CC;
            s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
            s->ctrl |= (SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND);
            return;
        }
        sch->ccw_fmt_1 = !!(orb->ctrl0 & ORB_CTRL0_MASK_FMT);
        s->flags |= (sch->ccw_fmt_1) ? SCSW_FLAGS_MASK_FMT : 0;
        sch->ccw_no_data_cnt = 0;
        suspend_allowed = !!(orb->ctrl0 & ORB_CTRL0_MASK_SPND);
    } else {
        /* Start Function resumed via rsch, i.e. we don't have an
         * ORB */
        s->ctrl &= ~(SCSW_ACTL_SUSP | SCSW_ACTL_RESUME_PEND);
        /* The channel program had been suspended before. */
        suspend_allowed = true;
    }
    sch->last_cmd_valid = false;
    /* Interpret CCWs until the program terminates or suspends. */
    do {
        ret = css_interpret_ccw(sch, sch->channel_prog, suspend_allowed);
        switch (ret) {
        case -EAGAIN:
            /* ccw chain, continue processing */
            break;
        case 0:
            /* success */
            s->ctrl &= ~SCSW_ACTL_START_PEND;
            s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
            s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                    SCSW_STCTL_STATUS_PEND;
            s->dstat = SCSW_DSTAT_CHANNEL_END | SCSW_DSTAT_DEVICE_END;
            s->cpa = sch->channel_prog + 8;
            break;
        case -EIO:
            /* I/O errors, status depends on specific devices */
            break;
        case -ENOSYS:
            /* unsupported command, generate unit check (command reject) */
            s->ctrl &= ~SCSW_ACTL_START_PEND;
            s->dstat = SCSW_DSTAT_UNIT_CHECK;
            /* Set sense bit 0 in ecw0. */
            sch->sense_data[0] = 0x80;
            s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
            s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                    SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            s->cpa = sch->channel_prog + 8;
            break;
        case -EFAULT:
            /* memory problem, generate channel data check */
            s->ctrl &= ~SCSW_ACTL_START_PEND;
            s->cstat = SCSW_CSTAT_DATA_CHECK;
            s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
            s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                    SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            s->cpa = sch->channel_prog + 8;
            break;
        case -EBUSY:
            /* subchannel busy, generate deferred cc 1 */
            s->flags &= ~SCSW_FLAGS_MASK_CC;
            s->flags |= (1 << 8);
            s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
            s->ctrl |= SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            break;
        case -EINPROGRESS:
            /* channel program has been suspended */
            s->ctrl &= ~SCSW_ACTL_START_PEND;
            s->ctrl |= SCSW_ACTL_SUSP;
            break;
        default:
            /* error, generate channel program check */
            s->ctrl &= ~SCSW_ACTL_START_PEND;
            s->cstat = SCSW_CSTAT_PROG_CHECK;
            s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
            s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                    SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            s->cpa = sch->channel_prog + 8;
            break;
        }
    } while (ret == -EAGAIN);

}
628 
/*
 * Forward the start function for a passthrough subchannel to the host
 * via s390_ccw_cmd_request().  Returns 0 or a negative error code the
 * caller maps to a condition code / program check.
 */
static int sch_handle_start_func_passthrough(SubchDev *sch, ORB *orb)
{

    PMCW *p = &sch->curr_status.pmcw;
    SCSW *s = &sch->curr_status.scsw;
    int ret;

    if (!(s->ctrl & SCSW_ACTL_SUSP)) {
        assert(orb != NULL);
        p->intparm = orb->intparm;
    }

    /*
     * Only support prefetch enable mode.
     * Only support 64bit addressing idal.
     *
     * NOTE(review): on the resume path (SCSW_ACTL_SUSP set) @orb may be
     * NULL, yet it is dereferenced unconditionally below - confirm that
     * passthrough devices can never reach this point via rsch.
     */
    if (!(orb->ctrl0 & ORB_CTRL0_MASK_PFCH) ||
        !(orb->ctrl0 & ORB_CTRL0_MASK_C64)) {
        return -EINVAL;
    }

    ret = s390_ccw_cmd_request(orb, s, sch->driver_data);
    switch (ret) {
    /* Currently we don't update control block and just return the cc code. */
    case 0:
        break;
    case -EBUSY:
        break;
    case -ENODEV:
        break;
    case -EACCES:
        /* Let's reflect an inaccessible host device by cc 3. */
        ret = -ENODEV;
        break;
    default:
       /*
        * All other return codes will trigger a program check,
        * or set cc to 1.
        */
       break;
    };

    return ret;
}
673 
674 /*
675  * On real machines, this would run asynchronously to the main vcpus.
676  * We might want to make some parts of the ssch handling (interpreting
677  * read/writes) asynchronous later on if we start supporting more than
678  * our current very simple devices.
679  */
680 int do_subchannel_work_virtual(SubchDev *sch, ORB *orb)
681 {
682 
683     SCSW *s = &sch->curr_status.scsw;
684 
685     if (s->ctrl & SCSW_FCTL_CLEAR_FUNC) {
686         sch_handle_clear_func(sch);
687     } else if (s->ctrl & SCSW_FCTL_HALT_FUNC) {
688         sch_handle_halt_func(sch);
689     } else if (s->ctrl & SCSW_FCTL_START_FUNC) {
690         /* Triggered by both ssch and rsch. */
691         sch_handle_start_func_virtual(sch, orb);
692     } else {
693         /* Cannot happen. */
694         return 0;
695     }
696     css_inject_io_interrupt(sch);
697     return 0;
698 }
699 
700 int do_subchannel_work_passthrough(SubchDev *sch, ORB *orb)
701 {
702     int ret;
703     SCSW *s = &sch->curr_status.scsw;
704 
705     if (s->ctrl & SCSW_FCTL_CLEAR_FUNC) {
706         /* TODO: Clear handling */
707         sch_handle_clear_func(sch);
708         ret = 0;
709     } else if (s->ctrl & SCSW_FCTL_HALT_FUNC) {
710         /* TODO: Halt handling */
711         sch_handle_halt_func(sch);
712         ret = 0;
713     } else if (s->ctrl & SCSW_FCTL_START_FUNC) {
714         ret = sch_handle_start_func_passthrough(sch, orb);
715     } else {
716         /* Cannot happen. */
717         return -ENODEV;
718     }
719 
720     return ret;
721 }
722 
723 static int do_subchannel_work(SubchDev *sch, ORB *orb)
724 {
725     if (sch->do_subchannel_work) {
726         return sch->do_subchannel_work(sch, orb);
727     } else {
728         return -EINVAL;
729     }
730 }
731 
732 static void copy_pmcw_to_guest(PMCW *dest, const PMCW *src)
733 {
734     int i;
735 
736     dest->intparm = cpu_to_be32(src->intparm);
737     dest->flags = cpu_to_be16(src->flags);
738     dest->devno = cpu_to_be16(src->devno);
739     dest->lpm = src->lpm;
740     dest->pnom = src->pnom;
741     dest->lpum = src->lpum;
742     dest->pim = src->pim;
743     dest->mbi = cpu_to_be16(src->mbi);
744     dest->pom = src->pom;
745     dest->pam = src->pam;
746     for (i = 0; i < ARRAY_SIZE(dest->chpid); i++) {
747         dest->chpid[i] = src->chpid[i];
748     }
749     dest->chars = cpu_to_be32(src->chars);
750 }
751 
752 void copy_scsw_to_guest(SCSW *dest, const SCSW *src)
753 {
754     dest->flags = cpu_to_be16(src->flags);
755     dest->ctrl = cpu_to_be16(src->ctrl);
756     dest->cpa = cpu_to_be32(src->cpa);
757     dest->dstat = src->dstat;
758     dest->cstat = src->cstat;
759     dest->count = cpu_to_be16(src->count);
760 }
761 
762 static void copy_schib_to_guest(SCHIB *dest, const SCHIB *src)
763 {
764     int i;
765 
766     copy_pmcw_to_guest(&dest->pmcw, &src->pmcw);
767     copy_scsw_to_guest(&dest->scsw, &src->scsw);
768     dest->mba = cpu_to_be64(src->mba);
769     for (i = 0; i < ARRAY_SIZE(dest->mda); i++) {
770         dest->mda[i] = src->mda[i];
771     }
772 }
773 
/* Store subchannel: copy @sch's current SCHIB to @schib in guest order. */
int css_do_stsch(SubchDev *sch, SCHIB *schib)
{
    /* Use current status. */
    copy_schib_to_guest(schib, &sch->curr_status);
    return 0;
}
780 
781 static void copy_pmcw_from_guest(PMCW *dest, const PMCW *src)
782 {
783     int i;
784 
785     dest->intparm = be32_to_cpu(src->intparm);
786     dest->flags = be16_to_cpu(src->flags);
787     dest->devno = be16_to_cpu(src->devno);
788     dest->lpm = src->lpm;
789     dest->pnom = src->pnom;
790     dest->lpum = src->lpum;
791     dest->pim = src->pim;
792     dest->mbi = be16_to_cpu(src->mbi);
793     dest->pom = src->pom;
794     dest->pam = src->pam;
795     for (i = 0; i < ARRAY_SIZE(dest->chpid); i++) {
796         dest->chpid[i] = src->chpid[i];
797     }
798     dest->chars = be32_to_cpu(src->chars);
799 }
800 
801 static void copy_scsw_from_guest(SCSW *dest, const SCSW *src)
802 {
803     dest->flags = be16_to_cpu(src->flags);
804     dest->ctrl = be16_to_cpu(src->ctrl);
805     dest->cpa = be32_to_cpu(src->cpa);
806     dest->dstat = src->dstat;
807     dest->cstat = src->cstat;
808     dest->count = be16_to_cpu(src->count);
809 }
810 
811 static void copy_schib_from_guest(SCHIB *dest, const SCHIB *src)
812 {
813     int i;
814 
815     copy_pmcw_from_guest(&dest->pmcw, &src->pmcw);
816     copy_scsw_from_guest(&dest->scsw, &src->scsw);
817     dest->mba = be64_to_cpu(src->mba);
818     for (i = 0; i < ARRAY_SIZE(dest->mda); i++) {
819         dest->mda[i] = src->mda[i];
820     }
821 }
822 
/*
 * Modify subchannel (msch): update the program-modifiable PMCW fields
 * of @sch from the guest-supplied SCHIB.
 *
 * Returns 0 on success (including the no-op case where the device
 * number is not valid), -EINPROGRESS while status is pending and
 * -EBUSY while a start/halt/clear function is in progress.
 */
int css_do_msch(SubchDev *sch, const SCHIB *orig_schib)
{
    SCSW *s = &sch->curr_status.scsw;
    PMCW *p = &sch->curr_status.pmcw;
    uint16_t oldflags;
    int ret;
    SCHIB schib;

    /* Without a valid device number there is nothing to modify. */
    if (!(sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_DNV)) {
        ret = 0;
        goto out;
    }

    if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
        ret = -EINPROGRESS;
        goto out;
    }

    if (s->ctrl &
        (SCSW_FCTL_START_FUNC|SCSW_FCTL_HALT_FUNC|SCSW_FCTL_CLEAR_FUNC)) {
        ret = -EBUSY;
        goto out;
    }

    copy_schib_from_guest(&schib, orig_schib);
    /* Only update the program-modifiable fields. */
    p->intparm = schib.pmcw.intparm;
    oldflags = p->flags;
    /* Replace only the ISC/ENA/LM/MME/MP flag bits; keep the rest. */
    p->flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
                  PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
                  PMCW_FLAGS_MASK_MP);
    p->flags |= schib.pmcw.flags &
            (PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
             PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
             PMCW_FLAGS_MASK_MP);
    p->lpm = schib.pmcw.lpm;
    p->mbi = schib.pmcw.mbi;
    p->pom = schib.pmcw.pom;
    /* Likewise, only the MBFC and CSENSE characteristic bits. */
    p->chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE);
    p->chars |= schib.pmcw.chars &
            (PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE);
    sch->curr_status.mba = schib.mba;

    /* Has the channel been disabled? */
    if (sch->disable_cb && (oldflags & PMCW_FLAGS_MASK_ENA) != 0
        && (p->flags & PMCW_FLAGS_MASK_ENA) == 0) {
        sch->disable_cb(sch);
    }

    ret = 0;

out:
    return ret;
}
877 
/*
 * Cancel subchannel (xsch): cancel a start function that has not yet
 * begun actual I/O.  Returns 0 on success, -ENODEV if the subchannel
 * is not valid and enabled, -EINPROGRESS if its state does not allow
 * cancelling, and -EBUSY while status is pending.
 */
int css_do_xsch(SubchDev *sch)
{
    SCSW *s = &sch->curr_status.scsw;
    PMCW *p = &sch->curr_status.pmcw;
    int ret;

    /* Both DNV and ENA must be set. */
    if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
        ret = -ENODEV;
        goto out;
    }

    /*
     * Cancelable only when exactly the start function is set, it is
     * still resume/start pending or suspended, and the subchannel is
     * not yet active.
     */
    if (!(s->ctrl & SCSW_CTRL_MASK_FCTL) ||
        ((s->ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) ||
        (!(s->ctrl &
           (SCSW_ACTL_RESUME_PEND | SCSW_ACTL_START_PEND | SCSW_ACTL_SUSP))) ||
        (s->ctrl & SCSW_ACTL_SUBCH_ACTIVE)) {
        ret = -EINPROGRESS;
        goto out;
    }

    if (s->ctrl & SCSW_CTRL_MASK_STCTL) {
        ret = -EBUSY;
        goto out;
    }

    /* Cancel the current operation. */
    s->ctrl &= ~(SCSW_FCTL_START_FUNC |
                 SCSW_ACTL_RESUME_PEND |
                 SCSW_ACTL_START_PEND |
                 SCSW_ACTL_SUSP);
    sch->channel_prog = 0x0;
    sch->last_cmd_valid = false;
    s->dstat = 0;
    s->cstat = 0;
    ret = 0;

out:
    return ret;
}
917 
918 int css_do_csch(SubchDev *sch)
919 {
920     SCSW *s = &sch->curr_status.scsw;
921     PMCW *p = &sch->curr_status.pmcw;
922     int ret;
923 
924     if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
925         ret = -ENODEV;
926         goto out;
927     }
928 
929     /* Trigger the clear function. */
930     s->ctrl &= ~(SCSW_CTRL_MASK_FCTL | SCSW_CTRL_MASK_ACTL);
931     s->ctrl |= SCSW_FCTL_CLEAR_FUNC | SCSW_ACTL_CLEAR_PEND;
932 
933     do_subchannel_work(sch, NULL);
934     ret = 0;
935 
936 out:
937     return ret;
938 }
939 
/*
 * Halt subchannel (hsch): trigger the halt function.  Returns 0 on
 * success, -ENODEV if the subchannel is not valid and enabled,
 * -EINPROGRESS if only status pending or alert/primary/secondary
 * status is set, and -EBUSY while a halt or clear function is already
 * in progress.
 */
int css_do_hsch(SubchDev *sch)
{
    SCSW *s = &sch->curr_status.scsw;
    PMCW *p = &sch->curr_status.pmcw;
    int ret;

    /* Both DNV and ENA must be set. */
    if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
        ret = -ENODEV;
        goto out;
    }

    if (((s->ctrl & SCSW_CTRL_MASK_STCTL) == SCSW_STCTL_STATUS_PEND) ||
        (s->ctrl & (SCSW_STCTL_PRIMARY |
                    SCSW_STCTL_SECONDARY |
                    SCSW_STCTL_ALERT))) {
        ret = -EINPROGRESS;
        goto out;
    }

    if (s->ctrl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
        ret = -EBUSY;
        goto out;
    }

    /* Trigger the halt function. */
    s->ctrl |= SCSW_FCTL_HALT_FUNC;
    s->ctrl &= ~SCSW_FCTL_START_FUNC;
    /*
     * Drop a pure intermediate status-pending indication when both
     * subchannel and device are still active.
     */
    if (((s->ctrl & SCSW_CTRL_MASK_ACTL) ==
         (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) &&
        ((s->ctrl & SCSW_CTRL_MASK_STCTL) == SCSW_STCTL_INTERMEDIATE)) {
        s->ctrl &= ~SCSW_STCTL_STATUS_PEND;
    }
    s->ctrl |= SCSW_ACTL_HALT_PEND;

    do_subchannel_work(sch, NULL);
    ret = 0;

out:
    return ret;
}
980 
/*
 * Bump the measurement counter for @sch if measurement mode is enabled.
 * Format 1 (PMCW_CHARS_MASK_MBFC set) uses a 32-bit counter in the
 * per-subchannel area at curr_status.mba; format 0 uses a 16-bit
 * counter in the global area at chnmon_area + (mbi << 5).
 */
static void css_update_chnmon(SubchDev *sch)
{
    if (!(sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_MME)) {
        /* Not active. */
        return;
    }
    /* The counter is conveniently located at the beginning of the struct. */
    if (sch->curr_status.pmcw.chars & PMCW_CHARS_MASK_MBFC) {
        /* Format 1, per-subchannel area. */
        uint32_t count;

        count = address_space_ldl(&address_space_memory,
                                  sch->curr_status.mba,
                                  MEMTXATTRS_UNSPECIFIED,
                                  NULL);
        count++;
        address_space_stl(&address_space_memory, sch->curr_status.mba, count,
                          MEMTXATTRS_UNSPECIFIED, NULL);
    } else {
        /* Format 0, global area. */
        uint32_t offset;
        uint16_t count;

        offset = sch->curr_status.pmcw.mbi << 5;
        count = address_space_lduw(&address_space_memory,
                                   channel_subsys.chnmon_area + offset,
                                   MEMTXATTRS_UNSPECIFIED,
                                   NULL);
        count++;
        address_space_stw(&address_space_memory,
                          channel_subsys.chnmon_area + offset, count,
                          MEMTXATTRS_UNSPECIFIED, NULL);
    }
}
1015 
/*
 * Perform the start subchannel (SSCH) function for @sch using ORB @orb.
 *
 * Returns -ENODEV if the subchannel is not device-number valid and
 * enabled, -EINPROGRESS if status is pending, -EBUSY if another
 * function is already in progress; otherwise the result of
 * do_subchannel_work().
 */
int css_do_ssch(SubchDev *sch, ORB *orb)
{
    SCSW *s = &sch->curr_status.scsw;
    PMCW *p = &sch->curr_status.pmcw;
    int ret;

    if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
        ret = -ENODEV;
        goto out;
    }

    if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
        ret = -EINPROGRESS;
        goto out;
    }

    if (s->ctrl & (SCSW_FCTL_START_FUNC |
                   SCSW_FCTL_HALT_FUNC |
                   SCSW_FCTL_CLEAR_FUNC)) {
        ret = -EBUSY;
        goto out;
    }

    /* If monitoring is active, update counter. */
    if (channel_subsys.chnmon_active) {
        css_update_chnmon(sch);
    }
    /* Channel program address comes from the ORB. */
    sch->channel_prog = orb->cpa;
    /* Trigger the start function. */
    s->ctrl |= (SCSW_FCTL_START_FUNC | SCSW_ACTL_START_PEND);
    s->flags &= ~SCSW_FLAGS_MASK_PNO;

    ret = do_subchannel_work(sch, orb);

out:
    return ret;
}
1053 
/*
 * Copy an IRB into guest (big-endian) format at @dest and report the
 * number of bytes the guest should receive in *@irb_len. The emw part
 * is only included when extended measurements are enabled and the
 * pending status qualifies.
 */
static void copy_irb_to_guest(IRB *dest, const IRB *src, PMCW *pmcw,
                              int *irb_len)
{
    int i;
    uint16_t stctl = src->scsw.ctrl & SCSW_CTRL_MASK_STCTL;
    uint16_t actl = src->scsw.ctrl & SCSW_CTRL_MASK_ACTL;

    copy_scsw_to_guest(&dest->scsw, &src->scsw);

    for (i = 0; i < ARRAY_SIZE(dest->esw); i++) {
        dest->esw[i] = cpu_to_be32(src->esw[i]);
    }
    for (i = 0; i < ARRAY_SIZE(dest->ecw); i++) {
        dest->ecw[i] = cpu_to_be32(src->ecw[i]);
    }
    /* Default length: everything except the emw. */
    *irb_len = sizeof(*dest) - sizeof(dest->emw);

    /* extended measurements enabled? */
    if ((src->scsw.flags & SCSW_FLAGS_MASK_ESWF) ||
        !(pmcw->flags & PMCW_FLAGS_MASK_TF) ||
        !(pmcw->chars & PMCW_CHARS_MASK_XMWME)) {
        return;
    }
    /* extended measurements pending? */
    if (!(stctl & SCSW_STCTL_STATUS_PEND)) {
        return;
    }
    /*
     * NOTE(review): PRIMARY is tested with '&' but SECONDARY with '=='
     * (i.e. secondary status alone) — confirm this asymmetry is intended.
     */
    if ((stctl & SCSW_STCTL_PRIMARY) ||
        (stctl == SCSW_STCTL_SECONDARY) ||
        ((stctl & SCSW_STCTL_INTERMEDIATE) && (actl & SCSW_ACTL_SUSP))) {
        for (i = 0; i < ARRAY_SIZE(dest->emw); i++) {
            dest->emw[i] = cpu_to_be32(src->emw[i]);
        }
    }
    /* Extended measurements included: full irb length. */
    *irb_len = sizeof(*dest);
}
1090 
/*
 * Build the IRB for a test subchannel (TSCH) on @sch and store it in
 * guest format at @target_irb, with its length in *@irb_len.
 *
 * Returns 3 (cc 3) if the subchannel is not device-number valid and
 * enabled, 1 if no status was pending, 0 if status was pending.
 */
int css_do_tsch_get_irb(SubchDev *sch, IRB *target_irb, int *irb_len)
{
    SCSW *s = &sch->curr_status.scsw;
    PMCW *p = &sch->curr_status.pmcw;
    uint16_t stctl;
    IRB irb;

    if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
        return 3;
    }

    stctl = s->ctrl & SCSW_CTRL_MASK_STCTL;

    /* Prepare the irb for the guest. */
    memset(&irb, 0, sizeof(IRB));

    /* Copy scsw from current status. */
    memcpy(&irb.scsw, s, sizeof(SCSW));
    if (stctl & SCSW_STCTL_STATUS_PEND) {
        /*
         * NOTE(review): the ESW word 0 constants below are presumed to
         * encode the architected ESW flags for the checked/unchecked
         * cases — confirm against the Principles of Operation.
         */
        if (s->cstat & (SCSW_CSTAT_DATA_CHECK |
                        SCSW_CSTAT_CHN_CTRL_CHK |
                        SCSW_CSTAT_INTF_CTRL_CHK)) {
            irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF;
            irb.esw[0] = 0x04804000;
        } else {
            irb.esw[0] = 0x00800000;
        }
        /* If a unit check is pending, copy sense data. */
        if ((s->dstat & SCSW_DSTAT_UNIT_CHECK) &&
            (p->chars & PMCW_CHARS_MASK_CSENSE)) {
            int i;

            irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF | SCSW_FLAGS_MASK_ECTL;
            /* Attention: sense_data is already BE! */
            memcpy(irb.ecw, sch->sense_data, sizeof(sch->sense_data));
            for (i = 0; i < ARRAY_SIZE(irb.ecw); i++) {
                irb.ecw[i] = be32_to_cpu(irb.ecw[i]);
            }
            irb.esw[1] = 0x01000000 | (sizeof(sch->sense_data) << 8);
        }
    }
    /* Store the irb to the guest. */
    copy_irb_to_guest(target_irb, &irb, p, irb_len);

    return ((stctl & SCSW_STCTL_STATUS_PEND) == 0);
}
1137 
/*
 * Second half of TSCH: clear the conditions on @sch that the guest has
 * just consumed (status/function/activity control bits and pending
 * sense data), per the architected tsch side effects.
 */
void css_do_tsch_update_subch(SubchDev *sch)
{
    SCSW *s = &sch->curr_status.scsw;
    PMCW *p = &sch->curr_status.pmcw;
    uint16_t stctl;
    uint16_t fctl;
    uint16_t actl;

    stctl = s->ctrl & SCSW_CTRL_MASK_STCTL;
    fctl = s->ctrl & SCSW_CTRL_MASK_FCTL;
    actl = s->ctrl & SCSW_CTRL_MASK_ACTL;

    /* Clear conditions on subchannel, if applicable. */
    if (stctl & SCSW_STCTL_STATUS_PEND) {
        s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
        /* Function control is kept only for suspended intermediate status. */
        if ((stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) ||
            ((fctl & SCSW_FCTL_HALT_FUNC) &&
             (actl & SCSW_ACTL_SUSP))) {
            s->ctrl &= ~SCSW_CTRL_MASK_FCTL;
        }
        if (stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) {
            /* Not intermediate: drop all activity control bits. */
            s->flags &= ~SCSW_FLAGS_MASK_PNO;
            s->ctrl &= ~(SCSW_ACTL_RESUME_PEND |
                         SCSW_ACTL_START_PEND |
                         SCSW_ACTL_HALT_PEND |
                         SCSW_ACTL_CLEAR_PEND |
                         SCSW_ACTL_SUSP);
        } else {
            /* Intermediate status: only touch suspended start functions. */
            if ((actl & SCSW_ACTL_SUSP) &&
                (fctl & SCSW_FCTL_START_FUNC)) {
                s->flags &= ~SCSW_FLAGS_MASK_PNO;
                if (fctl & SCSW_FCTL_HALT_FUNC) {
                    s->ctrl &= ~(SCSW_ACTL_RESUME_PEND |
                                 SCSW_ACTL_START_PEND |
                                 SCSW_ACTL_HALT_PEND |
                                 SCSW_ACTL_CLEAR_PEND |
                                 SCSW_ACTL_SUSP);
                } else {
                    s->ctrl &= ~SCSW_ACTL_RESUME_PEND;
                }
            }
        }
        /* Clear pending sense data. */
        if (p->chars & PMCW_CHARS_MASK_CSENSE) {
            memset(sch->sense_data, 0 , sizeof(sch->sense_data));
        }
    }
}
1186 
1187 static void copy_crw_to_guest(CRW *dest, const CRW *src)
1188 {
1189     dest->flags = cpu_to_be16(src->flags);
1190     dest->rsid = cpu_to_be16(src->rsid);
1191 }
1192 
1193 int css_do_stcrw(CRW *crw)
1194 {
1195     CrwContainer *crw_cont;
1196     int ret;
1197 
1198     crw_cont = QTAILQ_FIRST(&channel_subsys.pending_crws);
1199     if (crw_cont) {
1200         QTAILQ_REMOVE(&channel_subsys.pending_crws, crw_cont, sibling);
1201         copy_crw_to_guest(crw, &crw_cont->crw);
1202         g_free(crw_cont);
1203         ret = 0;
1204     } else {
1205         /* List was empty, turn crw machine checks on again. */
1206         memset(crw, 0, sizeof(*crw));
1207         channel_subsys.do_crw_mchk = true;
1208         ret = 1;
1209     }
1210 
1211     return ret;
1212 }
1213 
1214 static void copy_crw_from_guest(CRW *dest, const CRW *src)
1215 {
1216     dest->flags = be16_to_cpu(src->flags);
1217     dest->rsid = be16_to_cpu(src->rsid);
1218 }
1219 
1220 void css_undo_stcrw(CRW *crw)
1221 {
1222     CrwContainer *crw_cont;
1223 
1224     crw_cont = g_try_malloc0(sizeof(CrwContainer));
1225     if (!crw_cont) {
1226         channel_subsys.crws_lost = true;
1227         return;
1228     }
1229     copy_crw_from_guest(&crw_cont->crw, crw);
1230 
1231     QTAILQ_INSERT_HEAD(&channel_subsys.pending_crws, crw_cont, sibling);
1232 }
1233 
1234 int css_do_tpi(IOIntCode *int_code, int lowcore)
1235 {
1236     /* No pending interrupts for !KVM. */
1237     return 0;
1238  }
1239 
1240 int css_collect_chp_desc(int m, uint8_t cssid, uint8_t f_chpid, uint8_t l_chpid,
1241                          int rfmt, void *buf)
1242 {
1243     int i, desc_size;
1244     uint32_t words[8];
1245     uint32_t chpid_type_word;
1246     CssImage *css;
1247 
1248     if (!m && !cssid) {
1249         css = channel_subsys.css[channel_subsys.default_cssid];
1250     } else {
1251         css = channel_subsys.css[cssid];
1252     }
1253     if (!css) {
1254         return 0;
1255     }
1256     desc_size = 0;
1257     for (i = f_chpid; i <= l_chpid; i++) {
1258         if (css->chpids[i].in_use) {
1259             chpid_type_word = 0x80000000 | (css->chpids[i].type << 8) | i;
1260             if (rfmt == 0) {
1261                 words[0] = cpu_to_be32(chpid_type_word);
1262                 words[1] = 0;
1263                 memcpy(buf + desc_size, words, 8);
1264                 desc_size += 8;
1265             } else if (rfmt == 1) {
1266                 words[0] = cpu_to_be32(chpid_type_word);
1267                 words[1] = 0;
1268                 words[2] = 0;
1269                 words[3] = 0;
1270                 words[4] = 0;
1271                 words[5] = 0;
1272                 words[6] = 0;
1273                 words[7] = 0;
1274                 memcpy(buf + desc_size, words, 32);
1275                 desc_size += 32;
1276             }
1277         }
1278     }
1279     return desc_size;
1280 }
1281 
1282 void css_do_schm(uint8_t mbk, int update, int dct, uint64_t mbo)
1283 {
1284     /* dct is currently ignored (not really meaningful for our devices) */
1285     /* TODO: Don't ignore mbk. */
1286     if (update && !channel_subsys.chnmon_active) {
1287         /* Enable measuring. */
1288         channel_subsys.chnmon_area = mbo;
1289         channel_subsys.chnmon_active = true;
1290     }
1291     if (!update && channel_subsys.chnmon_active) {
1292         /* Disable measuring. */
1293         channel_subsys.chnmon_area = 0;
1294         channel_subsys.chnmon_active = false;
1295     }
1296 }
1297 
/*
 * Perform the resume subchannel (RSCH) function for @sch.
 *
 * Returns 0 on success, -ENODEV if the subchannel is not device-number
 * valid and enabled, -EINPROGRESS if status is pending, and -EINVAL if
 * the subchannel is not a suspended start function without a resume
 * already pending.
 */
int css_do_rsch(SubchDev *sch)
{
    SCSW *s = &sch->curr_status.scsw;
    PMCW *p = &sch->curr_status.pmcw;
    int ret;

    if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
        ret = -ENODEV;
        goto out;
    }

    if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
        ret = -EINPROGRESS;
        goto out;
    }

    /* Only a suspended start function without resume pending qualifies. */
    if (((s->ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) ||
        (s->ctrl & SCSW_ACTL_RESUME_PEND) ||
        (!(s->ctrl & SCSW_ACTL_SUSP))) {
        ret = -EINVAL;
        goto out;
    }

    /* If monitoring is active, update counter. */
    if (channel_subsys.chnmon_active) {
        css_update_chnmon(sch);
    }

    s->ctrl |= SCSW_ACTL_RESUME_PEND;
    do_subchannel_work(sch, NULL);
    ret = 0;

out:
    return ret;
}
1333 
1334 int css_do_rchp(uint8_t cssid, uint8_t chpid)
1335 {
1336     uint8_t real_cssid;
1337 
1338     if (cssid > channel_subsys.max_cssid) {
1339         return -EINVAL;
1340     }
1341     if (channel_subsys.max_cssid == 0) {
1342         real_cssid = channel_subsys.default_cssid;
1343     } else {
1344         real_cssid = cssid;
1345     }
1346     if (!channel_subsys.css[real_cssid]) {
1347         return -EINVAL;
1348     }
1349 
1350     if (!channel_subsys.css[real_cssid]->chpids[chpid].in_use) {
1351         return -ENODEV;
1352     }
1353 
1354     if (!channel_subsys.css[real_cssid]->chpids[chpid].is_virtual) {
1355         fprintf(stderr,
1356                 "rchp unsupported for non-virtual chpid %x.%02x!\n",
1357                 real_cssid, chpid);
1358         return -ENODEV;
1359     }
1360 
1361     /* We don't really use a channel path, so we're done here. */
1362     css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT,
1363                   channel_subsys.max_cssid > 0 ? 1 : 0, chpid);
1364     if (channel_subsys.max_cssid > 0) {
1365         css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT, 0, real_cssid << 8);
1366     }
1367     return 0;
1368 }
1369 
1370 bool css_schid_final(int m, uint8_t cssid, uint8_t ssid, uint16_t schid)
1371 {
1372     SubchSet *set;
1373     uint8_t real_cssid;
1374 
1375     real_cssid = (!m && (cssid == 0)) ? channel_subsys.default_cssid : cssid;
1376     if (ssid > MAX_SSID ||
1377         !channel_subsys.css[real_cssid] ||
1378         !channel_subsys.css[real_cssid]->sch_set[ssid]) {
1379         return true;
1380     }
1381     set = channel_subsys.css[real_cssid]->sch_set[ssid];
1382     return schid > find_last_bit(set->schids_used,
1383                                  (MAX_SCHID + 1) / sizeof(unsigned long));
1384 }
1385 
1386 unsigned int css_find_free_chpid(uint8_t cssid)
1387 {
1388     CssImage *css = channel_subsys.css[cssid];
1389     unsigned int chpid;
1390 
1391     if (!css) {
1392         return MAX_CHPID + 1;
1393     }
1394 
1395     for (chpid = 0; chpid <= MAX_CHPID; chpid++) {
1396         /* skip reserved chpid */
1397         if (chpid == VIRTIO_CCW_CHPID) {
1398             continue;
1399         }
1400         if (!css->chpids[chpid].in_use) {
1401             return chpid;
1402         }
1403     }
1404     return MAX_CHPID + 1;
1405 }
1406 
1407 static int css_add_chpid(uint8_t cssid, uint8_t chpid, uint8_t type,
1408                          bool is_virt)
1409 {
1410     CssImage *css;
1411 
1412     trace_css_chpid_add(cssid, chpid, type);
1413     css = channel_subsys.css[cssid];
1414     if (!css) {
1415         return -EINVAL;
1416     }
1417     if (css->chpids[chpid].in_use) {
1418         return -EEXIST;
1419     }
1420     css->chpids[chpid].in_use = 1;
1421     css->chpids[chpid].type = type;
1422     css->chpids[chpid].is_virtual = is_virt;
1423 
1424     css_generate_chp_crws(cssid, chpid);
1425 
1426     return 0;
1427 }
1428 
/*
 * Initialize the SCHIB of @sch as a virtual subchannel with a single
 * channel path @chpid of @type, registering the chpid in the css image
 * if it is not yet in use.
 */
void css_sch_build_virtual_schib(SubchDev *sch, uint8_t chpid, uint8_t type)
{
    PMCW *p = &sch->curr_status.pmcw;
    SCSW *s = &sch->curr_status.scsw;
    int i;
    CssImage *css = channel_subsys.css[sch->cssid];

    assert(css != NULL);
    memset(p, 0, sizeof(PMCW));
    /* Device number is valid. */
    p->flags |= PMCW_FLAGS_MASK_DNV;
    p->devno = sch->devno;
    /* single path */
    p->pim = 0x80;
    p->pom = 0xff;
    p->pam = 0x80;
    p->chpid[0] = chpid;
    if (!css->chpids[chpid].in_use) {
        css_add_chpid(sch->cssid, chpid, type, true);
    }

    /* Fresh subchannel: no status, no measurement data. */
    memset(s, 0, sizeof(SCSW));
    sch->curr_status.mba = 0;
    for (i = 0; i < ARRAY_SIZE(sch->curr_status.mda); i++) {
        sch->curr_status.mda[i] = 0;
    }
}
1455 
1456 SubchDev *css_find_subch(uint8_t m, uint8_t cssid, uint8_t ssid, uint16_t schid)
1457 {
1458     uint8_t real_cssid;
1459 
1460     real_cssid = (!m && (cssid == 0)) ? channel_subsys.default_cssid : cssid;
1461 
1462     if (!channel_subsys.css[real_cssid]) {
1463         return NULL;
1464     }
1465 
1466     if (!channel_subsys.css[real_cssid]->sch_set[ssid]) {
1467         return NULL;
1468     }
1469 
1470     return channel_subsys.css[real_cssid]->sch_set[ssid]->sch[schid];
1471 }
1472 
1473 /**
1474  * Return free device number in subchannel set.
1475  *
1476  * Return index of the first free device number in the subchannel set
1477  * identified by @p cssid and @p ssid, beginning the search at @p
1478  * start and wrapping around at MAX_DEVNO. Return a value exceeding
1479  * MAX_SCHID if there are no free device numbers in the subchannel
1480  * set.
1481  */
1482 static uint32_t css_find_free_devno(uint8_t cssid, uint8_t ssid,
1483                                     uint16_t start)
1484 {
1485     uint32_t round;
1486 
1487     for (round = 0; round <= MAX_DEVNO; round++) {
1488         uint16_t devno = (start + round) % MAX_DEVNO;
1489 
1490         if (!css_devno_used(cssid, ssid, devno)) {
1491             return devno;
1492         }
1493     }
1494     return MAX_DEVNO + 1;
1495 }
1496 
1497 /**
1498  * Return first free subchannel (id) in subchannel set.
1499  *
1500  * Return index of the first free subchannel in the subchannel set
1501  * identified by @p cssid and @p ssid, if there is any. Return a value
1502  * exceeding MAX_SCHID if there are no free subchannels in the
1503  * subchannel set.
1504  */
1505 static uint32_t css_find_free_subch(uint8_t cssid, uint8_t ssid)
1506 {
1507     uint32_t schid;
1508 
1509     for (schid = 0; schid <= MAX_SCHID; schid++) {
1510         if (!css_find_subch(1, cssid, ssid, schid)) {
1511             return schid;
1512         }
1513     }
1514     return MAX_SCHID + 1;
1515 }
1516 
1517 /**
1518  * Return first free subchannel (id) in subchannel set for a device number
1519  *
1520  * Verify the device number @p devno is not used yet in the subchannel
1521  * set identified by @p cssid and @p ssid. Set @p schid to the index
1522  * of the first free subchannel in the subchannel set, if there is
1523  * any. Return true if everything succeeded and false otherwise.
1524  */
1525 static bool css_find_free_subch_for_devno(uint8_t cssid, uint8_t ssid,
1526                                           uint16_t devno, uint16_t *schid,
1527                                           Error **errp)
1528 {
1529     uint32_t free_schid;
1530 
1531     assert(schid);
1532     if (css_devno_used(cssid, ssid, devno)) {
1533         error_setg(errp, "Device %x.%x.%04x already exists",
1534                    cssid, ssid, devno);
1535         return false;
1536     }
1537     free_schid = css_find_free_subch(cssid, ssid);
1538     if (free_schid > MAX_SCHID) {
1539         error_setg(errp, "No free subchannel found for %x.%x.%04x",
1540                    cssid, ssid, devno);
1541         return false;
1542     }
1543     *schid = free_schid;
1544     return true;
1545 }
1546 
1547 /**
1548  * Return first free subchannel (id) and device number
1549  *
1550  * Locate the first free subchannel and first free device number in
1551  * any of the subchannel sets of the channel subsystem identified by
1552  * @p cssid. Return false if no free subchannel / device number could
1553  * be found. Otherwise set @p ssid, @p devno and @p schid to identify
1554  * the available subchannel and device number and return true.
1555  *
1556  * May modify @p ssid, @p devno and / or @p schid even if no free
1557  * subchannel / device number could be found.
1558  */
1559 static bool css_find_free_subch_and_devno(uint8_t cssid, uint8_t *ssid,
1560                                           uint16_t *devno, uint16_t *schid,
1561                                           Error **errp)
1562 {
1563     uint32_t free_schid, free_devno;
1564 
1565     assert(ssid && devno && schid);
1566     for (*ssid = 0; *ssid <= MAX_SSID; (*ssid)++) {
1567         free_schid = css_find_free_subch(cssid, *ssid);
1568         if (free_schid > MAX_SCHID) {
1569             continue;
1570         }
1571         free_devno = css_find_free_devno(cssid, *ssid, free_schid);
1572         if (free_devno > MAX_DEVNO) {
1573             continue;
1574         }
1575         *schid = free_schid;
1576         *devno = free_devno;
1577         return true;
1578     }
1579     error_setg(errp, "Virtual channel subsystem is full!");
1580     return false;
1581 }
1582 
1583 bool css_subch_visible(SubchDev *sch)
1584 {
1585     if (sch->ssid > channel_subsys.max_ssid) {
1586         return false;
1587     }
1588 
1589     if (sch->cssid != channel_subsys.default_cssid) {
1590         return (channel_subsys.max_cssid > 0);
1591     }
1592 
1593     return true;
1594 }
1595 
1596 bool css_present(uint8_t cssid)
1597 {
1598     return (channel_subsys.css[cssid] != NULL);
1599 }
1600 
1601 bool css_devno_used(uint8_t cssid, uint8_t ssid, uint16_t devno)
1602 {
1603     if (!channel_subsys.css[cssid]) {
1604         return false;
1605     }
1606     if (!channel_subsys.css[cssid]->sch_set[ssid]) {
1607         return false;
1608     }
1609 
1610     return !!test_bit(devno,
1611                       channel_subsys.css[cssid]->sch_set[ssid]->devnos_used);
1612 }
1613 
1614 void css_subch_assign(uint8_t cssid, uint8_t ssid, uint16_t schid,
1615                       uint16_t devno, SubchDev *sch)
1616 {
1617     CssImage *css;
1618     SubchSet *s_set;
1619 
1620     trace_css_assign_subch(sch ? "assign" : "deassign", cssid, ssid, schid,
1621                            devno);
1622     if (!channel_subsys.css[cssid]) {
1623         fprintf(stderr,
1624                 "Suspicious call to %s (%x.%x.%04x) for non-existing css!\n",
1625                 __func__, cssid, ssid, schid);
1626         return;
1627     }
1628     css = channel_subsys.css[cssid];
1629 
1630     if (!css->sch_set[ssid]) {
1631         css->sch_set[ssid] = g_malloc0(sizeof(SubchSet));
1632     }
1633     s_set = css->sch_set[ssid];
1634 
1635     s_set->sch[schid] = sch;
1636     if (sch) {
1637         set_bit(schid, s_set->schids_used);
1638         set_bit(devno, s_set->devnos_used);
1639     } else {
1640         clear_bit(schid, s_set->schids_used);
1641         clear_bit(devno, s_set->devnos_used);
1642     }
1643 }
1644 
/*
 * Queue a channel report word built from @rsc/@erc/@rsid for delivery
 * via stcrw, and inject a machine check if the guest currently has
 * them enabled. On allocation failure only the loss is recorded.
 */
void css_queue_crw(uint8_t rsc, uint8_t erc, int chain, uint16_t rsid)
{
    CrwContainer *crw_cont;

    trace_css_crw(rsc, erc, rsid, chain ? "(chained)" : "");
    /* TODO: Maybe use a static crw pool? */
    crw_cont = g_try_malloc0(sizeof(CrwContainer));
    if (!crw_cont) {
        channel_subsys.crws_lost = true;
        return;
    }
    crw_cont->crw.flags = (rsc << 8) | erc;
    if (chain) {
        crw_cont->crw.flags |= CRW_FLAGS_MASK_C;
    }
    crw_cont->crw.rsid = rsid;
    /* Report previously lost CRWs via the overflow (R) bit. */
    if (channel_subsys.crws_lost) {
        crw_cont->crw.flags |= CRW_FLAGS_MASK_R;
        channel_subsys.crws_lost = false;
    }

    QTAILQ_INSERT_TAIL(&channel_subsys.pending_crws, crw_cont, sibling);

    if (channel_subsys.do_crw_mchk) {
        /* Suppress further machine checks until the guest drains CRWs. */
        channel_subsys.do_crw_mchk = false;
        /* Inject crw pending machine check. */
        s390_crw_mchk();
    }
}
1674 
/*
 * Queue subchannel CRWs for a hotplug event on @cssid/@ssid/@schid,
 * chaining in a second CRW with the cssid/ssid when the guest has
 * enabled multiple subchannel sets or css images, and clear pending
 * I/O interrupts for the subchannel.
 */
void css_generate_sch_crws(uint8_t cssid, uint8_t ssid, uint16_t schid,
                           int hotplugged, int add)
{
    uint8_t guest_cssid;
    bool chain_crw;

    /* Coldplugged additions are picked up by the guest's own scan. */
    if (add && !hotplugged) {
        return;
    }
    if (channel_subsys.max_cssid == 0) {
        /* Default cssid shows up as 0. */
        guest_cssid = (cssid == channel_subsys.default_cssid) ? 0 : cssid;
    } else {
        /* Show real cssid to the guest. */
        guest_cssid = cssid;
    }
    /*
     * Only notify for higher subchannel sets/channel subsystems if the
     * guest has enabled it.
     */
    if ((ssid > channel_subsys.max_ssid) ||
        (guest_cssid > channel_subsys.max_cssid) ||
        ((channel_subsys.max_cssid == 0) &&
         (cssid != channel_subsys.default_cssid))) {
        return;
    }
    chain_crw = (channel_subsys.max_ssid > 0) ||
            (channel_subsys.max_cssid > 0);
    css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, chain_crw ? 1 : 0, schid);
    if (chain_crw) {
        css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, 0,
                      (guest_cssid << 8) | (ssid << 4));
    }
    /* CRW_ERC_IPI --> clear pending interrupts */
    css_clear_io_interrupt(css_do_build_subchannel_id(cssid, ssid), schid);
}
1711 
/* Generate channel-path CRWs for @chpid in @cssid. Not implemented yet. */
void css_generate_chp_crws(uint8_t cssid, uint8_t chpid)
{
    /* TODO */
}
1716 
1717 void css_generate_css_crws(uint8_t cssid)
1718 {
1719     if (!channel_subsys.sei_pending) {
1720         css_queue_crw(CRW_RSC_CSS, 0, 0, cssid);
1721     }
1722     channel_subsys.sei_pending = true;
1723 }
1724 
/* Forget that a channel subsystem event CRW is outstanding. */
void css_clear_sei_pending(void)
{
    channel_subsys.sei_pending = false;
}
1729 
/* The guest enabled mcss-e: make the full cssid range addressable. */
int css_enable_mcsse(void)
{
    trace_css_enable_facility("mcsse");
    channel_subsys.max_cssid = MAX_CSSID;
    return 0;
}
1736 
/* The guest enabled mss: make all subchannel sets addressable. */
int css_enable_mss(void)
{
    trace_css_enable_facility("mss");
    channel_subsys.max_ssid = MAX_SSID;
    return 0;
}
1743 
/*
 * Serialize subchannel state for migration.
 *
 * NOTE: the field order here must match subch_device_load() exactly;
 * changing it breaks migration compatibility.
 */
void subch_device_save(SubchDev *s, QEMUFile *f)
{
    int i;

    qemu_put_byte(f, s->cssid);
    qemu_put_byte(f, s->ssid);
    qemu_put_be16(f, s->schid);
    qemu_put_be16(f, s->devno);
    qemu_put_byte(f, s->thinint_active);
    /* SCHIB */
    /*     PMCW */
    qemu_put_be32(f, s->curr_status.pmcw.intparm);
    qemu_put_be16(f, s->curr_status.pmcw.flags);
    qemu_put_be16(f, s->curr_status.pmcw.devno);
    qemu_put_byte(f, s->curr_status.pmcw.lpm);
    qemu_put_byte(f, s->curr_status.pmcw.pnom);
    qemu_put_byte(f, s->curr_status.pmcw.lpum);
    qemu_put_byte(f, s->curr_status.pmcw.pim);
    qemu_put_be16(f, s->curr_status.pmcw.mbi);
    qemu_put_byte(f, s->curr_status.pmcw.pom);
    qemu_put_byte(f, s->curr_status.pmcw.pam);
    qemu_put_buffer(f, s->curr_status.pmcw.chpid, 8);
    qemu_put_be32(f, s->curr_status.pmcw.chars);
    /*     SCSW */
    qemu_put_be16(f, s->curr_status.scsw.flags);
    qemu_put_be16(f, s->curr_status.scsw.ctrl);
    qemu_put_be32(f, s->curr_status.scsw.cpa);
    qemu_put_byte(f, s->curr_status.scsw.dstat);
    qemu_put_byte(f, s->curr_status.scsw.cstat);
    qemu_put_be16(f, s->curr_status.scsw.count);
    qemu_put_be64(f, s->curr_status.mba);
    qemu_put_buffer(f, s->curr_status.mda, 4);
    /* end SCHIB */
    qemu_put_buffer(f, s->sense_data, 32);
    qemu_put_be64(f, s->channel_prog);
    /* last cmd */
    qemu_put_byte(f, s->last_cmd.cmd_code);
    qemu_put_byte(f, s->last_cmd.flags);
    qemu_put_be16(f, s->last_cmd.count);
    qemu_put_be32(f, s->last_cmd.cda);
    qemu_put_byte(f, s->last_cmd_valid);
    /* sense id */
    qemu_put_byte(f, s->id.reserved);
    qemu_put_be16(f, s->id.cu_type);
    qemu_put_byte(f, s->id.cu_model);
    qemu_put_be16(f, s->id.dev_type);
    qemu_put_byte(f, s->id.dev_model);
    qemu_put_byte(f, s->id.unused);
    for (i = 0; i < ARRAY_SIZE(s->id.ciw); i++) {
        qemu_put_byte(f, s->id.ciw[i].type);
        qemu_put_byte(f, s->id.ciw[i].command);
        qemu_put_be16(f, s->id.ciw[i].count);
    }
    qemu_put_byte(f, s->ccw_fmt_1);
    qemu_put_byte(f, s->ccw_no_data_cnt);
}
1799 
/*
 * Restore subchannel state from migration.
 *
 * NOTE: the field order here must match subch_device_save() exactly.
 * Always returns 0.
 */
int subch_device_load(SubchDev *s, QEMUFile *f)
{
    SubchDev *old_s;
    uint16_t old_schid = s->schid;
    int i;

    s->cssid = qemu_get_byte(f);
    s->ssid = qemu_get_byte(f);
    s->schid = qemu_get_be16(f);
    s->devno = qemu_get_be16(f);
    /* Re-assign subch. */
    if (old_schid != s->schid) {
        old_s = channel_subsys.css[s->cssid]->sch_set[s->ssid]->sch[old_schid];
        /*
         * (old_s != s) means that some other device has its correct
         * subchannel already assigned (in load).
         */
        if (old_s == s) {
            css_subch_assign(s->cssid, s->ssid, old_schid, s->devno, NULL);
        }
        /* It's OK to re-assign without a prior de-assign. */
        css_subch_assign(s->cssid, s->ssid, s->schid, s->devno, s);
    }
    s->thinint_active = qemu_get_byte(f);
    /* SCHIB */
    /*     PMCW */
    s->curr_status.pmcw.intparm = qemu_get_be32(f);
    s->curr_status.pmcw.flags = qemu_get_be16(f);
    s->curr_status.pmcw.devno = qemu_get_be16(f);
    s->curr_status.pmcw.lpm = qemu_get_byte(f);
    s->curr_status.pmcw.pnom  = qemu_get_byte(f);
    s->curr_status.pmcw.lpum = qemu_get_byte(f);
    s->curr_status.pmcw.pim = qemu_get_byte(f);
    s->curr_status.pmcw.mbi = qemu_get_be16(f);
    s->curr_status.pmcw.pom = qemu_get_byte(f);
    s->curr_status.pmcw.pam = qemu_get_byte(f);
    qemu_get_buffer(f, s->curr_status.pmcw.chpid, 8);
    s->curr_status.pmcw.chars = qemu_get_be32(f);
    /*     SCSW */
    s->curr_status.scsw.flags = qemu_get_be16(f);
    s->curr_status.scsw.ctrl = qemu_get_be16(f);
    s->curr_status.scsw.cpa = qemu_get_be32(f);
    s->curr_status.scsw.dstat = qemu_get_byte(f);
    s->curr_status.scsw.cstat = qemu_get_byte(f);
    s->curr_status.scsw.count = qemu_get_be16(f);
    s->curr_status.mba = qemu_get_be64(f);
    qemu_get_buffer(f, s->curr_status.mda, 4);
    /* end SCHIB */
    qemu_get_buffer(f, s->sense_data, 32);
    s->channel_prog = qemu_get_be64(f);
    /* last cmd */
    s->last_cmd.cmd_code = qemu_get_byte(f);
    s->last_cmd.flags = qemu_get_byte(f);
    s->last_cmd.count = qemu_get_be16(f);
    s->last_cmd.cda = qemu_get_be32(f);
    s->last_cmd_valid = qemu_get_byte(f);
    /* sense id */
    s->id.reserved = qemu_get_byte(f);
    s->id.cu_type = qemu_get_be16(f);
    s->id.cu_model = qemu_get_byte(f);
    s->id.dev_type = qemu_get_be16(f);
    s->id.dev_model = qemu_get_byte(f);
    s->id.unused = qemu_get_byte(f);
    for (i = 0; i < ARRAY_SIZE(s->id.ciw); i++) {
        s->id.ciw[i].type = qemu_get_byte(f);
        s->id.ciw[i].command = qemu_get_byte(f);
        s->id.ciw[i].count = qemu_get_be16(f);
    }
    s->ccw_fmt_1 = qemu_get_byte(f);
    s->ccw_no_data_cnt = qemu_get_byte(f);
    /*
     * Hack alert. We don't migrate the channel subsystem status (no
     * device!), but we need to find out if the guest enabled mss/mcss-e.
     * If the subchannel is enabled, it certainly was able to access it,
     * so adjust the max_ssid/max_cssid values for relevant ssid/cssid
     * values. This is not watertight, but better than nothing.
     */
    if (s->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA) {
        if (s->ssid) {
            channel_subsys.max_ssid = MAX_SSID;
        }
        if (s->cssid != channel_subsys.default_cssid) {
            channel_subsys.max_cssid = MAX_CSSID;
        }
    }
    return 0;
}
1886 
/*
 * Reset @sch to its initial state: run the disable callback if the
 * subchannel was enabled, restore the PMCW/SCSW architected reset
 * values and clear channel program state.
 */
void css_reset_sch(SubchDev *sch)
{
    PMCW *p = &sch->curr_status.pmcw;

    /* Give the device a chance to clean up if it was enabled. */
    if ((p->flags & PMCW_FLAGS_MASK_ENA) != 0 && sch->disable_cb) {
        sch->disable_cb(sch);
    }

    p->intparm = 0;
    p->flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
                  PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
                  PMCW_FLAGS_MASK_MP | PMCW_FLAGS_MASK_TF);
    p->flags |= PMCW_FLAGS_MASK_DNV;
    p->devno = sch->devno;
    /* Single path, as in css_sch_build_virtual_schib(). */
    p->pim = 0x80;
    p->lpm = p->pim;
    p->pnom = 0;
    p->lpum = 0;
    p->mbi = 0;
    p->pom = 0xff;
    p->pam = 0x80;
    p->chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_XMWME |
                  PMCW_CHARS_MASK_CSENSE);

    memset(&sch->curr_status.scsw, 0, sizeof(sch->curr_status.scsw));
    sch->curr_status.mba = 0;

    sch->channel_prog = 0x0;
    sch->last_cmd_valid = false;
    sch->thinint_active = false;
}
1918 
1919 void css_reset(void)
1920 {
1921     CrwContainer *crw_cont;
1922 
1923     /* Clean up monitoring. */
1924     channel_subsys.chnmon_active = false;
1925     channel_subsys.chnmon_area = 0;
1926 
1927     /* Clear pending CRWs. */
1928     while ((crw_cont = QTAILQ_FIRST(&channel_subsys.pending_crws))) {
1929         QTAILQ_REMOVE(&channel_subsys.pending_crws, crw_cont, sibling);
1930         g_free(crw_cont);
1931     }
1932     channel_subsys.sei_pending = false;
1933     channel_subsys.do_crw_mchk = true;
1934     channel_subsys.crws_lost = false;
1935 
1936     /* Reset maximum ids. */
1937     channel_subsys.max_cssid = 0;
1938     channel_subsys.max_ssid = 0;
1939 }
1940 
1941 static void get_css_devid(Object *obj, Visitor *v, const char *name,
1942                           void *opaque, Error **errp)
1943 {
1944     DeviceState *dev = DEVICE(obj);
1945     Property *prop = opaque;
1946     CssDevId *dev_id = qdev_get_prop_ptr(dev, prop);
1947     char buffer[] = "xx.x.xxxx";
1948     char *p = buffer;
1949     int r;
1950 
1951     if (dev_id->valid) {
1952 
1953         r = snprintf(buffer, sizeof(buffer), "%02x.%1x.%04x", dev_id->cssid,
1954                      dev_id->ssid, dev_id->devid);
1955         assert(r == sizeof(buffer) - 1);
1956 
1957         /* drop leading zero */
1958         if (dev_id->cssid <= 0xf) {
1959             p++;
1960         }
1961     } else {
1962         snprintf(buffer, sizeof(buffer), "<unset>");
1963     }
1964 
1965     visit_type_str(v, name, &p, errp);
1966 }
1967 
1968 /*
1969  * parse <cssid>.<ssid>.<devid> and assert valid range for cssid/ssid
1970  */
1971 static void set_css_devid(Object *obj, Visitor *v, const char *name,
1972                           void *opaque, Error **errp)
1973 {
1974     DeviceState *dev = DEVICE(obj);
1975     Property *prop = opaque;
1976     CssDevId *dev_id = qdev_get_prop_ptr(dev, prop);
1977     Error *local_err = NULL;
1978     char *str;
1979     int num, n1, n2;
1980     unsigned int cssid, ssid, devid;
1981 
1982     if (dev->realized) {
1983         qdev_prop_set_after_realize(dev, name, errp);
1984         return;
1985     }
1986 
1987     visit_type_str(v, name, &str, &local_err);
1988     if (local_err) {
1989         error_propagate(errp, local_err);
1990         return;
1991     }
1992 
1993     num = sscanf(str, "%2x.%1x%n.%4x%n", &cssid, &ssid, &n1, &devid, &n2);
1994     if (num != 3 || (n2 - n1) != 5 || strlen(str) != n2) {
1995         error_set_from_qdev_prop_error(errp, EINVAL, dev, prop, str);
1996         goto out;
1997     }
1998     if ((cssid > MAX_CSSID) || (ssid > MAX_SSID)) {
1999         error_setg(errp, "Invalid cssid or ssid: cssid %x, ssid %x",
2000                    cssid, ssid);
2001         goto out;
2002     }
2003 
2004     dev_id->cssid = cssid;
2005     dev_id->ssid = ssid;
2006     dev_id->devid = devid;
2007     dev_id->valid = true;
2008 
2009 out:
2010     g_free(str);
2011 }
2012 
/* Writable bus-id property; parsed/formatted as "<cssid>.<ssid>.<devid>". */
PropertyInfo css_devid_propinfo = {
    .name = "str",
    .description = "Identifier of an I/O device in the channel "
                   "subsystem, example: fe.1.23ab",
    .get = get_css_devid,
    .set = set_css_devid,
};
2020 
/* Read-only variant of the bus-id property: no .set callback is provided. */
PropertyInfo css_devid_ro_propinfo = {
    .name = "str",
    .description = "Read-only identifier of an I/O device in the channel "
                   "subsystem, example: fe.1.23ab",
    .get = get_css_devid,
};
2027 
2028 SubchDev *css_create_sch(CssDevId bus_id, bool is_virtual, bool squash_mcss,
2029                          Error **errp)
2030 {
2031     uint16_t schid = 0;
2032     SubchDev *sch;
2033 
2034     if (bus_id.valid) {
2035         if (is_virtual != (bus_id.cssid == VIRTUAL_CSSID)) {
2036             error_setg(errp, "cssid %hhx not valid for %s devices",
2037                        bus_id.cssid,
2038                        (is_virtual ? "virtual" : "non-virtual"));
2039             return NULL;
2040         }
2041     }
2042 
2043     if (bus_id.valid) {
2044         if (squash_mcss) {
2045             bus_id.cssid = channel_subsys.default_cssid;
2046         } else if (!channel_subsys.css[bus_id.cssid]) {
2047             css_create_css_image(bus_id.cssid, false);
2048         }
2049 
2050         if (!css_find_free_subch_for_devno(bus_id.cssid, bus_id.ssid,
2051                                            bus_id.devid, &schid, errp)) {
2052             return NULL;
2053         }
2054     } else if (squash_mcss || is_virtual) {
2055         bus_id.cssid = channel_subsys.default_cssid;
2056 
2057         if (!css_find_free_subch_and_devno(bus_id.cssid, &bus_id.ssid,
2058                                            &bus_id.devid, &schid, errp)) {
2059             return NULL;
2060         }
2061     } else {
2062         for (bus_id.cssid = 0; bus_id.cssid < MAX_CSSID; ++bus_id.cssid) {
2063             if (bus_id.cssid == VIRTUAL_CSSID) {
2064                 continue;
2065             }
2066 
2067             if (!channel_subsys.css[bus_id.cssid]) {
2068                 css_create_css_image(bus_id.cssid, false);
2069             }
2070 
2071             if   (css_find_free_subch_and_devno(bus_id.cssid, &bus_id.ssid,
2072                                                 &bus_id.devid, &schid,
2073                                                 NULL)) {
2074                 break;
2075             }
2076             if (bus_id.cssid == MAX_CSSID) {
2077                 error_setg(errp, "Virtual channel subsystem is full!");
2078                 return NULL;
2079             }
2080         }
2081     }
2082 
2083     sch = g_malloc0(sizeof(*sch));
2084     sch->cssid = bus_id.cssid;
2085     sch->ssid = bus_id.ssid;
2086     sch->devno = bus_id.devid;
2087     sch->schid = schid;
2088     css_subch_assign(sch->cssid, sch->ssid, schid, sch->devno, sch);
2089     return sch;
2090 }
2091 
2092 static int css_sch_get_chpids(SubchDev *sch, CssDevId *dev_id)
2093 {
2094     char *fid_path;
2095     FILE *fd;
2096     uint32_t chpid[8];
2097     int i;
2098     PMCW *p = &sch->curr_status.pmcw;
2099 
2100     fid_path = g_strdup_printf("/sys/bus/css/devices/%x.%x.%04x/chpids",
2101                                dev_id->cssid, dev_id->ssid, dev_id->devid);
2102     fd = fopen(fid_path, "r");
2103     if (fd == NULL) {
2104         error_report("%s: open %s failed", __func__, fid_path);
2105         g_free(fid_path);
2106         return -EINVAL;
2107     }
2108 
2109     if (fscanf(fd, "%x %x %x %x %x %x %x %x",
2110         &chpid[0], &chpid[1], &chpid[2], &chpid[3],
2111         &chpid[4], &chpid[5], &chpid[6], &chpid[7]) != 8) {
2112         fclose(fd);
2113         g_free(fid_path);
2114         return -EINVAL;
2115     }
2116 
2117     for (i = 0; i < ARRAY_SIZE(p->chpid); i++) {
2118         p->chpid[i] = chpid[i];
2119     }
2120 
2121     fclose(fd);
2122     g_free(fid_path);
2123 
2124     return 0;
2125 }
2126 
2127 static int css_sch_get_path_masks(SubchDev *sch, CssDevId *dev_id)
2128 {
2129     char *fid_path;
2130     FILE *fd;
2131     uint32_t pim, pam, pom;
2132     PMCW *p = &sch->curr_status.pmcw;
2133 
2134     fid_path = g_strdup_printf("/sys/bus/css/devices/%x.%x.%04x/pimpampom",
2135                                dev_id->cssid, dev_id->ssid, dev_id->devid);
2136     fd = fopen(fid_path, "r");
2137     if (fd == NULL) {
2138         error_report("%s: open %s failed", __func__, fid_path);
2139         g_free(fid_path);
2140         return -EINVAL;
2141     }
2142 
2143     if (fscanf(fd, "%x %x %x", &pim, &pam, &pom) != 3) {
2144         fclose(fd);
2145         g_free(fid_path);
2146         return -EINVAL;
2147     }
2148 
2149     p->pim = pim;
2150     p->pam = pam;
2151     p->pom = pom;
2152     fclose(fd);
2153     g_free(fid_path);
2154 
2155     return 0;
2156 }
2157 
2158 static int css_sch_get_chpid_type(uint8_t chpid, uint32_t *type,
2159                                   CssDevId *dev_id)
2160 {
2161     char *fid_path;
2162     FILE *fd;
2163 
2164     fid_path = g_strdup_printf("/sys/devices/css%x/chp0.%02x/type",
2165                                dev_id->cssid, chpid);
2166     fd = fopen(fid_path, "r");
2167     if (fd == NULL) {
2168         error_report("%s: open %s failed", __func__, fid_path);
2169         g_free(fid_path);
2170         return -EINVAL;
2171     }
2172 
2173     if (fscanf(fd, "%x", type) != 1) {
2174         fclose(fd);
2175         g_free(fid_path);
2176         return -EINVAL;
2177     }
2178 
2179     fclose(fd);
2180     g_free(fid_path);
2181 
2182     return 0;
2183 }
2184 
/*
 * We currently retrieve the real device information from sysfs to build the
 * guest subchannel information block, without considering migration.
 * We will need to revisit this problem when we want to add migration support.
 */
2190 int css_sch_build_schib(SubchDev *sch, CssDevId *dev_id)
2191 {
2192     CssImage *css = channel_subsys.css[sch->cssid];
2193     PMCW *p = &sch->curr_status.pmcw;
2194     SCSW *s = &sch->curr_status.scsw;
2195     uint32_t type;
2196     int i, ret;
2197 
2198     assert(css != NULL);
2199     memset(p, 0, sizeof(PMCW));
2200     p->flags |= PMCW_FLAGS_MASK_DNV;
2201     /* We are dealing with I/O subchannels only. */
2202     p->devno = sch->devno;
2203 
2204     /* Grab path mask from sysfs. */
2205     ret = css_sch_get_path_masks(sch, dev_id);
2206     if (ret) {
2207         return ret;
2208     }
2209 
2210     /* Grab chpids from sysfs. */
2211     ret = css_sch_get_chpids(sch, dev_id);
2212     if (ret) {
2213         return ret;
2214     }
2215 
2216    /* Build chpid type. */
2217     for (i = 0; i < ARRAY_SIZE(p->chpid); i++) {
2218         if (p->chpid[i] && !css->chpids[p->chpid[i]].in_use) {
2219             ret = css_sch_get_chpid_type(p->chpid[i], &type, dev_id);
2220             if (ret) {
2221                 return ret;
2222             }
2223             css_add_chpid(sch->cssid, p->chpid[i], type, false);
2224         }
2225     }
2226 
2227     memset(s, 0, sizeof(SCSW));
2228     sch->curr_status.mba = 0;
2229     for (i = 0; i < ARRAY_SIZE(sch->curr_status.mda); i++) {
2230         sch->curr_status.mda[i] = 0;
2231     }
2232 
2233     return 0;
2234 }
2235