xref: /qemu/hw/s390x/css.c (revision f5956d71)
1 /*
2  * Channel subsystem base support.
3  *
4  * Copyright 2012 IBM Corp.
5  * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6  *
7  * This work is licensed under the terms of the GNU GPL, version 2 or (at
8  * your option) any later version. See the COPYING file in the top-level
9  * directory.
10  */
11 
12 #include "qemu/osdep.h"
13 #include "qapi/error.h"
14 #include "qapi/visitor.h"
15 #include "hw/qdev.h"
16 #include "qemu/error-report.h"
17 #include "qemu/bitops.h"
19 #include "exec/address-spaces.h"
20 #include "cpu.h"
21 #include "hw/s390x/ioinst.h"
22 #include "hw/s390x/css.h"
23 #include "trace.h"
24 #include "hw/s390x/s390_flic.h"
25 #include "hw/s390x/s390-virtio-ccw.h"
26 
27 typedef struct CrwContainer {
28     CRW crw;
29     QTAILQ_ENTRY(CrwContainer) sibling;
30 } CrwContainer;
31 
32 typedef struct ChpInfo {
33     uint8_t in_use;
34     uint8_t type;
35     uint8_t is_virtual;
36 } ChpInfo;
37 
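/*
 * Per subchannel set: the subchannel devices themselves plus bitmaps
 * recording which subchannel ids and device numbers are already in use.
 */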
38 typedef struct SubchSet {
39     SubchDev *sch[MAX_SCHID + 1];
40     unsigned long schids_used[BITS_TO_LONGS(MAX_SCHID + 1)];
41     unsigned long devnos_used[BITS_TO_LONGS(MAX_SCHID + 1)];
42 } SubchSet;
43 
44 static const VMStateDescription vmstate_scsw = {
45     .name = "s390_scsw",
46     .version_id = 1,
47     .minimum_version_id = 1,
48     .fields = (VMStateField[]) {
49         VMSTATE_UINT16(flags, SCSW),
50         VMSTATE_UINT16(ctrl, SCSW),
51         VMSTATE_UINT32(cpa, SCSW),
52         VMSTATE_UINT8(dstat, SCSW),
53         VMSTATE_UINT8(cstat, SCSW),
54         VMSTATE_UINT16(count, SCSW),
55         VMSTATE_END_OF_LIST()
56     }
57 };
58 
59 static const VMStateDescription vmstate_pmcw = {
60     .name = "s390_pmcw",
61     .version_id = 1,
62     .minimum_version_id = 1,
63     .fields = (VMStateField[]) {
64         VMSTATE_UINT32(intparm, PMCW),
65         VMSTATE_UINT16(flags, PMCW),
66         VMSTATE_UINT16(devno, PMCW),
67         VMSTATE_UINT8(lpm, PMCW),
68         VMSTATE_UINT8(pnom, PMCW),
69         VMSTATE_UINT8(lpum, PMCW),
70         VMSTATE_UINT8(pim, PMCW),
71         VMSTATE_UINT16(mbi, PMCW),
72         VMSTATE_UINT8(pom, PMCW),
73         VMSTATE_UINT8(pam, PMCW),
74         VMSTATE_UINT8_ARRAY(chpid, PMCW, 8),
75         VMSTATE_UINT32(chars, PMCW),
76         VMSTATE_END_OF_LIST()
77     }
78 };
79 
80 static const VMStateDescription vmstate_schib = {
81     .name = "s390_schib",
82     .version_id = 1,
83     .minimum_version_id = 1,
84     .fields = (VMStateField[]) {
85         VMSTATE_STRUCT(pmcw, SCHIB, 0, vmstate_pmcw, PMCW),
86         VMSTATE_STRUCT(scsw, SCHIB, 0, vmstate_scsw, SCSW),
87         VMSTATE_UINT64(mba, SCHIB),
88         VMSTATE_UINT8_ARRAY(mda, SCHIB, 4),
89         VMSTATE_END_OF_LIST()
90     }
91 };
92 
93 
94 static const VMStateDescription vmstate_ccw1 = {
95     .name = "s390_ccw1",
96     .version_id = 1,
97     .minimum_version_id = 1,
98     .fields = (VMStateField[]) {
99         VMSTATE_UINT8(cmd_code, CCW1),
100         VMSTATE_UINT8(flags, CCW1),
101         VMSTATE_UINT16(count, CCW1),
102         VMSTATE_UINT32(cda, CCW1),
103         VMSTATE_END_OF_LIST()
104     }
105 };
106 
107 static const VMStateDescription vmstate_ciw = {
108     .name = "s390_ciw",
109     .version_id = 1,
110     .minimum_version_id = 1,
111     .fields = (VMStateField[]) {
112         VMSTATE_UINT8(type, CIW),
113         VMSTATE_UINT8(command, CIW),
114         VMSTATE_UINT16(count, CIW),
115         VMSTATE_END_OF_LIST()
116     }
117 };
118 
119 static const VMStateDescription vmstate_sense_id = {
120     .name = "s390_sense_id",
121     .version_id = 1,
122     .minimum_version_id = 1,
123     .fields = (VMStateField[]) {
124         VMSTATE_UINT8(reserved, SenseId),
125         VMSTATE_UINT16(cu_type, SenseId),
126         VMSTATE_UINT8(cu_model, SenseId),
127         VMSTATE_UINT16(dev_type, SenseId),
128         VMSTATE_UINT8(dev_model, SenseId),
129         VMSTATE_UINT8(unused, SenseId),
130         VMSTATE_STRUCT_ARRAY(ciw, SenseId, MAX_CIWS, 0, vmstate_ciw, CIW),
131         VMSTATE_END_OF_LIST()
132     }
133 };
134 
135 static int subch_dev_post_load(void *opaque, int version_id);
136 static void subch_dev_pre_save(void *opaque);
137 
138 const char err_hint_devno[] = "Devno mismatch, tried to load wrong section!"
139     " Likely reason: some sequences of plug and unplug  can break"
140     " migration for machine versions prior to  2.7 (known design flaw).";
141 
142 const VMStateDescription vmstate_subch_dev = {
143     .name = "s390_subch_dev",
144     .version_id = 1,
145     .minimum_version_id = 1,
146     .post_load = subch_dev_post_load,
147     .pre_save = subch_dev_pre_save,
148     .fields = (VMStateField[]) {
149         VMSTATE_UINT8_EQUAL(cssid, SubchDev, "Bug!"),
150         VMSTATE_UINT8_EQUAL(ssid, SubchDev, "Bug!"),
151         VMSTATE_UINT16(migrated_schid, SubchDev),
152         VMSTATE_UINT16_EQUAL(devno, SubchDev, err_hint_devno),
153         VMSTATE_BOOL(thinint_active, SubchDev),
154         VMSTATE_STRUCT(curr_status, SubchDev, 0, vmstate_schib, SCHIB),
155         VMSTATE_UINT8_ARRAY(sense_data, SubchDev, 32),
156         VMSTATE_UINT64(channel_prog, SubchDev),
157         VMSTATE_STRUCT(last_cmd, SubchDev, 0, vmstate_ccw1, CCW1),
158         VMSTATE_BOOL(last_cmd_valid, SubchDev),
159         VMSTATE_STRUCT(id, SubchDev, 0, vmstate_sense_id, SenseId),
160         VMSTATE_BOOL(ccw_fmt_1, SubchDev),
161         VMSTATE_UINT8(ccw_no_data_cnt, SubchDev),
162         VMSTATE_END_OF_LIST()
163     }
164 };
165 
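/*
 * Indicator addresses are migrated via a temporary structure carrying only
 * addr and len; post load re-resolves the shared IndAddr object through
 * get_indicator() (or clears the pointer if len is 0).
 */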
166 typedef struct IndAddrPtrTmp {
167     IndAddr **parent;
168     uint64_t addr;
169     int32_t len;
170 } IndAddrPtrTmp;
171 
172 static int post_load_ind_addr(void *opaque, int version_id)
173 {
174     IndAddrPtrTmp *ptmp = opaque;
175     IndAddr **ind_addr = ptmp->parent;
176 
177     if (ptmp->len != 0) {
178         *ind_addr = get_indicator(ptmp->addr, ptmp->len);
179     } else {
180         *ind_addr = NULL;
181     }
182     return 0;
183 }
184 
185 static void pre_save_ind_addr(void *opaque)
186 {
187     IndAddrPtrTmp *ptmp = opaque;
188     IndAddr *ind_addr = *(ptmp->parent);
189 
190     if (ind_addr != NULL) {
191         ptmp->len = ind_addr->len;
192         ptmp->addr = ind_addr->addr;
193     } else {
194         ptmp->len = 0;
195         ptmp->addr = 0L;
196     }
197 }
198 
199 const VMStateDescription vmstate_ind_addr_tmp = {
200     .name = "s390_ind_addr_tmp",
201     .pre_save = pre_save_ind_addr,
202     .post_load = post_load_ind_addr,
203 
204     .fields = (VMStateField[]) {
205         VMSTATE_INT32(len, IndAddrPtrTmp),
206         VMSTATE_UINT64(addr, IndAddrPtrTmp),
207         VMSTATE_END_OF_LIST()
208     }
209 };
210 
211 const VMStateDescription vmstate_ind_addr = {
212     .name = "s390_ind_addr_tmp",
213     .fields = (VMStateField[]) {
214         VMSTATE_WITH_TMP(IndAddr*, IndAddrPtrTmp, vmstate_ind_addr_tmp),
215         VMSTATE_END_OF_LIST()
216     }
217 };
218 
219 typedef struct CssImage {
220     SubchSet *sch_set[MAX_SSID + 1];
221     ChpInfo chpids[MAX_CHPID + 1];
222 } CssImage;
223 
224 typedef struct IoAdapter {
225     uint32_t id;
226     uint8_t type;
227     uint8_t isc;
228 } IoAdapter;
229 
230 typedef struct ChannelSubSys {
231     QTAILQ_HEAD(, CrwContainer) pending_crws;
232     bool sei_pending;
233     bool do_crw_mchk;
234     bool crws_lost;
235     uint8_t max_cssid;
236     uint8_t max_ssid;
237     bool chnmon_active;
238     uint64_t chnmon_area;
239     CssImage *css[MAX_CSSID + 1];
240     uint8_t default_cssid;
241     IoAdapter *io_adapters[CSS_IO_ADAPTER_TYPE_NUMS][MAX_ISC + 1];
242     QTAILQ_HEAD(, IndAddr) indicator_addresses;
243 } ChannelSubSys;
244 
245 static ChannelSubSys channel_subsys = {
246     .pending_crws = QTAILQ_HEAD_INITIALIZER(channel_subsys.pending_crws),
247     .do_crw_mchk = true,
248     .sei_pending = false,
250     .crws_lost = false,
251     .chnmon_active = false,
252     .indicator_addresses =
253         QTAILQ_HEAD_INITIALIZER(channel_subsys.indicator_addresses),
254 };
255 
256 static void subch_dev_pre_save(void *opaque)
257 {
258     SubchDev *s = opaque;
259 
260     /* Prepare migrated_schid for save */
261     s->migrated_schid = s->schid;
262 }
263 
264 static int subch_dev_post_load(void *opaque, int version_id)
265 {
266 
267     SubchDev *s = opaque;
268 
269     /* Re-assign the subchannel to migrated_schid if necessary */
270     if (s->migrated_schid != s->schid) {
271         if (css_find_subch(true, s->cssid, s->ssid, s->schid) == s) {
272             /*
273              * Cleanup the slot before moving to s->migrated_schid provided
274              * it still belongs to us, i.e. it was not changed by previous
275              * invocation of this function.
276              */
277             css_subch_assign(s->cssid, s->ssid, s->schid, s->devno, NULL);
278         }
279         /* It's OK to re-assign without a prior de-assign. */
280         s->schid = s->migrated_schid;
281         css_subch_assign(s->cssid, s->ssid, s->schid, s->devno, s);
282     }
283 
284     /*
285      * Hack alert. If we don't migrate the channel subsystem status
286      * we still need to find out if the guest enabled mss/mcss-e.
287      * If the subchannel is enabled, the guest could certainly access it,
288      * so adjust the max_ssid/max_cssid values for relevant ssid/cssid
289      * values. This is not watertight, but better than nothing.
290      */
291     if (s->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA) {
292         if (s->ssid) {
293             channel_subsys.max_ssid = MAX_SSID;
294         }
295         if (s->cssid != channel_subsys.default_cssid) {
296             channel_subsys.max_cssid = MAX_CSSID;
297         }
298     }
299     return 0;
300 }
301 
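/*
 * Indicators are shared and reference counted: if somebody already
 * registered the same guest address, reuse that entry instead of
 * allocating a new one.
 */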
302 IndAddr *get_indicator(hwaddr ind_addr, int len)
303 {
304     IndAddr *indicator;
305 
306     QTAILQ_FOREACH(indicator, &channel_subsys.indicator_addresses, sibling) {
307         if (indicator->addr == ind_addr) {
308             indicator->refcnt++;
309             return indicator;
310         }
311     }
312     indicator = g_new0(IndAddr, 1);
313     indicator->addr = ind_addr;
314     indicator->len = len;
315     indicator->refcnt = 1;
316     QTAILQ_INSERT_TAIL(&channel_subsys.indicator_addresses,
317                        indicator, sibling);
318     return indicator;
319 }
320 
321 static int s390_io_adapter_map(AdapterInfo *adapter, uint64_t map_addr,
322                                bool do_map)
323 {
324     S390FLICState *fs = s390_get_flic();
325     S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
326 
327     return fsc->io_adapter_map(fs, adapter->adapter_id, map_addr, do_map);
328 }
329 
330 void release_indicator(AdapterInfo *adapter, IndAddr *indicator)
331 {
332     assert(indicator->refcnt > 0);
333     indicator->refcnt--;
334     if (indicator->refcnt > 0) {
335         return;
336     }
337     QTAILQ_REMOVE(&channel_subsys.indicator_addresses, indicator, sibling);
338     if (indicator->map) {
339         s390_io_adapter_map(adapter, indicator->map, false);
340     }
341     g_free(indicator);
342 }
343 
344 int map_indicator(AdapterInfo *adapter, IndAddr *indicator)
345 {
346     int ret;
347 
348     if (indicator->map) {
349         return 0; /* already mapped is not an error */
350     }
351     indicator->map = indicator->addr;
352     ret = s390_io_adapter_map(adapter, indicator->map, true);
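    /* Treat -ENOSYS (mapping not supported by the flic backend) as success. */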
353     if ((ret != 0) && (ret != -ENOSYS)) {
354         goto out_err;
355     }
356     return 0;
357 
358 out_err:
359     indicator->map = 0;
360     return ret;
361 }
362 
363 int css_create_css_image(uint8_t cssid, bool default_image)
364 {
365     trace_css_new_image(cssid, default_image ? "(default)" : "");
366     /* 255 is reserved */
367     if (cssid == 255) {
368         return -EINVAL;
369     }
370     if (channel_subsys.css[cssid]) {
371         return -EBUSY;
372     }
373     channel_subsys.css[cssid] = g_malloc0(sizeof(CssImage));
374     if (default_image) {
375         channel_subsys.default_cssid = cssid;
376     }
377     return 0;
378 }
379 
380 uint32_t css_get_adapter_id(CssIoAdapterType type, uint8_t isc)
381 {
382     if (type >= CSS_IO_ADAPTER_TYPE_NUMS || isc > MAX_ISC ||
383         !channel_subsys.io_adapters[type][isc]) {
384         return -1;
385     }
386 
387     return channel_subsys.io_adapters[type][isc]->id;
388 }
389 
390 /**
391  * css_register_io_adapters: Register I/O adapters per ISC during init
392  *
393  * @swap: whether byte swapping is needed.
394  * @maskable: whether the adapter is subject to the mask operation.
395  * @errp: location to store error information.
396  */
397 void css_register_io_adapters(CssIoAdapterType type, bool swap, bool maskable,
398                               Error **errp)
399 {
400     uint32_t id;
401     int ret, isc;
402     IoAdapter *adapter;
403     S390FLICState *fs = s390_get_flic();
404     S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
405 
406     /*
407      * Disallow multiple registrations for the same device type.
408      * Report an error if registering for an already registered type.
409      */
410     if (channel_subsys.io_adapters[type][0]) {
411         error_setg(errp, "Adapters for type %d already registered", type);
412     }
413 
414     for (isc = 0; isc <= MAX_ISC; isc++) {
415         id = (type << 3) | isc;
416         ret = fsc->register_io_adapter(fs, id, isc, swap, maskable);
417         if (ret == 0) {
418             adapter = g_new0(IoAdapter, 1);
419             adapter->id = id;
420             adapter->isc = isc;
421             adapter->type = type;
422             channel_subsys.io_adapters[type][isc] = adapter;
423         } else {
424             error_setg_errno(errp, -ret, "Unexpected error %d when "
425                              "registering adapter %d", ret, id);
426             break;
427         }
428     }
429 
430     /*
431      * No need to free registered adapters in kvm: kvm will clean up
432      * when the machine goes away.
433      */
434     if (ret) {
435         for (isc--; isc >= 0; isc--) {
436             g_free(channel_subsys.io_adapters[type][isc]);
437             channel_subsys.io_adapters[type][isc] = NULL;
438         }
439     }
440 
441 }
442 
443 static void css_clear_io_interrupt(uint16_t subchannel_id,
444                                    uint16_t subchannel_nr)
445 {
446     Error *err = NULL;
447     static bool no_clear_irq;
448     S390FLICState *fs = s390_get_flic();
449     S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
450     int r;
451 
452     if (unlikely(no_clear_irq)) {
453         return;
454     }
455     r = fsc->clear_io_irq(fs, subchannel_id, subchannel_nr);
456     switch (r) {
457     case 0:
458         break;
459     case -ENOSYS:
460         no_clear_irq = true;
461         /*
462          * Ignore unavailability, as the user can't do anything
463          * about it anyway.
464          */
465         break;
466     default:
467         error_setg_errno(&err, -r, "unexpected error condition");
468         error_propagate(&error_abort, err);
469     }
470 }
471 
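/*
 * Build the 16 bit subchannel id used in I/O interruption codes. The cssid
 * is only encoded once multiple channel subsystem images are in use
 * (max_cssid > 0); the lowest bit is always one.
 */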
472 static inline uint16_t css_do_build_subchannel_id(uint8_t cssid, uint8_t ssid)
473 {
474     if (channel_subsys.max_cssid > 0) {
475         return (cssid << 8) | (1 << 3) | (ssid << 1) | 1;
476     }
477     return (ssid << 1) | 1;
478 }
479 
480 uint16_t css_build_subchannel_id(SubchDev *sch)
481 {
482     return css_do_build_subchannel_id(sch->cssid, sch->ssid);
483 }
484 
485 void css_inject_io_interrupt(SubchDev *sch)
486 {
487     uint8_t isc = (sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ISC) >> 11;
488 
489     trace_css_io_interrupt(sch->cssid, sch->ssid, sch->schid,
490                            sch->curr_status.pmcw.intparm, isc, "");
491     s390_io_interrupt(css_build_subchannel_id(sch),
492                       sch->schid,
493                       sch->curr_status.pmcw.intparm,
494                       isc << 27);
495 }
496 
497 void css_conditional_io_interrupt(SubchDev *sch)
498 {
499     /*
500      * If the subchannel is not currently status pending, make it pending
501      * with alert status.
502      */
503     if (!(sch->curr_status.scsw.ctrl & SCSW_STCTL_STATUS_PEND)) {
504         uint8_t isc = (sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ISC) >> 11;
505 
506         trace_css_io_interrupt(sch->cssid, sch->ssid, sch->schid,
507                                sch->curr_status.pmcw.intparm, isc,
508                                "(unsolicited)");
509         sch->curr_status.scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
510         sch->curr_status.scsw.ctrl |=
511             SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
512         /* Inject an I/O interrupt. */
513         s390_io_interrupt(css_build_subchannel_id(sch),
514                           sch->schid,
515                           sch->curr_status.pmcw.intparm,
516                           isc << 27);
517     }
518 }
519 
520 void css_adapter_interrupt(uint8_t isc)
521 {
522     uint32_t io_int_word = (isc << 27) | IO_INT_WORD_AI;
523 
524     trace_css_adapter_interrupt(isc);
525     s390_io_interrupt(0, 0, 0, io_int_word);
526 }
527 
528 static void sch_handle_clear_func(SubchDev *sch)
529 {
530     PMCW *p = &sch->curr_status.pmcw;
531     SCSW *s = &sch->curr_status.scsw;
532     int path;
533 
534     /* Path management: In our simple css, we always choose the only path. */
535     path = 0x80;
536 
537     /* Reset values prior to 'issuing the clear signal'. */
538     p->lpum = 0;
539     p->pom = 0xff;
540     s->flags &= ~SCSW_FLAGS_MASK_PNO;
541 
542     /* We always 'attempt to issue the clear signal', and we always succeed. */
543     sch->channel_prog = 0x0;
544     sch->last_cmd_valid = false;
545     s->ctrl &= ~SCSW_ACTL_CLEAR_PEND;
546     s->ctrl |= SCSW_STCTL_STATUS_PEND;
547 
548     s->dstat = 0;
549     s->cstat = 0;
550     p->lpum = path;
551 
552 }
553 
554 static void sch_handle_halt_func(SubchDev *sch)
555 {
556 
557     PMCW *p = &sch->curr_status.pmcw;
558     SCSW *s = &sch->curr_status.scsw;
559     hwaddr curr_ccw = sch->channel_prog;
560     int path;
561 
562     /* Path management: In our simple css, we always choose the only path. */
563     path = 0x80;
564 
565     /* We always 'attempt to issue the halt signal', and we always succeed. */
566     sch->channel_prog = 0x0;
567     sch->last_cmd_valid = false;
568     s->ctrl &= ~SCSW_ACTL_HALT_PEND;
569     s->ctrl |= SCSW_STCTL_STATUS_PEND;
570 
571     if ((s->ctrl & (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) ||
572         !((s->ctrl & SCSW_ACTL_START_PEND) ||
573           (s->ctrl & SCSW_ACTL_SUSP))) {
574         s->dstat = SCSW_DSTAT_DEVICE_END;
575     }
576     if ((s->ctrl & (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) ||
577         (s->ctrl & SCSW_ACTL_SUSP)) {
578         s->cpa = curr_ccw + 8;
579     }
580     s->cstat = 0;
581     p->lpum = path;
582 
583 }
584 
585 static void copy_sense_id_to_guest(SenseId *dest, SenseId *src)
586 {
587     int i;
588 
589     dest->reserved = src->reserved;
590     dest->cu_type = cpu_to_be16(src->cu_type);
591     dest->cu_model = src->cu_model;
592     dest->dev_type = cpu_to_be16(src->dev_type);
593     dest->dev_model = src->dev_model;
594     dest->unused = src->unused;
595     for (i = 0; i < ARRAY_SIZE(dest->ciw); i++) {
596         dest->ciw[i].type = src->ciw[i].type;
597         dest->ciw[i].command = src->ciw[i].command;
598         dest->ciw[i].count = cpu_to_be16(src->ciw[i].count);
599     }
600 }
601 
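/*
 * Fetch a CCW from guest memory and normalize it to format-1; for a
 * format-0 TIC, flags and count are simply cleared.
 */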
602 static CCW1 copy_ccw_from_guest(hwaddr addr, bool fmt1)
603 {
604     CCW0 tmp0;
605     CCW1 tmp1;
606     CCW1 ret;
607 
608     if (fmt1) {
609         cpu_physical_memory_read(addr, &tmp1, sizeof(tmp1));
610         ret.cmd_code = tmp1.cmd_code;
611         ret.flags = tmp1.flags;
612         ret.count = be16_to_cpu(tmp1.count);
613         ret.cda = be32_to_cpu(tmp1.cda);
614     } else {
615         cpu_physical_memory_read(addr, &tmp0, sizeof(tmp0));
616         if ((tmp0.cmd_code & 0x0f) == CCW_CMD_TIC) {
617             ret.cmd_code = CCW_CMD_TIC;
618             ret.flags = 0;
619             ret.count = 0;
620         } else {
621             ret.cmd_code = tmp0.cmd_code;
622             ret.flags = tmp0.flags;
623             ret.count = be16_to_cpu(tmp0.count);
624         }
625         ret.cda = be16_to_cpu(tmp0.cda1) | (tmp0.cda0 << 16);
626     }
627     return ret;
628 }
629 
630 static int css_interpret_ccw(SubchDev *sch, hwaddr ccw_addr,
631                              bool suspend_allowed)
632 {
633     int ret;
634     bool check_len;
635     int len;
636     CCW1 ccw;
637 
638     if (!ccw_addr) {
639         return -EIO;
640     }
641 
642     /* Translate everything to format-1 ccws - the information is the same. */
643     ccw = copy_ccw_from_guest(ccw_addr, sch->ccw_fmt_1);
644 
645     /* Check for invalid command codes. */
646     if ((ccw.cmd_code & 0x0f) == 0) {
647         return -EINVAL;
648     }
649     if (((ccw.cmd_code & 0x0f) == CCW_CMD_TIC) &&
650         ((ccw.cmd_code & 0xf0) != 0)) {
651         return -EINVAL;
652     }
653     if (!sch->ccw_fmt_1 && (ccw.count == 0) &&
654         (ccw.cmd_code != CCW_CMD_TIC)) {
655         return -EINVAL;
656     }
657 
658     /* We don't support MIDA. */
659     if (ccw.flags & CCW_FLAG_MIDA) {
660         return -EINVAL;
661     }
662 
663     if (ccw.flags & CCW_FLAG_SUSPEND) {
664         return suspend_allowed ? -EINPROGRESS : -EINVAL;
665     }
666 
667     check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));
668 
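    /*
     * Cap the number of CCWs without data transfer in one channel program,
     * so that a guest cannot keep us busy with an endless chain of them.
     */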
669     if (!ccw.cda) {
670         if (sch->ccw_no_data_cnt == 255) {
671             return -EINVAL;
672         }
673         sch->ccw_no_data_cnt++;
674     }
675 
676     /* Look at the command. */
677     switch (ccw.cmd_code) {
678     case CCW_CMD_NOOP:
679         /* Nothing to do. */
680         ret = 0;
681         break;
682     case CCW_CMD_BASIC_SENSE:
683         if (check_len) {
684             if (ccw.count != sizeof(sch->sense_data)) {
685                 ret = -EINVAL;
686                 break;
687             }
688         }
689         len = MIN(ccw.count, sizeof(sch->sense_data));
690         cpu_physical_memory_write(ccw.cda, sch->sense_data, len);
691         sch->curr_status.scsw.count = ccw.count - len;
692         memset(sch->sense_data, 0, sizeof(sch->sense_data));
693         ret = 0;
694         break;
695     case CCW_CMD_SENSE_ID:
696     {
697         SenseId sense_id;
698 
699         copy_sense_id_to_guest(&sense_id, &sch->id);
700         /* Sense ID information is device specific. */
701         if (check_len) {
702             if (ccw.count != sizeof(sense_id)) {
703                 ret = -EINVAL;
704                 break;
705             }
706         }
707         len = MIN(ccw.count, sizeof(sense_id));
708         /*
709          * Only indicate 0xff in the first sense byte if we actually
710          * have enough room to store at least bytes 0-3.
711          */
712         if (len >= 4) {
713             sense_id.reserved = 0xff;
714         } else {
715             sense_id.reserved = 0;
716         }
717         cpu_physical_memory_write(ccw.cda, &sense_id, len);
718         sch->curr_status.scsw.count = ccw.count - len;
719         ret = 0;
720         break;
721     }
722     case CCW_CMD_TIC:
723         if (sch->last_cmd_valid && (sch->last_cmd.cmd_code == CCW_CMD_TIC)) {
724             ret = -EINVAL;
725             break;
726         }
727         if (ccw.flags & (CCW_FLAG_CC | CCW_FLAG_DC)) {
728             ret = -EINVAL;
729             break;
730         }
731         sch->channel_prog = ccw.cda;
732         ret = -EAGAIN;
733         break;
734     default:
735         if (sch->ccw_cb) {
736             /* Handle device specific commands. */
737             ret = sch->ccw_cb(sch, ccw);
738         } else {
739             ret = -ENOSYS;
740         }
741         break;
742     }
743     sch->last_cmd = ccw;
744     sch->last_cmd_valid = true;
745     if (ret == 0) {
746         if (ccw.flags & CCW_FLAG_CC) {
747             sch->channel_prog += 8;
748             ret = -EAGAIN;
749         }
750     }
751 
752     return ret;
753 }
754 
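/*
 * Start function for emulated devices: walk the channel program CCW by CCW
 * and translate the result of each CCW into the corresponding subchannel
 * status (device/channel status, deferred cc, suspension, ...).
 */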
755 static void sch_handle_start_func_virtual(SubchDev *sch, ORB *orb)
756 {
757 
758     PMCW *p = &sch->curr_status.pmcw;
759     SCSW *s = &sch->curr_status.scsw;
760     int path;
761     int ret;
762     bool suspend_allowed;
763 
764     /* Path management: In our simple css, we always choose the only path. */
765     path = 0x80;
766 
767     if (!(s->ctrl & SCSW_ACTL_SUSP)) {
768         /* Start Function triggered via ssch, i.e. we have an ORB */
769         s->cstat = 0;
770         s->dstat = 0;
771         /* Look at the orb and try to execute the channel program. */
772         assert(orb != NULL); /* resume does not pass an orb */
773         p->intparm = orb->intparm;
774         if (!(orb->lpm & path)) {
775             /* Generate a deferred cc 3 condition. */
776             s->flags |= SCSW_FLAGS_MASK_CC;
777             s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
778             s->ctrl |= (SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND);
779             return;
780         }
781         sch->ccw_fmt_1 = !!(orb->ctrl0 & ORB_CTRL0_MASK_FMT);
782         s->flags |= (sch->ccw_fmt_1) ? SCSW_FLAGS_MASK_FMT : 0;
783         sch->ccw_no_data_cnt = 0;
784         suspend_allowed = !!(orb->ctrl0 & ORB_CTRL0_MASK_SPND);
785     } else {
786         /* Start Function resumed via rsch, i.e. we don't have an
787          * ORB */
788         s->ctrl &= ~(SCSW_ACTL_SUSP | SCSW_ACTL_RESUME_PEND);
789         /* The channel program had been suspended before. */
790         suspend_allowed = true;
791     }
792     sch->last_cmd_valid = false;
793     do {
794         ret = css_interpret_ccw(sch, sch->channel_prog, suspend_allowed);
795         switch (ret) {
796         case -EAGAIN:
797             /* ccw chain, continue processing */
798             break;
799         case 0:
800             /* success */
801             s->ctrl &= ~SCSW_ACTL_START_PEND;
802             s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
803             s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
804                     SCSW_STCTL_STATUS_PEND;
805             s->dstat = SCSW_DSTAT_CHANNEL_END | SCSW_DSTAT_DEVICE_END;
806             s->cpa = sch->channel_prog + 8;
807             break;
808         case -EIO:
809             /* I/O errors, status depends on specific devices */
810             break;
811         case -ENOSYS:
812             /* unsupported command, generate unit check (command reject) */
813             s->ctrl &= ~SCSW_ACTL_START_PEND;
814             s->dstat = SCSW_DSTAT_UNIT_CHECK;
815             /* Set sense bit 0 in ecw0. */
816             sch->sense_data[0] = 0x80;
817             s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
818             s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
819                     SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
820             s->cpa = sch->channel_prog + 8;
821             break;
822         case -EFAULT:
823             /* memory problem, generate channel data check */
824             s->ctrl &= ~SCSW_ACTL_START_PEND;
825             s->cstat = SCSW_CSTAT_DATA_CHECK;
826             s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
827             s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
828                     SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
829             s->cpa = sch->channel_prog + 8;
830             break;
831         case -EBUSY:
832             /* subchannel busy, generate deferred cc 1 */
833             s->flags &= ~SCSW_FLAGS_MASK_CC;
834             s->flags |= (1 << 8);
835             s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
836             s->ctrl |= SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
837             break;
838         case -EINPROGRESS:
839             /* channel program has been suspended */
840             s->ctrl &= ~SCSW_ACTL_START_PEND;
841             s->ctrl |= SCSW_ACTL_SUSP;
842             break;
843         default:
844             /* error, generate channel program check */
845             s->ctrl &= ~SCSW_ACTL_START_PEND;
846             s->cstat = SCSW_CSTAT_PROG_CHECK;
847             s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
848             s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
849                     SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
850             s->cpa = sch->channel_prog + 8;
851             break;
852         }
853     } while (ret == -EAGAIN);
854 
855 }
856 
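/*
 * Start function for passthrough devices (e.g. vfio-ccw): the ORB is handed
 * to the backend via s390_ccw_cmd_request(); only channel programs using
 * prefetch and 64 bit IDAWs are accepted.
 */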
857 static int sch_handle_start_func_passthrough(SubchDev *sch, ORB *orb)
858 {
859 
860     PMCW *p = &sch->curr_status.pmcw;
861     SCSW *s = &sch->curr_status.scsw;
862     int ret;
863 
864     if (!(s->ctrl & SCSW_ACTL_SUSP)) {
865         assert(orb != NULL);
866         p->intparm = orb->intparm;
867     }
868 
869     /*
870      * Only support prefetch enable mode.
871      * Only support 64-bit addressing IDALs.
872      */
873     if (!(orb->ctrl0 & ORB_CTRL0_MASK_PFCH) ||
874         !(orb->ctrl0 & ORB_CTRL0_MASK_C64)) {
875         return -EINVAL;
876     }
877 
878     ret = s390_ccw_cmd_request(orb, s, sch->driver_data);
879     switch (ret) {
880     /* Currently we don't update the control block, just return the cc code. */
881     case 0:
882         break;
883     case -EBUSY:
884         break;
885     case -ENODEV:
886         break;
887     case -EACCES:
888         /* Let's reflect an inaccessible host device by cc 3. */
889         ret = -ENODEV;
890         break;
891     default:
892         /*
893          * All other return codes will trigger a program check,
894          * or set cc to 1.
895          */
896         break;
897     }
898 
899     return ret;
900 }
901 
902 /*
903  * On real machines, this would run asynchronously to the main vcpus.
904  * We might want to make some parts of the ssch handling (interpreting
905  * read/writes) asynchronous later on if we start supporting more than
906  * our current very simple devices.
907  */
908 int do_subchannel_work_virtual(SubchDev *sch, ORB *orb)
909 {
910 
911     SCSW *s = &sch->curr_status.scsw;
912 
913     if (s->ctrl & SCSW_FCTL_CLEAR_FUNC) {
914         sch_handle_clear_func(sch);
915     } else if (s->ctrl & SCSW_FCTL_HALT_FUNC) {
916         sch_handle_halt_func(sch);
917     } else if (s->ctrl & SCSW_FCTL_START_FUNC) {
918         /* Triggered by both ssch and rsch. */
919         sch_handle_start_func_virtual(sch, orb);
920     } else {
921         /* Cannot happen. */
922         return 0;
923     }
924     css_inject_io_interrupt(sch);
925     return 0;
926 }
927 
928 int do_subchannel_work_passthrough(SubchDev *sch, ORB *orb)
929 {
930     int ret;
931     SCSW *s = &sch->curr_status.scsw;
932 
933     if (s->ctrl & SCSW_FCTL_CLEAR_FUNC) {
934         /* TODO: Clear handling */
935         sch_handle_clear_func(sch);
936         ret = 0;
937     } else if (s->ctrl & SCSW_FCTL_HALT_FUNC) {
938         /* TODO: Halt handling */
939         sch_handle_halt_func(sch);
940         ret = 0;
941     } else if (s->ctrl & SCSW_FCTL_START_FUNC) {
942         ret = sch_handle_start_func_passthrough(sch, orb);
943     } else {
944         /* Cannot happen. */
945         return -ENODEV;
946     }
947 
948     return ret;
949 }
950 
951 static int do_subchannel_work(SubchDev *sch, ORB *orb)
952 {
953     if (sch->do_subchannel_work) {
954         return sch->do_subchannel_work(sch, orb);
955     } else {
956         return -EINVAL;
957     }
958 }
959 
960 static void copy_pmcw_to_guest(PMCW *dest, const PMCW *src)
961 {
962     int i;
963 
964     dest->intparm = cpu_to_be32(src->intparm);
965     dest->flags = cpu_to_be16(src->flags);
966     dest->devno = cpu_to_be16(src->devno);
967     dest->lpm = src->lpm;
968     dest->pnom = src->pnom;
969     dest->lpum = src->lpum;
970     dest->pim = src->pim;
971     dest->mbi = cpu_to_be16(src->mbi);
972     dest->pom = src->pom;
973     dest->pam = src->pam;
974     for (i = 0; i < ARRAY_SIZE(dest->chpid); i++) {
975         dest->chpid[i] = src->chpid[i];
976     }
977     dest->chars = cpu_to_be32(src->chars);
978 }
979 
980 void copy_scsw_to_guest(SCSW *dest, const SCSW *src)
981 {
982     dest->flags = cpu_to_be16(src->flags);
983     dest->ctrl = cpu_to_be16(src->ctrl);
984     dest->cpa = cpu_to_be32(src->cpa);
985     dest->dstat = src->dstat;
986     dest->cstat = src->cstat;
987     dest->count = cpu_to_be16(src->count);
988 }
989 
990 static void copy_schib_to_guest(SCHIB *dest, const SCHIB *src)
991 {
992     int i;
993 
994     copy_pmcw_to_guest(&dest->pmcw, &src->pmcw);
995     copy_scsw_to_guest(&dest->scsw, &src->scsw);
996     dest->mba = cpu_to_be64(src->mba);
997     for (i = 0; i < ARRAY_SIZE(dest->mda); i++) {
998         dest->mda[i] = src->mda[i];
999     }
1000 }
1001 
1002 int css_do_stsch(SubchDev *sch, SCHIB *schib)
1003 {
1004     /* Use current status. */
1005     copy_schib_to_guest(schib, &sch->curr_status);
1006     return 0;
1007 }
1008 
1009 static void copy_pmcw_from_guest(PMCW *dest, const PMCW *src)
1010 {
1011     int i;
1012 
1013     dest->intparm = be32_to_cpu(src->intparm);
1014     dest->flags = be16_to_cpu(src->flags);
1015     dest->devno = be16_to_cpu(src->devno);
1016     dest->lpm = src->lpm;
1017     dest->pnom = src->pnom;
1018     dest->lpum = src->lpum;
1019     dest->pim = src->pim;
1020     dest->mbi = be16_to_cpu(src->mbi);
1021     dest->pom = src->pom;
1022     dest->pam = src->pam;
1023     for (i = 0; i < ARRAY_SIZE(dest->chpid); i++) {
1024         dest->chpid[i] = src->chpid[i];
1025     }
1026     dest->chars = be32_to_cpu(src->chars);
1027 }
1028 
1029 static void copy_scsw_from_guest(SCSW *dest, const SCSW *src)
1030 {
1031     dest->flags = be16_to_cpu(src->flags);
1032     dest->ctrl = be16_to_cpu(src->ctrl);
1033     dest->cpa = be32_to_cpu(src->cpa);
1034     dest->dstat = src->dstat;
1035     dest->cstat = src->cstat;
1036     dest->count = be16_to_cpu(src->count);
1037 }
1038 
1039 static void copy_schib_from_guest(SCHIB *dest, const SCHIB *src)
1040 {
1041     int i;
1042 
1043     copy_pmcw_from_guest(&dest->pmcw, &src->pmcw);
1044     copy_scsw_from_guest(&dest->scsw, &src->scsw);
1045     dest->mba = be64_to_cpu(src->mba);
1046     for (i = 0; i < ARRAY_SIZE(dest->mda); i++) {
1047         dest->mda[i] = src->mda[i];
1048     }
1049 }
1050 
1051 int css_do_msch(SubchDev *sch, const SCHIB *orig_schib)
1052 {
1053     SCSW *s = &sch->curr_status.scsw;
1054     PMCW *p = &sch->curr_status.pmcw;
1055     uint16_t oldflags;
1056     int ret;
1057     SCHIB schib;
1058 
1059     if (!(sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_DNV)) {
1060         ret = 0;
1061         goto out;
1062     }
1063 
1064     if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
1065         ret = -EINPROGRESS;
1066         goto out;
1067     }
1068 
1069     if (s->ctrl &
1070         (SCSW_FCTL_START_FUNC|SCSW_FCTL_HALT_FUNC|SCSW_FCTL_CLEAR_FUNC)) {
1071         ret = -EBUSY;
1072         goto out;
1073     }
1074 
1075     copy_schib_from_guest(&schib, orig_schib);
1076     /* Only update the program-modifiable fields. */
1077     p->intparm = schib.pmcw.intparm;
1078     oldflags = p->flags;
1079     p->flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
1080                   PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
1081                   PMCW_FLAGS_MASK_MP);
1082     p->flags |= schib.pmcw.flags &
1083             (PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
1084              PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
1085              PMCW_FLAGS_MASK_MP);
1086     p->lpm = schib.pmcw.lpm;
1087     p->mbi = schib.pmcw.mbi;
1088     p->pom = schib.pmcw.pom;
1089     p->chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE);
1090     p->chars |= schib.pmcw.chars &
1091             (PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE);
1092     sch->curr_status.mba = schib.mba;
1093 
1094     /* Has the subchannel been disabled? */
1095     if (sch->disable_cb && (oldflags & PMCW_FLAGS_MASK_ENA) != 0
1096         && (p->flags & PMCW_FLAGS_MASK_ENA) == 0) {
1097         sch->disable_cb(sch);
1098     }
1099 
1100     ret = 0;
1101 
1102 out:
1103     return ret;
1104 }
1105 
1106 int css_do_xsch(SubchDev *sch)
1107 {
1108     SCSW *s = &sch->curr_status.scsw;
1109     PMCW *p = &sch->curr_status.pmcw;
1110     int ret;
1111 
1112     if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
1113         ret = -ENODEV;
1114         goto out;
1115     }
1116 
1117     if (!(s->ctrl & SCSW_CTRL_MASK_FCTL) ||
1118         ((s->ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) ||
1119         (!(s->ctrl &
1120            (SCSW_ACTL_RESUME_PEND | SCSW_ACTL_START_PEND | SCSW_ACTL_SUSP))) ||
1121         (s->ctrl & SCSW_ACTL_SUBCH_ACTIVE)) {
1122         ret = -EINPROGRESS;
1123         goto out;
1124     }
1125 
1126     if (s->ctrl & SCSW_CTRL_MASK_STCTL) {
1127         ret = -EBUSY;
1128         goto out;
1129     }
1130 
1131     /* Cancel the current operation. */
1132     s->ctrl &= ~(SCSW_FCTL_START_FUNC |
1133                  SCSW_ACTL_RESUME_PEND |
1134                  SCSW_ACTL_START_PEND |
1135                  SCSW_ACTL_SUSP);
1136     sch->channel_prog = 0x0;
1137     sch->last_cmd_valid = false;
1138     s->dstat = 0;
1139     s->cstat = 0;
1140     ret = 0;
1141 
1142 out:
1143     return ret;
1144 }
1145 
1146 int css_do_csch(SubchDev *sch)
1147 {
1148     SCSW *s = &sch->curr_status.scsw;
1149     PMCW *p = &sch->curr_status.pmcw;
1150     int ret;
1151 
1152     if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
1153         ret = -ENODEV;
1154         goto out;
1155     }
1156 
1157     /* Trigger the clear function. */
1158     s->ctrl &= ~(SCSW_CTRL_MASK_FCTL | SCSW_CTRL_MASK_ACTL);
1159     s->ctrl |= SCSW_FCTL_CLEAR_FUNC | SCSW_ACTL_CLEAR_PEND;
1160 
1161     do_subchannel_work(sch, NULL);
1162     ret = 0;
1163 
1164 out:
1165     return ret;
1166 }
1167 
1168 int css_do_hsch(SubchDev *sch)
1169 {
1170     SCSW *s = &sch->curr_status.scsw;
1171     PMCW *p = &sch->curr_status.pmcw;
1172     int ret;
1173 
1174     if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
1175         ret = -ENODEV;
1176         goto out;
1177     }
1178 
1179     if (((s->ctrl & SCSW_CTRL_MASK_STCTL) == SCSW_STCTL_STATUS_PEND) ||
1180         (s->ctrl & (SCSW_STCTL_PRIMARY |
1181                     SCSW_STCTL_SECONDARY |
1182                     SCSW_STCTL_ALERT))) {
1183         ret = -EINPROGRESS;
1184         goto out;
1185     }
1186 
1187     if (s->ctrl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
1188         ret = -EBUSY;
1189         goto out;
1190     }
1191 
1192     /* Trigger the halt function. */
1193     s->ctrl |= SCSW_FCTL_HALT_FUNC;
1194     s->ctrl &= ~SCSW_FCTL_START_FUNC;
1195     if (((s->ctrl & SCSW_CTRL_MASK_ACTL) ==
1196          (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) &&
1197         ((s->ctrl & SCSW_CTRL_MASK_STCTL) == SCSW_STCTL_INTERMEDIATE)) {
1198         s->ctrl &= ~SCSW_STCTL_STATUS_PEND;
1199     }
1200     s->ctrl |= SCSW_ACTL_HALT_PEND;
1201 
1202     do_subchannel_work(sch, NULL);
1203     ret = 0;
1204 
1205 out:
1206     return ret;
1207 }
1208 
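/*
 * Bump the measurement counter for this subchannel, either in its own
 * format-1 measurement block (mba) or in the global format-0 area that was
 * set up via schm.
 */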
1209 static void css_update_chnmon(SubchDev *sch)
1210 {
1211     if (!(sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_MME)) {
1212         /* Not active. */
1213         return;
1214     }
1215     /* The counter is conveniently located at the beginning of the struct. */
1216     if (sch->curr_status.pmcw.chars & PMCW_CHARS_MASK_MBFC) {
1217         /* Format 1, per-subchannel area. */
1218         uint32_t count;
1219 
1220         count = address_space_ldl(&address_space_memory,
1221                                   sch->curr_status.mba,
1222                                   MEMTXATTRS_UNSPECIFIED,
1223                                   NULL);
1224         count++;
1225         address_space_stl(&address_space_memory, sch->curr_status.mba, count,
1226                           MEMTXATTRS_UNSPECIFIED, NULL);
1227     } else {
1228         /* Format 0, global area. */
1229         uint32_t offset;
1230         uint16_t count;
1231 
1232         offset = sch->curr_status.pmcw.mbi << 5;
1233         count = address_space_lduw(&address_space_memory,
1234                                    channel_subsys.chnmon_area + offset,
1235                                    MEMTXATTRS_UNSPECIFIED,
1236                                    NULL);
1237         count++;
1238         address_space_stw(&address_space_memory,
1239                           channel_subsys.chnmon_area + offset, count,
1240                           MEMTXATTRS_UNSPECIFIED, NULL);
1241     }
1242 }
1243 
1244 int css_do_ssch(SubchDev *sch, ORB *orb)
1245 {
1246     SCSW *s = &sch->curr_status.scsw;
1247     PMCW *p = &sch->curr_status.pmcw;
1248     int ret;
1249 
1250     if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
1251         ret = -ENODEV;
1252         goto out;
1253     }
1254 
1255     if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
1256         ret = -EINPROGRESS;
1257         goto out;
1258     }
1259 
1260     if (s->ctrl & (SCSW_FCTL_START_FUNC |
1261                    SCSW_FCTL_HALT_FUNC |
1262                    SCSW_FCTL_CLEAR_FUNC)) {
1263         ret = -EBUSY;
1264         goto out;
1265     }
1266 
1267     /* If monitoring is active, update counter. */
1268     if (channel_subsys.chnmon_active) {
1269         css_update_chnmon(sch);
1270     }
1271     sch->channel_prog = orb->cpa;
1272     /* Trigger the start function. */
1273     s->ctrl |= (SCSW_FCTL_START_FUNC | SCSW_ACTL_START_PEND);
1274     s->flags &= ~SCSW_FLAGS_MASK_PNO;
1275 
1276     ret = do_subchannel_work(sch, orb);
1277 
1278 out:
1279     return ret;
1280 }
1281 
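/*
 * Copy an IRB to guest endianness. *irb_len excludes the extended
 * measurement words unless the subchannel has extended measurements enabled
 * and the pending status actually warrants them.
 */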
1282 static void copy_irb_to_guest(IRB *dest, const IRB *src, PMCW *pmcw,
1283                               int *irb_len)
1284 {
1285     int i;
1286     uint16_t stctl = src->scsw.ctrl & SCSW_CTRL_MASK_STCTL;
1287     uint16_t actl = src->scsw.ctrl & SCSW_CTRL_MASK_ACTL;
1288 
1289     copy_scsw_to_guest(&dest->scsw, &src->scsw);
1290 
1291     for (i = 0; i < ARRAY_SIZE(dest->esw); i++) {
1292         dest->esw[i] = cpu_to_be32(src->esw[i]);
1293     }
1294     for (i = 0; i < ARRAY_SIZE(dest->ecw); i++) {
1295         dest->ecw[i] = cpu_to_be32(src->ecw[i]);
1296     }
1297     *irb_len = sizeof(*dest) - sizeof(dest->emw);
1298 
1299     /* extended measurements enabled? */
1300     if ((src->scsw.flags & SCSW_FLAGS_MASK_ESWF) ||
1301         !(pmcw->flags & PMCW_FLAGS_MASK_TF) ||
1302         !(pmcw->chars & PMCW_CHARS_MASK_XMWME)) {
1303         return;
1304     }
1305     /* extended measurements pending? */
1306     if (!(stctl & SCSW_STCTL_STATUS_PEND)) {
1307         return;
1308     }
1309     if ((stctl & SCSW_STCTL_PRIMARY) ||
1310         (stctl == SCSW_STCTL_SECONDARY) ||
1311         ((stctl & SCSW_STCTL_INTERMEDIATE) && (actl & SCSW_ACTL_SUSP))) {
1312         for (i = 0; i < ARRAY_SIZE(dest->emw); i++) {
1313             dest->emw[i] = cpu_to_be32(src->emw[i]);
1314         }
1315     }
1316     *irb_len = sizeof(*dest);
1317 }
1318 
1319 int css_do_tsch_get_irb(SubchDev *sch, IRB *target_irb, int *irb_len)
1320 {
1321     SCSW *s = &sch->curr_status.scsw;
1322     PMCW *p = &sch->curr_status.pmcw;
1323     uint16_t stctl;
1324     IRB irb;
1325 
1326     if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
1327         return 3;
1328     }
1329 
1330     stctl = s->ctrl & SCSW_CTRL_MASK_STCTL;
1331 
1332     /* Prepare the irb for the guest. */
1333     memset(&irb, 0, sizeof(IRB));
1334 
1335     /* Copy scsw from current status. */
1336     memcpy(&irb.scsw, s, sizeof(SCSW));
1337     if (stctl & SCSW_STCTL_STATUS_PEND) {
1338         if (s->cstat & (SCSW_CSTAT_DATA_CHECK |
1339                         SCSW_CSTAT_CHN_CTRL_CHK |
1340                         SCSW_CSTAT_INTF_CTRL_CHK)) {
1341             irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF;
1342             irb.esw[0] = 0x04804000;
1343         } else {
1344             irb.esw[0] = 0x00800000;
1345         }
1346         /* If a unit check is pending, copy sense data. */
1347         if ((s->dstat & SCSW_DSTAT_UNIT_CHECK) &&
1348             (p->chars & PMCW_CHARS_MASK_CSENSE)) {
1349             int i;
1350 
1351             irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF | SCSW_FLAGS_MASK_ECTL;
1352             /* Attention: sense_data is already BE! */
1353             memcpy(irb.ecw, sch->sense_data, sizeof(sch->sense_data));
1354             for (i = 0; i < ARRAY_SIZE(irb.ecw); i++) {
1355                 irb.ecw[i] = be32_to_cpu(irb.ecw[i]);
1356             }
1357             irb.esw[1] = 0x01000000 | (sizeof(sch->sense_data) << 8);
1358         }
1359     }
1360     /* Store the irb to the guest. */
1361     copy_irb_to_guest(target_irb, &irb, p, irb_len);
1362 
1363     return ((stctl & SCSW_STCTL_STATUS_PEND) == 0);
1364 }
1365 
1366 void css_do_tsch_update_subch(SubchDev *sch)
1367 {
1368     SCSW *s = &sch->curr_status.scsw;
1369     PMCW *p = &sch->curr_status.pmcw;
1370     uint16_t stctl;
1371     uint16_t fctl;
1372     uint16_t actl;
1373 
1374     stctl = s->ctrl & SCSW_CTRL_MASK_STCTL;
1375     fctl = s->ctrl & SCSW_CTRL_MASK_FCTL;
1376     actl = s->ctrl & SCSW_CTRL_MASK_ACTL;
1377 
1378     /* Clear conditions on subchannel, if applicable. */
1379     if (stctl & SCSW_STCTL_STATUS_PEND) {
1380         s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
1381         if ((stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) ||
1382             ((fctl & SCSW_FCTL_HALT_FUNC) &&
1383              (actl & SCSW_ACTL_SUSP))) {
1384             s->ctrl &= ~SCSW_CTRL_MASK_FCTL;
1385         }
1386         if (stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) {
1387             s->flags &= ~SCSW_FLAGS_MASK_PNO;
1388             s->ctrl &= ~(SCSW_ACTL_RESUME_PEND |
1389                          SCSW_ACTL_START_PEND |
1390                          SCSW_ACTL_HALT_PEND |
1391                          SCSW_ACTL_CLEAR_PEND |
1392                          SCSW_ACTL_SUSP);
1393         } else {
1394             if ((actl & SCSW_ACTL_SUSP) &&
1395                 (fctl & SCSW_FCTL_START_FUNC)) {
1396                 s->flags &= ~SCSW_FLAGS_MASK_PNO;
1397                 if (fctl & SCSW_FCTL_HALT_FUNC) {
1398                     s->ctrl &= ~(SCSW_ACTL_RESUME_PEND |
1399                                  SCSW_ACTL_START_PEND |
1400                                  SCSW_ACTL_HALT_PEND |
1401                                  SCSW_ACTL_CLEAR_PEND |
1402                                  SCSW_ACTL_SUSP);
1403                 } else {
1404                     s->ctrl &= ~SCSW_ACTL_RESUME_PEND;
1405                 }
1406             }
1407         }
1408         /* Clear pending sense data. */
1409         if (p->chars & PMCW_CHARS_MASK_CSENSE) {
1410             memset(sch->sense_data, 0, sizeof(sch->sense_data));
1411         }
1412     }
1413 }
1414 
1415 static void copy_crw_to_guest(CRW *dest, const CRW *src)
1416 {
1417     dest->flags = cpu_to_be16(src->flags);
1418     dest->rsid = cpu_to_be16(src->rsid);
1419 }
1420 
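/*
 * Deliver the oldest pending CRW to the guest. Returns 0 if a CRW was
 * stored, or 1 (and a zeroed CRW) if none was pending, in which case CRW
 * machine checks are re-enabled.
 */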
1421 int css_do_stcrw(CRW *crw)
1422 {
1423     CrwContainer *crw_cont;
1424     int ret;
1425 
1426     crw_cont = QTAILQ_FIRST(&channel_subsys.pending_crws);
1427     if (crw_cont) {
1428         QTAILQ_REMOVE(&channel_subsys.pending_crws, crw_cont, sibling);
1429         copy_crw_to_guest(crw, &crw_cont->crw);
1430         g_free(crw_cont);
1431         ret = 0;
1432     } else {
1433         /* List was empty, turn crw machine checks on again. */
1434         memset(crw, 0, sizeof(*crw));
1435         channel_subsys.do_crw_mchk = true;
1436         ret = 1;
1437     }
1438 
1439     return ret;
1440 }
1441 
1442 static void copy_crw_from_guest(CRW *dest, const CRW *src)
1443 {
1444     dest->flags = be16_to_cpu(src->flags);
1445     dest->rsid = be16_to_cpu(src->rsid);
1446 }
1447 
1448 void css_undo_stcrw(CRW *crw)
1449 {
1450     CrwContainer *crw_cont;
1451 
1452     crw_cont = g_try_malloc0(sizeof(CrwContainer));
1453     if (!crw_cont) {
1454         channel_subsys.crws_lost = true;
1455         return;
1456     }
1457     copy_crw_from_guest(&crw_cont->crw, crw);
1458 
1459     QTAILQ_INSERT_HEAD(&channel_subsys.pending_crws, crw_cont, sibling);
1460 }
1461 
1462 int css_do_tpi(IOIntCode *int_code, int lowcore)
1463 {
1464     /* No pending interrupts for !KVM. */
1465     return 0;
1466 }
1467 
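/*
 * Store channel path descriptors for chpids f_chpid..l_chpid into buf:
 * 8 byte descriptors for rfmt 0, 32 byte descriptors for rfmt 1.
 * Returns the number of bytes written.
 */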
1468 int css_collect_chp_desc(int m, uint8_t cssid, uint8_t f_chpid, uint8_t l_chpid,
1469                          int rfmt, void *buf)
1470 {
1471     int i, desc_size;
1472     uint32_t words[8];
1473     uint32_t chpid_type_word;
1474     CssImage *css;
1475 
1476     if (!m && !cssid) {
1477         css = channel_subsys.css[channel_subsys.default_cssid];
1478     } else {
1479         css = channel_subsys.css[cssid];
1480     }
1481     if (!css) {
1482         return 0;
1483     }
1484     desc_size = 0;
1485     for (i = f_chpid; i <= l_chpid; i++) {
1486         if (css->chpids[i].in_use) {
1487             chpid_type_word = 0x80000000 | (css->chpids[i].type << 8) | i;
1488             if (rfmt == 0) {
1489                 words[0] = cpu_to_be32(chpid_type_word);
1490                 words[1] = 0;
1491                 memcpy(buf + desc_size, words, 8);
1492                 desc_size += 8;
1493             } else if (rfmt == 1) {
1494                 words[0] = cpu_to_be32(chpid_type_word);
1495                 words[1] = 0;
1496                 words[2] = 0;
1497                 words[3] = 0;
1498                 words[4] = 0;
1499                 words[5] = 0;
1500                 words[6] = 0;
1501                 words[7] = 0;
1502                 memcpy(buf + desc_size, words, 32);
1503                 desc_size += 32;
1504             }
1505         }
1506     }
1507     return desc_size;
1508 }
1509 
1510 void css_do_schm(uint8_t mbk, int update, int dct, uint64_t mbo)
1511 {
1512     /* dct is currently ignored (not really meaningful for our devices) */
1513     /* TODO: Don't ignore mbk. */
1514     if (update && !channel_subsys.chnmon_active) {
1515         /* Enable measuring. */
1516         channel_subsys.chnmon_area = mbo;
1517         channel_subsys.chnmon_active = true;
1518     }
1519     if (!update && channel_subsys.chnmon_active) {
1520         /* Disable measuring. */
1521         channel_subsys.chnmon_area = 0;
1522         channel_subsys.chnmon_active = false;
1523     }
1524 }
1525 
1526 int css_do_rsch(SubchDev *sch)
1527 {
1528     SCSW *s = &sch->curr_status.scsw;
1529     PMCW *p = &sch->curr_status.pmcw;
1530     int ret;
1531 
1532     if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
1533         ret = -ENODEV;
1534         goto out;
1535     }
1536 
1537     if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
1538         ret = -EINPROGRESS;
1539         goto out;
1540     }
1541 
1542     if (((s->ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) ||
1543         (s->ctrl & SCSW_ACTL_RESUME_PEND) ||
1544         (!(s->ctrl & SCSW_ACTL_SUSP))) {
1545         ret = -EINVAL;
1546         goto out;
1547     }
1548 
1549     /* If monitoring is active, update counter. */
1550     if (channel_subsys.chnmon_active) {
1551         css_update_chnmon(sch);
1552     }
1553 
1554     s->ctrl |= SCSW_ACTL_RESUME_PEND;
1555     do_subchannel_work(sch, NULL);
1556     ret = 0;
1557 
1558 out:
1559     return ret;
1560 }
1561 
1562 int css_do_rchp(uint8_t cssid, uint8_t chpid)
1563 {
1564     uint8_t real_cssid;
1565 
1566     if (cssid > channel_subsys.max_cssid) {
1567         return -EINVAL;
1568     }
1569     if (channel_subsys.max_cssid == 0) {
1570         real_cssid = channel_subsys.default_cssid;
1571     } else {
1572         real_cssid = cssid;
1573     }
1574     if (!channel_subsys.css[real_cssid]) {
1575         return -EINVAL;
1576     }
1577 
1578     if (!channel_subsys.css[real_cssid]->chpids[chpid].in_use) {
1579         return -ENODEV;
1580     }
1581 
1582     if (!channel_subsys.css[real_cssid]->chpids[chpid].is_virtual) {
1583         fprintf(stderr,
1584                 "rchp unsupported for non-virtual chpid %x.%02x!\n",
1585                 real_cssid, chpid);
1586         return -ENODEV;
1587     }
1588 
1589     /* We don't really use a channel path, so we're done here. */
1590     css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT,
1591                   channel_subsys.max_cssid > 0 ? 1 : 0, chpid);
1592     if (channel_subsys.max_cssid > 0) {
1593         css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT, 0, real_cssid << 8);
1594     }
1595     return 0;
1596 }
1597 
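/*
 * Check whether a subchannel id lies beyond the highest id in use in the
 * addressed subchannel set (or whether that set does not exist at all).
 */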
1598 bool css_schid_final(int m, uint8_t cssid, uint8_t ssid, uint16_t schid)
1599 {
1600     SubchSet *set;
1601     uint8_t real_cssid;
1602 
1603     real_cssid = (!m && (cssid == 0)) ? channel_subsys.default_cssid : cssid;
1604     if (ssid > MAX_SSID ||
1605         !channel_subsys.css[real_cssid] ||
1606         !channel_subsys.css[real_cssid]->sch_set[ssid]) {
1607         return true;
1608     }
1609     set = channel_subsys.css[real_cssid]->sch_set[ssid];
1610     return schid > find_last_bit(set->schids_used,
1611                                  (MAX_SCHID + 1) / sizeof(unsigned long));
1612 }
1613 
1614 unsigned int css_find_free_chpid(uint8_t cssid)
1615 {
1616     CssImage *css = channel_subsys.css[cssid];
1617     unsigned int chpid;
1618 
1619     if (!css) {
1620         return MAX_CHPID + 1;
1621     }
1622 
1623     for (chpid = 0; chpid <= MAX_CHPID; chpid++) {
1624         /* skip reserved chpid */
1625         if (chpid == VIRTIO_CCW_CHPID) {
1626             continue;
1627         }
1628         if (!css->chpids[chpid].in_use) {
1629             return chpid;
1630         }
1631     }
1632     return MAX_CHPID + 1;
1633 }
1634 
1635 static int css_add_chpid(uint8_t cssid, uint8_t chpid, uint8_t type,
1636                          bool is_virt)
1637 {
1638     CssImage *css;
1639 
1640     trace_css_chpid_add(cssid, chpid, type);
1641     css = channel_subsys.css[cssid];
1642     if (!css) {
1643         return -EINVAL;
1644     }
1645     if (css->chpids[chpid].in_use) {
1646         return -EEXIST;
1647     }
1648     css->chpids[chpid].in_use = 1;
1649     css->chpids[chpid].type = type;
1650     css->chpids[chpid].is_virtual = is_virt;
1651 
1652     css_generate_chp_crws(cssid, chpid);
1653 
1654     return 0;
1655 }
1656 
1657 void css_sch_build_virtual_schib(SubchDev *sch, uint8_t chpid, uint8_t type)
1658 {
1659     PMCW *p = &sch->curr_status.pmcw;
1660     SCSW *s = &sch->curr_status.scsw;
1661     int i;
1662     CssImage *css = channel_subsys.css[sch->cssid];
1663 
1664     assert(css != NULL);
1665     memset(p, 0, sizeof(PMCW));
1666     p->flags |= PMCW_FLAGS_MASK_DNV;
1667     p->devno = sch->devno;
1668     /* single path */
1669     p->pim = 0x80;
1670     p->pom = 0xff;
1671     p->pam = 0x80;
1672     p->chpid[0] = chpid;
1673     if (!css->chpids[chpid].in_use) {
1674         css_add_chpid(sch->cssid, chpid, type, true);
1675     }
1676 
1677     memset(s, 0, sizeof(SCSW));
1678     sch->curr_status.mba = 0;
1679     for (i = 0; i < ARRAY_SIZE(sch->curr_status.mda); i++) {
1680         sch->curr_status.mda[i] = 0;
1681     }
1682 }
1683 
1684 SubchDev *css_find_subch(uint8_t m, uint8_t cssid, uint8_t ssid, uint16_t schid)
1685 {
1686     uint8_t real_cssid;
1687 
1688     real_cssid = (!m && (cssid == 0)) ? channel_subsys.default_cssid : cssid;
1689 
1690     if (!channel_subsys.css[real_cssid]) {
1691         return NULL;
1692     }
1693 
1694     if (!channel_subsys.css[real_cssid]->sch_set[ssid]) {
1695         return NULL;
1696     }
1697 
1698     return channel_subsys.css[real_cssid]->sch_set[ssid]->sch[schid];
1699 }
1700 
1701 /**
1702  * Return free device number in subchannel set.
1703  *
1704  * Return index of the first free device number in the subchannel set
1705  * identified by @p cssid and @p ssid, beginning the search at @p
1706  * start and wrapping around at MAX_DEVNO. Return a value exceeding
1707  * MAX_DEVNO if there are no free device numbers in the subchannel
1708  * set.
1709  */
1710 static uint32_t css_find_free_devno(uint8_t cssid, uint8_t ssid,
1711                                     uint16_t start)
1712 {
1713     uint32_t round;
1714 
1715     for (round = 0; round <= MAX_DEVNO; round++) {
1716         uint16_t devno = (start + round) % MAX_DEVNO;
1717 
1718         if (!css_devno_used(cssid, ssid, devno)) {
1719             return devno;
1720         }
1721     }
1722     return MAX_DEVNO + 1;
1723 }
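
/*
 * A worked example of the wrap-around above (purely hypothetical state):
 * with start == 0xfffe and devnos 0xfffe and 0x0000 already taken in the
 * default css, ssid 0, the probe order is 0xfffe (used), 0x0000 (the modulo
 * folds 0xffff to 0, also used) and finally 0x0001, which is returned:
 *
 *     uint32_t devno = css_find_free_devno(channel_subsys.default_cssid,
 *                                          0, 0xfffe);
 *     assert(devno == 0x0001);
 */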
1724 
1725 /**
1726  * Return first free subchannel (id) in subchannel set.
1727  *
1728  * Return index of the first free subchannel in the subchannel set
1729  * identified by @p cssid and @p ssid, if there is any. Return a value
1730  * exceeding MAX_SCHID if there are no free subchannels in the
1731  * subchannel set.
1732  */
1733 static uint32_t css_find_free_subch(uint8_t cssid, uint8_t ssid)
1734 {
1735     uint32_t schid;
1736 
1737     for (schid = 0; schid <= MAX_SCHID; schid++) {
1738         if (!css_find_subch(1, cssid, ssid, schid)) {
1739             return schid;
1740         }
1741     }
1742     return MAX_SCHID + 1;
1743 }
1744 
1745 /**
1746  * Return first free subchannel (id) in subchannel set for a device number
1747  *
1748  * Verify the device number @p devno is not used yet in the subchannel
1749  * set identified by @p cssid and @p ssid. Set @p schid to the index
1750  * of the first free subchannel in the subchannel set, if there is
1751  * any. Return true if everything succeeded and false otherwise.
1752  */
1753 static bool css_find_free_subch_for_devno(uint8_t cssid, uint8_t ssid,
1754                                           uint16_t devno, uint16_t *schid,
1755                                           Error **errp)
1756 {
1757     uint32_t free_schid;
1758 
1759     assert(schid);
1760     if (css_devno_used(cssid, ssid, devno)) {
1761         error_setg(errp, "Device %x.%x.%04x already exists",
1762                    cssid, ssid, devno);
1763         return false;
1764     }
1765     free_schid = css_find_free_subch(cssid, ssid);
1766     if (free_schid > MAX_SCHID) {
1767         error_setg(errp, "No free subchannel found for %x.%x.%04x",
1768                    cssid, ssid, devno);
1769         return false;
1770     }
1771     *schid = free_schid;
1772     return true;
1773 }
1774 
1775 /**
1776  * Return first free subchannel (id) and device number
1777  *
1778  * Locate the first free subchannel and first free device number in
1779  * any of the subchannel sets of the channel subsystem identified by
1780  * @p cssid. Return false if no free subchannel / device number could
1781  * be found. Otherwise set @p ssid, @p devno and @p schid to identify
1782  * the available subchannel and device number and return true.
1783  *
1784  * May modify @p ssid, @p devno and / or @p schid even if no free
1785  * subchannel / device number could be found.
1786  */
1787 static bool css_find_free_subch_and_devno(uint8_t cssid, uint8_t *ssid,
1788                                           uint16_t *devno, uint16_t *schid,
1789                                           Error **errp)
1790 {
1791     uint32_t free_schid, free_devno;
1792 
1793     assert(ssid && devno && schid);
1794     for (*ssid = 0; *ssid <= MAX_SSID; (*ssid)++) {
1795         free_schid = css_find_free_subch(cssid, *ssid);
1796         if (free_schid > MAX_SCHID) {
1797             continue;
1798         }
1799         free_devno = css_find_free_devno(cssid, *ssid, free_schid);
1800         if (free_devno > MAX_DEVNO) {
1801             continue;
1802         }
1803         *schid = free_schid;
1804         *devno = free_devno;
1805         return true;
1806     }
1807     error_setg(errp, "Virtual channel subsystem is full!");
1808     return false;
1809 }
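
/*
 * Typical call pattern (sketch, assuming the caller only knows the css and
 * wants the channel subsystem to pick everything else; compare
 * css_create_sch() further down):
 *
 *     uint8_t ssid;
 *     uint16_t devno, schid;
 *     Error *err = NULL;
 *
 *     if (!css_find_free_subch_and_devno(channel_subsys.default_cssid,
 *                                        &ssid, &devno, &schid, &err)) {
 *         error_report_err(err);
 *         return;
 *     }
 */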
1810 
1811 bool css_subch_visible(SubchDev *sch)
1812 {
1813     if (sch->ssid > channel_subsys.max_ssid) {
1814         return false;
1815     }
1816 
1817     if (sch->cssid != channel_subsys.default_cssid) {
1818         return (channel_subsys.max_cssid > 0);
1819     }
1820 
1821     return true;
1822 }
1823 
1824 bool css_present(uint8_t cssid)
1825 {
1826     return (channel_subsys.css[cssid] != NULL);
1827 }
1828 
1829 bool css_devno_used(uint8_t cssid, uint8_t ssid, uint16_t devno)
1830 {
1831     if (!channel_subsys.css[cssid]) {
1832         return false;
1833     }
1834     if (!channel_subsys.css[cssid]->sch_set[ssid]) {
1835         return false;
1836     }
1837 
1838     return !!test_bit(devno,
1839                       channel_subsys.css[cssid]->sch_set[ssid]->devnos_used);
1840 }
1841 
1842 void css_subch_assign(uint8_t cssid, uint8_t ssid, uint16_t schid,
1843                       uint16_t devno, SubchDev *sch)
1844 {
1845     CssImage *css;
1846     SubchSet *s_set;
1847 
1848     trace_css_assign_subch(sch ? "assign" : "deassign", cssid, ssid, schid,
1849                            devno);
1850     if (!channel_subsys.css[cssid]) {
1851         fprintf(stderr,
1852                 "Suspicious call to %s (%x.%x.%04x) for non-existing css!\n",
1853                 __func__, cssid, ssid, schid);
1854         return;
1855     }
1856     css = channel_subsys.css[cssid];
1857 
1858     if (!css->sch_set[ssid]) {
1859         css->sch_set[ssid] = g_malloc0(sizeof(SubchSet));
1860     }
1861     s_set = css->sch_set[ssid];
1862 
1863     s_set->sch[schid] = sch;
1864     if (sch) {
1865         set_bit(schid, s_set->schids_used);
1866         set_bit(devno, s_set->devnos_used);
1867     } else {
1868         clear_bit(schid, s_set->schids_used);
1869         clear_bit(devno, s_set->devnos_used);
1870     }
1871 }
1872 
1873 void css_queue_crw(uint8_t rsc, uint8_t erc, int chain, uint16_t rsid)
1874 {
1875     CrwContainer *crw_cont;
1876 
1877     trace_css_crw(rsc, erc, rsid, chain ? "(chained)" : "");
1878     /* TODO: Maybe use a static crw pool? */
1879     crw_cont = g_try_malloc0(sizeof(CrwContainer));
1880     if (!crw_cont) {
1881         channel_subsys.crws_lost = true;
1882         return;
1883     }
1884     crw_cont->crw.flags = (rsc << 8) | erc;
1885     if (chain) {
1886         crw_cont->crw.flags |= CRW_FLAGS_MASK_C;
1887     }
1888     crw_cont->crw.rsid = rsid;
1889     if (channel_subsys.crws_lost) {
1890         crw_cont->crw.flags |= CRW_FLAGS_MASK_R;
1891         channel_subsys.crws_lost = false;
1892     }
1893 
1894     QTAILQ_INSERT_TAIL(&channel_subsys.pending_crws, crw_cont, sibling);
1895 
1896     if (channel_subsys.do_crw_mchk) {
1897         channel_subsys.do_crw_mchk = false;
1898         /* Inject crw pending machine check. */
1899         s390_crw_mchk();
1900     }
1901 }
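
/*
 * CRW encoding as built above: the RSC is shifted into the high byte of
 * crw.flags, the ERC occupies the low byte, CRW_FLAGS_MASK_C marks a chained
 * report, and CRW_FLAGS_MASK_R is set on the next CRW after an allocation
 * failure lost one. A minimal sketch of reporting a single, unchained
 * subchannel event for subchannel id schid:
 *
 *     css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, 0, schid);
 *
 * This queues a CRW with flags == (CRW_RSC_SUBCH << 8) | CRW_ERC_IPI and
 * rsid == schid; see css_generate_sch_crws() below for an actual caller.
 */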
1902 
1903 void css_generate_sch_crws(uint8_t cssid, uint8_t ssid, uint16_t schid,
1904                            int hotplugged, int add)
1905 {
1906     uint8_t guest_cssid;
1907     bool chain_crw;
1908 
1909     if (add && !hotplugged) {
1910         return;
1911     }
1912     if (channel_subsys.max_cssid == 0) {
1913         /* Default cssid shows up as 0. */
1914         guest_cssid = (cssid == channel_subsys.default_cssid) ? 0 : cssid;
1915     } else {
1916         /* Show real cssid to the guest. */
1917         guest_cssid = cssid;
1918     }
1919     /*
1920      * Only notify for higher subchannel sets/channel subsystems if the
1921      * guest has enabled it.
1922      */
1923     if ((ssid > channel_subsys.max_ssid) ||
1924         (guest_cssid > channel_subsys.max_cssid) ||
1925         ((channel_subsys.max_cssid == 0) &&
1926          (cssid != channel_subsys.default_cssid))) {
1927         return;
1928     }
1929     chain_crw = (channel_subsys.max_ssid > 0) ||
1930             (channel_subsys.max_cssid > 0);
1931     css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, chain_crw ? 1 : 0, schid);
1932     if (chain_crw) {
1933         css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, 0,
1934                       (guest_cssid << 8) | (ssid << 4));
1935     }
1936     /* CRW_ERC_IPI --> clear pending interrupts */
1937     css_clear_io_interrupt(css_do_build_subchannel_id(cssid, ssid), schid);
1938 }
1939 
1940 void css_generate_chp_crws(uint8_t cssid, uint8_t chpid)
1941 {
1942     /* TODO */
1943 }
1944 
1945 void css_generate_css_crws(uint8_t cssid)
1946 {
1947     if (!channel_subsys.sei_pending) {
1948         css_queue_crw(CRW_RSC_CSS, 0, 0, cssid);
1949     }
1950     channel_subsys.sei_pending = true;
1951 }
1952 
1953 void css_clear_sei_pending(void)
1954 {
1955     channel_subsys.sei_pending = false;
1956 }
1957 
1958 int css_enable_mcsse(void)
1959 {
1960     trace_css_enable_facility("mcsse");
1961     channel_subsys.max_cssid = MAX_CSSID;
1962     return 0;
1963 }
1964 
1965 int css_enable_mss(void)
1966 {
1967     trace_css_enable_facility("mss");
1968     channel_subsys.max_ssid = MAX_SSID;
1969     return 0;
1970 }
1971 
1972 void css_reset_sch(SubchDev *sch)
1973 {
1974     PMCW *p = &sch->curr_status.pmcw;
1975 
1976     if ((p->flags & PMCW_FLAGS_MASK_ENA) != 0 && sch->disable_cb) {
1977         sch->disable_cb(sch);
1978     }
1979 
1980     p->intparm = 0;
1981     p->flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
1982                   PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
1983                   PMCW_FLAGS_MASK_MP | PMCW_FLAGS_MASK_TF);
1984     p->flags |= PMCW_FLAGS_MASK_DNV;
1985     p->devno = sch->devno;
1986     p->pim = 0x80;
1987     p->lpm = p->pim;
1988     p->pnom = 0;
1989     p->lpum = 0;
1990     p->mbi = 0;
1991     p->pom = 0xff;
1992     p->pam = 0x80;
1993     p->chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_XMWME |
1994                   PMCW_CHARS_MASK_CSENSE);
1995 
1996     memset(&sch->curr_status.scsw, 0, sizeof(sch->curr_status.scsw));
1997     sch->curr_status.mba = 0;
1998 
1999     sch->channel_prog = 0x0;
2000     sch->last_cmd_valid = false;
2001     sch->thinint_active = false;
2002 }
2003 
2004 void css_reset(void)
2005 {
2006     CrwContainer *crw_cont;
2007 
2008     /* Clean up monitoring. */
2009     channel_subsys.chnmon_active = false;
2010     channel_subsys.chnmon_area = 0;
2011 
2012     /* Clear pending CRWs. */
2013     while ((crw_cont = QTAILQ_FIRST(&channel_subsys.pending_crws))) {
2014         QTAILQ_REMOVE(&channel_subsys.pending_crws, crw_cont, sibling);
2015         g_free(crw_cont);
2016     }
2017     channel_subsys.sei_pending = false;
2018     channel_subsys.do_crw_mchk = true;
2019     channel_subsys.crws_lost = false;
2020 
2021     /* Reset maximum ids. */
2022     channel_subsys.max_cssid = 0;
2023     channel_subsys.max_ssid = 0;
2024 }
2025 
2026 static void get_css_devid(Object *obj, Visitor *v, const char *name,
2027                           void *opaque, Error **errp)
2028 {
2029     DeviceState *dev = DEVICE(obj);
2030     Property *prop = opaque;
2031     CssDevId *dev_id = qdev_get_prop_ptr(dev, prop);
2032     char buffer[] = "xx.x.xxxx";
2033     char *p = buffer;
2034     int r;
2035 
2036     if (dev_id->valid) {
2038         r = snprintf(buffer, sizeof(buffer), "%02x.%1x.%04x", dev_id->cssid,
2039                      dev_id->ssid, dev_id->devid);
2040         assert(r == sizeof(buffer) - 1);
2041 
2042         /* drop leading zero */
2043         if (dev_id->cssid <= 0xf) {
2044             p++;
2045         }
2046     } else {
2047         snprintf(buffer, sizeof(buffer), "<unset>");
2048     }
2049 
2050     visit_type_str(v, name, &p, errp);
2051 }
2052 
2053 /*
2054  * Parse <cssid>.<ssid>.<devid> and check that cssid/ssid are in range.
2055  */
2056 static void set_css_devid(Object *obj, Visitor *v, const char *name,
2057                           void *opaque, Error **errp)
2058 {
2059     DeviceState *dev = DEVICE(obj);
2060     Property *prop = opaque;
2061     CssDevId *dev_id = qdev_get_prop_ptr(dev, prop);
2062     Error *local_err = NULL;
2063     char *str;
2064     int num, n1, n2;
2065     unsigned int cssid, ssid, devid;
2066 
2067     if (dev->realized) {
2068         qdev_prop_set_after_realize(dev, name, errp);
2069         return;
2070     }
2071 
2072     visit_type_str(v, name, &str, &local_err);
2073     if (local_err) {
2074         error_propagate(errp, local_err);
2075         return;
2076     }
2077 
2078     num = sscanf(str, "%2x.%1x%n.%4x%n", &cssid, &ssid, &n1, &devid, &n2);
2079     if (num != 3 || (n2 - n1) != 5 || strlen(str) != n2) {
2080         error_set_from_qdev_prop_error(errp, EINVAL, dev, prop, str);
2081         goto out;
2082     }
2083     if ((cssid > MAX_CSSID) || (ssid > MAX_SSID)) {
2084         error_setg(errp, "Invalid cssid or ssid: cssid %x, ssid %x",
2085                    cssid, ssid);
2086         goto out;
2087     }
2088 
2089     dev_id->cssid = cssid;
2090     dev_id->ssid = ssid;
2091     dev_id->devid = devid;
2092     dev_id->valid = true;
2093 
2094 out:
2095     g_free(str);
2096 }
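
/*
 * Examples of bus ids accepted / rejected by the parser above (sketch,
 * derived from the sscanf format and the range checks):
 *
 *     "fe.0.1234"  -> cssid 0xfe, ssid 0, devid 0x1234
 *     "3.1.04aa"   -> cssid 0x03, ssid 1, devid 0x04aa
 *     "fe.4.1234"  -> rejected, ssid exceeds MAX_SSID
 *     "fe.0.12345" -> rejected, trailing characters after the devid
 */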
2097 
2098 PropertyInfo css_devid_propinfo = {
2099     .name = "str",
2100     .description = "Identifier of an I/O device in the channel "
2101                    "subsystem, example: fe.1.23ab",
2102     .get = get_css_devid,
2103     .set = set_css_devid,
2104 };
2105 
2106 PropertyInfo css_devid_ro_propinfo = {
2107     .name = "str",
2108     .description = "Read-only identifier of an I/O device in the channel "
2109                    "subsystem, example: fe.1.23ab",
2110     .get = get_css_devid,
2111 };
2112 
2113 SubchDev *css_create_sch(CssDevId bus_id, bool is_virtual, bool squash_mcss,
2114                          Error **errp)
2115 {
2116     uint16_t schid = 0;
2117     SubchDev *sch;
2118 
2119     if (bus_id.valid) {
2120         if (is_virtual != (bus_id.cssid == VIRTUAL_CSSID)) {
2121             error_setg(errp, "cssid %hhx not valid for %s devices",
2122                        bus_id.cssid,
2123                        (is_virtual ? "virtual" : "non-virtual"));
2124             return NULL;
2125         }
2126     }
2127 
2128     if (bus_id.valid) {
2129         if (squash_mcss) {
2130             bus_id.cssid = channel_subsys.default_cssid;
2131         } else if (!channel_subsys.css[bus_id.cssid]) {
2132             css_create_css_image(bus_id.cssid, false);
2133         }
2134 
2135         if (!css_find_free_subch_for_devno(bus_id.cssid, bus_id.ssid,
2136                                            bus_id.devid, &schid, errp)) {
2137             return NULL;
2138         }
2139     } else if (squash_mcss || is_virtual) {
2140         bus_id.cssid = channel_subsys.default_cssid;
2141 
2142         if (!css_find_free_subch_and_devno(bus_id.cssid, &bus_id.ssid,
2143                                            &bus_id.devid, &schid, errp)) {
2144             return NULL;
2145         }
2146     } else {
2147         for (bus_id.cssid = 0; bus_id.cssid < MAX_CSSID; ++bus_id.cssid) {
2148             if (bus_id.cssid == VIRTUAL_CSSID) {
2149                 continue;
2150             }
2151 
2152             if (!channel_subsys.css[bus_id.cssid]) {
2153                 css_create_css_image(bus_id.cssid, false);
2154             }
2155 
2156             if (css_find_free_subch_and_devno(bus_id.cssid, &bus_id.ssid,
2157                                               &bus_id.devid, &schid,
2158                                               NULL)) {
2159                 break;
2160             }
2161         }
2162         if (bus_id.cssid == MAX_CSSID) {
2163             error_setg(errp, "Virtual channel subsystem is full!");
2164             return NULL;
2165         }
2166     }
2167 
2168     sch = g_malloc0(sizeof(*sch));
2169     sch->cssid = bus_id.cssid;
2170     sch->ssid = bus_id.ssid;
2171     sch->devno = bus_id.devid;
2172     sch->schid = schid;
2173     css_subch_assign(sch->cssid, sch->ssid, schid, sch->devno, sch);
2174     return sch;
2175 }
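
/*
 * Sketch of how a virtual device's realize path might use css_create_sch()
 * (the property plumbing, the chpid/chpid_type values and the exact error
 * handling are assumptions made for illustration only):
 *
 *     SubchDev *sch = css_create_sch(dev_id, true, false, errp);
 *
 *     if (!sch) {
 *         return;
 *     }
 *     hook up the subchannel callbacks, then call
 *     css_sch_build_virtual_schib(sch, chpid, chpid_type);
 */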
2176 
2177 static int css_sch_get_chpids(SubchDev *sch, CssDevId *dev_id)
2178 {
2179     char *fid_path;
2180     FILE *fd;
2181     uint32_t chpid[8];
2182     int i;
2183     PMCW *p = &sch->curr_status.pmcw;
2184 
2185     fid_path = g_strdup_printf("/sys/bus/css/devices/%x.%x.%04x/chpids",
2186                                dev_id->cssid, dev_id->ssid, dev_id->devid);
2187     fd = fopen(fid_path, "r");
2188     if (fd == NULL) {
2189         error_report("%s: open %s failed", __func__, fid_path);
2190         g_free(fid_path);
2191         return -EINVAL;
2192     }
2193 
2194     if (fscanf(fd, "%x %x %x %x %x %x %x %x",
2195         &chpid[0], &chpid[1], &chpid[2], &chpid[3],
2196         &chpid[4], &chpid[5], &chpid[6], &chpid[7]) != 8) {
2197         fclose(fd);
2198         g_free(fid_path);
2199         return -EINVAL;
2200     }
2201 
2202     for (i = 0; i < ARRAY_SIZE(p->chpid); i++) {
2203         p->chpid[i] = chpid[i];
2204     }
2205 
2206     fclose(fd);
2207     g_free(fid_path);
2208 
2209     return 0;
2210 }
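
/*
 * The sysfs "chpids" attribute parsed above is expected to contain eight
 * space-separated hex values; a line like
 *
 *     "3a 00 00 00 00 00 00 00"
 *
 * would leave p->chpid[0] == 0x3a and the other entries zero, while anything
 * that does not scan as eight values makes the function fail with -EINVAL.
 */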
2211 
2212 static int css_sch_get_path_masks(SubchDev *sch, CssDevId *dev_id)
2213 {
2214     char *fid_path;
2215     FILE *fd;
2216     uint32_t pim, pam, pom;
2217     PMCW *p = &sch->curr_status.pmcw;
2218 
2219     fid_path = g_strdup_printf("/sys/bus/css/devices/%x.%x.%04x/pimpampom",
2220                                dev_id->cssid, dev_id->ssid, dev_id->devid);
2221     fd = fopen(fid_path, "r");
2222     if (fd == NULL) {
2223         error_report("%s: open %s failed", __func__, fid_path);
2224         g_free(fid_path);
2225         return -EINVAL;
2226     }
2227 
2228     if (fscanf(fd, "%x %x %x", &pim, &pam, &pom) != 3) {
2229         fclose(fd);
2230         g_free(fid_path);
2231         return -EINVAL;
2232     }
2233 
2234     p->pim = pim;
2235     p->pam = pam;
2236     p->pom = pom;
2237     fclose(fd);
2238     g_free(fid_path);
2239 
2240     return 0;
2241 }
2242 
2243 static int css_sch_get_chpid_type(uint8_t chpid, uint32_t *type,
2244                                   CssDevId *dev_id)
2245 {
2246     char *fid_path;
2247     FILE *fd;
2248 
2249     fid_path = g_strdup_printf("/sys/devices/css%x/chp0.%02x/type",
2250                                dev_id->cssid, chpid);
2251     fd = fopen(fid_path, "r");
2252     if (fd == NULL) {
2253         error_report("%s: open %s failed", __func__, fid_path);
2254         g_free(fid_path);
2255         return -EINVAL;
2256     }
2257 
2258     if (fscanf(fd, "%x", type) != 1) {
2259         fclose(fd);
2260         g_free(fid_path);
2261         return -EINVAL;
2262     }
2263 
2264     fclose(fd);
2265     g_free(fid_path);
2266 
2267     return 0;
2268 }
2269 
2270 /*
2271  * We currently retrieve the real device information from sysfs to build the
2272  * guest subchannel information block; this does not yet take migration into
2273  * account. We need to revisit this when we want to add migration support.
2274  */
2275 int css_sch_build_schib(SubchDev *sch, CssDevId *dev_id)
2276 {
2277     CssImage *css = channel_subsys.css[sch->cssid];
2278     PMCW *p = &sch->curr_status.pmcw;
2279     SCSW *s = &sch->curr_status.scsw;
2280     uint32_t type;
2281     int i, ret;
2282 
2283     assert(css != NULL);
2284     memset(p, 0, sizeof(PMCW));
2285     p->flags |= PMCW_FLAGS_MASK_DNV;
2286     /* We are dealing with I/O subchannels only. */
2287     p->devno = sch->devno;
2288 
2289     /* Grab path mask from sysfs. */
2290     ret = css_sch_get_path_masks(sch, dev_id);
2291     if (ret) {
2292         return ret;
2293     }
2294 
2295     /* Grab chpids from sysfs. */
2296     ret = css_sch_get_chpids(sch, dev_id);
2297     if (ret) {
2298         return ret;
2299     }
2300 
2301     /* Build chpid type. */
2302     for (i = 0; i < ARRAY_SIZE(p->chpid); i++) {
2303         if (p->chpid[i] && !css->chpids[p->chpid[i]].in_use) {
2304             ret = css_sch_get_chpid_type(p->chpid[i], &type, dev_id);
2305             if (ret) {
2306                 return ret;
2307             }
2308             css_add_chpid(sch->cssid, p->chpid[i], type, false);
2309         }
2310     }
2311 
2312     memset(s, 0, sizeof(SCSW));
2313     sch->curr_status.mba = 0;
2314     for (i = 0; i < ARRAY_SIZE(sch->curr_status.mda); i++) {
2315         sch->curr_status.mda[i] = 0;
2316     }
2317 
2318     return 0;
2319 }
2320