/*
 * QEMU S390x floating interrupt controller (flic)
 *
 * Copyright 2014 IBM Corp.
 * Author(s): Jens Freimann <jfrei@linux.vnet.ibm.com>
 *            Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "hw/sysbus.h"
#include "hw/s390x/ioinst.h"
#include "hw/s390x/s390_flic.h"
#include "hw/qdev-properties.h"
#include "hw/s390x/css.h"
#include "trace.h"
#include "cpu.h"
#include "qapi/error.h"
#include "hw/s390x/s390-virtio-ccw.h"

S390FLICStateClass *s390_get_flic_class(S390FLICState *fs)
{
    static S390FLICStateClass *class;

    if (!class) {
        /* we only have one flic device, so this is fine to cache */
        class = S390_FLIC_COMMON_GET_CLASS(fs);
    }
    return class;
}

QEMUS390FLICState *s390_get_qemu_flic(S390FLICState *fs)
{
    static QEMUS390FLICState *flic;

    if (!flic) {
        /* we only have one flic device, so this is fine to cache */
        flic = QEMU_S390_FLIC(fs);
    }
    return flic;
}

S390FLICState *s390_get_flic(void)
{
    static S390FLICState *fs;

    if (!fs) {
        fs = S390_FLIC_COMMON(object_resolve_path_type("",
                                                       TYPE_S390_FLIC_COMMON,
                                                       NULL));
    }
    return fs;
}

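/*
 * Create the single FLIC device for the machine: the KVM-backed
 * implementation when running with KVM, otherwise the emulated
 * TYPE_QEMU_S390_FLIC one.
 */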
void s390_flic_init(void)
{
    DeviceState *dev;

    if (kvm_enabled()) {
        dev = qdev_new(TYPE_KVM_S390_FLIC);
        object_property_add_child(qdev_get_machine(), TYPE_KVM_S390_FLIC,
                                  OBJECT(dev));
    } else {
        dev = qdev_new(TYPE_QEMU_S390_FLIC);
        object_property_add_child(qdev_get_machine(), TYPE_QEMU_S390_FLIC,
                                  OBJECT(dev));
    }
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
}

static int qemu_s390_register_io_adapter(S390FLICState *fs, uint32_t id,
                                         uint8_t isc, bool swap,
                                         bool is_maskable, uint8_t flags)
{
    /* nothing to do */
    return 0;
}

static int qemu_s390_io_adapter_map(S390FLICState *fs, uint32_t id,
                                    uint64_t map_addr, bool do_map)
{
    /* nothing to do */
    return 0;
}

static int qemu_s390_add_adapter_routes(S390FLICState *fs,
                                        AdapterRoutes *routes)
{
    return -ENOSYS;
}

static void qemu_s390_release_adapter_routes(S390FLICState *fs,
                                             AdapterRoutes *routes)
{
}

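/* drop any queued I/O interrupts that match the given subchannel */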
static int qemu_s390_clear_io_flic(S390FLICState *fs, uint16_t subchannel_id,
                                   uint16_t subchannel_nr)
{
    QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
    QEMUS390FlicIO *cur, *next;
    uint8_t isc;

    g_assert(qemu_mutex_iothread_locked());
    if (!(flic->pending & FLIC_PENDING_IO)) {
        return 0;
    }

    /* check all iscs */
    for (isc = 0; isc < 8; isc++) {
        if (QLIST_EMPTY(&flic->io[isc])) {
            continue;
        }

        /* search and delete any matching one */
        QLIST_FOREACH_SAFE(cur, &flic->io[isc], next, next) {
            if (cur->id == subchannel_id && cur->nr == subchannel_nr) {
                QLIST_REMOVE(cur, next);
                g_free(cur);
            }
        }

        /* update our indicator bit */
        if (QLIST_EMPTY(&flic->io[isc])) {
            flic->pending &= ~ISC_TO_PENDING_IO(isc);
        }
    }
    return 0;
}

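/*
 * Adapter-interruption suppression: simm/nimm hold per-ISC mask bits for
 * single-interruption and no-interruptions mode. SIC_IRQ_MODE_ALL clears
 * both bits for the ISC, SIC_IRQ_MODE_SINGLE arms single-interruption mode.
 */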
static int qemu_s390_modify_ais_mode(S390FLICState *fs, uint8_t isc,
                                     uint16_t mode)
{
    QEMUS390FLICState *flic = s390_get_qemu_flic(fs);

    switch (mode) {
    case SIC_IRQ_MODE_ALL:
        flic->simm &= ~AIS_MODE_MASK(isc);
        flic->nimm &= ~AIS_MODE_MASK(isc);
        break;
    case SIC_IRQ_MODE_SINGLE:
        flic->simm |= AIS_MODE_MASK(isc);
        flic->nimm &= ~AIS_MODE_MASK(isc);
        break;
    default:
        return -EINVAL;
    }

    return 0;
}

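/*
 * Inject an adapter interrupt for the given ISC. A suppressible airq is
 * dropped while the ISC is in no-interruptions mode; after delivering one
 * in single-interruption mode the ISC switches to no-interruptions mode.
 */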
static int qemu_s390_inject_airq(S390FLICState *fs, uint8_t type,
                                 uint8_t isc, uint8_t flags)
{
    QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
    S390FLICStateClass *fsc = s390_get_flic_class(fs);
    bool flag = flags & S390_ADAPTER_SUPPRESSIBLE;
    uint32_t io_int_word = (isc << 27) | IO_INT_WORD_AI;

    if (flag && (flic->nimm & AIS_MODE_MASK(isc))) {
        trace_qemu_s390_airq_suppressed(type, isc);
        return 0;
    }

    fsc->inject_io(fs, 0, 0, 0, io_int_word);

    if (flag && (flic->simm & AIS_MODE_MASK(isc))) {
        flic->nimm |= AIS_MODE_MASK(isc);
        trace_qemu_s390_suppress_airq(isc, "Single-Interruption Mode",
                                      "NO-Interruptions Mode");
    }

    return 0;
}

static void qemu_s390_flic_notify(uint32_t type)
{
    CPUState *cs;

    /*
     * We have to make all CPUs see CPU_INTERRUPT_HARD, so they might
     * consider it. We will kick all running CPUs and only relevant
     * sleeping ones.
     */
    CPU_FOREACH(cs) {
        S390CPU *cpu = S390_CPU(cs);

        cs->interrupt_request |= CPU_INTERRUPT_HARD;

        /* ignore CPUs that are not sleeping */
        if (s390_cpu_get_state(cpu) != S390_CPU_STATE_OPERATING &&
            s390_cpu_get_state(cpu) != S390_CPU_STATE_LOAD) {
            continue;
        }

        /* we always kick running CPUs for now, this is tricky */
        if (cs->halted) {
            /* don't check for subclasses, CPUs double check when waking up */
            if (type & FLIC_PENDING_SERVICE) {
                if (!(cpu->env.psw.mask & PSW_MASK_EXT)) {
                    continue;
                }
            } else if (type & FLIC_PENDING_IO) {
                if (!(cpu->env.psw.mask & PSW_MASK_IO)) {
                    continue;
                }
            } else if (type & FLIC_PENDING_MCHK_CR) {
                if (!(cpu->env.psw.mask & PSW_MASK_MCHECK)) {
                    continue;
                }
            }
        }
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    }
}

uint32_t qemu_s390_flic_dequeue_service(QEMUS390FLICState *flic)
{
    uint32_t tmp;

    g_assert(qemu_mutex_iothread_locked());
    g_assert(flic->pending & FLIC_PENDING_SERVICE);
    tmp = flic->service_param;
    flic->service_param = 0;
    flic->pending &= ~FLIC_PENDING_SERVICE;

    return tmp;
}

/* caller has to free the returned object */
QEMUS390FlicIO *qemu_s390_flic_dequeue_io(QEMUS390FLICState *flic, uint64_t cr6)
{
    QEMUS390FlicIO *io;
    uint8_t isc;

    g_assert(qemu_mutex_iothread_locked());
    if (!(flic->pending & CR6_TO_PENDING_IO(cr6))) {
        return NULL;
    }

    for (isc = 0; isc < 8; isc++) {
        if (QLIST_EMPTY(&flic->io[isc]) || !(cr6 & ISC_TO_ISC_BITS(isc))) {
            continue;
        }
        io = QLIST_FIRST(&flic->io[isc]);
        QLIST_REMOVE(io, next);

        /* update our indicator bit */
        if (QLIST_EMPTY(&flic->io[isc])) {
            flic->pending &= ~ISC_TO_PENDING_IO(isc);
        }
        return io;
    }

    return NULL;
}

void qemu_s390_flic_dequeue_crw_mchk(QEMUS390FLICState *flic)
{
    g_assert(qemu_mutex_iothread_locked());
    g_assert(flic->pending & FLIC_PENDING_MCHK_CR);
    flic->pending &= ~FLIC_PENDING_MCHK_CR;
}

static void qemu_s390_inject_service(S390FLICState *fs, uint32_t parm)
{
    QEMUS390FLICState *flic = s390_get_qemu_flic(fs);

    g_assert(qemu_mutex_iothread_locked());
    /* multiplexing is good enough for sclp - kvm does it internally as well */
    flic->service_param |= parm;
    flic->pending |= FLIC_PENDING_SERVICE;

    qemu_s390_flic_notify(FLIC_PENDING_SERVICE);
}

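/* queue an I/O interrupt on its ISC list and flag it in the pending summary */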
static void qemu_s390_inject_io(S390FLICState *fs, uint16_t subchannel_id,
                                uint16_t subchannel_nr, uint32_t io_int_parm,
                                uint32_t io_int_word)
{
    const uint8_t isc = IO_INT_WORD_ISC(io_int_word);
    QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
    QEMUS390FlicIO *io;

    g_assert(qemu_mutex_iothread_locked());
    io = g_new0(QEMUS390FlicIO, 1);
    io->id = subchannel_id;
    io->nr = subchannel_nr;
    io->parm = io_int_parm;
    io->word = io_int_word;

    QLIST_INSERT_HEAD(&flic->io[isc], io, next);
    flic->pending |= ISC_TO_PENDING_IO(isc);

    qemu_s390_flic_notify(ISC_TO_PENDING_IO(isc));
}

static void qemu_s390_inject_crw_mchk(S390FLICState *fs)
{
    QEMUS390FLICState *flic = s390_get_qemu_flic(fs);

    g_assert(qemu_mutex_iothread_locked());
    flic->pending |= FLIC_PENDING_MCHK_CR;

    qemu_s390_flic_notify(FLIC_PENDING_MCHK_CR);
}

bool qemu_s390_flic_has_service(QEMUS390FLICState *flic)
{
    /* called without lock via cc->has_work, will be validated under lock */
    return !!(flic->pending & FLIC_PENDING_SERVICE);
}

bool qemu_s390_flic_has_io(QEMUS390FLICState *flic, uint64_t cr6)
{
    /* called without lock via cc->has_work, will be validated under lock */
    return !!(flic->pending & CR6_TO_PENDING_IO(cr6));
}

bool qemu_s390_flic_has_crw_mchk(QEMUS390FLICState *flic)
{
    /* called without lock via cc->has_work, will be validated under lock */
    return !!(flic->pending & FLIC_PENDING_MCHK_CR);
}

bool qemu_s390_flic_has_any(QEMUS390FLICState *flic)
{
    g_assert(qemu_mutex_iothread_locked());
    return !!flic->pending;
}

static void qemu_s390_flic_reset(DeviceState *dev)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(dev);
    QEMUS390FlicIO *cur, *next;
    int isc;

    g_assert(qemu_mutex_iothread_locked());
    flic->simm = 0;
    flic->nimm = 0;
    flic->pending = 0;

    /* remove all pending io interrupts */
    for (isc = 0; isc < 8; isc++) {
        QLIST_FOREACH_SAFE(cur, &flic->io[isc], next, next) {
            QLIST_REMOVE(cur, next);
            g_free(cur);
        }
    }
}

bool ais_needed(void *opaque)
{
    S390FLICState *s = opaque;

    return s->ais_supported;
}

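/* simm and nimm only need to be migrated when AIS is available, see ais_needed() */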
static const VMStateDescription qemu_s390_flic_vmstate = {
    .name = "qemu-s390-flic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ais_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(simm, QEMUS390FLICState),
        VMSTATE_UINT8(nimm, QEMUS390FLICState),
        VMSTATE_END_OF_LIST()
    }
};

static void qemu_s390_flic_instance_init(Object *obj)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(obj);
    int isc;

    for (isc = 0; isc < 8; isc++) {
        QLIST_INIT(&flic->io[isc]);
    }
}

static void qemu_s390_flic_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    S390FLICStateClass *fsc = S390_FLIC_COMMON_CLASS(oc);

    dc->reset = qemu_s390_flic_reset;
    dc->vmsd = &qemu_s390_flic_vmstate;
    fsc->register_io_adapter = qemu_s390_register_io_adapter;
    fsc->io_adapter_map = qemu_s390_io_adapter_map;
    fsc->add_adapter_routes = qemu_s390_add_adapter_routes;
    fsc->release_adapter_routes = qemu_s390_release_adapter_routes;
    fsc->clear_io_irq = qemu_s390_clear_io_flic;
    fsc->modify_ais_mode = qemu_s390_modify_ais_mode;
    fsc->inject_airq = qemu_s390_inject_airq;
    fsc->inject_service = qemu_s390_inject_service;
    fsc->inject_io = qemu_s390_inject_io;
    fsc->inject_crw_mchk = qemu_s390_inject_crw_mchk;
}

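/*
 * "adapter_routes_max_batch" caps the number of adapter routes handled in
 * one batch; realize() rejects values above ADAPTER_ROUTES_MAX_GSI.
 */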
static Property s390_flic_common_properties[] = {
    DEFINE_PROP_UINT32("adapter_routes_max_batch", S390FLICState,
                       adapter_routes_max_batch, ADAPTER_ROUTES_MAX_GSI),
    DEFINE_PROP_END_OF_LIST(),
};

static void s390_flic_common_realize(DeviceState *dev, Error **errp)
{
    S390FLICState *fs = S390_FLIC_COMMON(dev);
    uint32_t max_batch = fs->adapter_routes_max_batch;

    if (max_batch > ADAPTER_ROUTES_MAX_GSI) {
        error_setg(errp, "flic property adapter_routes_max_batch too big"
                   " (%d > %d)", max_batch, ADAPTER_ROUTES_MAX_GSI);
        return;
    }

    fs->ais_supported = s390_has_feat(S390_FEAT_ADAPTER_INT_SUPPRESSION);
}

static void s390_flic_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    device_class_set_props(dc, s390_flic_common_properties);
    dc->realize = s390_flic_common_realize;
}

static const TypeInfo qemu_s390_flic_info = {
    .name = TYPE_QEMU_S390_FLIC,
    .parent = TYPE_S390_FLIC_COMMON,
    .instance_size = sizeof(QEMUS390FLICState),
    .instance_init = qemu_s390_flic_instance_init,
    .class_init = qemu_s390_flic_class_init,
};

static const TypeInfo s390_flic_common_info = {
    .name = TYPE_S390_FLIC_COMMON,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(S390FLICState),
    .class_init = s390_flic_class_init,
    .class_size = sizeof(S390FLICStateClass),
};

static void qemu_s390_flic_register_types(void)
{
    type_register_static(&s390_flic_common_info);
    type_register_static(&qemu_s390_flic_info);
}

type_init(qemu_s390_flic_register_types)

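/* the summary_offset subsection is only migrated when css migration is enabled */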
static bool adapter_info_so_needed(void *opaque)
{
    return css_migration_enabled();
}

const VMStateDescription vmstate_adapter_info_so = {
    .name = "s390_adapter_info/summary_offset",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = adapter_info_so_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(summary_offset, AdapterInfo),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_adapter_info = {
    .name = "s390_adapter_info",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ind_offset, AdapterInfo),
        /*
         * Neither the id nor the addresses need to be migrated.
         * The id is set by css_register_io_adapter and the addresses
         * are set based on the IndAddr objects after those get mapped.
         */
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_adapter_info_so,
        NULL
    }
};

const VMStateDescription vmstate_adapter_routes = {
    .name = "s390_adapter_routes",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT(adapter, AdapterRoutes, 1, vmstate_adapter_info,
                       AdapterInfo),
        VMSTATE_END_OF_LIST()
    }
};