1 /*
2 * OpenPIC emulation
3 *
4 * Copyright (c) 2004 Jocelyn Mayer
5 * 2011 Alexander Graf
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23 * THE SOFTWARE.
24 */
25 /*
26 *
27 * Based on OpenPic implementations:
28 * - Intel GW80314 I/O companion chip developer's manual
29 * - Motorola MPC8245 & MPC8540 user manuals.
30 * - Motorola MCP750 (aka Raven) programmer manual.
 * - Motorola Harrier programmer manual
32 *
33 * Serial interrupts, as implemented in Raven chipset are not supported yet.
34 *
35 */
36 #include "qemu/osdep.h"
37 #include "hw/hw.h"
38 #include "hw/ppc/mac.h"
39 #include "hw/pci/pci.h"
40 #include "hw/ppc/openpic.h"
41 #include "hw/ppc/ppc_e500.h"
42 #include "hw/sysbus.h"
43 #include "hw/pci/msi.h"
44 #include "qapi/error.h"
45 #include "qemu/bitops.h"
46 #include "qapi/qmp/qerror.h"
47 #include "qemu/log.h"
48 #include "qemu/timer.h"
49 #include "qemu/error-report.h"
50
51 //#define DEBUG_OPENPIC
52
53 #ifdef DEBUG_OPENPIC
54 static const int debug_openpic = 1;
55 #else
56 static const int debug_openpic = 0;
57 #endif
58
59 static int get_current_cpu(void);
60 #define DPRINTF(fmt, ...) do { \
61 if (debug_openpic) { \
62 info_report("Core%d: " fmt, get_current_cpu(), ## __VA_ARGS__); \
63 } \
64 } while (0)
65
66 /* OpenPIC capability flags */
67 #define OPENPIC_FLAG_IDR_CRIT (1 << 0)
68 #define OPENPIC_FLAG_ILR (2 << 0)
69
70 /* OpenPIC address map */
71 #define OPENPIC_GLB_REG_START 0x0
72 #define OPENPIC_GLB_REG_SIZE 0x10F0
73 #define OPENPIC_TMR_REG_START 0x10F0
74 #define OPENPIC_TMR_REG_SIZE 0x220
75 #define OPENPIC_MSI_REG_START 0x1600
76 #define OPENPIC_MSI_REG_SIZE 0x200
77 #define OPENPIC_SUMMARY_REG_START 0x3800
78 #define OPENPIC_SUMMARY_REG_SIZE 0x800
79 #define OPENPIC_SRC_REG_START 0x10000
80 #define OPENPIC_SRC_REG_SIZE (OPENPIC_MAX_SRC * 0x20)
81 #define OPENPIC_CPU_REG_START 0x20000
82 #define OPENPIC_CPU_REG_SIZE 0x100 + ((MAX_CPU - 1) * 0x1000)
83
/* Per-variant capability tables for the Freescale MPIC models; max_ext is
 * the number of external interrupt sources the variant supports.
 */
static FslMpicInfo fsl_mpic_20 = {
    .max_ext = 12,
};

static FslMpicInfo fsl_mpic_42 = {
    .max_ext = 12,
};
91
92 #define FRR_NIRQ_SHIFT 16
93 #define FRR_NCPU_SHIFT 8
94 #define FRR_VID_SHIFT 0
95
96 #define VID_REVISION_1_2 2
97 #define VID_REVISION_1_3 3
98
99 #define VIR_GENERIC 0x00000000 /* Generic Vendor ID */
100 #define VIR_MPIC2A 0x00004614 /* IBM MPIC-2A */
101
102 #define GCR_RESET 0x80000000
103 #define GCR_MODE_PASS 0x00000000
104 #define GCR_MODE_MIXED 0x20000000
105 #define GCR_MODE_PROXY 0x60000000
106
107 #define TBCR_CI 0x80000000 /* count inhibit */
108 #define TCCR_TOG 0x80000000 /* toggles when decrement to zero */
109
110 #define IDR_EP_SHIFT 31
111 #define IDR_EP_MASK (1U << IDR_EP_SHIFT)
112 #define IDR_CI0_SHIFT 30
113 #define IDR_CI1_SHIFT 29
114 #define IDR_P1_SHIFT 1
115 #define IDR_P0_SHIFT 0
116
117 #define ILR_INTTGT_MASK 0x000000ff
118 #define ILR_INTTGT_INT 0x00
119 #define ILR_INTTGT_CINT 0x01 /* critical */
120 #define ILR_INTTGT_MCP 0x02 /* machine check */
121
122 /* The currently supported INTTGT values happen to be the same as QEMU's
123 * openpic output codes, but don't depend on this. The output codes
124 * could change (unlikely, but...) or support could be added for
125 * more INTTGT values.
126 */
/* Mapping table: { ILR INTTGT field value, QEMU openpic output line }. */
static const int inttgt_output[][2] = {
    { ILR_INTTGT_INT, OPENPIC_OUTPUT_INT },
    { ILR_INTTGT_CINT, OPENPIC_OUTPUT_CINT },
    { ILR_INTTGT_MCP, OPENPIC_OUTPUT_MCK },
};
132
inttgt_to_output(int inttgt)133 static int inttgt_to_output(int inttgt)
134 {
135 int i;
136
137 for (i = 0; i < ARRAY_SIZE(inttgt_output); i++) {
138 if (inttgt_output[i][0] == inttgt) {
139 return inttgt_output[i][1];
140 }
141 }
142
143 error_report("%s: unsupported inttgt %d", __func__, inttgt);
144 return OPENPIC_OUTPUT_INT;
145 }
146
output_to_inttgt(int output)147 static int output_to_inttgt(int output)
148 {
149 int i;
150
151 for (i = 0; i < ARRAY_SIZE(inttgt_output); i++) {
152 if (inttgt_output[i][1] == output) {
153 return inttgt_output[i][0];
154 }
155 }
156
157 abort();
158 }
159
160 #define MSIIR_OFFSET 0x140
161 #define MSIIR_SRS_SHIFT 29
162 #define MSIIR_SRS_MASK (0x7 << MSIIR_SRS_SHIFT)
163 #define MSIIR_IBS_SHIFT 24
164 #define MSIIR_IBS_MASK (0x1f << MSIIR_IBS_SHIFT)
165
get_current_cpu(void)166 static int get_current_cpu(void)
167 {
168 if (!current_cpu) {
169 return -1;
170 }
171
172 return current_cpu->cpu_index;
173 }
174
175 static uint32_t openpic_cpu_read_internal(void *opaque, hwaddr addr,
176 int idx);
177 static void openpic_cpu_write_internal(void *opaque, hwaddr addr,
178 uint32_t val, int idx);
179 static void openpic_reset(DeviceState *d);
180
/* Convert between openpic clock ticks and nanoseconds.  In the hardware the
   clock frequency is driven by board inputs to the PIC which the PIC would
   then divide by 4 or 8.  For now hard code to 25MHz.
 */
#define OPENPIC_TIMER_FREQ_MHZ 25
#define OPENPIC_TIMER_NS_PER_TICK (1000 / OPENPIC_TIMER_FREQ_MHZ)

/* Convert a duration in nanoseconds to openpic timer ticks. */
static inline uint64_t ns_to_ticks(uint64_t nanosecs)
{
    return nanosecs / OPENPIC_TIMER_NS_PER_TICK;
}

/* Convert an openpic timer tick count to nanoseconds. */
static inline uint64_t ticks_to_ns(uint64_t tick_count)
{
    return tick_count * OPENPIC_TIMER_NS_PER_TICK;
}
195
/* Mark IRQ n_IRQ as pending in queue q. */
static inline void IRQ_setbit(IRQQueue *q, int n_IRQ)
{
    set_bit(n_IRQ, q->queue);
}

/* Clear the pending bit for IRQ n_IRQ in queue q. */
static inline void IRQ_resetbit(IRQQueue *q, int n_IRQ)
{
    clear_bit(n_IRQ, q->queue);
}
205
/* Scan queue q for the highest-priority pending IRQ and cache the result
 * in q->next (IRQ number, or -1 if the queue is empty) and q->priority.
 * Ties go to the lowest-numbered IRQ, since only a strictly greater
 * priority replaces the current candidate.
 */
static void IRQ_check(OpenPICState *opp, IRQQueue *q)
{
    int best = -1;
    int best_prio = -1;
    int n = find_first_bit(q->queue, opp->max_irq);

    while (n != opp->max_irq) {
        DPRINTF("IRQ_check: irq %d set ivpr_pr=%d pr=%d",
                n, IVPR_PRIORITY(opp->src[n].ivpr), best_prio);
        if (IVPR_PRIORITY(opp->src[n].ivpr) > best_prio) {
            best = n;
            best_prio = IVPR_PRIORITY(opp->src[n].ivpr);
        }
        n = find_next_bit(q->queue, opp->max_irq, n + 1);
    }

    q->next = best;
    q->priority = best_prio;
}
230
/* Return the number of the highest-priority pending IRQ in q, or -1 when
 * the queue is empty.  Recomputes the cached queue state on every call.
 */
static int IRQ_get_next(OpenPICState *opp, IRQQueue *q)
{
    /* XXX: optimize */
    IRQ_check(opp, q);

    return q->next;
}
238
/* Deliver (or withdraw) interrupt n_IRQ to/from CPU n_CPU.
 *
 * Non-INT outputs (critical, machine check) bypass priority, IACK and EOI
 * entirely: a per-output refcount of active sources drives the qemu_irq
 * line directly.  INT delivery goes through the destination's raised
 * queue and is gated on the current task priority (ctpr) and on whatever
 * interrupt is already being serviced.
 *
 * "active" is the new assertion state of the source, "was_active" its
 * previous state; both are needed to keep refcounts and queues balanced.
 */
static void IRQ_local_pipe(OpenPICState *opp, int n_CPU, int n_IRQ,
                           bool active, bool was_active)
{
    IRQDest *dst;
    IRQSource *src;
    int priority;

    dst = &opp->dst[n_CPU];
    src = &opp->src[n_IRQ];

    DPRINTF("%s: IRQ %d active %d was %d",
            __func__, n_IRQ, active, was_active);

    if (src->output != OPENPIC_OUTPUT_INT) {
        DPRINTF("%s: output %d irq %d active %d was %d count %d",
                __func__, src->output, n_IRQ, active, was_active,
                dst->outputs_active[src->output]);

        /* On Freescale MPIC, critical interrupts ignore priority,
         * IACK, EOI, etc. Before MPIC v4.1 they also ignore
         * masking.
         */
        if (active) {
            /* Raise the line only on the 0 -> 1 refcount transition. */
            if (!was_active && dst->outputs_active[src->output]++ == 0) {
                DPRINTF("%s: Raise OpenPIC output %d cpu %d irq %d",
                        __func__, src->output, n_CPU, n_IRQ);
                qemu_irq_raise(dst->irqs[src->output]);
            }
        } else {
            /* Lower the line only when the last active source goes away. */
            if (was_active && --dst->outputs_active[src->output] == 0) {
                DPRINTF("%s: Lower OpenPIC output %d cpu %d irq %d",
                        __func__, src->output, n_CPU, n_IRQ);
                qemu_irq_lower(dst->irqs[src->output]);
            }
        }

        return;
    }

    priority = IVPR_PRIORITY(src->ivpr);

    /* Even if the interrupt doesn't have enough priority,
     * it is still raised, in case ctpr is lowered later.
     */
    if (active) {
        IRQ_setbit(&dst->raised, n_IRQ);
    } else {
        IRQ_resetbit(&dst->raised, n_IRQ);
    }

    IRQ_check(opp, &dst->raised);

    if (active && priority <= dst->ctpr) {
        DPRINTF("%s: IRQ %d priority %d too low for ctpr %d on CPU %d",
                __func__, n_IRQ, priority, dst->ctpr, n_CPU);
        active = 0;
    }

    if (active) {
        if (IRQ_get_next(opp, &dst->servicing) >= 0 &&
            priority <= dst->servicing.priority) {
            DPRINTF("%s: IRQ %d is hidden by servicing IRQ %d on CPU %d",
                    __func__, n_IRQ, dst->servicing.next, n_CPU);
        } else {
            DPRINTF("%s: Raise OpenPIC INT output cpu %d irq %d/%d",
                    __func__, n_CPU, n_IRQ, dst->raised.next);
            qemu_irq_raise(opp->dst[n_CPU].irqs[OPENPIC_OUTPUT_INT]);
        }
    } else {
        IRQ_get_next(opp, &dst->servicing);
        if (dst->raised.priority > dst->ctpr &&
            dst->raised.priority > dst->servicing.priority) {
            DPRINTF("%s: IRQ %d inactive, IRQ %d prio %d above %d/%d, CPU %d",
                    __func__, n_IRQ, dst->raised.next, dst->raised.priority,
                    dst->ctpr, dst->servicing.priority, n_CPU);
            /* IRQ line stays asserted */
        } else {
            DPRINTF("%s: IRQ %d inactive, current prio %d/%d, CPU %d",
                    __func__, n_IRQ, dst->ctpr, dst->servicing.priority, n_CPU);
            qemu_irq_lower(opp->dst[n_CPU].irqs[OPENPIC_OUTPUT_INT]);
        }
    }
}
322
323 /* update pic state because registers for n_IRQ have changed value */
static void openpic_update_irq(OpenPICState *opp, int n_IRQ)
{
    IRQSource *src;
    bool active, was_active;
    int i;

    src = &opp->src[n_IRQ];
    active = src->pending;

    if ((src->ivpr & IVPR_MASK_MASK) && !src->nomask) {
        /* Interrupt source is disabled */
        DPRINTF("%s: IRQ %d is disabled", __func__, n_IRQ);
        active = false;
    }

    was_active = !!(src->ivpr & IVPR_ACTIVITY_MASK);

    /*
     * We don't have a similar check for already-active because
     * ctpr may have changed and we need to withdraw the interrupt.
     */
    if (!active && !was_active) {
        DPRINTF("%s: IRQ %d is already inactive", __func__, n_IRQ);
        return;
    }

    /* Track the new assertion state in IVPR's read-only ACTIVITY bit. */
    if (active) {
        src->ivpr |= IVPR_ACTIVITY_MASK;
    } else {
        src->ivpr &= ~IVPR_ACTIVITY_MASK;
    }

    if (src->destmask == 0) {
        /* No target */
        DPRINTF("%s: IRQ %d has no target", __func__, n_IRQ);
        return;
    }

    if (src->destmask == (1 << src->last_cpu)) {
        /* Only one CPU is allowed to receive this IRQ */
        IRQ_local_pipe(opp, src->last_cpu, n_IRQ, active, was_active);
    } else if (!(src->ivpr & IVPR_MODE_MASK)) {
        /* Directed delivery mode */
        for (i = 0; i < opp->nb_cpus; i++) {
            if (src->destmask & (1 << i)) {
                IRQ_local_pipe(opp, i, n_IRQ, active, was_active);
            }
        }
    } else {
        /* Distributed delivery mode: rotate round-robin through the
         * destination set, starting after the CPU that was served last. */
        for (i = src->last_cpu + 1; i != src->last_cpu; i++) {
            if (i == opp->nb_cpus) {
                i = 0;
            }
            if (src->destmask & (1 << i)) {
                IRQ_local_pipe(opp, i, n_IRQ, active, was_active);
                src->last_cpu = i;
                break;
            }
        }
    }
}
386
/* qemu_irq handler for an input interrupt line.
 *
 * Level-sensitive sources follow the line state directly.  Edge-sensitive
 * sources latch on a rising edge; for non-INT outputs the pending state is
 * cleared again immediately since those outputs have no IACK/EOI cycle to
 * retire the interrupt.
 */
static void openpic_set_irq(void *opaque, int n_IRQ, int level)
{
    OpenPICState *opp = opaque;
    IRQSource *src;

    if (n_IRQ >= OPENPIC_MAX_IRQ) {
        error_report("%s: IRQ %d out of range", __func__, n_IRQ);
        abort();
    }

    src = &opp->src[n_IRQ];
    DPRINTF("openpic: set irq %d = %d ivpr=0x%08x",
            n_IRQ, level, src->ivpr);
    if (src->level) {
        /* level-sensitive irq */
        src->pending = level;
        openpic_update_irq(opp, n_IRQ);
    } else {
        /* edge-sensitive irq */
        if (level) {
            src->pending = 1;
            openpic_update_irq(opp, n_IRQ);
        }

        if (src->output != OPENPIC_OUTPUT_INT) {
            /* Edge-triggered interrupts shouldn't be used
             * with non-INT delivery, but just in case,
             * try to make it do something sane rather than
             * cause an interrupt storm. This is close to
             * what you'd probably see happen in real hardware.
             */
            src->pending = 0;
            openpic_update_irq(opp, n_IRQ);
        }
    }
}
423
/* Read the Interrupt Destination Register for IRQ n_IRQ. */
static inline uint32_t read_IRQreg_idr(OpenPICState *opp, int n_IRQ)
{
    return opp->src[n_IRQ].idr;
}
428
read_IRQreg_ilr(OpenPICState * opp,int n_IRQ)429 static inline uint32_t read_IRQreg_ilr(OpenPICState *opp, int n_IRQ)
430 {
431 if (opp->flags & OPENPIC_FLAG_ILR) {
432 return output_to_inttgt(opp->src[n_IRQ].output);
433 }
434
435 return 0xffffffff;
436 }
437
/* Read the Interrupt Vector/Priority Register for IRQ n_IRQ. */
static inline uint32_t read_IRQreg_ivpr(OpenPICState *opp, int n_IRQ)
{
    return opp->src[n_IRQ].ivpr;
}
442
/* Write the Interrupt Destination Register for IRQ n_IRQ.
 *
 * Writable bits are one per CPU plus, on variants with critical-interrupt
 * support (OPENPIC_FLAG_IDR_CRIT), a per-CPU critical-delivery field and
 * the external-pin (EP) bit.  Derives src->output, src->nomask and
 * src->destmask from the stored value.
 */
static inline void write_IRQreg_idr(OpenPICState *opp, int n_IRQ, uint32_t val)
{
    IRQSource *src = &opp->src[n_IRQ];
    uint32_t normal_mask = (1UL << opp->nb_cpus) - 1;
    uint32_t crit_mask = 0;
    uint32_t mask = normal_mask;
    int crit_shift = IDR_EP_SHIFT - opp->nb_cpus;
    int i;

    if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
        crit_mask = mask << crit_shift;
        /* Use the locally defined IDR_EP_MASK for the EP bit; the bare
         * IDR_EP name is not defined in this file. */
        mask |= crit_mask | IDR_EP_MASK;
    }

    src->idr = val & mask;
    DPRINTF("Set IDR %d to 0x%08x", n_IRQ, src->idr);

    if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
        if (src->idr & crit_mask) {
            if (src->idr & normal_mask) {
                DPRINTF("%s: IRQ configured for multiple output types, using "
                        "critical", __func__);
            }

            src->output = OPENPIC_OUTPUT_CINT;
            src->nomask = true;
            src->destmask = 0;

            for (i = 0; i < opp->nb_cpus; i++) {
                int n_ci = IDR_CI0_SHIFT - i;

                if (src->idr & (1UL << n_ci)) {
                    src->destmask |= 1UL << i;
                }
            }
        } else {
            src->output = OPENPIC_OUTPUT_INT;
            src->nomask = false;
            src->destmask = src->idr & normal_mask;
        }
    } else {
        src->destmask = src->idr;
    }
}
487
/* Write the Interrupt Level Register: selects which output line
 * (INT/CINT/MCP) the source drives.  Only available with
 * OPENPIC_FLAG_ILR; ignored otherwise.
 */
static inline void write_IRQreg_ilr(OpenPICState *opp, int n_IRQ, uint32_t val)
{
    if (opp->flags & OPENPIC_FLAG_ILR) {
        IRQSource *src = &opp->src[n_IRQ];

        src->output = inttgt_to_output(val & ILR_INTTGT_MASK);
        /* Log the value actually written to the ILR, not the unrelated
         * IDR contents as before. */
        DPRINTF("Set ILR %d to 0x%08x, output %d", n_IRQ, val,
                src->output);

        /* TODO: on MPIC v4.0 only, set nomask for non-INT */
    }
}
500
/* Write the Interrupt Vector/Priority Register for IRQ n_IRQ and
 * re-evaluate delivery.  The ACTIVITY bit is read-only and preserved;
 * sense/polarity handling depends on the source type below.
 */
static inline void write_IRQreg_ivpr(OpenPICState *opp, int n_IRQ, uint32_t val)
{
    uint32_t mask;

    /* NOTE when implementing newer FSL MPIC models: starting with v4.0,
     * the polarity bit is read-only on internal interrupts.
     */
    mask = IVPR_MASK_MASK | IVPR_PRIORITY_MASK | IVPR_SENSE_MASK |
           IVPR_POLARITY_MASK | opp->vector_mask;

    /* ACTIVITY bit is read-only */
    opp->src[n_IRQ].ivpr =
        (opp->src[n_IRQ].ivpr & IVPR_ACTIVITY_MASK) | (val & mask);

    /* For FSL internal interrupts, The sense bit is reserved and zero,
     * and the interrupt is always level-triggered. Timers and IPIs
     * have no sense or polarity bits, and are edge-triggered.
     */
    switch (opp->src[n_IRQ].type) {
    case IRQ_TYPE_NORMAL:
        opp->src[n_IRQ].level = !!(opp->src[n_IRQ].ivpr & IVPR_SENSE_MASK);
        break;

    case IRQ_TYPE_FSLINT:
        opp->src[n_IRQ].ivpr &= ~IVPR_SENSE_MASK;
        break;

    case IRQ_TYPE_FSLSPECIAL:
        opp->src[n_IRQ].ivpr &= ~(IVPR_POLARITY_MASK | IVPR_SENSE_MASK);
        break;
    }

    openpic_update_irq(opp, n_IRQ);
    DPRINTF("Set IVPR %d to 0x%08x -> 0x%08x", n_IRQ, val,
            opp->src[n_IRQ].ivpr);
}
537
/* Handle a write to the Global Configuration Register.  A write with the
 * reset bit set performs a full controller reset; otherwise only the mode
 * field (pass-through/mixed/proxy) is updated and the e500 proxy-mode
 * setting is propagated to the machine.
 */
static void openpic_gcr_write(OpenPICState *opp, uint64_t val)
{
    if (val & GCR_RESET) {
        openpic_reset(DEVICE(opp));
        return;
    }

    opp->gcr = (opp->gcr & ~opp->mpic_mode_mask) | (val & opp->mpic_mode_mask);

    /* Tell the machine whether external proxy mode is now selected. */
    ppce500_set_mpic_proxy((val & opp->mpic_mode_mask) == GCR_MODE_PROXY);
}
557
/* MMIO write handler for the global register block.
 *
 * Unaligned (non-16-byte) accesses are silently dropped.  Offsets
 * 0x40-0xB0 alias the per-CPU registers of the CPU performing the access.
 */
static void openpic_gbl_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned len)
{
    OpenPICState *opp = opaque;
    IRQDest *dst;
    int idx;

    DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64,
            __func__, addr, val);
    if (addr & 0xF) {
        return;
    }
    switch (addr) {
    case 0x00: /* Block Revision Register1 (BRR1) is Readonly */
        break;
    case 0x40:
    case 0x50:
    case 0x60:
    case 0x70:
    case 0x80:
    case 0x90:
    case 0xA0:
    case 0xB0:
        openpic_cpu_write_internal(opp, addr, val, get_current_cpu());
        break;
    case 0x1000: /* FRR */
        break;
    case 0x1020: /* GCR */
        openpic_gcr_write(opp, val);
        break;
    case 0x1080: /* VIR */
        break;
    case 0x1090: /* PIR */
        /* Each PIR bit soft-resets one CPU: raise/lower the RESET output
         * on every 0->1 / 1->0 transition. */
        for (idx = 0; idx < opp->nb_cpus; idx++) {
            if ((val & (1 << idx)) && !(opp->pir & (1 << idx))) {
                DPRINTF("Raise OpenPIC RESET output for CPU %d", idx);
                dst = &opp->dst[idx];
                qemu_irq_raise(dst->irqs[OPENPIC_OUTPUT_RESET]);
            } else if (!(val & (1 << idx)) && (opp->pir & (1 << idx))) {
                DPRINTF("Lower OpenPIC RESET output for CPU %d", idx);
                dst = &opp->dst[idx];
                qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_RESET]);
            }
        }
        opp->pir = val;
        break;
    case 0x10A0: /* IPI_IVPR */
    case 0x10B0:
    case 0x10C0:
    case 0x10D0:
        /* Reuse the function-scope idx; the previous inner block declared
         * a shadowing copy of it. */
        idx = (addr - 0x10A0) >> 4;
        write_IRQreg_ivpr(opp, opp->irq_ipi0 + idx, val);
        break;
    case 0x10E0: /* SPVE */
        opp->spve = val & opp->vector_mask;
        break;
    default:
        break;
    }
}
621
/* MMIO read handler for the global register block.  Unaligned reads
 * return all-ones; offsets 0x00-0xB0 alias the per-CPU registers of the
 * CPU performing the access; PIR always reads as zero.
 */
static uint64_t openpic_gbl_read(void *opaque, hwaddr addr, unsigned len)
{
    OpenPICState *opp = opaque;
    uint32_t retval;

    DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);
    retval = 0xFFFFFFFF;
    if (addr & 0xF) {
        return retval;
    }
    switch (addr) {
    case 0x1000: /* FRR */
        retval = opp->frr;
        break;
    case 0x1020: /* GCR */
        retval = opp->gcr;
        break;
    case 0x1080: /* VIR */
        retval = opp->vir;
        break;
    case 0x1090: /* PIR */
        retval = 0x00000000;
        break;
    case 0x00: /* Block Revision Register1 (BRR1) */
        retval = opp->brr1;
        break;
    case 0x40:
    case 0x50:
    case 0x60:
    case 0x70:
    case 0x80:
    case 0x90:
    case 0xA0:
    case 0xB0:
        retval = openpic_cpu_read_internal(opp, addr, get_current_cpu());
        break;
    case 0x10A0: /* IPI_IVPR */
    case 0x10B0:
    case 0x10C0:
    case 0x10D0:
        {
            int idx;
            idx = (addr - 0x10A0) >> 4;
            retval = read_IRQreg_ivpr(opp, opp->irq_ipi0 + idx);
        }
        break;
    case 0x10E0: /* SPVE */
        retval = opp->spve;
        break;
    default:
        break;
    }
    DPRINTF("%s: => 0x%08x", __func__, retval);

    return retval;
}
678
679 static void openpic_tmr_set_tmr(OpenPICTimer *tmr, uint32_t val, bool enabled);
680
/* QEMU timer expiry callback for one openpic timer: reloads the current
 * count from the base count, flips the toggle bit, rearms the timer and
 * pulses the timer's interrupt source.
 */
static void qemu_timer_cb(void *opaque)
{
    OpenPICTimer *tmr = opaque;
    OpenPICState *opp = tmr->opp;
    uint32_t n_IRQ = tmr->n_IRQ;
    uint32_t val = tmr->tbcr & ~TBCR_CI;
    uint32_t tog = ((tmr->tccr & TCCR_TOG) ^ TCCR_TOG); /* invert toggle. */

    DPRINTF("%s n_IRQ=%d", __func__, n_IRQ);
    /* Reload current count from base count and setup timer. */
    tmr->tccr = val | tog;
    openpic_tmr_set_tmr(tmr, val, /*enabled=*/true);
    /* Raise the interrupt. */
    opp->src[n_IRQ].destmask = read_IRQreg_idr(opp, n_IRQ);
    openpic_set_irq(opp, n_IRQ, 1);
    openpic_set_irq(opp, n_IRQ, 0);
}
698
699 /* If enabled is true, arranges for an interrupt to be raised val clocks into
700 the future, if enabled is false cancels the timer. */
static void openpic_tmr_set_tmr(OpenPICTimer *tmr, uint32_t val, bool enabled)
{
    uint64_t ns = ticks_to_ns(val & ~TCCR_TOG);
    /* A count of zero causes a timer to be set to expire immediately. This
       effectively stops the simulation since the timer is constantly expiring
       which prevents guest code execution, so we don't honor that
       configuration. On real hardware, this situation would generate an
       interrupt on every clock cycle if the interrupt was unmasked. */
    if ((ns == 0) || !enabled) {
        tmr->qemu_timer_active = false;
        /* Keep only the toggle bit; the countdown value is cleared. */
        tmr->tccr = tmr->tccr & TCCR_TOG;
        timer_del(tmr->qemu_timer); /* set timer to never expire. */
    } else {
        tmr->qemu_timer_active = true;
        uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        /* Record the start time so reads can compute the live count. */
        tmr->origin_time = now;
        timer_mod(tmr->qemu_timer, now + ns); /* set timer expiration. */
    }
}
720
721 /* Returns the currrent tccr value, i.e., timer value (in clocks) with
722 appropriate TOG. */
openpic_tmr_get_timer(OpenPICTimer * tmr)723 static uint64_t openpic_tmr_get_timer(OpenPICTimer *tmr)
724 {
725 uint64_t retval;
726 if (!tmr->qemu_timer_active) {
727 retval = tmr->tccr;
728 } else {
729 uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
730 uint64_t used = now - tmr->origin_time; /* nsecs */
731 uint32_t used_ticks = (uint32_t)ns_to_ticks(used);
732 uint32_t count = (tmr->tccr & ~TCCR_TOG) - used_ticks;
733 retval = (uint32_t)((tmr->tccr & TCCR_TOG) | (count & ~TCCR_TOG));
734 }
735 return retval;
736 }
737
/* MMIO write handler for the timer register block (file offset 0x10F0).
 * Layout: TFRR at +0, then per-timer groups of TCCR/TBCR/TVPR/TDR.
 */
static void openpic_tmr_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned len)
{
    OpenPICState *opp = opaque;
    int idx;

    DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64,
            __func__, (addr + 0x10f0), val);
    if (addr & 0xF) {
        return;
    }

    if (addr == 0) {
        /* TFRR */
        opp->tfrr = val;
        return;
    }
    addr -= 0x10; /* correct for TFRR */
    idx = (addr >> 6) & 0x3;

    switch (addr & 0x30) {
    case 0x00: /* TCCR */
        break;
    case 0x10: /* TBCR */
        /* Did the enable status change? */
        if ((opp->timers[idx].tbcr & TBCR_CI) != (val & TBCR_CI)) {
            /* Did "Count Inhibit" transition from 1 to 0? */
            if ((val & TBCR_CI) == 0) {
                opp->timers[idx].tccr = val & ~TCCR_TOG;
            }
            openpic_tmr_set_tmr(&opp->timers[idx],
                                (val & ~TBCR_CI),
                                /*enabled=*/((val & TBCR_CI) == 0));
        }
        opp->timers[idx].tbcr = val;
        break;
    case 0x20: /* TVPR */
        write_IRQreg_ivpr(opp, opp->irq_tim0 + idx, val);
        break;
    case 0x30: /* TDR */
        write_IRQreg_idr(opp, opp->irq_tim0 + idx, val);
        break;
    }
}
782
/* MMIO read handler for the timer register block (file offset 0x10F0).
 * TCCR reads return the live countdown value including the toggle bit.
 */
static uint64_t openpic_tmr_read(void *opaque, hwaddr addr, unsigned len)
{
    OpenPICState *opp = opaque;
    uint32_t retval = -1;
    int idx;

    DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr + 0x10f0);
    if (addr & 0xF) {
        goto out;
    }
    if (addr == 0) {
        /* TFRR */
        retval = opp->tfrr;
        goto out;
    }
    addr -= 0x10; /* correct for TFRR */
    idx = (addr >> 6) & 0x3;
    switch (addr & 0x30) {
    case 0x00: /* TCCR */
        retval = openpic_tmr_get_timer(&opp->timers[idx]);
        break;
    case 0x10: /* TBCR */
        retval = opp->timers[idx].tbcr;
        break;
    case 0x20: /* TVPR */
        retval = read_IRQreg_ivpr(opp, opp->irq_tim0 + idx);
        break;
    case 0x30: /* TDR */
        retval = read_IRQreg_idr(opp, opp->irq_tim0 + idx);
        break;
    }

out:
    DPRINTF("%s: => 0x%08x", __func__, retval);

    return retval;
}
820
/* MMIO write handler for the per-source register block.  Each source
 * occupies 0x20 bytes: IVPR at +0x00, IDR at +0x10, ILR at +0x18.
 */
static void openpic_src_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned len)
{
    OpenPICState *opp = opaque;
    int n_IRQ;

    DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64,
            __func__, addr, val);

    addr &= 0xffff;
    n_IRQ = addr >> 5;

    switch (addr & 0x1f) {
    case 0x00:
        write_IRQreg_ivpr(opp, n_IRQ, val);
        break;
    case 0x10:
        write_IRQreg_idr(opp, n_IRQ, val);
        break;
    case 0x18:
        write_IRQreg_ilr(opp, n_IRQ, val);
        break;
    }
}
845
/* MMIO read handler for the per-source register block; mirrors the write
 * handler's layout (IVPR/IDR/ILR per 0x20-byte source slot).  Unmapped
 * offsets read as all-ones.
 */
static uint64_t openpic_src_read(void *opaque, uint64_t addr, unsigned len)
{
    OpenPICState *opp = opaque;
    uint32_t retval = 0xFFFFFFFF;
    int n_IRQ;

    DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);

    addr &= 0xffff;
    n_IRQ = addr >> 5;

    switch (addr & 0x1f) {
    case 0x00:
        retval = read_IRQreg_ivpr(opp, n_IRQ);
        break;
    case 0x10:
        retval = read_IRQreg_idr(opp, n_IRQ);
        break;
    case 0x18:
        retval = read_IRQreg_ilr(opp, n_IRQ);
        break;
    }

    DPRINTF("%s: => 0x%08x", __func__, retval);
    return retval;
}
873
/* MMIO write handler for the MSI register block.  A write to MSIIR latches
 * the message bit (IBS field) into the selected shared MSI register (SRS
 * field) and asserts the corresponding MSI interrupt source.
 */
static void openpic_msi_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned size)
{
    OpenPICState *opp = opaque;
    int idx = opp->irq_msi;
    int srs, ibs;

    DPRINTF("%s: addr %#" HWADDR_PRIx " <= 0x%08" PRIx64,
            __func__, addr, val);
    if (addr & 0xF) {
        return;
    }

    switch (addr) {
    case MSIIR_OFFSET:
        srs = val >> MSIIR_SRS_SHIFT;
        idx += srs;
        ibs = (val & MSIIR_IBS_MASK) >> MSIIR_IBS_SHIFT;
        opp->msi[srs].msir |= 1 << ibs;
        openpic_set_irq(opp, idx, 1);
        break;
    default:
        /* most registers are read-only, thus ignored */
        break;
    }
}
900
/* MMIO read handler for the MSI register block.  MSIR registers are
 * clear-on-read and reading one retires the pending MSI interrupt;
 * MSISR summarizes which MSIRs have bits pending.
 */
static uint64_t openpic_msi_read(void *opaque, hwaddr addr, unsigned size)
{
    OpenPICState *opp = opaque;
    uint64_t r = 0;
    int i, srs;

    DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);
    if (addr & 0xF) {
        return -1;
    }

    srs = addr >> 4;

    switch (addr) {
    case 0x00:
    case 0x10:
    case 0x20:
    case 0x30:
    case 0x40:
    case 0x50:
    case 0x60:
    case 0x70: /* MSIRs */
        r = opp->msi[srs].msir;
        /* Clear on read */
        opp->msi[srs].msir = 0;
        openpic_set_irq(opp, opp->irq_msi + srs, 0);
        break;
    case 0x120: /* MSISR */
        for (i = 0; i < MAX_MSI; i++) {
            r |= (opp->msi[i].msir ? 1 : 0) << i;
        }
        break;
    }

    return r;
}
937
openpic_summary_read(void * opaque,hwaddr addr,unsigned size)938 static uint64_t openpic_summary_read(void *opaque, hwaddr addr, unsigned size)
939 {
940 uint64_t r = 0;
941
942 DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);
943
944 /* TODO: EISR/EIMR */
945
946 return r;
947 }
948
/* MMIO write handler for the interrupt summary block; writes are ignored
 * until EISR/EIMR are implemented.
 */
static void openpic_summary_write(void *opaque, hwaddr addr, uint64_t val,
                                  unsigned size)
{
    DPRINTF("%s: addr %#" HWADDR_PRIx " <= 0x%08" PRIx64,
            __func__, addr, val);

    /* TODO: EISR/EIMR */
}
957
/* Write a per-CPU register for CPU "idx" (IPI dispatch, CTPR, EOI).
 *
 * Out-of-range CPU indices (e.g. -1 from get_current_cpu() when no vCPU
 * is executing) and unaligned accesses are silently ignored.
 */
static void openpic_cpu_write_internal(void *opaque, hwaddr addr,
                                       uint32_t val, int idx)
{
    OpenPICState *opp = opaque;
    IRQSource *src;
    IRQDest *dst;
    int s_IRQ, n_IRQ;

    DPRINTF("%s: cpu %d addr %#" HWADDR_PRIx " <= 0x%08x", __func__, idx,
            addr, val);

    if (idx < 0 || idx >= opp->nb_cpus) {
        return;
    }

    if (addr & 0xF) {
        return;
    }
    dst = &opp->dst[idx];
    addr &= 0xFF0;
    switch (addr) {
    case 0x40: /* IPIDR */
    case 0x50:
    case 0x60:
    case 0x70:
        idx = (addr - 0x40) >> 4;
        /* we use IDE as mask which CPUs to deliver the IPI to still. */
        opp->src[opp->irq_ipi0 + idx].destmask |= val;
        openpic_set_irq(opp, opp->irq_ipi0 + idx, 1);
        openpic_set_irq(opp, opp->irq_ipi0 + idx, 0);
        break;
    case 0x80: /* CTPR */
        dst->ctpr = val & 0x0000000F;

        DPRINTF("%s: set CPU %d ctpr to %d, raised %d servicing %d",
                __func__, idx, dst->ctpr, dst->raised.priority,
                dst->servicing.priority);

        if (dst->raised.priority <= dst->ctpr) {
            DPRINTF("%s: Lower OpenPIC INT output cpu %d due to ctpr",
                    __func__, idx);
            qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_INT]);
        } else if (dst->raised.priority > dst->servicing.priority) {
            DPRINTF("%s: Raise OpenPIC INT output cpu %d irq %d",
                    __func__, idx, dst->raised.next);
            qemu_irq_raise(dst->irqs[OPENPIC_OUTPUT_INT]);
        }

        break;
    case 0x90: /* WHOAMI */
        /* Read-only register */
        break;
    case 0xA0: /* IACK */
        /* Read-only register */
        break;
    case 0xB0: /* EOI */
        DPRINTF("EOI");
        s_IRQ = IRQ_get_next(opp, &dst->servicing);

        if (s_IRQ < 0) {
            DPRINTF("%s: EOI with no interrupt in service", __func__);
            break;
        }

        IRQ_resetbit(&dst->servicing, s_IRQ);
        /* Set up next servicing IRQ */
        s_IRQ = IRQ_get_next(opp, &dst->servicing);
        /* Check queued interrupts. */
        n_IRQ = IRQ_get_next(opp, &dst->raised);
        if (n_IRQ != -1) {
            /* Only form &opp->src[n_IRQ] once we know the raised queue is
             * non-empty; the previous code computed the out-of-bounds
             * address &opp->src[-1] when it was empty. */
            src = &opp->src[n_IRQ];
            if (s_IRQ == -1 ||
                IVPR_PRIORITY(src->ivpr) > dst->servicing.priority) {
                DPRINTF("Raise OpenPIC INT output cpu %d irq %d",
                        idx, n_IRQ);
                qemu_irq_raise(opp->dst[idx].irqs[OPENPIC_OUTPUT_INT]);
            }
        }
        break;
    default:
        break;
    }
}
1040
/* MMIO write handler for the per-CPU register region; address bits 12-16
 * select which CPU's register bank is targeted.
 */
static void openpic_cpu_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned len)
{
    openpic_cpu_write_internal(opaque, addr, val, (addr & 0x1f000) >> 12);
}
1046
1047
/* Perform an interrupt acknowledge on behalf of CPU "cpu": return the
 * vector of the highest-priority raised interrupt, move it into the
 * servicing queue and lower the INT output.  Returns the spurious vector
 * (SPVE) when nothing is pending or the candidate fails the
 * activity/priority sanity check.
 */
static uint32_t openpic_iack(OpenPICState *opp, IRQDest *dst, int cpu)
{
    IRQSource *src;
    int retval, irq;

    DPRINTF("Lower OpenPIC INT output");
    qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_INT]);

    irq = IRQ_get_next(opp, &dst->raised);
    DPRINTF("IACK: irq=%d", irq);

    if (irq == -1) {
        /* No more interrupt pending */
        return opp->spve;
    }

    src = &opp->src[irq];
    if (!(src->ivpr & IVPR_ACTIVITY_MASK) ||
        !(IVPR_PRIORITY(src->ivpr) > dst->ctpr)) {
        error_report("%s: bad raised IRQ %d ctpr %d ivpr 0x%08x",
                     __func__, irq, dst->ctpr, src->ivpr);
        openpic_update_irq(opp, irq);
        retval = opp->spve;
    } else {
        /* IRQ enter servicing state */
        IRQ_setbit(&dst->servicing, irq);
        retval = IVPR_VECTOR(opp, src->ivpr);
    }

    if (!src->level) {
        /* edge-sensitive IRQ */
        src->ivpr &= ~IVPR_ACTIVITY_MASK;
        src->pending = 0;
        IRQ_resetbit(&dst->raised, irq);
    }

    /* Timers and IPIs support multicast. */
    if (((irq >= opp->irq_ipi0) && (irq < (opp->irq_ipi0 + OPENPIC_MAX_IPI))) ||
        ((irq >= opp->irq_tim0) && (irq < (opp->irq_tim0 + OPENPIC_MAX_TMR)))) {
        DPRINTF("irq is IPI or TMR");
        src->destmask &= ~(1 << cpu);
        if (src->destmask && !src->level) {
            /* trigger on CPUs that didn't know about it yet */
            openpic_set_irq(opp, irq, 1);
            openpic_set_irq(opp, irq, 0);
            /* if all CPUs knew about it, set active bit again */
            src->ivpr |= IVPR_ACTIVITY_MASK;
        }
    }

    return retval;
}
1100
openpic_cpu_read_internal(void * opaque,hwaddr addr,int idx)1101 static uint32_t openpic_cpu_read_internal(void *opaque, hwaddr addr,
1102 int idx)
1103 {
1104 OpenPICState *opp = opaque;
1105 IRQDest *dst;
1106 uint32_t retval;
1107
1108 DPRINTF("%s: cpu %d addr %#" HWADDR_PRIx, __func__, idx, addr);
1109 retval = 0xFFFFFFFF;
1110
1111 if (idx < 0 || idx >= opp->nb_cpus) {
1112 return retval;
1113 }
1114
1115 if (addr & 0xF) {
1116 return retval;
1117 }
1118 dst = &opp->dst[idx];
1119 addr &= 0xFF0;
1120 switch (addr) {
1121 case 0x80: /* CTPR */
1122 retval = dst->ctpr;
1123 break;
1124 case 0x90: /* WHOAMI */
1125 retval = idx;
1126 break;
1127 case 0xA0: /* IACK */
1128 retval = openpic_iack(opp, dst, idx);
1129 break;
1130 case 0xB0: /* EOI */
1131 retval = 0;
1132 break;
1133 default:
1134 break;
1135 }
1136 DPRINTF("%s: => 0x%08x", __func__, retval);
1137
1138 return retval;
1139 }
1140
openpic_cpu_read(void * opaque,hwaddr addr,unsigned len)1141 static uint64_t openpic_cpu_read(void *opaque, hwaddr addr, unsigned len)
1142 {
1143 return openpic_cpu_read_internal(opaque, addr, (addr & 0x1f000) >> 12);
1144 }
1145
/*
 * MMIO accessor tables for each register bank.  The glb/tmr/cpu/src
 * banks exist in both little-endian and big-endian flavours (selected
 * per machine model in openpic_realize); the FSL-only msi and summary
 * banks are big-endian.  All guest accesses are performed internally
 * as 32-bit operations (impl.min/max_access_size).
 */
static const MemoryRegionOps openpic_glb_ops_le = {
    .write = openpic_gbl_write,
    .read = openpic_gbl_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_glb_ops_be = {
    .write = openpic_gbl_write,
    .read = openpic_gbl_read,
    .endianness = DEVICE_BIG_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_tmr_ops_le = {
    .write = openpic_tmr_write,
    .read = openpic_tmr_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_tmr_ops_be = {
    .write = openpic_tmr_write,
    .read = openpic_tmr_read,
    .endianness = DEVICE_BIG_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_cpu_ops_le = {
    .write = openpic_cpu_write,
    .read = openpic_cpu_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_cpu_ops_be = {
    .write = openpic_cpu_write,
    .read = openpic_cpu_read,
    .endianness = DEVICE_BIG_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_src_ops_le = {
    .write = openpic_src_write,
    .read = openpic_src_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_src_ops_be = {
    .write = openpic_src_write,
    .read = openpic_src_read,
    .endianness = DEVICE_BIG_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

/* MSI registers, present only on the Freescale MPIC models. */
static const MemoryRegionOps openpic_msi_ops_be = {
    .read = openpic_msi_read,
    .write = openpic_msi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

/* Interrupt summary registers, present only on the Freescale MPIC models. */
static const MemoryRegionOps openpic_summary_ops_be = {
    .read = openpic_summary_read,
    .write = openpic_summary_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
1245
/*
 * Device reset: restore all controller, source, destination and timer
 * state to the model-specific reset values set up at realize time.
 */
static void openpic_reset(DeviceState *d)
{
    OpenPICState *opp = OPENPIC(d);
    int i;

    /* Enter the RESET state while reinitialising, then leave it below. */
    opp->gcr = GCR_RESET;
    /* Initialise controller registers */
    opp->frr = ((opp->nb_irqs - 1) << FRR_NIRQ_SHIFT) |
               ((opp->nb_cpus - 1) << FRR_NCPU_SHIFT) |
               (opp->vid << FRR_VID_SHIFT);

    opp->pir = 0;
    /* Spurious vector: all vector bits set. */
    opp->spve = -1 & opp->vector_mask;
    opp->tfrr = opp->tfrr_reset;
    /* Initialise IRQ sources */
    for (i = 0; i < opp->max_irq; i++) {
        opp->src[i].ivpr = opp->ivpr_reset;
        switch (opp->src[i].type) {
        case IRQ_TYPE_NORMAL:
            /* Sense (level vs edge) follows the IVPR reset value. */
            opp->src[i].level = !!(opp->ivpr_reset & IVPR_SENSE_MASK);
            break;

        case IRQ_TYPE_FSLINT:
            opp->src[i].ivpr |= IVPR_POLARITY_MASK;
            break;

        case IRQ_TYPE_FSLSPECIAL:
            break;
        }

        write_IRQreg_idr(opp, i, opp->idr_reset);
    }
    /* Initialise IRQ destinations */
    for (i = 0; i < opp->nb_cpus; i++) {
        /* CTPR 15 masks everything until the guest lowers it. */
        opp->dst[i].ctpr      = 15;
        opp->dst[i].raised.next = -1;
        opp->dst[i].raised.priority = 0;
        bitmap_clear(opp->dst[i].raised.queue, 0, IRQQUEUE_SIZE_BITS);
        opp->dst[i].servicing.next = -1;
        opp->dst[i].servicing.priority = 0;
        bitmap_clear(opp->dst[i].servicing.queue, 0, IRQQUEUE_SIZE_BITS);
    }
    /* Initialise timers */
    for (i = 0; i < OPENPIC_MAX_TMR; i++) {
        opp->timers[i].tccr = 0;
        opp->timers[i].tbcr = TBCR_CI;
        if (opp->timers[i].qemu_timer_active) {
            timer_del(opp->timers[i].qemu_timer);  /* Inhibit timer */
            opp->timers[i].qemu_timer_active = false;
        }
    }
    /* Go out of RESET state */
    opp->gcr = 0;
}
1300
/* Description of one sub-region of the OpenPIC MMIO window. */
typedef struct MemReg {
    const char             *name;       /* subregion name; NULL terminates a list */
    MemoryRegionOps const  *ops;        /* endian-specific accessors */
    hwaddr                  start_addr; /* offset within the OpenPIC window */
    ram_addr_t              size;
} MemReg;
1307
/*
 * Shared setup for the Freescale MPIC models: version/reset values,
 * layout of the virtual IRQ space (external and internal sources first,
 * then IPIs and timers), and creation of the per-timer QEMU timers.
 */
static void fsl_common_init(OpenPICState *opp)
{
    int i;
    int virq = OPENPIC_MAX_SRC;

    opp->vid = VID_REVISION_1_2;
    opp->vir = VIR_GENERIC;
    opp->vector_mask = 0xFFFF;
    opp->tfrr_reset = 0;
    opp->ivpr_reset = IVPR_MASK_MASK;
    opp->idr_reset = 1 << 0;
    opp->max_irq = OPENPIC_MAX_IRQ;

    /* IPIs and timers are mapped after the regular interrupt sources. */
    opp->irq_ipi0 = virq;
    virq += OPENPIC_MAX_IPI;
    opp->irq_tim0 = virq;
    virq += OPENPIC_MAX_TMR;

    assert(virq <= OPENPIC_MAX_IRQ);

    opp->irq_msi = 224;

    msi_nonbroken = true;
    /* External interrupts default to edge-sensitive. */
    for (i = 0; i < opp->fsl->max_ext; i++) {
        opp->src[i].level = false;
    }

    /* Internal interrupts, including message and MSI */
    for (i = 16; i < OPENPIC_MAX_SRC; i++) {
        opp->src[i].type = IRQ_TYPE_FSLINT;
        opp->src[i].level = true;
    }

    /* timers and IPIs */
    for (i = OPENPIC_MAX_SRC; i < virq; i++) {
        opp->src[i].type = IRQ_TYPE_FSLSPECIAL;
        opp->src[i].level = false;
    }

    for (i = 0; i < OPENPIC_MAX_TMR; i++) {
        /* Timers start inactive; their QEMU timer is armed on demand. */
        opp->timers[i].n_IRQ = opp->irq_tim0 + i;
        opp->timers[i].qemu_timer_active = false;
        opp->timers[i].qemu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                                 &qemu_timer_cb,
                                                 &opp->timers[i]);
        opp->timers[i].opp = opp;
    }
}
1356
/*
 * Create and map an MMIO subregion for every entry of a NULL-terminated
 * MemReg list, advancing *count past the slots consumed in sub_io_mem.
 */
static void map_list(OpenPICState *opp, const MemReg *list, int *count)
{
    for (; list->name; list++, (*count)++) {
        assert(*count < ARRAY_SIZE(opp->sub_io_mem));

        memory_region_init_io(&opp->sub_io_mem[*count], OBJECT(opp), list->ops,
                              opp, list->name, list->size);

        memory_region_add_subregion(&opp->mem, list->start_addr,
                                    &opp->sub_io_mem[*count]);
    }
}
1372
/* Migration state for one IRQ queue (raised or servicing). */
static const VMStateDescription vmstate_openpic_irq_queue = {
    .name = "openpic_irq_queue",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_BITMAP(queue, IRQQueue, 0, queue_size),
        VMSTATE_INT32(next, IRQQueue),
        VMSTATE_INT32(priority, IRQQueue),
        VMSTATE_END_OF_LIST()
    }
};

/* Migration state for one interrupt destination (CPU). */
static const VMStateDescription vmstate_openpic_irqdest = {
    .name = "openpic_irqdest",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(ctpr, IRQDest),
        VMSTATE_STRUCT(raised, IRQDest, 0, vmstate_openpic_irq_queue,
                       IRQQueue),
        VMSTATE_STRUCT(servicing, IRQDest, 0, vmstate_openpic_irq_queue,
                       IRQQueue),
        VMSTATE_UINT32_ARRAY(outputs_active, IRQDest, OPENPIC_OUTPUT_NB),
        VMSTATE_END_OF_LIST()
    }
};

/* Migration state for one interrupt source. */
static const VMStateDescription vmstate_openpic_irqsource = {
    .name = "openpic_irqsource",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(ivpr, IRQSource),
        VMSTATE_UINT32(idr, IRQSource),
        VMSTATE_UINT32(destmask, IRQSource),
        VMSTATE_INT32(last_cpu, IRQSource),
        VMSTATE_INT32(pending, IRQSource),
        VMSTATE_END_OF_LIST()
    }
};

/* Migration state for one global timer (current and base count). */
static const VMStateDescription vmstate_openpic_timer = {
    .name = "openpic_timer",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(tccr, OpenPICTimer),
        VMSTATE_UINT32(tbcr, OpenPICTimer),
        VMSTATE_END_OF_LIST()
    }
};

/* Migration state for one MSI register bank. */
static const VMStateDescription vmstate_openpic_msi = {
    .name = "openpic_msi",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(msir, OpenPICMSI),
        VMSTATE_END_OF_LIST()
    }
};
1434
openpic_post_load(void * opaque,int version_id)1435 static int openpic_post_load(void *opaque, int version_id)
1436 {
1437 OpenPICState *opp = (OpenPICState *)opaque;
1438 int i;
1439
1440 /* Update internal ivpr and idr variables */
1441 for (i = 0; i < opp->max_irq; i++) {
1442 write_IRQreg_idr(opp, i, opp->src[i].idr);
1443 write_IRQreg_ivpr(opp, i, opp->src[i].ivpr);
1444 }
1445
1446 return 0;
1447 }
1448
/*
 * Top-level migration state.  nb_cpus must match on source and
 * destination (VMSTATE_UINT32_EQUAL); post_load re-derives cached state
 * from the restored idr/ivpr registers.
 */
static const VMStateDescription vmstate_openpic = {
    .name = "openpic",
    .version_id = 3,
    .minimum_version_id = 3,
    .post_load = openpic_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(gcr, OpenPICState),
        VMSTATE_UINT32(vir, OpenPICState),
        VMSTATE_UINT32(pir, OpenPICState),
        VMSTATE_UINT32(spve, OpenPICState),
        VMSTATE_UINT32(tfrr, OpenPICState),
        VMSTATE_UINT32(max_irq, OpenPICState),
        VMSTATE_STRUCT_VARRAY_UINT32(src, OpenPICState, max_irq, 0,
                                     vmstate_openpic_irqsource, IRQSource),
        VMSTATE_UINT32_EQUAL(nb_cpus, OpenPICState, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(dst, OpenPICState, nb_cpus, 0,
                                     vmstate_openpic_irqdest, IRQDest),
        VMSTATE_STRUCT_ARRAY(timers, OpenPICState, OPENPIC_MAX_TMR, 0,
                             vmstate_openpic_timer, OpenPICTimer),
        VMSTATE_STRUCT_ARRAY(msi, OpenPICState, MAX_MSI, 0,
                             vmstate_openpic_msi, OpenPICMSI),
        VMSTATE_UINT32(irq_ipi0, OpenPICState),
        VMSTATE_UINT32(irq_tim0, OpenPICState),
        VMSTATE_UINT32(irq_msi, OpenPICState),
        VMSTATE_END_OF_LIST()
    }
};
1476
/*
 * Instance init: create the 256KiB container region that realize will
 * populate with the per-bank subregions.
 */
static void openpic_init(Object *obj)
{
    OpenPICState *opp = OPENPIC(obj);

    memory_region_init(&opp->mem, obj, "openpic", 0x40000);
}
1483
/*
 * Realize: configure the controller for the selected model (register
 * layout, endianness, IRQ counts and reset values), map the MMIO
 * subregions, and wire up the per-CPU output IRQs and GPIO inputs.
 */
static void openpic_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *d = SYS_BUS_DEVICE(dev);
    OpenPICState *opp = OPENPIC(dev);
    int i, j;
    int list_count = 0;
    /* Little-endian register banks, used by Raven and KeyLargo. */
    static const MemReg list_le[] = {
        {"glb", &openpic_glb_ops_le,
         OPENPIC_GLB_REG_START, OPENPIC_GLB_REG_SIZE},
        {"tmr", &openpic_tmr_ops_le,
         OPENPIC_TMR_REG_START, OPENPIC_TMR_REG_SIZE},
        {"src", &openpic_src_ops_le,
         OPENPIC_SRC_REG_START, OPENPIC_SRC_REG_SIZE},
        {"cpu", &openpic_cpu_ops_le,
         OPENPIC_CPU_REG_START, OPENPIC_CPU_REG_SIZE},
        {NULL}
    };
    /* Big-endian register banks, used by the Freescale MPIC models. */
    static const MemReg list_be[] = {
        {"glb", &openpic_glb_ops_be,
         OPENPIC_GLB_REG_START, OPENPIC_GLB_REG_SIZE},
        {"tmr", &openpic_tmr_ops_be,
         OPENPIC_TMR_REG_START, OPENPIC_TMR_REG_SIZE},
        {"src", &openpic_src_ops_be,
         OPENPIC_SRC_REG_START, OPENPIC_SRC_REG_SIZE},
        {"cpu", &openpic_cpu_ops_be,
         OPENPIC_CPU_REG_START, OPENPIC_CPU_REG_SIZE},
        {NULL}
    };
    /* Additional Freescale-only banks (MSI and interrupt summary). */
    static const MemReg list_fsl[] = {
        {"msi", &openpic_msi_ops_be,
         OPENPIC_MSI_REG_START, OPENPIC_MSI_REG_SIZE},
        {"summary", &openpic_summary_ops_be,
         OPENPIC_SUMMARY_REG_START, OPENPIC_SUMMARY_REG_SIZE},
        {NULL}
    };

    if (opp->nb_cpus > MAX_CPU) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE,
                   TYPE_OPENPIC, "nb_cpus", (uint64_t)opp->nb_cpus,
                   (uint64_t)0, (uint64_t)MAX_CPU);
        return;
    }

    switch (opp->model) {
    case OPENPIC_MODEL_FSL_MPIC_20:
    default:
        opp->fsl = &fsl_mpic_20;
        opp->brr1 = 0x00400200;
        opp->flags |= OPENPIC_FLAG_IDR_CRIT;
        opp->nb_irqs = 80;
        opp->mpic_mode_mask = GCR_MODE_MIXED;

        fsl_common_init(opp);
        map_list(opp, list_be, &list_count);
        map_list(opp, list_fsl, &list_count);

        break;

    case OPENPIC_MODEL_FSL_MPIC_42:
        opp->fsl = &fsl_mpic_42;
        opp->brr1 = 0x00400402;
        opp->flags |= OPENPIC_FLAG_ILR;
        opp->nb_irqs = 196;
        opp->mpic_mode_mask = GCR_MODE_PROXY;

        fsl_common_init(opp);
        map_list(opp, list_be, &list_count);
        map_list(opp, list_fsl, &list_count);

        break;

    case OPENPIC_MODEL_RAVEN:
        opp->nb_irqs = RAVEN_MAX_EXT;
        opp->vid = VID_REVISION_1_3;
        opp->vir = VIR_GENERIC;
        opp->vector_mask = 0xFF;
        opp->tfrr_reset = 4160000;
        opp->ivpr_reset = IVPR_MASK_MASK | IVPR_MODE_MASK;
        opp->idr_reset = 0;
        opp->max_irq = RAVEN_MAX_IRQ;
        opp->irq_ipi0 = RAVEN_IPI_IRQ;
        opp->irq_tim0 = RAVEN_TMR_IRQ;
        opp->brr1 = -1;
        opp->mpic_mode_mask = GCR_MODE_MIXED;

        if (opp->nb_cpus != 1) {
            error_setg(errp, "Only UP supported today");
            return;
        }

        map_list(opp, list_le, &list_count);
        break;

    case OPENPIC_MODEL_KEYLARGO:
        opp->nb_irqs = KEYLARGO_MAX_EXT;
        opp->vid = VID_REVISION_1_2;
        opp->vir = VIR_GENERIC;
        opp->vector_mask = 0xFF;
        opp->tfrr_reset = 4160000;
        opp->ivpr_reset = IVPR_MASK_MASK | IVPR_MODE_MASK;
        opp->idr_reset = 0;
        opp->max_irq = KEYLARGO_MAX_IRQ;
        opp->irq_ipi0 = KEYLARGO_IPI_IRQ;
        opp->irq_tim0 = KEYLARGO_TMR_IRQ;
        opp->brr1 = -1;
        opp->mpic_mode_mask = GCR_MODE_MIXED;

        if (opp->nb_cpus != 1) {
            error_setg(errp, "Only UP supported today");
            return;
        }

        map_list(opp, list_le, &list_count);
        break;
    }

    for (i = 0; i < opp->nb_cpus; i++) {
        /* One output line per OPENPIC_OUTPUT_* kind, per CPU. */
        opp->dst[i].irqs = g_new0(qemu_irq, OPENPIC_OUTPUT_NB);
        for (j = 0; j < OPENPIC_OUTPUT_NB; j++) {
            sysbus_init_irq(d, &opp->dst[i].irqs[j]);
        }

        opp->dst[i].raised.queue_size = IRQQUEUE_SIZE_BITS;
        opp->dst[i].raised.queue = bitmap_new(IRQQUEUE_SIZE_BITS);
        opp->dst[i].servicing.queue_size = IRQQUEUE_SIZE_BITS;
        opp->dst[i].servicing.queue = bitmap_new(IRQQUEUE_SIZE_BITS);
    }

    sysbus_init_mmio(d, &opp->mem);
    qdev_init_gpio_in(dev, openpic_set_irq, opp->max_irq);
}
1615
/* User-settable properties: controller model and number of CPUs. */
static Property openpic_properties[] = {
    DEFINE_PROP_UINT32("model", OpenPICState, model, OPENPIC_MODEL_FSL_MPIC_20),
    DEFINE_PROP_UINT32("nb_cpus", OpenPICState, nb_cpus, 1),
    DEFINE_PROP_END_OF_LIST(),
};
1621
/* Class init: wire up realize/reset/migration hooks and properties. */
static void openpic_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->realize = openpic_realize;
    dc->props = openpic_properties;
    dc->reset = openpic_reset;
    dc->vmsd = &vmstate_openpic;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}
1632
/* QOM type registration for the OpenPIC sysbus device. */
static const TypeInfo openpic_info = {
    .name          = TYPE_OPENPIC,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(OpenPICState),
    .instance_init = openpic_init,
    .class_init    = openpic_class_init,
};

static void openpic_register_types(void)
{
    type_register_static(&openpic_info);
}
1645
1646 type_init(openpic_register_types)
1647