xref: /qemu/hw/ppc/ppc.c (revision 6170d09c)
1 /*
2  * QEMU generic PowerPC hardware System Emulator
3  *
4  * Copyright (c) 2003-2007 Jocelyn Mayer
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 #include "qemu/osdep.h"
26 #include "hw/irq.h"
27 #include "hw/ppc/ppc.h"
28 #include "hw/ppc/ppc_e500.h"
29 #include "qemu/timer.h"
30 #include "sysemu/cpus.h"
31 #include "qemu/log.h"
32 #include "qemu/main-loop.h"
33 #include "qemu/error-report.h"
34 #include "sysemu/kvm.h"
35 #include "sysemu/runstate.h"
36 #include "kvm_ppc.h"
37 #include "migration/vmstate.h"
38 #include "trace.h"
39 
40 static void cpu_ppc_tb_stop (CPUPPCState *env);
41 static void cpu_ppc_tb_start (CPUPPCState *env);
42 
/*
 * Raise (level != 0) or lower (level == 0) one interrupt source of @cpu.
 *
 * @irq is a PPC_INTERRUPT_* mask value (a bit, not an index): it is
 * OR-ed into / cleared from env->pending_interrupts directly.
 */
void ppc_set_irq(PowerPCCPU *cpu, int irq, int level)
{
    CPUPPCState *env = &cpu->env;
    unsigned int old_pending;

    /* We may already have the BQL if coming from the reset path */
    QEMU_IOTHREAD_LOCK_GUARD();

    old_pending = env->pending_interrupts;

    if (level) {
        env->pending_interrupts |= irq;
    } else {
        env->pending_interrupts &= ~irq;
    }

    /* Only re-evaluate (and notify KVM) when the pending set changed */
    if (old_pending != env->pending_interrupts) {
        ppc_maybe_interrupt(env);
        kvmppc_set_interrupt(cpu, irq, level);
    }

    trace_ppc_irq_set_exit(env, irq, level, env->pending_interrupts,
                           CPU(cpu)->interrupt_request);
}
67 
68 /* PowerPC 6xx / 7xx internal IRQ controller */
69 static void ppc6xx_set_irq(void *opaque, int pin, int level)
70 {
71     PowerPCCPU *cpu = opaque;
72     CPUPPCState *env = &cpu->env;
73     int cur_level;
74 
75     trace_ppc_irq_set(env, pin, level);
76 
77     cur_level = (env->irq_input_state >> pin) & 1;
78     /* Don't generate spurious events */
79     if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
80         CPUState *cs = CPU(cpu);
81 
82         switch (pin) {
83         case PPC6xx_INPUT_TBEN:
84             /* Level sensitive - active high */
85             trace_ppc_irq_set_state("time base", level);
86             if (level) {
87                 cpu_ppc_tb_start(env);
88             } else {
89                 cpu_ppc_tb_stop(env);
90             }
91             break;
92         case PPC6xx_INPUT_INT:
93             /* Level sensitive - active high */
94             trace_ppc_irq_set_state("external IRQ", level);
95             ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
96             break;
97         case PPC6xx_INPUT_SMI:
98             /* Level sensitive - active high */
99             trace_ppc_irq_set_state("SMI IRQ", level);
100             ppc_set_irq(cpu, PPC_INTERRUPT_SMI, level);
101             break;
102         case PPC6xx_INPUT_MCP:
103             /* Negative edge sensitive */
104             /* XXX: TODO: actual reaction may depends on HID0 status
105              *            603/604/740/750: check HID0[EMCP]
106              */
107             if (cur_level == 1 && level == 0) {
108                 trace_ppc_irq_set_state("machine check", 1);
109                 ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
110             }
111             break;
112         case PPC6xx_INPUT_CKSTP_IN:
113             /* Level sensitive - active low */
114             /* XXX: TODO: relay the signal to CKSTP_OUT pin */
115             /* XXX: Note that the only way to restart the CPU is to reset it */
116             if (level) {
117                 trace_ppc_irq_cpu("stop");
118                 cs->halted = 1;
119             }
120             break;
121         case PPC6xx_INPUT_HRESET:
122             /* Level sensitive - active low */
123             if (level) {
124                 trace_ppc_irq_reset("CPU");
125                 cpu_interrupt(cs, CPU_INTERRUPT_RESET);
126             }
127             break;
128         case PPC6xx_INPUT_SRESET:
129             trace_ppc_irq_set_state("RESET IRQ", level);
130             ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
131             break;
132         default:
133             g_assert_not_reached();
134         }
135         if (level)
136             env->irq_input_state |= 1 << pin;
137         else
138             env->irq_input_state &= ~(1 << pin);
139     }
140 }
141 
142 void ppc6xx_irq_init(PowerPCCPU *cpu)
143 {
144     qdev_init_gpio_in(DEVICE(cpu), ppc6xx_set_irq, PPC6xx_INPUT_NB);
145 }
146 
147 #if defined(TARGET_PPC64)
148 /* PowerPC 970 internal IRQ controller */
149 static void ppc970_set_irq(void *opaque, int pin, int level)
150 {
151     PowerPCCPU *cpu = opaque;
152     CPUPPCState *env = &cpu->env;
153     int cur_level;
154 
155     trace_ppc_irq_set(env, pin, level);
156 
157     cur_level = (env->irq_input_state >> pin) & 1;
158     /* Don't generate spurious events */
159     if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
160         CPUState *cs = CPU(cpu);
161 
162         switch (pin) {
163         case PPC970_INPUT_INT:
164             /* Level sensitive - active high */
165             trace_ppc_irq_set_state("external IRQ", level);
166             ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
167             break;
168         case PPC970_INPUT_THINT:
169             /* Level sensitive - active high */
170             trace_ppc_irq_set_state("SMI IRQ", level);
171             ppc_set_irq(cpu, PPC_INTERRUPT_THERM, level);
172             break;
173         case PPC970_INPUT_MCP:
174             /* Negative edge sensitive */
175             /* XXX: TODO: actual reaction may depends on HID0 status
176              *            603/604/740/750: check HID0[EMCP]
177              */
178             if (cur_level == 1 && level == 0) {
179                 trace_ppc_irq_set_state("machine check", 1);
180                 ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
181             }
182             break;
183         case PPC970_INPUT_CKSTP:
184             /* Level sensitive - active low */
185             /* XXX: TODO: relay the signal to CKSTP_OUT pin */
186             if (level) {
187                 trace_ppc_irq_cpu("stop");
188                 cs->halted = 1;
189             } else {
190                 trace_ppc_irq_cpu("restart");
191                 cs->halted = 0;
192                 qemu_cpu_kick(cs);
193             }
194             break;
195         case PPC970_INPUT_HRESET:
196             /* Level sensitive - active low */
197             if (level) {
198                 cpu_interrupt(cs, CPU_INTERRUPT_RESET);
199             }
200             break;
201         case PPC970_INPUT_SRESET:
202             trace_ppc_irq_set_state("RESET IRQ", level);
203             ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
204             break;
205         case PPC970_INPUT_TBEN:
206             trace_ppc_irq_set_state("TBEN IRQ", level);
207             /* XXX: TODO */
208             break;
209         default:
210             g_assert_not_reached();
211         }
212         if (level)
213             env->irq_input_state |= 1 << pin;
214         else
215             env->irq_input_state &= ~(1 << pin);
216     }
217 }
218 
219 void ppc970_irq_init(PowerPCCPU *cpu)
220 {
221     qdev_init_gpio_in(DEVICE(cpu), ppc970_set_irq, PPC970_INPUT_NB);
222 }
223 
224 /* POWER7 internal IRQ controller */
225 static void power7_set_irq(void *opaque, int pin, int level)
226 {
227     PowerPCCPU *cpu = opaque;
228 
229     trace_ppc_irq_set(&cpu->env, pin, level);
230 
231     switch (pin) {
232     case POWER7_INPUT_INT:
233         /* Level sensitive - active high */
234         trace_ppc_irq_set_state("external IRQ", level);
235         ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
236         break;
237     default:
238         g_assert_not_reached();
239     }
240 }
241 
242 void ppcPOWER7_irq_init(PowerPCCPU *cpu)
243 {
244     qdev_init_gpio_in(DEVICE(cpu), power7_set_irq, POWER7_INPUT_NB);
245 }
246 
247 /* POWER9 internal IRQ controller */
248 static void power9_set_irq(void *opaque, int pin, int level)
249 {
250     PowerPCCPU *cpu = opaque;
251 
252     trace_ppc_irq_set(&cpu->env, pin, level);
253 
254     switch (pin) {
255     case POWER9_INPUT_INT:
256         /* Level sensitive - active high */
257         trace_ppc_irq_set_state("external IRQ", level);
258         ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
259         break;
260     case POWER9_INPUT_HINT:
261         /* Level sensitive - active high */
262         trace_ppc_irq_set_state("HV external IRQ", level);
263         ppc_set_irq(cpu, PPC_INTERRUPT_HVIRT, level);
264         break;
265     default:
266         g_assert_not_reached();
267         return;
268     }
269 }
270 
271 void ppcPOWER9_irq_init(PowerPCCPU *cpu)
272 {
273     qdev_init_gpio_in(DEVICE(cpu), power9_set_irq, POWER9_INPUT_NB);
274 }
275 #endif /* defined(TARGET_PPC64) */
276 
277 void ppc40x_core_reset(PowerPCCPU *cpu)
278 {
279     CPUPPCState *env = &cpu->env;
280     target_ulong dbsr;
281 
282     qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC core\n");
283     cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
284     dbsr = env->spr[SPR_40x_DBSR];
285     dbsr &= ~0x00000300;
286     dbsr |= 0x00000100;
287     env->spr[SPR_40x_DBSR] = dbsr;
288 }
289 
290 void ppc40x_chip_reset(PowerPCCPU *cpu)
291 {
292     CPUPPCState *env = &cpu->env;
293     target_ulong dbsr;
294 
295     qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC chip\n");
296     cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
297     /* XXX: TODO reset all internal peripherals */
298     dbsr = env->spr[SPR_40x_DBSR];
299     dbsr &= ~0x00000300;
300     dbsr |= 0x00000200;
301     env->spr[SPR_40x_DBSR] = dbsr;
302 }
303 
/* Reset the whole 40x system via the generic QEMU reset machinery. */
void ppc40x_system_reset(PowerPCCPU *cpu)
{
    qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC system\n");
    qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
}
309 
310 void store_40x_dbcr0(CPUPPCState *env, uint32_t val)
311 {
312     PowerPCCPU *cpu = env_archcpu(env);
313 
314     qemu_mutex_lock_iothread();
315 
316     switch ((val >> 28) & 0x3) {
317     case 0x0:
318         /* No action */
319         break;
320     case 0x1:
321         /* Core reset */
322         ppc40x_core_reset(cpu);
323         break;
324     case 0x2:
325         /* Chip reset */
326         ppc40x_chip_reset(cpu);
327         break;
328     case 0x3:
329         /* System reset */
330         ppc40x_system_reset(cpu);
331         break;
332     }
333 
334     qemu_mutex_unlock_iothread();
335 }
336 
337 /* PowerPC 40x internal IRQ controller */
338 static void ppc40x_set_irq(void *opaque, int pin, int level)
339 {
340     PowerPCCPU *cpu = opaque;
341     CPUPPCState *env = &cpu->env;
342     int cur_level;
343 
344     trace_ppc_irq_set(env, pin, level);
345 
346     cur_level = (env->irq_input_state >> pin) & 1;
347     /* Don't generate spurious events */
348     if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
349         CPUState *cs = CPU(cpu);
350 
351         switch (pin) {
352         case PPC40x_INPUT_RESET_SYS:
353             if (level) {
354                 trace_ppc_irq_reset("system");
355                 ppc40x_system_reset(cpu);
356             }
357             break;
358         case PPC40x_INPUT_RESET_CHIP:
359             if (level) {
360                 trace_ppc_irq_reset("chip");
361                 ppc40x_chip_reset(cpu);
362             }
363             break;
364         case PPC40x_INPUT_RESET_CORE:
365             /* XXX: TODO: update DBSR[MRR] */
366             if (level) {
367                 trace_ppc_irq_reset("core");
368                 ppc40x_core_reset(cpu);
369             }
370             break;
371         case PPC40x_INPUT_CINT:
372             /* Level sensitive - active high */
373             trace_ppc_irq_set_state("critical IRQ", level);
374             ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
375             break;
376         case PPC40x_INPUT_INT:
377             /* Level sensitive - active high */
378             trace_ppc_irq_set_state("external IRQ", level);
379             ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
380             break;
381         case PPC40x_INPUT_HALT:
382             /* Level sensitive - active low */
383             if (level) {
384                 trace_ppc_irq_cpu("stop");
385                 cs->halted = 1;
386             } else {
387                 trace_ppc_irq_cpu("restart");
388                 cs->halted = 0;
389                 qemu_cpu_kick(cs);
390             }
391             break;
392         case PPC40x_INPUT_DEBUG:
393             /* Level sensitive - active high */
394             trace_ppc_irq_set_state("debug pin", level);
395             ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
396             break;
397         default:
398             g_assert_not_reached();
399         }
400         if (level)
401             env->irq_input_state |= 1 << pin;
402         else
403             env->irq_input_state &= ~(1 << pin);
404     }
405 }
406 
407 void ppc40x_irq_init(PowerPCCPU *cpu)
408 {
409     qdev_init_gpio_in(DEVICE(cpu), ppc40x_set_irq, PPC40x_INPUT_NB);
410 }
411 
412 /* PowerPC E500 internal IRQ controller */
413 static void ppce500_set_irq(void *opaque, int pin, int level)
414 {
415     PowerPCCPU *cpu = opaque;
416     CPUPPCState *env = &cpu->env;
417     int cur_level;
418 
419     trace_ppc_irq_set(env, pin, level);
420 
421     cur_level = (env->irq_input_state >> pin) & 1;
422     /* Don't generate spurious events */
423     if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
424         switch (pin) {
425         case PPCE500_INPUT_MCK:
426             if (level) {
427                 trace_ppc_irq_reset("system");
428                 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
429             }
430             break;
431         case PPCE500_INPUT_RESET_CORE:
432             if (level) {
433                 trace_ppc_irq_reset("core");
434                 ppc_set_irq(cpu, PPC_INTERRUPT_MCK, level);
435             }
436             break;
437         case PPCE500_INPUT_CINT:
438             /* Level sensitive - active high */
439             trace_ppc_irq_set_state("critical IRQ", level);
440             ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
441             break;
442         case PPCE500_INPUT_INT:
443             /* Level sensitive - active high */
444             trace_ppc_irq_set_state("core IRQ", level);
445             ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
446             break;
447         case PPCE500_INPUT_DEBUG:
448             /* Level sensitive - active high */
449             trace_ppc_irq_set_state("debug pin", level);
450             ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
451             break;
452         default:
453             g_assert_not_reached();
454         }
455         if (level)
456             env->irq_input_state |= 1 << pin;
457         else
458             env->irq_input_state &= ~(1 << pin);
459     }
460 }
461 
462 void ppce500_irq_init(PowerPCCPU *cpu)
463 {
464     qdev_init_gpio_in(DEVICE(cpu), ppce500_set_irq, PPCE500_INPUT_NB);
465 }
466 
467 /* Enable or Disable the E500 EPR capability */
468 void ppce500_set_mpic_proxy(bool enabled)
469 {
470     CPUState *cs;
471 
472     CPU_FOREACH(cs) {
473         PowerPCCPU *cpu = POWERPC_CPU(cs);
474 
475         cpu->env.mpic_proxy = enabled;
476         if (kvm_enabled()) {
477             kvmppc_set_mpic_proxy(cpu, enabled);
478         }
479     }
480 }
481 
482 /*****************************************************************************/
483 /* PowerPC time base and decrementer emulation */
484 
485 uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset)
486 {
487     /* TB time in tb periods */
488     return muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND) + tb_offset;
489 }
490 
491 uint64_t cpu_ppc_load_tbl (CPUPPCState *env)
492 {
493     ppc_tb_t *tb_env = env->tb_env;
494     uint64_t tb;
495 
496     if (kvm_enabled()) {
497         return env->spr[SPR_TBL];
498     }
499 
500     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
501     trace_ppc_tb_load(tb);
502 
503     return tb;
504 }
505 
506 static inline uint32_t _cpu_ppc_load_tbu(CPUPPCState *env)
507 {
508     ppc_tb_t *tb_env = env->tb_env;
509     uint64_t tb;
510 
511     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
512     trace_ppc_tb_load(tb);
513 
514     return tb >> 32;
515 }
516 
517 uint32_t cpu_ppc_load_tbu (CPUPPCState *env)
518 {
519     if (kvm_enabled()) {
520         return env->spr[SPR_TBU];
521     }
522 
523     return _cpu_ppc_load_tbu(env);
524 }
525 
526 static inline void cpu_ppc_store_tb(ppc_tb_t *tb_env, uint64_t vmclk,
527                                     int64_t *tb_offsetp, uint64_t value)
528 {
529     *tb_offsetp = value -
530         muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND);
531 
532     trace_ppc_tb_store(value, *tb_offsetp);
533 }
534 
535 void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value)
536 {
537     ppc_tb_t *tb_env = env->tb_env;
538     int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
539     uint64_t tb;
540 
541     tb = cpu_ppc_get_tb(tb_env, clock, tb_env->tb_offset);
542     tb &= 0xFFFFFFFF00000000ULL;
543     cpu_ppc_store_tb(tb_env, clock, &tb_env->tb_offset, tb | (uint64_t)value);
544 }
545 
546 static inline void _cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value)
547 {
548     ppc_tb_t *tb_env = env->tb_env;
549     int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
550     uint64_t tb;
551 
552     tb = cpu_ppc_get_tb(tb_env, clock, tb_env->tb_offset);
553     tb &= 0x00000000FFFFFFFFULL;
554     cpu_ppc_store_tb(tb_env, clock, &tb_env->tb_offset,
555                      ((uint64_t)value << 32) | tb);
556 }
557 
/* Write TBU: replace the high 32 bits of the time base. */
void cpu_ppc_store_tbu (CPUPPCState *env, uint32_t value)
{
    _cpu_ppc_store_tbu(env, value);
}
562 
563 uint64_t cpu_ppc_load_atbl (CPUPPCState *env)
564 {
565     ppc_tb_t *tb_env = env->tb_env;
566     uint64_t tb;
567 
568     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
569     trace_ppc_tb_load(tb);
570 
571     return tb;
572 }
573 
574 uint32_t cpu_ppc_load_atbu (CPUPPCState *env)
575 {
576     ppc_tb_t *tb_env = env->tb_env;
577     uint64_t tb;
578 
579     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
580     trace_ppc_tb_load(tb);
581 
582     return tb >> 32;
583 }
584 
585 void cpu_ppc_store_atbl (CPUPPCState *env, uint32_t value)
586 {
587     ppc_tb_t *tb_env = env->tb_env;
588     int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
589     uint64_t tb;
590 
591     tb = cpu_ppc_get_tb(tb_env, clock, tb_env->atb_offset);
592     tb &= 0xFFFFFFFF00000000ULL;
593     cpu_ppc_store_tb(tb_env, clock, &tb_env->atb_offset, tb | (uint64_t)value);
594 }
595 
596 void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value)
597 {
598     ppc_tb_t *tb_env = env->tb_env;
599     int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
600     uint64_t tb;
601 
602     tb = cpu_ppc_get_tb(tb_env, clock, tb_env->atb_offset);
603     tb &= 0x00000000FFFFFFFFULL;
604     cpu_ppc_store_tb(tb_env, clock, &tb_env->atb_offset,
605                      ((uint64_t)value << 32) | tb);
606 }
607 
/* Read the Virtual Time Base (VTB) via its own offset. */
uint64_t cpu_ppc_load_vtb(CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;

    return cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                          tb_env->vtb_offset);
}
615 
/* Write the Virtual Time Base (VTB) by recomputing its offset. */
void cpu_ppc_store_vtb(CPUPPCState *env, uint64_t value)
{
    ppc_tb_t *tb_env = env->tb_env;

    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                     &tb_env->vtb_offset, value);
}
623 
624 void cpu_ppc_store_tbu40(CPUPPCState *env, uint64_t value)
625 {
626     ppc_tb_t *tb_env = env->tb_env;
627     int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
628     uint64_t tb;
629 
630     tb = cpu_ppc_get_tb(tb_env, clock, tb_env->tb_offset);
631     tb &= 0xFFFFFFUL;
632     tb |= (value & ~0xFFFFFFUL);
633     cpu_ppc_store_tb(tb_env, clock, &tb_env->tb_offset, tb);
634 }
635 
/*
 * Freeze the time base and alternate time base: capture their current
 * values into the offsets, then set tb_freq to 0 so cpu_ppc_get_tb()
 * returns the frozen offset from now on.  tb_freq == 0 is the "frozen"
 * marker tested by cpu_ppc_tb_start().
 */
static void cpu_ppc_tb_stop (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb, atb, vmclk;

    /* If the time base is already frozen, do nothing */
    if (tb_env->tb_freq != 0) {
        vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        /* Get the time base */
        tb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->tb_offset);
        /* Get the alternate time base */
        atb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->atb_offset);
        /* Store the time base value (ie compute the current offset) */
        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
        /* Store the alternate time base value (compute the current offset) */
        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
        /* Set the time base frequency to zero */
        tb_env->tb_freq = 0;
        /* Now, the time bases are frozen to tb_offset / atb_offset value */
    }
}
657 
/*
 * Unfreeze the time bases: while frozen (tb_freq == 0) the offsets hold
 * the absolute TB/ATB values, so restore tb_freq from decr_freq first
 * and then recompute the offsets for the current virtual clock.
 */
static void cpu_ppc_tb_start (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb, atb, vmclk;

    /* If the time base is not frozen, do nothing */
    if (tb_env->tb_freq == 0) {
        vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        /* Get the time base from tb_offset */
        tb = tb_env->tb_offset;
        /* Get the alternate time base from atb_offset */
        atb = tb_env->atb_offset;
        /* Restore the tb frequency from the decrementer frequency */
        tb_env->tb_freq = tb_env->decr_freq;
        /* Store the time base value */
        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
        /* Store the alternate time base value */
        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
    }
}
678 
679 bool ppc_decr_clear_on_delivery(CPUPPCState *env)
680 {
681     ppc_tb_t *tb_env = env->tb_env;
682     int flags = PPC_DECR_UNDERFLOW_TRIGGERED | PPC_DECR_UNDERFLOW_LEVEL;
683     return ((tb_env->flags & flags) == PPC_DECR_UNDERFLOW_TRIGGERED);
684 }
685 
686 static inline int64_t _cpu_ppc_load_decr(CPUPPCState *env, uint64_t next)
687 {
688     ppc_tb_t *tb_env = env->tb_env;
689     int64_t decr, diff;
690 
691     diff = next - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
692     if (diff >= 0) {
693         decr = muldiv64(diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND);
694     } else if (tb_env->flags & PPC_TIMER_BOOKE) {
695         decr = 0;
696     }  else {
697         decr = -muldiv64(-diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND);
698     }
699     trace_ppc_decr_load(decr);
700 
701     return decr;
702 }
703 
/* Read the DECR SPR (OS decrementer). */
target_ulong cpu_ppc_load_decr(CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t decr;

    if (kvm_enabled()) {
        /* KVM handles the decrementer; return the synced SPR copy */
        return env->spr[SPR_DECR];
    }

    decr = _cpu_ppc_load_decr(env, tb_env->decr_next);

    /*
     * If the large decrementer is enabled then the decrementer is sign
     * extended to 64 bits, otherwise it is a 32 bit value.
     */
    if (env->spr[SPR_LPCR] & LPCR_LD) {
        return decr;
    }
    return (uint32_t) decr;
}
724 
725 target_ulong cpu_ppc_load_hdecr(CPUPPCState *env)
726 {
727     PowerPCCPU *cpu = env_archcpu(env);
728     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
729     ppc_tb_t *tb_env = env->tb_env;
730     uint64_t hdecr;
731 
732     hdecr =  _cpu_ppc_load_decr(env, tb_env->hdecr_next);
733 
734     /*
735      * If we have a large decrementer (POWER9 or later) then hdecr is sign
736      * extended to 64 bits, otherwise it is 32 bits.
737      */
738     if (pcc->lrg_decr_bits > 32) {
739         return hdecr;
740     }
741     return (uint32_t) hdecr;
742 }
743 
/* Read the PURR (Processor Utilization of Resources Register), which
 * ticks at the time base frequency with its own offset. */
uint64_t cpu_ppc_load_purr (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;

    return cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                          tb_env->purr_offset);
}
751 
752 /* When decrementer expires,
753  * all we need to do is generate or queue a CPU exception
754  */
/* Raise the decrementer interrupt on @cpu. */
static inline void cpu_ppc_decr_excp(PowerPCCPU *cpu)
{
    /* Raise it */
    trace_ppc_decr_excp("raise");
    ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 1);
}
761 
/* Lower (clear) the pending decrementer interrupt on @cpu. */
static inline void cpu_ppc_decr_lower(PowerPCCPU *cpu)
{
    ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 0);
}
766 
/* Raise the hypervisor decrementer interrupt on @cpu, unless the CPU is
 * in a power-management state where HDEC is discarded. */
static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    /* Raise it */
    trace_ppc_decr_excp("raise HV");

    /* The architecture specifies that we don't deliver HDEC
     * interrupts in a PM state. Not only they don't cause a
     * wakeup but they also get effectively discarded.
     */
    if (!env->resume_as_sreset) {
        ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 1);
    }
}
782 
/* Lower (clear) the pending hypervisor decrementer interrupt on @cpu. */
static inline void cpu_ppc_hdecr_lower(PowerPCCPU *cpu)
{
    ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
}
787 
/*
 * Common DECR/HDECR store path: update the pending interrupt according
 * to the MSB semantics in @flags, then re-arm @timer for the next
 * underflow.
 *
 * @nextp:      deadline slot to update (decr_next or hdecr_next)
 * @raise_excp: callback raising the interrupt (also the timer callback)
 * @lower_excp: callback lowering a level-based pending interrupt
 * @decr:       value being replaced; @value: new value; @nr_bits: width
 *
 * NOTE(review): the leading double underscore is a reserved identifier
 * in ISO C; renaming would touch several callers, so left as-is here.
 */
static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp,
                                 QEMUTimer *timer,
                                 void (*raise_excp)(void *),
                                 void (*lower_excp)(PowerPCCPU *),
                                 uint32_t flags, target_ulong decr,
                                 target_ulong value, int nr_bits)
{
    CPUPPCState *env = &cpu->env;
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t now, next;
    int64_t signed_value;
    int64_t signed_decr;

    /* Truncate value to decr_width and sign extend for simplicity */
    value = extract64(value, 0, nr_bits);
    decr = extract64(decr, 0, nr_bits);
    signed_value = sextract64(value, 0, nr_bits);
    signed_decr = sextract64(decr, 0, nr_bits);

    trace_ppc_decr_store(nr_bits, decr, value);

    if (kvm_enabled()) {
        /* KVM handles decrementer exceptions, we don't need our own timer */
        return;
    }

    /*
     * Going from 1 -> 0 or 0 -> -1 is the event to generate a DEC interrupt.
     *
     * On MSB level based DEC implementations the MSB always means the interrupt
     * is pending, so raise it on those.
     *
     * On MSB edge based DEC implementations the MSB going from 0 -> 1 triggers
     * an edge interrupt, so raise it here too.
     */
    if (((flags & PPC_DECR_UNDERFLOW_LEVEL) && signed_value < 0) ||
        ((flags & PPC_DECR_UNDERFLOW_TRIGGERED) && signed_value < 0
          && signed_decr >= 0)) {
        (*raise_excp)(cpu);
        return;
    }

    /* On MSB level based systems a 0 for the MSB stops interrupt delivery */
    if (signed_value >= 0 && (flags & PPC_DECR_UNDERFLOW_LEVEL)) {
        (*lower_excp)(cpu);
    }

    /* Calculate the next timer event */
    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    next = now + muldiv64(value, NANOSECONDS_PER_SECOND, tb_env->decr_freq);
    *nextp = next;

    /* Adjust timer */
    timer_mod(timer, next);
}
843 
/* Store the OS decrementer: forward to the common path with the DECR
 * timer, its callback as the raise hook, and the env-wide flags. */
static inline void _cpu_ppc_store_decr(PowerPCCPU *cpu, target_ulong decr,
                                       target_ulong value, int nr_bits)
{
    ppc_tb_t *tb_env = cpu->env.tb_env;

    __cpu_ppc_store_decr(cpu, &tb_env->decr_next, tb_env->decr_timer,
                         tb_env->decr_timer->cb, &cpu_ppc_decr_lower,
                         tb_env->flags, decr, value, nr_bits);
}
853 
854 void cpu_ppc_store_decr(CPUPPCState *env, target_ulong value)
855 {
856     PowerPCCPU *cpu = env_archcpu(env);
857     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
858     int nr_bits = 32;
859 
860     if (env->spr[SPR_LPCR] & LPCR_LD) {
861         nr_bits = pcc->lrg_decr_bits;
862     }
863 
864     _cpu_ppc_store_decr(cpu, cpu_ppc_load_decr(env), value, nr_bits);
865 }
866 
/* QEMUTimer callback: the decrementer deadline fired. */
static void cpu_ppc_decr_cb(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    cpu_ppc_decr_excp(cpu);
}
873 
874 static inline void _cpu_ppc_store_hdecr(PowerPCCPU *cpu, target_ulong hdecr,
875                                         target_ulong value, int nr_bits)
876 {
877     ppc_tb_t *tb_env = cpu->env.tb_env;
878 
879     if (tb_env->hdecr_timer != NULL) {
880         /* HDECR (Book3S 64bit) is edge-based, not level like DECR */
881         __cpu_ppc_store_decr(cpu, &tb_env->hdecr_next, tb_env->hdecr_timer,
882                              tb_env->hdecr_timer->cb, &cpu_ppc_hdecr_lower,
883                              PPC_DECR_UNDERFLOW_TRIGGERED,
884                              hdecr, value, nr_bits);
885     }
886 }
887 
/* Write the HDECR SPR; width comes from the CPU class' lrg_decr_bits. */
void cpu_ppc_store_hdecr(CPUPPCState *env, target_ulong value)
{
    PowerPCCPU *cpu = env_archcpu(env);
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    _cpu_ppc_store_hdecr(cpu, cpu_ppc_load_hdecr(env), value,
                         pcc->lrg_decr_bits);
}
896 
/* QEMUTimer callback: the hypervisor decrementer deadline fired. */
static void cpu_ppc_hdecr_cb(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    cpu_ppc_hdecr_excp(cpu);
}
903 
/* Write the PURR by recomputing its offset against the virtual clock. */
void cpu_ppc_store_purr(CPUPPCState *env, uint64_t value)
{
    ppc_tb_t *tb_env = env->tb_env;

    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                     &tb_env->purr_offset, value);
}
911 
/*
 * Clock-change callback: set the TB and decrementer frequency to @freq,
 * then restart DECR/HDECR from all-ones and clear PURR.
 */
static void cpu_ppc_set_tb_clk (void *opaque, uint32_t freq)
{
    CPUPPCState *env = opaque;
    PowerPCCPU *cpu = env_archcpu(env);
    ppc_tb_t *tb_env = env->tb_env;

    tb_env->tb_freq = freq;
    tb_env->decr_freq = freq;
    /* There is a bug in Linux 2.4 kernels:
     * if a decrementer exception is pending when it enables msr_ee at startup,
     * it's not ready to handle it...
     */
    _cpu_ppc_store_decr(cpu, 0xFFFFFFFF, 0xFFFFFFFF, 32);
    _cpu_ppc_store_hdecr(cpu, 0xFFFFFFFF, 0xFFFFFFFF, 32);
    cpu_ppc_store_purr(env, 0x0000000000000000ULL);
}
928 
/*
 * Capture the guest time base for migration/savevm: record the host-tick
 * anchored TB value and whether the guest was paused when saved.
 */
static void timebase_save(PPCTimebase *tb)
{
    uint64_t ticks = cpu_get_host_ticks();
    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);

    if (!first_ppc_cpu->env.tb_env) {
        error_report("No timebase object");
        return;
    }

    /* not used anymore, we keep it for compatibility */
    tb->time_of_the_day_ns = qemu_clock_get_ns(QEMU_CLOCK_HOST);
    /*
     * tb_offset is only expected to be changed by QEMU so
     * there is no need to update it from KVM here
     */
    tb->guest_timebase = ticks + first_ppc_cpu->env.tb_env->tb_offset;

    tb->runstate_paused =
        runstate_check(RUN_STATE_PAUSED) || runstate_check(RUN_STATE_SAVE_VM);
}
950 
/*
 * On VM resume, recompute the timebase offset so the guest timebase
 * continues from the saved value (i.e. it did not advance while the VM
 * was stopped): new offset = saved guest timebase - current host ticks.
 * The new offset is applied to every vCPU and pushed to KVM.
 */
static void timebase_load(PPCTimebase *tb)
{
    CPUState *cpu;
    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
    int64_t tb_off_adj, tb_off;
    unsigned long freq;

    if (!first_ppc_cpu->env.tb_env) {
        error_report("No timebase object");
        return;
    }

    freq = first_ppc_cpu->env.tb_env->tb_freq;

    tb_off_adj = tb->guest_timebase - cpu_get_host_ticks();

    tb_off = first_ppc_cpu->env.tb_env->tb_offset;
    /* Trace old/new offset, the delta in ticks and the delta in seconds */
    trace_ppc_tb_adjust(tb_off, tb_off_adj, tb_off_adj - tb_off,
                        (tb_off_adj - tb_off) / freq);

    /* Set new offset to all CPUs */
    CPU_FOREACH(cpu) {
        PowerPCCPU *pcpu = POWERPC_CPU(cpu);
        pcpu->env.tb_env->tb_offset = tb_off_adj;
        kvmppc_set_reg_tb_offset(pcpu, pcpu->env.tb_env->tb_offset);
    }
}
978 
979 void cpu_ppc_clock_vm_state_change(void *opaque, bool running,
980                                    RunState state)
981 {
982     PPCTimebase *tb = opaque;
983 
984     if (running) {
985         timebase_load(tb);
986     } else {
987         timebase_save(tb);
988     }
989 }
990 
991 /*
992  * When migrating a running guest, read the clock just
993  * before migration, so that the guest clock counts
994  * during the events between:
995  *
 *  * vm_stop()
 *  * pre_save()
999  *
1000  *  This reduces clock difference on migration from 5s
1001  *  to 0.1s (when max_downtime == 5s), because sending the
1002  *  final pages of memory (which happens between vm_stop()
1003  *  and pre_save()) takes max_downtime.
1004  */
1005 static int timebase_pre_save(void *opaque)
1006 {
1007     PPCTimebase *tb = opaque;
1008 
1009     /* guest_timebase won't be overridden in case of paused guest or savevm */
1010     if (!tb->runstate_paused) {
1011         timebase_save(tb);
1012     }
1013 
1014     return 0;
1015 }
1016 
/*
 * Migration state for the guest timebase.  time_of_the_day_ns is kept
 * only for stream compatibility (see timebase_save()); guest_timebase
 * is what timebase_load() consumes on the destination.
 */
const VMStateDescription vmstate_ppc_timebase = {
    .name = "timebase",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = timebase_pre_save,
    .fields      = (VMStateField []) {
        VMSTATE_UINT64(guest_timebase, PPCTimebase),
        VMSTATE_INT64(time_of_the_day_ns, PPCTimebase),
        VMSTATE_END_OF_LIST()
    },
};
1028 
/*
 * Set up (once) the per-CPU timebase state, with frequency freq in Hz.
 *
 * Allocates env->tb_env, selects triggered vs level DEC underflow
 * semantics, creates the DEC timer and — when the CPU has a hypervisor
 * mode and no virtual hypervisor — the HDEC timer, then programs the
 * initial frequency.  Returns the callback to use for later frequency
 * changes.
 */
clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq)
{
    PowerPCCPU *cpu = env_archcpu(env);
    ppc_tb_t *tb_env;

    tb_env = g_new0(ppc_tb_t, 1);
    env->tb_env = tb_env;
    tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
    if (is_book3s_arch2x(env)) {
        /* All Book3S 64bit CPUs implement level based DEC logic */
        tb_env->flags |= PPC_DECR_UNDERFLOW_LEVEL;
    }
    /* Create new timer */
    tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_decr_cb, cpu);
    if (env->has_hv_mode && !cpu->vhyp) {
        tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_hdecr_cb,
                                                cpu);
    } else {
        /* No HV mode, or a vhyp is in charge: no hypervisor decrementer */
        tb_env->hdecr_timer = NULL;
    }
    cpu_ppc_set_tb_clk(env, freq);

    return &cpu_ppc_set_tb_clk;
}
1054 
1055 void cpu_ppc_tb_free(CPUPPCState *env)
1056 {
1057     timer_free(env->tb_env->decr_timer);
1058     timer_free(env->tb_env->hdecr_timer);
1059     g_free(env->tb_env);
1060 }
1061 
/* cpu_ppc_hdecr_init may be used if the timer is not used by HDEC emulation */
void cpu_ppc_hdecr_init(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);

    /* An HDEC timer must not already exist (e.g. from cpu_ppc_tb_init()) */
    assert(env->tb_env->hdecr_timer == NULL);

    env->tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                            &cpu_ppc_hdecr_cb, cpu);
}
1072 
/*
 * Tear down the HDEC timer and lower any hypervisor decrementer
 * interrupt it may have left asserted.
 */
void cpu_ppc_hdecr_exit(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);

    timer_free(env->tb_env->hdecr_timer);
    env->tb_env->hdecr_timer = NULL;

    cpu_ppc_hdecr_lower(cpu);
}
1082 
1083 /*****************************************************************************/
1084 /* PowerPC 40x timers */
1085 
/* PIT, FIT & WDT */
/* Per-CPU state of the PowerPC 40x timer block (see ppc_40x_timers_init). */
typedef struct ppc40x_timer_t ppc40x_timer_t;
struct ppc40x_timer_t {
    uint64_t pit_reload;  /* PIT auto-reload value        */
    uint64_t fit_next;    /* Tick for next FIT interrupt  */
    QEMUTimer *fit_timer;
    uint64_t wdt_next;    /* Tick for next WDT interrupt  */
    QEMUTimer *wdt_timer;

    /* 405 have the PIT, 440 have a DECR.  */
    unsigned int decr_excp;  /* interrupt raised when the PIT/DECR expires */
};
1098 
1099 /* Fixed interval timer */
1100 static void cpu_4xx_fit_cb (void *opaque)
1101 {
1102     PowerPCCPU *cpu = opaque;
1103     CPUPPCState *env = &cpu->env;
1104     ppc_tb_t *tb_env;
1105     ppc40x_timer_t *ppc40x_timer;
1106     uint64_t now, next;
1107 
1108     tb_env = env->tb_env;
1109     ppc40x_timer = tb_env->opaque;
1110     now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1111     switch ((env->spr[SPR_40x_TCR] >> 24) & 0x3) {
1112     case 0:
1113         next = 1 << 9;
1114         break;
1115     case 1:
1116         next = 1 << 13;
1117         break;
1118     case 2:
1119         next = 1 << 17;
1120         break;
1121     case 3:
1122         next = 1 << 21;
1123         break;
1124     default:
1125         /* Cannot occur, but makes gcc happy */
1126         return;
1127     }
1128     next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->tb_freq);
1129     if (next == now)
1130         next++;
1131     timer_mod(ppc40x_timer->fit_timer, next);
1132     env->spr[SPR_40x_TSR] |= 1 << 26;
1133     if ((env->spr[SPR_40x_TCR] >> 23) & 0x1) {
1134         ppc_set_irq(cpu, PPC_INTERRUPT_FIT, 1);
1135     }
1136     trace_ppc4xx_fit((int)((env->spr[SPR_40x_TCR] >> 23) & 0x1),
1137                          env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
1138 }
1139 
/* Programmable interval timer */
/*
 * (Re)arm or stop the PIT, which is emulated with tb_env->decr_timer.
 * The PIT is stopped when the reload value is <= 1, when TCR bit 26 is
 * clear, or — on re-arm after an expiry (is_excp) — when the
 * auto-reload TCR bit 22 is clear.
 */
static void start_stop_pit (CPUPPCState *env, ppc_tb_t *tb_env, int is_excp)
{
    ppc40x_timer_t *ppc40x_timer;
    uint64_t now, next;

    ppc40x_timer = tb_env->opaque;
    if (ppc40x_timer->pit_reload <= 1 ||
        !((env->spr[SPR_40x_TCR] >> 26) & 0x1) ||
        (is_excp && !((env->spr[SPR_40x_TCR] >> 22) & 0x1))) {
        /* Stop PIT */
        trace_ppc4xx_pit_stop();
        timer_del(tb_env->decr_timer);
    } else {
        trace_ppc4xx_pit_start(ppc40x_timer->pit_reload);
        now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        /* Deadline = now + reload ticks converted to ns */
        next = now + muldiv64(ppc40x_timer->pit_reload,
                              NANOSECONDS_PER_SECOND, tb_env->decr_freq);
        if (is_excp)
            /* Auto-reload: anchor on the previous deadline, not on now */
            next += tb_env->decr_next - now;
        if (next == now)
            next++;
        timer_mod(tb_env->decr_timer, next);
        tb_env->decr_next = next;
    }
}
1166 
/*
 * QEMUTimer callback for the 40x PIT: set TSR bit 27, raise the
 * configured interrupt (decr_excp) when TCR bit 26 is set, and let
 * start_stop_pit() decide whether to auto-reload.
 */
static void cpu_4xx_pit_cb (void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    ppc_tb_t *tb_env;
    ppc40x_timer_t *ppc40x_timer;

    tb_env = env->tb_env;
    ppc40x_timer = tb_env->opaque;
    env->spr[SPR_40x_TSR] |= 1 << 27;
    if ((env->spr[SPR_40x_TCR] >> 26) & 0x1) {
        ppc_set_irq(cpu, ppc40x_timer->decr_excp, 1);
    }
    /* is_excp = 1: re-arm only if auto-reload is enabled */
    start_stop_pit(env, tb_env, 1);
    trace_ppc4xx_pit((int)((env->spr[SPR_40x_TCR] >> 22) & 0x1),
           (int)((env->spr[SPR_40x_TCR] >> 26) & 0x1),
           env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR],
           ppc40x_timer->pit_reload);
}
1186 
/* Watchdog timer */
/*
 * QEMUTimer callback for the 40x watchdog.
 *
 * The period is selected by two TCR bits (shift 30): 2^17..2^29 ticks.
 * Behaviour then depends on the two WDT status bits in TSR (shift 30):
 *   0/1: re-arm and set TSR bit 31;
 *   2:   re-arm, set TSR bit 30 and raise the WDT interrupt when TCR
 *        bit 27 is set;
 *   3:   copy the two TCR bits into TSR and perform the reset action
 *        selected by TCR bits at shift 28 (none/core/chip/system).
 */
static void cpu_4xx_wdt_cb (void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    ppc_tb_t *tb_env;
    ppc40x_timer_t *ppc40x_timer;
    uint64_t now, next;

    tb_env = env->tb_env;
    ppc40x_timer = tb_env->opaque;
    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    /* Number of timebase ticks until the next expiry */
    switch ((env->spr[SPR_40x_TCR] >> 30) & 0x3) {
    case 0:
        next = 1 << 17;
        break;
    case 1:
        next = 1 << 21;
        break;
    case 2:
        next = 1 << 25;
        break;
    case 3:
        next = 1 << 29;
        break;
    default:
        /* Cannot occur, but makes gcc happy */
        return;
    }
    /* Convert ticks to a QEMU_CLOCK_VIRTUAL deadline in ns */
    next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->decr_freq);
    if (next == now)
        next++;
    trace_ppc4xx_wdt(env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
    switch ((env->spr[SPR_40x_TSR] >> 30) & 0x3) {
    case 0x0:
    case 0x1:
        timer_mod(ppc40x_timer->wdt_timer, next);
        ppc40x_timer->wdt_next = next;
        env->spr[SPR_40x_TSR] |= 1U << 31;
        break;
    case 0x2:
        timer_mod(ppc40x_timer->wdt_timer, next);
        ppc40x_timer->wdt_next = next;
        env->spr[SPR_40x_TSR] |= 1 << 30;
        if ((env->spr[SPR_40x_TCR] >> 27) & 0x1) {
            ppc_set_irq(cpu, PPC_INTERRUPT_WDT, 1);
        }
        break;
    case 0x3:
        env->spr[SPR_40x_TSR] &= ~0x30000000;
        env->spr[SPR_40x_TSR] |= env->spr[SPR_40x_TCR] & 0x30000000;
        switch ((env->spr[SPR_40x_TCR] >> 28) & 0x3) {
        case 0x0:
            /* No reset */
            break;
        case 0x1: /* Core reset */
            ppc40x_core_reset(cpu);
            break;
        case 0x2: /* Chip reset */
            ppc40x_chip_reset(cpu);
            break;
        case 0x3: /* System reset */
            ppc40x_system_reset(cpu);
            break;
        }
    }
}
1254 
1255 void store_40x_pit (CPUPPCState *env, target_ulong val)
1256 {
1257     ppc_tb_t *tb_env;
1258     ppc40x_timer_t *ppc40x_timer;
1259 
1260     tb_env = env->tb_env;
1261     ppc40x_timer = tb_env->opaque;
1262     trace_ppc40x_store_pit(val);
1263     ppc40x_timer->pit_reload = val;
1264     start_stop_pit(env, tb_env, 0);
1265 }
1266 
/*
 * Read the current 40x PIT count.  The PIT shares the decrementer
 * machinery (see ppc_40x_timers_init), so this is just a DECR read.
 */
target_ulong load_40x_pit (CPUPPCState *env)
{
    return cpu_ppc_load_decr(env);
}
1271 
1272 void store_40x_tsr(CPUPPCState *env, target_ulong val)
1273 {
1274     PowerPCCPU *cpu = env_archcpu(env);
1275 
1276     trace_ppc40x_store_tcr(val);
1277 
1278     env->spr[SPR_40x_TSR] &= ~(val & 0xFC000000);
1279     if (val & 0x80000000) {
1280         ppc_set_irq(cpu, PPC_INTERRUPT_PIT, 0);
1281     }
1282 }
1283 
1284 void store_40x_tcr(CPUPPCState *env, target_ulong val)
1285 {
1286     PowerPCCPU *cpu = env_archcpu(env);
1287     ppc_tb_t *tb_env;
1288 
1289     trace_ppc40x_store_tsr(val);
1290 
1291     tb_env = env->tb_env;
1292     env->spr[SPR_40x_TCR] = val & 0xFFC00000;
1293     start_stop_pit(env, tb_env, 1);
1294     cpu_4xx_wdt_cb(cpu);
1295 }
1296 
/*
 * clk_setup_cb for ppc_40x_timers_init(): change the timebase and
 * decrementer frequency.
 */
static void ppc_40x_set_tb_clk (void *opaque, uint32_t freq)
{
    CPUPPCState *env = opaque;
    ppc_tb_t *tb_env = env->tb_env;

    trace_ppc40x_set_tb_clk(freq);
    tb_env->tb_freq = freq;
    tb_env->decr_freq = freq;
    /* XXX: we should also update all timers */
}
1307 
/*
 * Set up the PowerPC 40x timer block (PIT, FIT, WDT) with timebase
 * frequency freq (Hz).  decr_excp is the interrupt raised when the PIT
 * expires (405 has a PIT, 440 a DECR — see ppc40x_timer_t).  Returns
 * the callback used to change the timebase frequency later.
 */
clk_setup_cb ppc_40x_timers_init (CPUPPCState *env, uint32_t freq,
                                  unsigned int decr_excp)
{
    ppc_tb_t *tb_env;
    ppc40x_timer_t *ppc40x_timer;
    PowerPCCPU *cpu = env_archcpu(env);

    trace_ppc40x_timers_init(freq);

    tb_env = g_new0(ppc_tb_t, 1);
    ppc40x_timer = g_new0(ppc40x_timer_t, 1);

    env->tb_env = tb_env;
    tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
    tb_env->tb_freq = freq;
    tb_env->decr_freq = freq;
    tb_env->opaque = ppc40x_timer;

    /* We use decr timer for PIT */
    tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_pit_cb, cpu);
    ppc40x_timer->fit_timer =
        timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_fit_cb, cpu);
    ppc40x_timer->wdt_timer =
        timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_wdt_cb, cpu);
    ppc40x_timer->decr_excp = decr_excp;

    return &ppc_40x_set_tb_clk;
}
1336 
1337 /*****************************************************************************/
1338 /* Embedded PowerPC Device Control Registers */
/* One Device Control Register: access callbacks plus the device state
 * handed back to them. */
typedef struct ppc_dcrn_t ppc_dcrn_t;
struct ppc_dcrn_t {
    dcr_read_cb dcr_read;    /* NULL when the DCR is not readable */
    dcr_write_cb dcr_write;  /* NULL when the DCR is not writable */
    void *opaque;            /* passed as first argument to the callbacks */
};
1345 
/* XXX: on 460, DCR addresses are 32 bits wide,
 *      using DCRIPR to get the 22 upper bits of the DCR address
 */
#define DCRN_NB 1024
/* Per-CPU DCR map plus hooks invoked on access to an unmapped DCR. */
struct ppc_dcr_t {
    ppc_dcrn_t dcrn[DCRN_NB];
    int (*read_error)(int dcrn);   /* called on failed reads, may be NULL */
    int (*write_error)(int dcrn);  /* called on failed writes, may be NULL */
};
1355 
1356 int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)
1357 {
1358     ppc_dcrn_t *dcr;
1359 
1360     if (dcrn < 0 || dcrn >= DCRN_NB)
1361         goto error;
1362     dcr = &dcr_env->dcrn[dcrn];
1363     if (dcr->dcr_read == NULL)
1364         goto error;
1365     *valp = (*dcr->dcr_read)(dcr->opaque, dcrn);
1366     trace_ppc_dcr_read(dcrn, *valp);
1367 
1368     return 0;
1369 
1370  error:
1371     if (dcr_env->read_error != NULL)
1372         return (*dcr_env->read_error)(dcrn);
1373 
1374     return -1;
1375 }
1376 
1377 int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
1378 {
1379     ppc_dcrn_t *dcr;
1380 
1381     if (dcrn < 0 || dcrn >= DCRN_NB)
1382         goto error;
1383     dcr = &dcr_env->dcrn[dcrn];
1384     if (dcr->dcr_write == NULL)
1385         goto error;
1386     trace_ppc_dcr_write(dcrn, val);
1387     (*dcr->dcr_write)(dcr->opaque, dcrn, val);
1388 
1389     return 0;
1390 
1391  error:
1392     if (dcr_env->write_error != NULL)
1393         return (*dcr_env->write_error)(dcrn);
1394 
1395     return -1;
1396 }
1397 
1398 int ppc_dcr_register (CPUPPCState *env, int dcrn, void *opaque,
1399                       dcr_read_cb dcr_read, dcr_write_cb dcr_write)
1400 {
1401     ppc_dcr_t *dcr_env;
1402     ppc_dcrn_t *dcr;
1403 
1404     dcr_env = env->dcr_env;
1405     if (dcr_env == NULL)
1406         return -1;
1407     if (dcrn < 0 || dcrn >= DCRN_NB)
1408         return -1;
1409     dcr = &dcr_env->dcrn[dcrn];
1410     if (dcr->opaque != NULL ||
1411         dcr->dcr_read != NULL ||
1412         dcr->dcr_write != NULL)
1413         return -1;
1414     dcr->opaque = opaque;
1415     dcr->dcr_read = dcr_read;
1416     dcr->dcr_write = dcr_write;
1417 
1418     return 0;
1419 }
1420 
1421 int ppc_dcr_init (CPUPPCState *env, int (*read_error)(int dcrn),
1422                   int (*write_error)(int dcrn))
1423 {
1424     ppc_dcr_t *dcr_env;
1425 
1426     dcr_env = g_new0(ppc_dcr_t, 1);
1427     dcr_env->read_error = read_error;
1428     dcr_env->write_error = write_error;
1429     env->dcr_env = dcr_env;
1430 
1431     return 0;
1432 }
1433 
1434 /*****************************************************************************/
1435 
1436 int ppc_cpu_pir(PowerPCCPU *cpu)
1437 {
1438     CPUPPCState *env = &cpu->env;
1439     return env->spr_cb[SPR_PIR].default_value;
1440 }
1441 
1442 int ppc_cpu_tir(PowerPCCPU *cpu)
1443 {
1444     CPUPPCState *env = &cpu->env;
1445     return env->spr_cb[SPR_TIR].default_value;
1446 }
1447 
1448 PowerPCCPU *ppc_get_vcpu_by_pir(int pir)
1449 {
1450     CPUState *cs;
1451 
1452     CPU_FOREACH(cs) {
1453         PowerPCCPU *cpu = POWERPC_CPU(cs);
1454 
1455         if (ppc_cpu_pir(cpu) == pir) {
1456             return cpu;
1457         }
1458     }
1459 
1460     return NULL;
1461 }
1462 
/*
 * Reset the recorded IRQ input pin state and tell KVM to deassert the
 * external interrupt.
 */
void ppc_irq_reset(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    env->irq_input_state = 0;
    kvmppc_set_interrupt(cpu, PPC_INTERRUPT_EXT, 0);
}
1470