xref: /qemu/hw/ppc/ppc.c (revision 6402cbbb)
1 /*
2  * QEMU generic PowerPC hardware System Emulator
3  *
4  * Copyright (c) 2003-2007 Jocelyn Mayer
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 #include "qemu/osdep.h"
25 #include "qemu-common.h"
26 #include "cpu.h"
27 #include "hw/hw.h"
28 #include "hw/ppc/ppc.h"
29 #include "hw/ppc/ppc_e500.h"
30 #include "qemu/timer.h"
31 #include "sysemu/sysemu.h"
32 #include "sysemu/cpus.h"
33 #include "hw/timer/m48t59.h"
34 #include "qemu/log.h"
35 #include "qemu/error-report.h"
36 #include "qapi/error.h"
37 #include "hw/loader.h"
38 #include "sysemu/kvm.h"
39 #include "kvm_ppc.h"
40 #include "trace.h"
41 
42 //#define PPC_DEBUG_IRQ
43 //#define PPC_DEBUG_TB
44 
45 #ifdef PPC_DEBUG_IRQ
46 #  define LOG_IRQ(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__)
47 #else
48 #  define LOG_IRQ(...) do { } while (0)
49 #endif
50 
51 
52 #ifdef PPC_DEBUG_TB
53 #  define LOG_TB(...) qemu_log(__VA_ARGS__)
54 #else
55 #  define LOG_TB(...) do { } while (0)
56 #endif
57 
58 static void cpu_ppc_tb_stop (CPUPPCState *env);
59 static void cpu_ppc_tb_start (CPUPPCState *env);
60 
61 void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level)
62 {
63     CPUState *cs = CPU(cpu);
64     CPUPPCState *env = &cpu->env;
65     unsigned int old_pending;
66     bool locked = false;
67 
68     /* We may already have the BQL if coming from the reset path */
69     if (!qemu_mutex_iothread_locked()) {
70         locked = true;
71         qemu_mutex_lock_iothread();
72     }
73 
74     old_pending = env->pending_interrupts;
75 
76     if (level) {
77         env->pending_interrupts |= 1 << n_IRQ;
78         cpu_interrupt(cs, CPU_INTERRUPT_HARD);
79     } else {
80         env->pending_interrupts &= ~(1 << n_IRQ);
81         if (env->pending_interrupts == 0) {
82             cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
83         }
84     }
85 
86     if (old_pending != env->pending_interrupts) {
87 #ifdef CONFIG_KVM
88         kvmppc_set_interrupt(cpu, n_IRQ, level);
89 #endif
90     }
91 
92 
93     LOG_IRQ("%s: %p n_IRQ %d level %d => pending %08" PRIx32
94                 "req %08x\n", __func__, env, n_IRQ, level,
95                 env->pending_interrupts, CPU(cpu)->interrupt_request);
96 
97     if (locked) {
98         qemu_mutex_unlock_iothread();
99     }
100 }
101 
102 /* PowerPC 6xx / 7xx internal IRQ controller */
103 static void ppc6xx_set_irq(void *opaque, int pin, int level)
104 {
105     PowerPCCPU *cpu = opaque;
106     CPUPPCState *env = &cpu->env;
107     int cur_level;
108 
109     LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
110                 env, pin, level);
111     cur_level = (env->irq_input_state >> pin) & 1;
112     /* Don't generate spurious events */
113     if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
114         CPUState *cs = CPU(cpu);
115 
116         switch (pin) {
117         case PPC6xx_INPUT_TBEN:
118             /* Level sensitive - active high */
119             LOG_IRQ("%s: %s the time base\n",
120                         __func__, level ? "start" : "stop");
121             if (level) {
122                 cpu_ppc_tb_start(env);
123             } else {
124                 cpu_ppc_tb_stop(env);
125             }
126         case PPC6xx_INPUT_INT:
127             /* Level sensitive - active high */
128             LOG_IRQ("%s: set the external IRQ state to %d\n",
129                         __func__, level);
130             ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
131             break;
132         case PPC6xx_INPUT_SMI:
133             /* Level sensitive - active high */
134             LOG_IRQ("%s: set the SMI IRQ state to %d\n",
135                         __func__, level);
136             ppc_set_irq(cpu, PPC_INTERRUPT_SMI, level);
137             break;
138         case PPC6xx_INPUT_MCP:
139             /* Negative edge sensitive */
140             /* XXX: TODO: actual reaction may depends on HID0 status
141              *            603/604/740/750: check HID0[EMCP]
142              */
143             if (cur_level == 1 && level == 0) {
144                 LOG_IRQ("%s: raise machine check state\n",
145                             __func__);
146                 ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
147             }
148             break;
149         case PPC6xx_INPUT_CKSTP_IN:
150             /* Level sensitive - active low */
151             /* XXX: TODO: relay the signal to CKSTP_OUT pin */
152             /* XXX: Note that the only way to restart the CPU is to reset it */
153             if (level) {
154                 LOG_IRQ("%s: stop the CPU\n", __func__);
155                 cs->halted = 1;
156             }
157             break;
158         case PPC6xx_INPUT_HRESET:
159             /* Level sensitive - active low */
160             if (level) {
161                 LOG_IRQ("%s: reset the CPU\n", __func__);
162                 cpu_interrupt(cs, CPU_INTERRUPT_RESET);
163             }
164             break;
165         case PPC6xx_INPUT_SRESET:
166             LOG_IRQ("%s: set the RESET IRQ state to %d\n",
167                         __func__, level);
168             ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
169             break;
170         default:
171             /* Unknown pin - do nothing */
172             LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
173             return;
174         }
175         if (level)
176             env->irq_input_state |= 1 << pin;
177         else
178             env->irq_input_state &= ~(1 << pin);
179     }
180 }
181 
182 void ppc6xx_irq_init(PowerPCCPU *cpu)
183 {
184     CPUPPCState *env = &cpu->env;
185 
186     env->irq_inputs = (void **)qemu_allocate_irqs(&ppc6xx_set_irq, cpu,
187                                                   PPC6xx_INPUT_NB);
188 }
189 
190 #if defined(TARGET_PPC64)
191 /* PowerPC 970 internal IRQ controller */
192 static void ppc970_set_irq(void *opaque, int pin, int level)
193 {
194     PowerPCCPU *cpu = opaque;
195     CPUPPCState *env = &cpu->env;
196     int cur_level;
197 
198     LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
199                 env, pin, level);
200     cur_level = (env->irq_input_state >> pin) & 1;
201     /* Don't generate spurious events */
202     if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
203         CPUState *cs = CPU(cpu);
204 
205         switch (pin) {
206         case PPC970_INPUT_INT:
207             /* Level sensitive - active high */
208             LOG_IRQ("%s: set the external IRQ state to %d\n",
209                         __func__, level);
210             ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
211             break;
212         case PPC970_INPUT_THINT:
213             /* Level sensitive - active high */
214             LOG_IRQ("%s: set the SMI IRQ state to %d\n", __func__,
215                         level);
216             ppc_set_irq(cpu, PPC_INTERRUPT_THERM, level);
217             break;
218         case PPC970_INPUT_MCP:
219             /* Negative edge sensitive */
220             /* XXX: TODO: actual reaction may depends on HID0 status
221              *            603/604/740/750: check HID0[EMCP]
222              */
223             if (cur_level == 1 && level == 0) {
224                 LOG_IRQ("%s: raise machine check state\n",
225                             __func__);
226                 ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
227             }
228             break;
229         case PPC970_INPUT_CKSTP:
230             /* Level sensitive - active low */
231             /* XXX: TODO: relay the signal to CKSTP_OUT pin */
232             if (level) {
233                 LOG_IRQ("%s: stop the CPU\n", __func__);
234                 cs->halted = 1;
235             } else {
236                 LOG_IRQ("%s: restart the CPU\n", __func__);
237                 cs->halted = 0;
238                 qemu_cpu_kick(cs);
239             }
240             break;
241         case PPC970_INPUT_HRESET:
242             /* Level sensitive - active low */
243             if (level) {
244                 cpu_interrupt(cs, CPU_INTERRUPT_RESET);
245             }
246             break;
247         case PPC970_INPUT_SRESET:
248             LOG_IRQ("%s: set the RESET IRQ state to %d\n",
249                         __func__, level);
250             ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
251             break;
252         case PPC970_INPUT_TBEN:
253             LOG_IRQ("%s: set the TBEN state to %d\n", __func__,
254                         level);
255             /* XXX: TODO */
256             break;
257         default:
258             /* Unknown pin - do nothing */
259             LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
260             return;
261         }
262         if (level)
263             env->irq_input_state |= 1 << pin;
264         else
265             env->irq_input_state &= ~(1 << pin);
266     }
267 }
268 
269 void ppc970_irq_init(PowerPCCPU *cpu)
270 {
271     CPUPPCState *env = &cpu->env;
272 
273     env->irq_inputs = (void **)qemu_allocate_irqs(&ppc970_set_irq, cpu,
274                                                   PPC970_INPUT_NB);
275 }
276 
277 /* POWER7 internal IRQ controller */
278 static void power7_set_irq(void *opaque, int pin, int level)
279 {
280     PowerPCCPU *cpu = opaque;
281     CPUPPCState *env = &cpu->env;
282 
283     LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
284                 env, pin, level);
285 
286     switch (pin) {
287     case POWER7_INPUT_INT:
288         /* Level sensitive - active high */
289         LOG_IRQ("%s: set the external IRQ state to %d\n",
290                 __func__, level);
291         ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
292         break;
293     default:
294         /* Unknown pin - do nothing */
295         LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
296         return;
297     }
298     if (level) {
299         env->irq_input_state |= 1 << pin;
300     } else {
301         env->irq_input_state &= ~(1 << pin);
302     }
303 }
304 
305 void ppcPOWER7_irq_init(PowerPCCPU *cpu)
306 {
307     CPUPPCState *env = &cpu->env;
308 
309     env->irq_inputs = (void **)qemu_allocate_irqs(&power7_set_irq, cpu,
310                                                   POWER7_INPUT_NB);
311 }
312 #endif /* defined(TARGET_PPC64) */
313 
314 /* PowerPC 40x internal IRQ controller */
315 static void ppc40x_set_irq(void *opaque, int pin, int level)
316 {
317     PowerPCCPU *cpu = opaque;
318     CPUPPCState *env = &cpu->env;
319     int cur_level;
320 
321     LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
322                 env, pin, level);
323     cur_level = (env->irq_input_state >> pin) & 1;
324     /* Don't generate spurious events */
325     if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
326         CPUState *cs = CPU(cpu);
327 
328         switch (pin) {
329         case PPC40x_INPUT_RESET_SYS:
330             if (level) {
331                 LOG_IRQ("%s: reset the PowerPC system\n",
332                             __func__);
333                 ppc40x_system_reset(cpu);
334             }
335             break;
336         case PPC40x_INPUT_RESET_CHIP:
337             if (level) {
338                 LOG_IRQ("%s: reset the PowerPC chip\n", __func__);
339                 ppc40x_chip_reset(cpu);
340             }
341             break;
342         case PPC40x_INPUT_RESET_CORE:
343             /* XXX: TODO: update DBSR[MRR] */
344             if (level) {
345                 LOG_IRQ("%s: reset the PowerPC core\n", __func__);
346                 ppc40x_core_reset(cpu);
347             }
348             break;
349         case PPC40x_INPUT_CINT:
350             /* Level sensitive - active high */
351             LOG_IRQ("%s: set the critical IRQ state to %d\n",
352                         __func__, level);
353             ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
354             break;
355         case PPC40x_INPUT_INT:
356             /* Level sensitive - active high */
357             LOG_IRQ("%s: set the external IRQ state to %d\n",
358                         __func__, level);
359             ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
360             break;
361         case PPC40x_INPUT_HALT:
362             /* Level sensitive - active low */
363             if (level) {
364                 LOG_IRQ("%s: stop the CPU\n", __func__);
365                 cs->halted = 1;
366             } else {
367                 LOG_IRQ("%s: restart the CPU\n", __func__);
368                 cs->halted = 0;
369                 qemu_cpu_kick(cs);
370             }
371             break;
372         case PPC40x_INPUT_DEBUG:
373             /* Level sensitive - active high */
374             LOG_IRQ("%s: set the debug pin state to %d\n",
375                         __func__, level);
376             ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
377             break;
378         default:
379             /* Unknown pin - do nothing */
380             LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
381             return;
382         }
383         if (level)
384             env->irq_input_state |= 1 << pin;
385         else
386             env->irq_input_state &= ~(1 << pin);
387     }
388 }
389 
390 void ppc40x_irq_init(PowerPCCPU *cpu)
391 {
392     CPUPPCState *env = &cpu->env;
393 
394     env->irq_inputs = (void **)qemu_allocate_irqs(&ppc40x_set_irq,
395                                                   cpu, PPC40x_INPUT_NB);
396 }
397 
398 /* PowerPC E500 internal IRQ controller */
399 static void ppce500_set_irq(void *opaque, int pin, int level)
400 {
401     PowerPCCPU *cpu = opaque;
402     CPUPPCState *env = &cpu->env;
403     int cur_level;
404 
405     LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
406                 env, pin, level);
407     cur_level = (env->irq_input_state >> pin) & 1;
408     /* Don't generate spurious events */
409     if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
410         switch (pin) {
411         case PPCE500_INPUT_MCK:
412             if (level) {
413                 LOG_IRQ("%s: reset the PowerPC system\n",
414                             __func__);
415                 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
416             }
417             break;
418         case PPCE500_INPUT_RESET_CORE:
419             if (level) {
420                 LOG_IRQ("%s: reset the PowerPC core\n", __func__);
421                 ppc_set_irq(cpu, PPC_INTERRUPT_MCK, level);
422             }
423             break;
424         case PPCE500_INPUT_CINT:
425             /* Level sensitive - active high */
426             LOG_IRQ("%s: set the critical IRQ state to %d\n",
427                         __func__, level);
428             ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
429             break;
430         case PPCE500_INPUT_INT:
431             /* Level sensitive - active high */
432             LOG_IRQ("%s: set the core IRQ state to %d\n",
433                         __func__, level);
434             ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
435             break;
436         case PPCE500_INPUT_DEBUG:
437             /* Level sensitive - active high */
438             LOG_IRQ("%s: set the debug pin state to %d\n",
439                         __func__, level);
440             ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
441             break;
442         default:
443             /* Unknown pin - do nothing */
444             LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
445             return;
446         }
447         if (level)
448             env->irq_input_state |= 1 << pin;
449         else
450             env->irq_input_state &= ~(1 << pin);
451     }
452 }
453 
454 void ppce500_irq_init(PowerPCCPU *cpu)
455 {
456     CPUPPCState *env = &cpu->env;
457 
458     env->irq_inputs = (void **)qemu_allocate_irqs(&ppce500_set_irq,
459                                                   cpu, PPCE500_INPUT_NB);
460 }
461 
462 /* Enable or Disable the E500 EPR capability */
463 void ppce500_set_mpic_proxy(bool enabled)
464 {
465     CPUState *cs;
466 
467     CPU_FOREACH(cs) {
468         PowerPCCPU *cpu = POWERPC_CPU(cs);
469 
470         cpu->env.mpic_proxy = enabled;
471         if (kvm_enabled()) {
472             kvmppc_set_mpic_proxy(cpu, enabled);
473         }
474     }
475 }
476 
477 /*****************************************************************************/
478 /* PowerPC time base and decrementer emulation */
479 
480 uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset)
481 {
482     /* TB time in tb periods */
483     return muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND) + tb_offset;
484 }
485 
486 uint64_t cpu_ppc_load_tbl (CPUPPCState *env)
487 {
488     ppc_tb_t *tb_env = env->tb_env;
489     uint64_t tb;
490 
491     if (kvm_enabled()) {
492         return env->spr[SPR_TBL];
493     }
494 
495     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
496     LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);
497 
498     return tb;
499 }
500 
501 static inline uint32_t _cpu_ppc_load_tbu(CPUPPCState *env)
502 {
503     ppc_tb_t *tb_env = env->tb_env;
504     uint64_t tb;
505 
506     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
507     LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);
508 
509     return tb >> 32;
510 }
511 
512 uint32_t cpu_ppc_load_tbu (CPUPPCState *env)
513 {
514     if (kvm_enabled()) {
515         return env->spr[SPR_TBU];
516     }
517 
518     return _cpu_ppc_load_tbu(env);
519 }
520 
521 static inline void cpu_ppc_store_tb(ppc_tb_t *tb_env, uint64_t vmclk,
522                                     int64_t *tb_offsetp, uint64_t value)
523 {
524     *tb_offsetp = value -
525         muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND);
526 
527     LOG_TB("%s: tb %016" PRIx64 " offset %08" PRIx64 "\n",
528                 __func__, value, *tb_offsetp);
529 }
530 
531 void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value)
532 {
533     ppc_tb_t *tb_env = env->tb_env;
534     uint64_t tb;
535 
536     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
537     tb &= 0xFFFFFFFF00000000ULL;
538     cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
539                      &tb_env->tb_offset, tb | (uint64_t)value);
540 }
541 
542 static inline void _cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value)
543 {
544     ppc_tb_t *tb_env = env->tb_env;
545     uint64_t tb;
546 
547     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
548     tb &= 0x00000000FFFFFFFFULL;
549     cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
550                      &tb_env->tb_offset, ((uint64_t)value << 32) | tb);
551 }
552 
553 void cpu_ppc_store_tbu (CPUPPCState *env, uint32_t value)
554 {
555     _cpu_ppc_store_tbu(env, value);
556 }
557 
558 uint64_t cpu_ppc_load_atbl (CPUPPCState *env)
559 {
560     ppc_tb_t *tb_env = env->tb_env;
561     uint64_t tb;
562 
563     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
564     LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);
565 
566     return tb;
567 }
568 
569 uint32_t cpu_ppc_load_atbu (CPUPPCState *env)
570 {
571     ppc_tb_t *tb_env = env->tb_env;
572     uint64_t tb;
573 
574     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
575     LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);
576 
577     return tb >> 32;
578 }
579 
580 void cpu_ppc_store_atbl (CPUPPCState *env, uint32_t value)
581 {
582     ppc_tb_t *tb_env = env->tb_env;
583     uint64_t tb;
584 
585     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
586     tb &= 0xFFFFFFFF00000000ULL;
587     cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
588                      &tb_env->atb_offset, tb | (uint64_t)value);
589 }
590 
591 void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value)
592 {
593     ppc_tb_t *tb_env = env->tb_env;
594     uint64_t tb;
595 
596     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
597     tb &= 0x00000000FFFFFFFFULL;
598     cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
599                      &tb_env->atb_offset, ((uint64_t)value << 32) | tb);
600 }
601 
602 static void cpu_ppc_tb_stop (CPUPPCState *env)
603 {
604     ppc_tb_t *tb_env = env->tb_env;
605     uint64_t tb, atb, vmclk;
606 
607     /* If the time base is already frozen, do nothing */
608     if (tb_env->tb_freq != 0) {
609         vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
610         /* Get the time base */
611         tb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->tb_offset);
612         /* Get the alternate time base */
613         atb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->atb_offset);
614         /* Store the time base value (ie compute the current offset) */
615         cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
616         /* Store the alternate time base value (compute the current offset) */
617         cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
618         /* Set the time base frequency to zero */
619         tb_env->tb_freq = 0;
620         /* Now, the time bases are frozen to tb_offset / atb_offset value */
621     }
622 }
623 
624 static void cpu_ppc_tb_start (CPUPPCState *env)
625 {
626     ppc_tb_t *tb_env = env->tb_env;
627     uint64_t tb, atb, vmclk;
628 
629     /* If the time base is not frozen, do nothing */
630     if (tb_env->tb_freq == 0) {
631         vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
632         /* Get the time base from tb_offset */
633         tb = tb_env->tb_offset;
634         /* Get the alternate time base from atb_offset */
635         atb = tb_env->atb_offset;
636         /* Restore the tb frequency from the decrementer frequency */
637         tb_env->tb_freq = tb_env->decr_freq;
638         /* Store the time base value */
639         cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
640         /* Store the alternate time base value */
641         cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
642     }
643 }
644 
645 bool ppc_decr_clear_on_delivery(CPUPPCState *env)
646 {
647     ppc_tb_t *tb_env = env->tb_env;
648     int flags = PPC_DECR_UNDERFLOW_TRIGGERED | PPC_DECR_UNDERFLOW_LEVEL;
649     return ((tb_env->flags & flags) == PPC_DECR_UNDERFLOW_TRIGGERED);
650 }
651 
652 static inline uint32_t _cpu_ppc_load_decr(CPUPPCState *env, uint64_t next)
653 {
654     ppc_tb_t *tb_env = env->tb_env;
655     uint32_t decr;
656     int64_t diff;
657 
658     diff = next - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
659     if (diff >= 0) {
660         decr = muldiv64(diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND);
661     } else if (tb_env->flags & PPC_TIMER_BOOKE) {
662         decr = 0;
663     }  else {
664         decr = -muldiv64(-diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND);
665     }
666     LOG_TB("%s: %08" PRIx32 "\n", __func__, decr);
667 
668     return decr;
669 }
670 
671 uint32_t cpu_ppc_load_decr (CPUPPCState *env)
672 {
673     ppc_tb_t *tb_env = env->tb_env;
674 
675     if (kvm_enabled()) {
676         return env->spr[SPR_DECR];
677     }
678 
679     return _cpu_ppc_load_decr(env, tb_env->decr_next);
680 }
681 
682 uint32_t cpu_ppc_load_hdecr (CPUPPCState *env)
683 {
684     ppc_tb_t *tb_env = env->tb_env;
685 
686     return _cpu_ppc_load_decr(env, tb_env->hdecr_next);
687 }
688 
689 uint64_t cpu_ppc_load_purr (CPUPPCState *env)
690 {
691     ppc_tb_t *tb_env = env->tb_env;
692     uint64_t diff;
693 
694     diff = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - tb_env->purr_start;
695 
696     return tb_env->purr_load +
697         muldiv64(diff, tb_env->tb_freq, NANOSECONDS_PER_SECOND);
698 }
699 
700 /* When decrementer expires,
701  * all we need to do is generate or queue a CPU exception
702  */
703 static inline void cpu_ppc_decr_excp(PowerPCCPU *cpu)
704 {
705     /* Raise it */
706     LOG_TB("raise decrementer exception\n");
707     ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 1);
708 }
709 
710 static inline void cpu_ppc_decr_lower(PowerPCCPU *cpu)
711 {
712     ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 0);
713 }
714 
715 static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu)
716 {
717     CPUPPCState *env = &cpu->env;
718 
719     /* Raise it */
720     LOG_TB("raise hv decrementer exception\n");
721 
722     /* The architecture specifies that we don't deliver HDEC
723      * interrupts in a PM state. Not only they don't cause a
724      * wakeup but they also get effectively discarded.
725      */
726     if (!env->in_pm_state) {
727         ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 1);
728     }
729 }
730 
731 static inline void cpu_ppc_hdecr_lower(PowerPCCPU *cpu)
732 {
733     ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
734 }
735 
736 static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp,
737                                  QEMUTimer *timer,
738                                  void (*raise_excp)(void *),
739                                  void (*lower_excp)(PowerPCCPU *),
740                                  uint32_t decr, uint32_t value)
741 {
742     CPUPPCState *env = &cpu->env;
743     ppc_tb_t *tb_env = env->tb_env;
744     uint64_t now, next;
745 
746     LOG_TB("%s: %08" PRIx32 " => %08" PRIx32 "\n", __func__,
747                 decr, value);
748 
749     if (kvm_enabled()) {
750         /* KVM handles decrementer exceptions, we don't need our own timer */
751         return;
752     }
753 
754     /*
755      * Going from 2 -> 1, 1 -> 0 or 0 -> -1 is the event to generate a DEC
756      * interrupt.
757      *
758      * If we get a really small DEC value, we can assume that by the time we
759      * handled it we should inject an interrupt already.
760      *
761      * On MSB level based DEC implementations the MSB always means the interrupt
762      * is pending, so raise it on those.
763      *
764      * On MSB edge based DEC implementations the MSB going from 0 -> 1 triggers
765      * an edge interrupt, so raise it here too.
766      */
767     if ((value < 3) ||
768         ((tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL) && (value & 0x80000000)) ||
769         ((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED) && (value & 0x80000000)
770           && !(decr & 0x80000000))) {
771         (*raise_excp)(cpu);
772         return;
773     }
774 
775     /* On MSB level based systems a 0 for the MSB stops interrupt delivery */
776     if (!(value & 0x80000000) && (tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL)) {
777         (*lower_excp)(cpu);
778     }
779 
780     /* Calculate the next timer event */
781     now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
782     next = now + muldiv64(value, NANOSECONDS_PER_SECOND, tb_env->decr_freq);
783     *nextp = next;
784 
785     /* Adjust timer */
786     timer_mod(timer, next);
787 }
788 
789 static inline void _cpu_ppc_store_decr(PowerPCCPU *cpu, uint32_t decr,
790                                        uint32_t value)
791 {
792     ppc_tb_t *tb_env = cpu->env.tb_env;
793 
794     __cpu_ppc_store_decr(cpu, &tb_env->decr_next, tb_env->decr_timer,
795                          tb_env->decr_timer->cb, &cpu_ppc_decr_lower, decr,
796                          value);
797 }
798 
799 void cpu_ppc_store_decr (CPUPPCState *env, uint32_t value)
800 {
801     PowerPCCPU *cpu = ppc_env_get_cpu(env);
802 
803     _cpu_ppc_store_decr(cpu, cpu_ppc_load_decr(env), value);
804 }
805 
806 static void cpu_ppc_decr_cb(void *opaque)
807 {
808     PowerPCCPU *cpu = opaque;
809 
810     cpu_ppc_decr_excp(cpu);
811 }
812 
813 static inline void _cpu_ppc_store_hdecr(PowerPCCPU *cpu, uint32_t hdecr,
814                                         uint32_t value)
815 {
816     ppc_tb_t *tb_env = cpu->env.tb_env;
817 
818     if (tb_env->hdecr_timer != NULL) {
819         __cpu_ppc_store_decr(cpu, &tb_env->hdecr_next, tb_env->hdecr_timer,
820                              tb_env->hdecr_timer->cb, &cpu_ppc_hdecr_lower,
821                              hdecr, value);
822     }
823 }
824 
825 void cpu_ppc_store_hdecr (CPUPPCState *env, uint32_t value)
826 {
827     PowerPCCPU *cpu = ppc_env_get_cpu(env);
828 
829     _cpu_ppc_store_hdecr(cpu, cpu_ppc_load_hdecr(env), value);
830 }
831 
832 static void cpu_ppc_hdecr_cb(void *opaque)
833 {
834     PowerPCCPU *cpu = opaque;
835 
836     cpu_ppc_hdecr_excp(cpu);
837 }
838 
839 static void cpu_ppc_store_purr(PowerPCCPU *cpu, uint64_t value)
840 {
841     ppc_tb_t *tb_env = cpu->env.tb_env;
842 
843     tb_env->purr_load = value;
844     tb_env->purr_start = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
845 }
846 
847 static void cpu_ppc_set_tb_clk (void *opaque, uint32_t freq)
848 {
849     CPUPPCState *env = opaque;
850     PowerPCCPU *cpu = ppc_env_get_cpu(env);
851     ppc_tb_t *tb_env = env->tb_env;
852 
853     tb_env->tb_freq = freq;
854     tb_env->decr_freq = freq;
855     /* There is a bug in Linux 2.4 kernels:
856      * if a decrementer exception is pending when it enables msr_ee at startup,
857      * it's not ready to handle it...
858      */
859     _cpu_ppc_store_decr(cpu, 0xFFFFFFFF, 0xFFFFFFFF);
860     _cpu_ppc_store_hdecr(cpu, 0xFFFFFFFF, 0xFFFFFFFF);
861     cpu_ppc_store_purr(cpu, 0x0000000000000000ULL);
862 }
863 
864 static void timebase_save(PPCTimebase *tb)
865 {
866     uint64_t ticks = cpu_get_host_ticks();
867     PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
868 
869     if (!first_ppc_cpu->env.tb_env) {
870         error_report("No timebase object");
871         return;
872     }
873 
874     /* not used anymore, we keep it for compatibility */
875     tb->time_of_the_day_ns = qemu_clock_get_ns(QEMU_CLOCK_HOST);
876     /*
877      * tb_offset is only expected to be changed by QEMU so
878      * there is no need to update it from KVM here
879      */
880     tb->guest_timebase = ticks + first_ppc_cpu->env.tb_env->tb_offset;
881 }
882 
883 static void timebase_load(PPCTimebase *tb)
884 {
885     CPUState *cpu;
886     PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
887     int64_t tb_off_adj, tb_off;
888     unsigned long freq;
889 
890     if (!first_ppc_cpu->env.tb_env) {
891         error_report("No timebase object");
892         return;
893     }
894 
895     freq = first_ppc_cpu->env.tb_env->tb_freq;
896 
897     tb_off_adj = tb->guest_timebase - cpu_get_host_ticks();
898 
899     tb_off = first_ppc_cpu->env.tb_env->tb_offset;
900     trace_ppc_tb_adjust(tb_off, tb_off_adj, tb_off_adj - tb_off,
901                         (tb_off_adj - tb_off) / freq);
902 
903     /* Set new offset to all CPUs */
904     CPU_FOREACH(cpu) {
905         PowerPCCPU *pcpu = POWERPC_CPU(cpu);
906         pcpu->env.tb_env->tb_offset = tb_off_adj;
907 #if defined(CONFIG_KVM)
908         kvm_set_one_reg(cpu, KVM_REG_PPC_TB_OFFSET,
909                         &pcpu->env.tb_env->tb_offset);
910 #endif
911     }
912 }
913 
914 void cpu_ppc_clock_vm_state_change(void *opaque, int running,
915                                    RunState state)
916 {
917     PPCTimebase *tb = opaque;
918 
919     if (running) {
920         timebase_load(tb);
921     } else {
922         timebase_save(tb);
923     }
924 }
925 
926 /*
927  * When migrating, read the clock just before migration,
928  * so that the guest clock counts during the events
929  * between:
930  *
931  *  * vm_stop()
932  *  *
933  *  * pre_save()
934  *
935  *  This reduces clock difference on migration from 5s
936  *  to 0.1s (when max_downtime == 5s), because sending the
937  *  final pages of memory (which happens between vm_stop()
938  *  and pre_save()) takes max_downtime.
939  */
940 static void timebase_pre_save(void *opaque)
941 {
942     PPCTimebase *tb = opaque;
943 
944     timebase_save(tb);
945 }
946 
947 const VMStateDescription vmstate_ppc_timebase = {
948     .name = "timebase",
949     .version_id = 1,
950     .minimum_version_id = 1,
951     .minimum_version_id_old = 1,
952     .pre_save = timebase_pre_save,
953     .fields      = (VMStateField []) {
954         VMSTATE_UINT64(guest_timebase, PPCTimebase),
955         VMSTATE_INT64(time_of_the_day_ns, PPCTimebase),
956         VMSTATE_END_OF_LIST()
957     },
958 };
959 
960 /* Set up (once) timebase frequency (in Hz) */
961 clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq)
962 {
963     PowerPCCPU *cpu = ppc_env_get_cpu(env);
964     ppc_tb_t *tb_env;
965 
966     tb_env = g_malloc0(sizeof(ppc_tb_t));
967     env->tb_env = tb_env;
968     tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
969     if (env->insns_flags & PPC_SEGMENT_64B) {
970         /* All Book3S 64bit CPUs implement level based DEC logic */
971         tb_env->flags |= PPC_DECR_UNDERFLOW_LEVEL;
972     }
973     /* Create new timer */
974     tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_decr_cb, cpu);
975     if (env->has_hv_mode) {
976         tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_hdecr_cb,
977                                                 cpu);
978     } else {
979         tb_env->hdecr_timer = NULL;
980     }
981     cpu_ppc_set_tb_clk(env, freq);
982 
983     return &cpu_ppc_set_tb_clk;
984 }
985 
986 /* Specific helpers for POWER & PowerPC 601 RTC */
987 void cpu_ppc601_store_rtcu (CPUPPCState *env, uint32_t value)
988 {
989     _cpu_ppc_store_tbu(env, value);
990 }
991 
992 uint32_t cpu_ppc601_load_rtcu (CPUPPCState *env)
993 {
994     return _cpu_ppc_load_tbu(env);
995 }
996 
997 void cpu_ppc601_store_rtcl (CPUPPCState *env, uint32_t value)
998 {
999     cpu_ppc_store_tbl(env, value & 0x3FFFFF80);
1000 }
1001 
1002 uint32_t cpu_ppc601_load_rtcl (CPUPPCState *env)
1003 {
1004     return cpu_ppc_load_tbl(env) & 0x3FFFFF80;
1005 }
1006 
1007 /*****************************************************************************/
1008 /* PowerPC 40x timers */
1009 
1010 /* PIT, FIT & WDT */
1011 typedef struct ppc40x_timer_t ppc40x_timer_t;
1012 struct ppc40x_timer_t {
1013     uint64_t pit_reload;  /* PIT auto-reload value        */
1014     uint64_t fit_next;    /* Tick for next FIT interrupt  */
1015     QEMUTimer *fit_timer;
1016     uint64_t wdt_next;    /* Tick for next WDT interrupt  */
1017     QEMUTimer *wdt_timer;
1018 
1019     /* 405 have the PIT, 440 have a DECR.  */
1020     unsigned int decr_excp;
1021 };
1022 
1023 /* Fixed interval timer */
1024 static void cpu_4xx_fit_cb (void *opaque)
1025 {
1026     PowerPCCPU *cpu;
1027     CPUPPCState *env;
1028     ppc_tb_t *tb_env;
1029     ppc40x_timer_t *ppc40x_timer;
1030     uint64_t now, next;
1031 
1032     env = opaque;
1033     cpu = ppc_env_get_cpu(env);
1034     tb_env = env->tb_env;
1035     ppc40x_timer = tb_env->opaque;
1036     now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1037     switch ((env->spr[SPR_40x_TCR] >> 24) & 0x3) {
1038     case 0:
1039         next = 1 << 9;
1040         break;
1041     case 1:
1042         next = 1 << 13;
1043         break;
1044     case 2:
1045         next = 1 << 17;
1046         break;
1047     case 3:
1048         next = 1 << 21;
1049         break;
1050     default:
1051         /* Cannot occur, but makes gcc happy */
1052         return;
1053     }
1054     next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->tb_freq);
1055     if (next == now)
1056         next++;
1057     timer_mod(ppc40x_timer->fit_timer, next);
1058     env->spr[SPR_40x_TSR] |= 1 << 26;
1059     if ((env->spr[SPR_40x_TCR] >> 23) & 0x1) {
1060         ppc_set_irq(cpu, PPC_INTERRUPT_FIT, 1);
1061     }
1062     LOG_TB("%s: ir %d TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx "\n", __func__,
1063            (int)((env->spr[SPR_40x_TCR] >> 23) & 0x1),
1064            env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
1065 }
1066 
1067 /* Programmable interval timer */
1068 static void start_stop_pit (CPUPPCState *env, ppc_tb_t *tb_env, int is_excp)
1069 {
1070     ppc40x_timer_t *ppc40x_timer;
1071     uint64_t now, next;
1072 
1073     ppc40x_timer = tb_env->opaque;
1074     if (ppc40x_timer->pit_reload <= 1 ||
1075         !((env->spr[SPR_40x_TCR] >> 26) & 0x1) ||
1076         (is_excp && !((env->spr[SPR_40x_TCR] >> 22) & 0x1))) {
1077         /* Stop PIT */
1078         LOG_TB("%s: stop PIT\n", __func__);
1079         timer_del(tb_env->decr_timer);
1080     } else {
1081         LOG_TB("%s: start PIT %016" PRIx64 "\n",
1082                     __func__, ppc40x_timer->pit_reload);
1083         now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1084         next = now + muldiv64(ppc40x_timer->pit_reload,
1085                               NANOSECONDS_PER_SECOND, tb_env->decr_freq);
1086         if (is_excp)
1087             next += tb_env->decr_next - now;
1088         if (next == now)
1089             next++;
1090         timer_mod(tb_env->decr_timer, next);
1091         tb_env->decr_next = next;
1092     }
1093 }
1094 
1095 static void cpu_4xx_pit_cb (void *opaque)
1096 {
1097     PowerPCCPU *cpu;
1098     CPUPPCState *env;
1099     ppc_tb_t *tb_env;
1100     ppc40x_timer_t *ppc40x_timer;
1101 
1102     env = opaque;
1103     cpu = ppc_env_get_cpu(env);
1104     tb_env = env->tb_env;
1105     ppc40x_timer = tb_env->opaque;
1106     env->spr[SPR_40x_TSR] |= 1 << 27;
1107     if ((env->spr[SPR_40x_TCR] >> 26) & 0x1) {
1108         ppc_set_irq(cpu, ppc40x_timer->decr_excp, 1);
1109     }
1110     start_stop_pit(env, tb_env, 1);
1111     LOG_TB("%s: ar %d ir %d TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx " "
1112            "%016" PRIx64 "\n", __func__,
1113            (int)((env->spr[SPR_40x_TCR] >> 22) & 0x1),
1114            (int)((env->spr[SPR_40x_TCR] >> 26) & 0x1),
1115            env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR],
1116            ppc40x_timer->pit_reload);
1117 }
1118 
1119 /* Watchdog timer */
1120 static void cpu_4xx_wdt_cb (void *opaque)
1121 {
1122     PowerPCCPU *cpu;
1123     CPUPPCState *env;
1124     ppc_tb_t *tb_env;
1125     ppc40x_timer_t *ppc40x_timer;
1126     uint64_t now, next;
1127 
1128     env = opaque;
1129     cpu = ppc_env_get_cpu(env);
1130     tb_env = env->tb_env;
1131     ppc40x_timer = tb_env->opaque;
1132     now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1133     switch ((env->spr[SPR_40x_TCR] >> 30) & 0x3) {
1134     case 0:
1135         next = 1 << 17;
1136         break;
1137     case 1:
1138         next = 1 << 21;
1139         break;
1140     case 2:
1141         next = 1 << 25;
1142         break;
1143     case 3:
1144         next = 1 << 29;
1145         break;
1146     default:
1147         /* Cannot occur, but makes gcc happy */
1148         return;
1149     }
1150     next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->decr_freq);
1151     if (next == now)
1152         next++;
1153     LOG_TB("%s: TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx "\n", __func__,
1154            env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
1155     switch ((env->spr[SPR_40x_TSR] >> 30) & 0x3) {
1156     case 0x0:
1157     case 0x1:
1158         timer_mod(ppc40x_timer->wdt_timer, next);
1159         ppc40x_timer->wdt_next = next;
1160         env->spr[SPR_40x_TSR] |= 1U << 31;
1161         break;
1162     case 0x2:
1163         timer_mod(ppc40x_timer->wdt_timer, next);
1164         ppc40x_timer->wdt_next = next;
1165         env->spr[SPR_40x_TSR] |= 1 << 30;
1166         if ((env->spr[SPR_40x_TCR] >> 27) & 0x1) {
1167             ppc_set_irq(cpu, PPC_INTERRUPT_WDT, 1);
1168         }
1169         break;
1170     case 0x3:
1171         env->spr[SPR_40x_TSR] &= ~0x30000000;
1172         env->spr[SPR_40x_TSR] |= env->spr[SPR_40x_TCR] & 0x30000000;
1173         switch ((env->spr[SPR_40x_TCR] >> 28) & 0x3) {
1174         case 0x0:
1175             /* No reset */
1176             break;
1177         case 0x1: /* Core reset */
1178             ppc40x_core_reset(cpu);
1179             break;
1180         case 0x2: /* Chip reset */
1181             ppc40x_chip_reset(cpu);
1182             break;
1183         case 0x3: /* System reset */
1184             ppc40x_system_reset(cpu);
1185             break;
1186         }
1187     }
1188 }
1189 
1190 void store_40x_pit (CPUPPCState *env, target_ulong val)
1191 {
1192     ppc_tb_t *tb_env;
1193     ppc40x_timer_t *ppc40x_timer;
1194 
1195     tb_env = env->tb_env;
1196     ppc40x_timer = tb_env->opaque;
1197     LOG_TB("%s val" TARGET_FMT_lx "\n", __func__, val);
1198     ppc40x_timer->pit_reload = val;
1199     start_stop_pit(env, tb_env, 0);
1200 }
1201 
1202 target_ulong load_40x_pit (CPUPPCState *env)
1203 {
1204     return cpu_ppc_load_decr(env);
1205 }
1206 
1207 static void ppc_40x_set_tb_clk (void *opaque, uint32_t freq)
1208 {
1209     CPUPPCState *env = opaque;
1210     ppc_tb_t *tb_env = env->tb_env;
1211 
1212     LOG_TB("%s set new frequency to %" PRIu32 "\n", __func__,
1213                 freq);
1214     tb_env->tb_freq = freq;
1215     tb_env->decr_freq = freq;
1216     /* XXX: we should also update all timers */
1217 }
1218 
1219 clk_setup_cb ppc_40x_timers_init (CPUPPCState *env, uint32_t freq,
1220                                   unsigned int decr_excp)
1221 {
1222     ppc_tb_t *tb_env;
1223     ppc40x_timer_t *ppc40x_timer;
1224 
1225     tb_env = g_malloc0(sizeof(ppc_tb_t));
1226     env->tb_env = tb_env;
1227     tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
1228     ppc40x_timer = g_malloc0(sizeof(ppc40x_timer_t));
1229     tb_env->tb_freq = freq;
1230     tb_env->decr_freq = freq;
1231     tb_env->opaque = ppc40x_timer;
1232     LOG_TB("%s freq %" PRIu32 "\n", __func__, freq);
1233     if (ppc40x_timer != NULL) {
1234         /* We use decr timer for PIT */
1235         tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_pit_cb, env);
1236         ppc40x_timer->fit_timer =
1237             timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_fit_cb, env);
1238         ppc40x_timer->wdt_timer =
1239             timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_wdt_cb, env);
1240         ppc40x_timer->decr_excp = decr_excp;
1241     }
1242 
1243     return &ppc_40x_set_tb_clk;
1244 }
1245 
1246 /*****************************************************************************/
1247 /* Embedded PowerPC Device Control Registers */
1248 typedef struct ppc_dcrn_t ppc_dcrn_t;
1249 struct ppc_dcrn_t {
1250     dcr_read_cb dcr_read;
1251     dcr_write_cb dcr_write;
1252     void *opaque;
1253 };
1254 
1255 /* XXX: on 460, DCR addresses are 32 bits wide,
1256  *      using DCRIPR to get the 22 upper bits of the DCR address
1257  */
1258 #define DCRN_NB 1024
1259 struct ppc_dcr_t {
1260     ppc_dcrn_t dcrn[DCRN_NB];
1261     int (*read_error)(int dcrn);
1262     int (*write_error)(int dcrn);
1263 };
1264 
1265 int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)
1266 {
1267     ppc_dcrn_t *dcr;
1268 
1269     if (dcrn < 0 || dcrn >= DCRN_NB)
1270         goto error;
1271     dcr = &dcr_env->dcrn[dcrn];
1272     if (dcr->dcr_read == NULL)
1273         goto error;
1274     *valp = (*dcr->dcr_read)(dcr->opaque, dcrn);
1275 
1276     return 0;
1277 
1278  error:
1279     if (dcr_env->read_error != NULL)
1280         return (*dcr_env->read_error)(dcrn);
1281 
1282     return -1;
1283 }
1284 
1285 int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
1286 {
1287     ppc_dcrn_t *dcr;
1288 
1289     if (dcrn < 0 || dcrn >= DCRN_NB)
1290         goto error;
1291     dcr = &dcr_env->dcrn[dcrn];
1292     if (dcr->dcr_write == NULL)
1293         goto error;
1294     (*dcr->dcr_write)(dcr->opaque, dcrn, val);
1295 
1296     return 0;
1297 
1298  error:
1299     if (dcr_env->write_error != NULL)
1300         return (*dcr_env->write_error)(dcrn);
1301 
1302     return -1;
1303 }
1304 
1305 int ppc_dcr_register (CPUPPCState *env, int dcrn, void *opaque,
1306                       dcr_read_cb dcr_read, dcr_write_cb dcr_write)
1307 {
1308     ppc_dcr_t *dcr_env;
1309     ppc_dcrn_t *dcr;
1310 
1311     dcr_env = env->dcr_env;
1312     if (dcr_env == NULL)
1313         return -1;
1314     if (dcrn < 0 || dcrn >= DCRN_NB)
1315         return -1;
1316     dcr = &dcr_env->dcrn[dcrn];
1317     if (dcr->opaque != NULL ||
1318         dcr->dcr_read != NULL ||
1319         dcr->dcr_write != NULL)
1320         return -1;
1321     dcr->opaque = opaque;
1322     dcr->dcr_read = dcr_read;
1323     dcr->dcr_write = dcr_write;
1324 
1325     return 0;
1326 }
1327 
1328 int ppc_dcr_init (CPUPPCState *env, int (*read_error)(int dcrn),
1329                   int (*write_error)(int dcrn))
1330 {
1331     ppc_dcr_t *dcr_env;
1332 
1333     dcr_env = g_malloc0(sizeof(ppc_dcr_t));
1334     dcr_env->read_error = read_error;
1335     dcr_env->write_error = write_error;
1336     env->dcr_env = dcr_env;
1337 
1338     return 0;
1339 }
1340 
1341 /*****************************************************************************/
1342 /* Debug port */
1343 void PPC_debug_write (void *opaque, uint32_t addr, uint32_t val)
1344 {
1345     addr &= 0xF;
1346     switch (addr) {
1347     case 0:
1348         printf("%c", val);
1349         break;
1350     case 1:
1351         printf("\n");
1352         fflush(stdout);
1353         break;
1354     case 2:
1355         printf("Set loglevel to %04" PRIx32 "\n", val);
1356         qemu_set_log(val | 0x100);
1357         break;
1358     }
1359 }
1360 
1361 /* CPU device-tree ID helpers */
1362 int ppc_get_vcpu_dt_id(PowerPCCPU *cpu)
1363 {
1364     return cpu->cpu_dt_id;
1365 }
1366 
1367 PowerPCCPU *ppc_get_vcpu_by_dt_id(int cpu_dt_id)
1368 {
1369     CPUState *cs;
1370 
1371     CPU_FOREACH(cs) {
1372         PowerPCCPU *cpu = POWERPC_CPU(cs);
1373 
1374         if (cpu->cpu_dt_id == cpu_dt_id) {
1375             return cpu;
1376         }
1377     }
1378 
1379     return NULL;
1380 }
1381 
1382 void ppc_cpu_parse_features(const char *cpu_model)
1383 {
1384     CPUClass *cc;
1385     ObjectClass *oc;
1386     const char *typename;
1387     gchar **model_pieces;
1388 
1389     model_pieces = g_strsplit(cpu_model, ",", 2);
1390     if (!model_pieces[0]) {
1391         error_report("Invalid/empty CPU model name");
1392         exit(1);
1393     }
1394 
1395     oc = cpu_class_by_name(TYPE_POWERPC_CPU, model_pieces[0]);
1396     if (oc == NULL) {
1397         error_report("Unable to find CPU definition: %s", model_pieces[0]);
1398         exit(1);
1399     }
1400 
1401     typename = object_class_get_name(oc);
1402     cc = CPU_CLASS(oc);
1403     cc->parse_features(typename, model_pieces[1], &error_fatal);
1404     g_strfreev(model_pieces);
1405 }
1406