xref: /qemu/target/arm/cpu.c (revision 3cf42b8b)
1 /*
2  * QEMU ARM CPU
3  *
4  * Copyright (c) 2012 SUSE LINUX Products GmbH
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version 2
9  * of the License, or (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, see
18  * <http://www.gnu.org/licenses/gpl-2.0.html>
19  */
20 
21 #include "qemu/osdep.h"
22 #include "target/arm/idau.h"
23 #include "qemu/error-report.h"
24 #include "qapi/error.h"
25 #include "cpu.h"
26 #include "internals.h"
27 #include "qemu-common.h"
28 #include "exec/exec-all.h"
29 #include "hw/qdev-properties.h"
30 #if !defined(CONFIG_USER_ONLY)
31 #include "hw/loader.h"
32 #endif
33 #include "hw/arm/arm.h"
34 #include "sysemu/sysemu.h"
35 #include "sysemu/hw_accel.h"
36 #include "kvm_arm.h"
37 #include "disas/capstone.h"
38 #include "fpu/softfloat.h"
39 
40 static void arm_cpu_set_pc(CPUState *cs, vaddr value)
41 {
42     ARMCPU *cpu = ARM_CPU(cs);
43 
44     cpu->env.regs[15] = value;
45 }
46 
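/* A CPU has work to do only if it is powered on in PSCI terms and one of
 * the interrupt lines we care about is asserted; a core that has been
 * powered off via PSCI stays quiescent until it is turned on again.
 */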
47 static bool arm_cpu_has_work(CPUState *cs)
48 {
49     ARMCPU *cpu = ARM_CPU(cs);
50 
51     return (cpu->power_state != PSCI_OFF)
52         && cs->interrupt_request &
53         (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
54          | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ
55          | CPU_INTERRUPT_EXITTB);
56 }
57 
58 void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHook *hook,
59                                  void *opaque)
60 {
61     /* We currently only support registering a single hook function */
62     assert(!cpu->el_change_hook);
63     cpu->el_change_hook = hook;
64     cpu->el_change_hook_opaque = opaque;
65 }
66 
67 static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque)
68 {
69     /* Reset a single ARMCPRegInfo register */
70     ARMCPRegInfo *ri = value;
71     ARMCPU *cpu = opaque;
72 
73     if (ri->type & (ARM_CP_SPECIAL | ARM_CP_ALIAS)) {
74         return;
75     }
76 
77     if (ri->resetfn) {
78         ri->resetfn(&cpu->env, ri);
79         return;
80     }
81 
82     /* A zero offset can never be a real field offset, since that would
83      * be regs[0]; we therefore use a zero fieldoffset to indicate that
84      * reset is handled elsewhere. In practice this is only used for
85      * fields in non-core coprocessors (like the pxa2xx ones).
86      */
87     if (!ri->fieldoffset) {
88         return;
89     }
90 
91     if (cpreg_field_is_64bit(ri)) {
92         CPREG_FIELD64(&cpu->env, ri) = ri->resetvalue;
93     } else {
94         CPREG_FIELD32(&cpu->env, ri) = ri->resetvalue;
95     }
96 }
97 
98 static void cp_reg_check_reset(gpointer key, gpointer value, gpointer opaque)
99 {
100     /* Purely an assertion check: we've already done reset once,
101      * so now check that running the reset for the cpreg doesn't
102      * change its value. This traps bugs where two different cpregs
103      * both try to reset the same state field but to different values.
104      */
105     ARMCPRegInfo *ri = value;
106     ARMCPU *cpu = opaque;
107     uint64_t oldvalue, newvalue;
108 
109     if (ri->type & (ARM_CP_SPECIAL | ARM_CP_ALIAS | ARM_CP_NO_RAW)) {
110         return;
111     }
112 
113     oldvalue = read_raw_cp_reg(&cpu->env, ri);
114     cp_reg_reset(key, value, opaque);
115     newvalue = read_raw_cp_reg(&cpu->env, ri);
116     assert(oldvalue == newvalue);
117 }
118 
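/* Reset flow: run the parent class reset, zero the CPU state up to
 * end_reset_fields, reset every coprocessor register (and assert that a
 * second reset pass does not change any value), then apply the
 * profile-specific startup state: AArch64 EL selection, the M-profile
 * vector table load, FP status flags, and so on.
 */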
119 /* CPUClass::reset() */
120 static void arm_cpu_reset(CPUState *s)
121 {
122     ARMCPU *cpu = ARM_CPU(s);
123     ARMCPUClass *acc = ARM_CPU_GET_CLASS(cpu);
124     CPUARMState *env = &cpu->env;
125 
126     acc->parent_reset(s);
127 
128     memset(env, 0, offsetof(CPUARMState, end_reset_fields));
129 
130     g_hash_table_foreach(cpu->cp_regs, cp_reg_reset, cpu);
131     g_hash_table_foreach(cpu->cp_regs, cp_reg_check_reset, cpu);
132 
133     env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid;
134     env->vfp.xregs[ARM_VFP_MVFR0] = cpu->mvfr0;
135     env->vfp.xregs[ARM_VFP_MVFR1] = cpu->mvfr1;
136     env->vfp.xregs[ARM_VFP_MVFR2] = cpu->mvfr2;
137 
138     cpu->power_state = cpu->start_powered_off ? PSCI_OFF : PSCI_ON;
139     s->halted = cpu->start_powered_off;
140 
141     if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
142         env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
143     }
144 
145     if (arm_feature(env, ARM_FEATURE_AARCH64)) {
146         /* 64 bit CPUs always start in 64 bit mode */
147         env->aarch64 = 1;
148 #if defined(CONFIG_USER_ONLY)
149         env->pstate = PSTATE_MODE_EL0t;
150         /* Userspace expects access to DC ZVA, CTR_EL0 and the cache ops */
151         env->cp15.sctlr_el[1] |= SCTLR_UCT | SCTLR_UCI | SCTLR_DZE;
152         /* and to the FP/Neon instructions */
153         env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 2, 3);
154 #else
155         /* Reset into the highest available EL */
156         if (arm_feature(env, ARM_FEATURE_EL3)) {
157             env->pstate = PSTATE_MODE_EL3h;
158         } else if (arm_feature(env, ARM_FEATURE_EL2)) {
159             env->pstate = PSTATE_MODE_EL2h;
160         } else {
161             env->pstate = PSTATE_MODE_EL1h;
162         }
163         env->pc = cpu->rvbar;
164 #endif
165     } else {
166 #if defined(CONFIG_USER_ONLY)
167         /* Userspace expects access to cp10 and cp11 for FP/Neon */
168         env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 4, 0xf);
169 #endif
170     }
171 
172 #if defined(CONFIG_USER_ONLY)
173     env->uncached_cpsr = ARM_CPU_MODE_USR;
174     /* For user mode we must enable access to coprocessors */
175     env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
176     if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
177         env->cp15.c15_cpar = 3;
178     } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
179         env->cp15.c15_cpar = 1;
180     }
181 #else
182     /* SVC mode with interrupts disabled.  */
183     env->uncached_cpsr = ARM_CPU_MODE_SVC;
184     env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F;
185 
186     if (arm_feature(env, ARM_FEATURE_M)) {
187         uint32_t initial_msp; /* Loaded from 0x0 */
188         uint32_t initial_pc; /* Loaded from 0x4 */
189         uint8_t *rom;
190         uint32_t vecbase;
191 
192         if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
193             env->v7m.secure = true;
194         } else {
195             /* This bit resets to 0 if security is supported, but 1 if
196              * it is not. The bit is not present in v7M, but we set it
197              * here so we can avoid having to make checks on it conditional
198              * on ARM_FEATURE_V8 (we don't let the guest see the bit).
199              */
200             env->v7m.aircr = R_V7M_AIRCR_BFHFNMINS_MASK;
201         }
202 
203         /* In v7M the reset value of this bit is IMPDEF, but ARM recommends
204          * that it resets to 1, so QEMU always does that rather than making
205          * it dependent on CPU model. In v8M it is RES1.
206          */
207         env->v7m.ccr[M_REG_NS] = R_V7M_CCR_STKALIGN_MASK;
208         env->v7m.ccr[M_REG_S] = R_V7M_CCR_STKALIGN_MASK;
209         if (arm_feature(env, ARM_FEATURE_V8)) {
210             /* in v8M the NONBASETHRDENA bit [0] is RES1 */
211             env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_NONBASETHRDENA_MASK;
212             env->v7m.ccr[M_REG_S] |= R_V7M_CCR_NONBASETHRDENA_MASK;
213         }
214 
215         /* Unlike A/R profile, M profile defines the reset LR value */
216         env->regs[14] = 0xffffffff;
217 
218         env->v7m.vecbase[M_REG_S] = cpu->init_svtor & 0xffffff80;
219 
220         /* Load the initial SP and PC from offset 0 and 4 in the vector table */
221         vecbase = env->v7m.vecbase[env->v7m.secure];
222         rom = rom_ptr(vecbase);
223         if (rom) {
224             /* Address zero is covered by ROM which hasn't yet been
225              * copied into physical memory.
226              */
227             initial_msp = ldl_p(rom);
228             initial_pc = ldl_p(rom + 4);
229         } else {
230             /* Address zero not covered by a ROM blob, or the ROM blob
231              * is in non-modifiable memory and this is a second reset after
232              * it got copied into memory. In the latter case, rom_ptr
233              * will return a NULL pointer and we should use ldl_phys instead.
234              */
235             initial_msp = ldl_phys(s->as, vecbase);
236             initial_pc = ldl_phys(s->as, vecbase + 4);
237         }
238 
239         env->regs[13] = initial_msp & 0xFFFFFFFC;
240         env->regs[15] = initial_pc & ~1;
241         env->thumb = initial_pc & 1;
242     }
243 
244     /* AArch32 has a hard highvec setting of 0xFFFF0000.  If we are currently
245      * executing as AArch32 then check if highvecs are enabled and
246      * adjust the PC accordingly.
247      */
248     if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
249         env->regs[15] = 0xFFFF0000;
250     }
251 
252     /* M profile requires that reset clears the exclusive monitor;
253      * A profile does not, but clearing it makes more sense than having it
254      * set with an exclusive access on address zero.
255      */
256     arm_clear_exclusive(env);
257 
258     env->vfp.xregs[ARM_VFP_FPEXC] = 0;
259 #endif
260 
261     if (arm_feature(env, ARM_FEATURE_PMSA)) {
262         if (cpu->pmsav7_dregion > 0) {
263             if (arm_feature(env, ARM_FEATURE_V8)) {
264                 memset(env->pmsav8.rbar[M_REG_NS], 0,
265                        sizeof(*env->pmsav8.rbar[M_REG_NS])
266                        * cpu->pmsav7_dregion);
267                 memset(env->pmsav8.rlar[M_REG_NS], 0,
268                        sizeof(*env->pmsav8.rlar[M_REG_NS])
269                        * cpu->pmsav7_dregion);
270                 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
271                     memset(env->pmsav8.rbar[M_REG_S], 0,
272                            sizeof(*env->pmsav8.rbar[M_REG_S])
273                            * cpu->pmsav7_dregion);
274                     memset(env->pmsav8.rlar[M_REG_S], 0,
275                            sizeof(*env->pmsav8.rlar[M_REG_S])
276                            * cpu->pmsav7_dregion);
277                 }
278             } else if (arm_feature(env, ARM_FEATURE_V7)) {
279                 memset(env->pmsav7.drbar, 0,
280                        sizeof(*env->pmsav7.drbar) * cpu->pmsav7_dregion);
281                 memset(env->pmsav7.drsr, 0,
282                        sizeof(*env->pmsav7.drsr) * cpu->pmsav7_dregion);
283                 memset(env->pmsav7.dracr, 0,
284                        sizeof(*env->pmsav7.dracr) * cpu->pmsav7_dregion);
285             }
286         }
287         env->pmsav7.rnr[M_REG_NS] = 0;
288         env->pmsav7.rnr[M_REG_S] = 0;
289         env->pmsav8.mair0[M_REG_NS] = 0;
290         env->pmsav8.mair0[M_REG_S] = 0;
291         env->pmsav8.mair1[M_REG_NS] = 0;
292         env->pmsav8.mair1[M_REG_S] = 0;
293     }
294 
295     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
296         if (cpu->sau_sregion > 0) {
297             memset(env->sau.rbar, 0, sizeof(*env->sau.rbar) * cpu->sau_sregion);
298             memset(env->sau.rlar, 0, sizeof(*env->sau.rlar) * cpu->sau_sregion);
299         }
300         env->sau.rnr = 0;
301         /* SAU_CTRL reset value is IMPDEF; we choose 0, which is what
302          * the Cortex-M33 does.
303          */
304         env->sau.ctrl = 0;
305     }
306 
307     set_flush_to_zero(1, &env->vfp.standard_fp_status);
308     set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status);
309     set_default_nan_mode(1, &env->vfp.standard_fp_status);
310     set_float_detect_tininess(float_tininess_before_rounding,
311                               &env->vfp.fp_status);
312     set_float_detect_tininess(float_tininess_before_rounding,
313                               &env->vfp.standard_fp_status);
314 #ifndef CONFIG_USER_ONLY
315     if (kvm_enabled()) {
316         kvm_arm_reset_vcpu(cpu);
317     }
318 #endif
319 
320     hw_breakpoint_update_all(cpu);
321     hw_watchpoint_update_all(cpu);
322 }
323 
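/* Check the pending physical and virtual interrupts in order (FIQ, IRQ,
 * VIRQ, VFIQ). Physical interrupts are routed to the EL computed by
 * arm_phys_excp_target_el(); virtual interrupts always target EL1. Any
 * interrupt that is unmasked at its target EL is delivered immediately
 * via cc->do_interrupt().
 */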
324 bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
325 {
326     CPUClass *cc = CPU_GET_CLASS(cs);
327     CPUARMState *env = cs->env_ptr;
328     uint32_t cur_el = arm_current_el(env);
329     bool secure = arm_is_secure(env);
330     uint32_t target_el;
331     uint32_t excp_idx;
332     bool ret = false;
333 
334     if (interrupt_request & CPU_INTERRUPT_FIQ) {
335         excp_idx = EXCP_FIQ;
336         target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
337         if (arm_excp_unmasked(cs, excp_idx, target_el)) {
338             cs->exception_index = excp_idx;
339             env->exception.target_el = target_el;
340             cc->do_interrupt(cs);
341             ret = true;
342         }
343     }
344     if (interrupt_request & CPU_INTERRUPT_HARD) {
345         excp_idx = EXCP_IRQ;
346         target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
347         if (arm_excp_unmasked(cs, excp_idx, target_el)) {
348             cs->exception_index = excp_idx;
349             env->exception.target_el = target_el;
350             cc->do_interrupt(cs);
351             ret = true;
352         }
353     }
354     if (interrupt_request & CPU_INTERRUPT_VIRQ) {
355         excp_idx = EXCP_VIRQ;
356         target_el = 1;
357         if (arm_excp_unmasked(cs, excp_idx, target_el)) {
358             cs->exception_index = excp_idx;
359             env->exception.target_el = target_el;
360             cc->do_interrupt(cs);
361             ret = true;
362         }
363     }
364     if (interrupt_request & CPU_INTERRUPT_VFIQ) {
365         excp_idx = EXCP_VFIQ;
366         target_el = 1;
367         if (arm_excp_unmasked(cs, excp_idx, target_el)) {
368             cs->exception_index = excp_idx;
369             env->exception.target_el = target_el;
370             cc->do_interrupt(cs);
371             ret = true;
372         }
373     }
374 
375     return ret;
376 }
377 
378 #if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
379 static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
380 {
381     CPUClass *cc = CPU_GET_CLASS(cs);
382     ARMCPU *cpu = ARM_CPU(cs);
383     CPUARMState *env = &cpu->env;
384     bool ret = false;
385 
386     /* ARMv7-M interrupt masking works differently from ARMv7-A and -R.
387      * There is no FIQ/IRQ distinction. Instead of I and F bits
388      * masking FIQ and IRQ interrupts, an exception is taken only
389      * if it is higher priority than the current execution priority
390      * (which depends on state like BASEPRI, FAULTMASK and the
391      * currently active exception).
392      */
393     if (interrupt_request & CPU_INTERRUPT_HARD
394         && (armv7m_nvic_can_take_pending_exception(env->nvic))) {
395         cs->exception_index = EXCP_IRQ;
396         cc->do_interrupt(cs);
397         ret = true;
398     }
399     return ret;
400 }
401 #endif
402 
403 #ifndef CONFIG_USER_ONLY
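/* Handler for the CPU's inbound GPIO lines: map the qdev line number
 * (ARM_CPU_IRQ/FIQ/VIRQ/VFIQ) onto the corresponding CPU_INTERRUPT_* bit
 * and assert or deassert it; the virtual lines are only valid when EL2
 * is implemented.
 */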
404 static void arm_cpu_set_irq(void *opaque, int irq, int level)
405 {
406     ARMCPU *cpu = opaque;
407     CPUARMState *env = &cpu->env;
408     CPUState *cs = CPU(cpu);
409     static const int mask[] = {
410         [ARM_CPU_IRQ] = CPU_INTERRUPT_HARD,
411         [ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ,
412         [ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
413         [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ
414     };
415 
416     switch (irq) {
417     case ARM_CPU_VIRQ:
418     case ARM_CPU_VFIQ:
419         assert(arm_feature(env, ARM_FEATURE_EL2));
420         /* fall through */
421     case ARM_CPU_IRQ:
422     case ARM_CPU_FIQ:
423         if (level) {
424             cpu_interrupt(cs, mask[irq]);
425         } else {
426             cpu_reset_interrupt(cs, mask[irq]);
427         }
428         break;
429     default:
430         g_assert_not_reached();
431     }
432 }
433 
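/* With KVM the interrupt lines are not modelled in QEMU; instead the line
 * and the vCPU index are encoded into a KVM irq number and the level is
 * forwarded to the kernel via kvm_set_irq().
 */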
434 static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
435 {
436 #ifdef CONFIG_KVM
437     ARMCPU *cpu = opaque;
438     CPUState *cs = CPU(cpu);
439     int kvm_irq = KVM_ARM_IRQ_TYPE_CPU << KVM_ARM_IRQ_TYPE_SHIFT;
440 
441     switch (irq) {
442     case ARM_CPU_IRQ:
443         kvm_irq |= KVM_ARM_IRQ_CPU_IRQ;
444         break;
445     case ARM_CPU_FIQ:
446         kvm_irq |= KVM_ARM_IRQ_CPU_FIQ;
447         break;
448     default:
449         g_assert_not_reached();
450     }
451     kvm_irq |= cs->cpu_index << KVM_ARM_IRQ_VCPU_SHIFT;
452     kvm_set_irq(kvm_state, kvm_irq, level ? 1 : 0);
453 #endif
454 }
455 
456 static bool arm_cpu_virtio_is_big_endian(CPUState *cs)
457 {
458     ARMCPU *cpu = ARM_CPU(cs);
459     CPUARMState *env = &cpu->env;
460 
461     cpu_synchronize_state(cs);
462     return arm_cpu_data_is_big_endian(env);
463 }
464 
465 #endif
466 
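/* env->features is a 64-bit bitmap indexed by the ARM_FEATURE_* enum;
 * these helpers set and clear a single feature bit.
 */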
467 static inline void set_feature(CPUARMState *env, int feature)
468 {
469     env->features |= 1ULL << feature;
470 }
471 
472 static inline void unset_feature(CPUARMState *env, int feature)
473 {
474     env->features &= ~(1ULL << feature);
475 }
476 
477 static int
478 print_insn_thumb1(bfd_vma pc, disassemble_info *info)
479 {
480   return print_insn_arm(pc | 1, info);
481 }
482 
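/* Configure the disassembler for the CPU's current state: pick the A64,
 * ARM or Thumb decoder and the matching capstone mode, and choose an
 * endianness that accounts for SCTLR.B (BE32) images.
 */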
483 static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
484 {
485     ARMCPU *ac = ARM_CPU(cpu);
486     CPUARMState *env = &ac->env;
487     bool sctlr_b;
488 
489     if (is_a64(env)) {
490         /* We might not be compiled with the A64 disassembler
491          * because it needs a C++ compiler. Leave print_insn
492          * unset in this case to use the caller default behaviour.
493          */
494 #if defined(CONFIG_ARM_A64_DIS)
495         info->print_insn = print_insn_arm_a64;
496 #endif
497         info->cap_arch = CS_ARCH_ARM64;
498         info->cap_insn_unit = 4;
499         info->cap_insn_split = 4;
500     } else {
501         int cap_mode;
502         if (env->thumb) {
503             info->print_insn = print_insn_thumb1;
504             info->cap_insn_unit = 2;
505             info->cap_insn_split = 4;
506             cap_mode = CS_MODE_THUMB;
507         } else {
508             info->print_insn = print_insn_arm;
509             info->cap_insn_unit = 4;
510             info->cap_insn_split = 4;
511             cap_mode = CS_MODE_ARM;
512         }
513         if (arm_feature(env, ARM_FEATURE_V8)) {
514             cap_mode |= CS_MODE_V8;
515         }
516         if (arm_feature(env, ARM_FEATURE_M)) {
517             cap_mode |= CS_MODE_MCLASS;
518         }
519         info->cap_arch = CS_ARCH_ARM;
520         info->cap_mode = cap_mode;
521     }
522 
523     sctlr_b = arm_sctlr_b(env);
524     if (bswap_code(sctlr_b)) {
525 #ifdef TARGET_WORDS_BIGENDIAN
526         info->endian = BFD_ENDIAN_LITTLE;
527 #else
528         info->endian = BFD_ENDIAN_BIG;
529 #endif
530     }
531     info->flags &= ~INSN_ARM_BE32;
532 #ifndef CONFIG_USER_ONLY
533     if (sctlr_b) {
534         info->flags |= INSN_ARM_BE32;
535     }
536 #endif
537 }
538 
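/* Build the MPIDR affinity fields for a linear CPU index: Aff0 is the
 * position within a cluster and Aff1 is the cluster number, with Aff1 at
 * bits [15:8]. For example, with a cluster size of 8, index 10 gives
 * Aff1 = 1 and Aff0 = 2, i.e. an affinity value of 0x102.
 */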
539 uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz)
540 {
541     uint32_t Aff1 = idx / clustersz;
542     uint32_t Aff0 = idx % clustersz;
543     return (Aff1 << ARM_AFF1_SHIFT) | Aff0;
544 }
545 
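/* Instance init: wire up the env pointer, create the (initially empty)
 * coprocessor register hash table, and, for system emulation, create the
 * inbound IRQ/FIQ GPIO lines, the generic timers and the outbound
 * maintenance and PMU interrupt lines.
 */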
546 static void arm_cpu_initfn(Object *obj)
547 {
548     CPUState *cs = CPU(obj);
549     ARMCPU *cpu = ARM_CPU(obj);
550 
551     cs->env_ptr = &cpu->env;
552     cpu->cp_regs = g_hash_table_new_full(g_int_hash, g_int_equal,
553                                          g_free, g_free);
554 
555 #ifndef CONFIG_USER_ONLY
556     /* Our inbound IRQ and FIQ lines */
557     if (kvm_enabled()) {
558         /* VIRQ and VFIQ are unused with KVM but we add them to maintain
559          * the same interface as non-KVM CPUs.
560          */
561         qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 4);
562     } else {
563         qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 4);
564     }
565 
566     cpu->gt_timer[GTIMER_PHYS] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
567                                                 arm_gt_ptimer_cb, cpu);
568     cpu->gt_timer[GTIMER_VIRT] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
569                                                 arm_gt_vtimer_cb, cpu);
570     cpu->gt_timer[GTIMER_HYP] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
571                                                 arm_gt_htimer_cb, cpu);
572     cpu->gt_timer[GTIMER_SEC] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
573                                                 arm_gt_stimer_cb, cpu);
574     qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs,
575                        ARRAY_SIZE(cpu->gt_timer_outputs));
576 
577     qdev_init_gpio_out_named(DEVICE(cpu), &cpu->gicv3_maintenance_interrupt,
578                              "gicv3-maintenance-interrupt", 1);
579     qdev_init_gpio_out_named(DEVICE(cpu), &cpu->pmu_interrupt,
580                              "pmu-interrupt", 1);
581 #endif
582 
583     /* DTB consumers generally don't in fact care what the 'compatible'
584      * string is, so always provide some string and trust that a hypothetical
585      * picky DTB consumer will also provide a helpful error message.
586      */
587     cpu->dtb_compatible = "qemu,unknown";
588     cpu->psci_version = 1; /* By default assume PSCI v0.1 */
589     cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;
590 
591     if (tcg_enabled()) {
592         cpu->psci_version = 2; /* TCG implements PSCI 0.2 */
593     }
594 }
595 
596 static Property arm_cpu_reset_cbar_property =
597             DEFINE_PROP_UINT64("reset-cbar", ARMCPU, reset_cbar, 0);
598 
599 static Property arm_cpu_reset_hivecs_property =
600             DEFINE_PROP_BOOL("reset-hivecs", ARMCPU, reset_hivecs, false);
601 
602 static Property arm_cpu_rvbar_property =
603             DEFINE_PROP_UINT64("rvbar", ARMCPU, rvbar, 0);
604 
605 static Property arm_cpu_has_el2_property =
606             DEFINE_PROP_BOOL("has_el2", ARMCPU, has_el2, true);
607 
608 static Property arm_cpu_has_el3_property =
609             DEFINE_PROP_BOOL("has_el3", ARMCPU, has_el3, true);
610 
611 static Property arm_cpu_cfgend_property =
612             DEFINE_PROP_BOOL("cfgend", ARMCPU, cfgend, false);
613 
614 /* Use the property name "pmu" to match other architectures and virt tools. */
615 static Property arm_cpu_has_pmu_property =
616             DEFINE_PROP_BOOL("pmu", ARMCPU, has_pmu, true);
617 
618 static Property arm_cpu_has_mpu_property =
619             DEFINE_PROP_BOOL("has-mpu", ARMCPU, has_mpu, true);
620 
621 /* This is like DEFINE_PROP_UINT32 but it doesn't set the default value,
622  * because the CPU initfn will have already set cpu->pmsav7_dregion to
623  * the right value for that particular CPU type, and we don't want
624  * to override that with an incorrect constant value.
625  */
626 static Property arm_cpu_pmsav7_dregion_property =
627             DEFINE_PROP_UNSIGNED_NODEFAULT("pmsav7-dregion", ARMCPU,
628                                            pmsav7_dregion,
629                                            qdev_prop_uint32, uint32_t);
630 
631 /* M profile: initial value of the Secure VTOR */
632 static Property arm_cpu_initsvtor_property =
633             DEFINE_PROP_UINT32("init-svtor", ARMCPU, init_svtor, 0);
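
/* These are ordinary qdev properties, so a board model can configure them
 * before the CPU is realized; a minimal (hypothetical) sketch, assuming a
 * CPU type that exposes the relevant properties:
 *
 *     ARMCPU *cpu = ARM_CPU(object_new(ARM_CPU_TYPE_NAME("cortex-a15")));
 *     qdev_prop_set_bit(DEVICE(cpu), "has_el3", false);
 *     qdev_prop_set_uint64(DEVICE(cpu), "reset-cbar", 0x2c000000);
 */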
634 
635 static void arm_cpu_post_init(Object *obj)
636 {
637     ARMCPU *cpu = ARM_CPU(obj);
638 
639     /* M profile implies PMSA. We have to do this here rather than
640      * in realize with the other feature-implication checks because
641      * we look at the PMSA bit to see if we should add some properties.
642      */
643     if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
644         set_feature(&cpu->env, ARM_FEATURE_PMSA);
645     }
646 
647     if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) ||
648         arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) {
649         qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_cbar_property,
650                                  &error_abort);
651     }
652 
653     if (!arm_feature(&cpu->env, ARM_FEATURE_M)) {
654         qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_hivecs_property,
655                                  &error_abort);
656     }
657 
658     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
659         qdev_property_add_static(DEVICE(obj), &arm_cpu_rvbar_property,
660                                  &error_abort);
661     }
662 
663     if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) {
664         /* Add the has_el3 state CPU property only if EL3 is allowed.  This will
665          * prevent "has_el3" from existing on CPUs which cannot support EL3.
666          */
667         qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el3_property,
668                                  &error_abort);
669 
670 #ifndef CONFIG_USER_ONLY
671         object_property_add_link(obj, "secure-memory",
672                                  TYPE_MEMORY_REGION,
673                                  (Object **)&cpu->secure_memory,
674                                  qdev_prop_allow_set_link_before_realize,
675                                  OBJ_PROP_LINK_UNREF_ON_RELEASE,
676                                  &error_abort);
677 #endif
678     }
679 
680     if (arm_feature(&cpu->env, ARM_FEATURE_EL2)) {
681         qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el2_property,
682                                  &error_abort);
683     }
684 
685     if (arm_feature(&cpu->env, ARM_FEATURE_PMU)) {
686         qdev_property_add_static(DEVICE(obj), &arm_cpu_has_pmu_property,
687                                  &error_abort);
688     }
689 
690     if (arm_feature(&cpu->env, ARM_FEATURE_PMSA)) {
691         qdev_property_add_static(DEVICE(obj), &arm_cpu_has_mpu_property,
692                                  &error_abort);
693         if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
694             qdev_property_add_static(DEVICE(obj),
695                                      &arm_cpu_pmsav7_dregion_property,
696                                      &error_abort);
697         }
698     }
699 
700     if (arm_feature(&cpu->env, ARM_FEATURE_M_SECURITY)) {
701         object_property_add_link(obj, "idau", TYPE_IDAU_INTERFACE, &cpu->idau,
702                                  qdev_prop_allow_set_link_before_realize,
703                                  OBJ_PROP_LINK_UNREF_ON_RELEASE,
704                                  &error_abort);
705         qdev_property_add_static(DEVICE(obj), &arm_cpu_initsvtor_property,
706                                  &error_abort);
707     }
708 
709     qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property,
710                              &error_abort);
711 }
712 
713 static void arm_cpu_finalizefn(Object *obj)
714 {
715     ARMCPU *cpu = ARM_CPU(obj);
716     g_hash_table_destroy(cpu->cp_regs);
717 }
718 
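/* Realize: apply the feature implication chain (V8 implies V7, V7 implies
 * V6K/THUMB2, VFP4 implies VFP3, and so on), pick the target page size,
 * honour the user-visible properties set since init, allocate the PMSA and
 * SAU region arrays, register the coprocessor and gdb registers, and
 * finally create and reset the vCPU.
 */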
719 static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
720 {
721     CPUState *cs = CPU(dev);
722     ARMCPU *cpu = ARM_CPU(dev);
723     ARMCPUClass *acc = ARM_CPU_GET_CLASS(dev);
724     CPUARMState *env = &cpu->env;
725     int pagebits;
726     Error *local_err = NULL;
727 
728     /* If we needed to query the host kernel for the CPU features
729      * then it's possible that the probe might have failed in the initfn,
730      * but this is the first point where we can report it.
731      */
732     if (cpu->host_cpu_probe_failed) {
733         if (!kvm_enabled()) {
734             error_setg(errp, "The 'host' CPU type can only be used with KVM");
735         } else {
736             error_setg(errp, "Failed to retrieve host CPU features");
737         }
738         return;
739     }
740 
741     cpu_exec_realizefn(cs, &local_err);
742     if (local_err != NULL) {
743         error_propagate(errp, local_err);
744         return;
745     }
746 
747     /* Some features automatically imply others: */
748     if (arm_feature(env, ARM_FEATURE_V8)) {
749         set_feature(env, ARM_FEATURE_V7);
750         set_feature(env, ARM_FEATURE_ARM_DIV);
751         set_feature(env, ARM_FEATURE_LPAE);
752     }
753     if (arm_feature(env, ARM_FEATURE_V7)) {
754         set_feature(env, ARM_FEATURE_VAPA);
755         set_feature(env, ARM_FEATURE_THUMB2);
756         set_feature(env, ARM_FEATURE_MPIDR);
757         if (!arm_feature(env, ARM_FEATURE_M)) {
758             set_feature(env, ARM_FEATURE_V6K);
759         } else {
760             set_feature(env, ARM_FEATURE_V6);
761         }
762 
763         /* Always define VBAR for V7 CPUs even if it doesn't exist in
764          * non-EL3 configs. This is needed by some legacy boards.
765          */
766         set_feature(env, ARM_FEATURE_VBAR);
767     }
768     if (arm_feature(env, ARM_FEATURE_V6K)) {
769         set_feature(env, ARM_FEATURE_V6);
770         set_feature(env, ARM_FEATURE_MVFR);
771     }
772     if (arm_feature(env, ARM_FEATURE_V6)) {
773         set_feature(env, ARM_FEATURE_V5);
774         set_feature(env, ARM_FEATURE_JAZELLE);
775         if (!arm_feature(env, ARM_FEATURE_M)) {
776             set_feature(env, ARM_FEATURE_AUXCR);
777         }
778     }
779     if (arm_feature(env, ARM_FEATURE_V5)) {
780         set_feature(env, ARM_FEATURE_V4T);
781     }
782     if (arm_feature(env, ARM_FEATURE_M)) {
783         set_feature(env, ARM_FEATURE_THUMB_DIV);
784     }
785     if (arm_feature(env, ARM_FEATURE_ARM_DIV)) {
786         set_feature(env, ARM_FEATURE_THUMB_DIV);
787     }
788     if (arm_feature(env, ARM_FEATURE_VFP4)) {
789         set_feature(env, ARM_FEATURE_VFP3);
790         set_feature(env, ARM_FEATURE_VFP_FP16);
791     }
792     if (arm_feature(env, ARM_FEATURE_VFP3)) {
793         set_feature(env, ARM_FEATURE_VFP);
794     }
795     if (arm_feature(env, ARM_FEATURE_LPAE)) {
796         set_feature(env, ARM_FEATURE_V7MP);
797         set_feature(env, ARM_FEATURE_PXN);
798     }
799     if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
800         set_feature(env, ARM_FEATURE_CBAR);
801     }
802     if (arm_feature(env, ARM_FEATURE_THUMB2) &&
803         !arm_feature(env, ARM_FEATURE_M)) {
804         set_feature(env, ARM_FEATURE_THUMB_DSP);
805     }
806 
807     if (arm_feature(env, ARM_FEATURE_V7) &&
808         !arm_feature(env, ARM_FEATURE_M) &&
809         !arm_feature(env, ARM_FEATURE_PMSA)) {
810         /* v7VMSA drops support for the old ARMv5 tiny pages, so we
811          * can use 4K pages.
812          */
813         pagebits = 12;
814     } else {
815         /* For CPUs which might have tiny 1K pages, or which have an
816          * MPU and might have small region sizes, stick with 1K pages.
817          */
818         pagebits = 10;
819     }
820     if (!set_preferred_target_page_bits(pagebits)) {
821         /* This can only ever happen when hotplugging a CPU, or if the
822          * board code incorrectly creates a CPU which it promised, via
823          * minimum_page_size, that it would not create.
824          */
825         error_setg(errp, "This CPU requires a smaller page size than the "
826                    "system is using");
827         return;
828     }
829 
830     /* This cpu-id-to-MPIDR affinity is used only for TCG; KVM will override it.
831      * We don't support setting cluster ID ([16..23]) (known as Aff2
832      * in later ARM ARM versions), or any of the higher affinity level fields,
833      * so these bits always read as zero (RAZ).
834      */
835     if (cpu->mp_affinity == ARM64_AFFINITY_INVALID) {
836         cpu->mp_affinity = arm_cpu_mp_affinity(cs->cpu_index,
837                                                ARM_DEFAULT_CPUS_PER_CLUSTER);
838     }
839 
840     if (cpu->reset_hivecs) {
841         cpu->reset_sctlr |= (1 << 13);
842     }
843 
844     if (cpu->cfgend) {
845         if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
846             cpu->reset_sctlr |= SCTLR_EE;
847         } else {
848             cpu->reset_sctlr |= SCTLR_B;
849         }
850     }
851 
852     if (!cpu->has_el3) {
853         /* If the has_el3 CPU property is disabled then we need to disable the
854          * feature.
855          */
856         unset_feature(env, ARM_FEATURE_EL3);
857 
858         /* Disable the security extension feature bits in the processor feature
859          * registers as well. These are id_pfr1[7:4] and id_aa64pfr0[15:12].
860          */
861         cpu->id_pfr1 &= ~0xf0;
862         cpu->id_aa64pfr0 &= ~0xf000;
863     }
864 
865     if (!cpu->has_el2) {
866         unset_feature(env, ARM_FEATURE_EL2);
867     }
868 
869     if (!cpu->has_pmu) {
870         unset_feature(env, ARM_FEATURE_PMU);
871         cpu->id_aa64dfr0 &= ~0xf00;
872     }
873 
874     if (!arm_feature(env, ARM_FEATURE_EL2)) {
875         /* Disable the hypervisor feature bits in the processor feature
876          * registers if we don't have EL2. These are id_pfr1[15:12] and
877          * id_aa64pfr0_el1[11:8].
878          */
879         cpu->id_aa64pfr0 &= ~0xf00;
880         cpu->id_pfr1 &= ~0xf000;
881     }
882 
883     /* MPU can be configured out of a PMSA CPU either by setting has-mpu
884      * to false or by setting pmsav7-dregion to 0.
885      */
886     if (!cpu->has_mpu) {
887         cpu->pmsav7_dregion = 0;
888     }
889     if (cpu->pmsav7_dregion == 0) {
890         cpu->has_mpu = false;
891     }
892 
893     if (arm_feature(env, ARM_FEATURE_PMSA) &&
894         arm_feature(env, ARM_FEATURE_V7)) {
895         uint32_t nr = cpu->pmsav7_dregion;
896 
897         if (nr > 0xff) {
898             error_setg(errp, "PMSAv7 MPU #regions invalid %" PRIu32, nr);
899             return;
900         }
901 
902         if (nr) {
903             if (arm_feature(env, ARM_FEATURE_V8)) {
904                 /* PMSAv8 */
905                 env->pmsav8.rbar[M_REG_NS] = g_new0(uint32_t, nr);
906                 env->pmsav8.rlar[M_REG_NS] = g_new0(uint32_t, nr);
907                 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
908                     env->pmsav8.rbar[M_REG_S] = g_new0(uint32_t, nr);
909                     env->pmsav8.rlar[M_REG_S] = g_new0(uint32_t, nr);
910                 }
911             } else {
912                 env->pmsav7.drbar = g_new0(uint32_t, nr);
913                 env->pmsav7.drsr = g_new0(uint32_t, nr);
914                 env->pmsav7.dracr = g_new0(uint32_t, nr);
915             }
916         }
917     }
918 
919     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
920         uint32_t nr = cpu->sau_sregion;
921 
922         if (nr > 0xff) {
923             error_setg(errp, "v8M SAU #regions invalid %" PRIu32, nr);
924             return;
925         }
926 
927         if (nr) {
928             env->sau.rbar = g_new0(uint32_t, nr);
929             env->sau.rlar = g_new0(uint32_t, nr);
930         }
931     }
932 
933     if (arm_feature(env, ARM_FEATURE_EL3)) {
934         set_feature(env, ARM_FEATURE_VBAR);
935     }
936 
937     register_cp_regs_for_features(cpu);
938     arm_cpu_register_gdb_regs_for_features(cpu);
939 
940     init_cpreg_list(cpu);
941 
942 #ifndef CONFIG_USER_ONLY
943     if (cpu->has_el3 || arm_feature(env, ARM_FEATURE_M_SECURITY)) {
944         cs->num_ases = 2;
945 
946         if (!cpu->secure_memory) {
947             cpu->secure_memory = cs->memory;
948         }
949         cpu_address_space_init(cs, ARMASIdx_S, "cpu-secure-memory",
950                                cpu->secure_memory);
951     } else {
952         cs->num_ases = 1;
953     }
954     cpu_address_space_init(cs, ARMASIdx_NS, "cpu-memory", cs->memory);
955 
956     /* No core_count specified, default to smp_cpus. */
957     if (cpu->core_count == -1) {
958         cpu->core_count = smp_cpus;
959     }
960 #endif
961 
962     qemu_init_vcpu(cs);
963     cpu_reset(cs);
964 
965     acc->parent_realize(dev, errp);
966 }
967 
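/* Map a -cpu model name onto its QOM class: the ARM CPU type suffix is
 * appended to the name (so, for example, "cortex-a15" resolves to the type
 * registered as ARM_CPU_TYPE_NAME("cortex-a15")), and an unknown or
 * abstract type yields NULL.
 */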
968 static ObjectClass *arm_cpu_class_by_name(const char *cpu_model)
969 {
970     ObjectClass *oc;
971     char *typename;
972     char **cpuname;
973     const char *cpunamestr;
974 
975     cpuname = g_strsplit(cpu_model, ",", 1);
976     cpunamestr = cpuname[0];
977 #ifdef CONFIG_USER_ONLY
978     /* For backwards compatibility usermode emulation allows "-cpu any",
979      * which has the same semantics as "-cpu max".
980      */
981     if (!strcmp(cpunamestr, "any")) {
982         cpunamestr = "max";
983     }
984 #endif
985     typename = g_strdup_printf(ARM_CPU_TYPE_NAME("%s"), cpunamestr);
986     oc = object_class_by_name(typename);
987     g_strfreev(cpuname);
988     g_free(typename);
989     if (!oc || !object_class_dynamic_cast(oc, TYPE_ARM_CPU) ||
990         object_class_is_abstract(oc)) {
991         return NULL;
992     }
993     return oc;
994 }
995 
996 /* CPU models. These are not needed for the AArch64 linux-user build. */
997 #if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
998 
999 static void arm926_initfn(Object *obj)
1000 {
1001     ARMCPU *cpu = ARM_CPU(obj);
1002 
1003     cpu->dtb_compatible = "arm,arm926";
1004     set_feature(&cpu->env, ARM_FEATURE_V5);
1005     set_feature(&cpu->env, ARM_FEATURE_VFP);
1006     set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1007     set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN);
1008     set_feature(&cpu->env, ARM_FEATURE_JAZELLE);
1009     cpu->midr = 0x41069265;
1010     cpu->reset_fpsid = 0x41011090;
1011     cpu->ctr = 0x1dd20d2;
1012     cpu->reset_sctlr = 0x00090078;
1013 }
1014 
1015 static void arm946_initfn(Object *obj)
1016 {
1017     ARMCPU *cpu = ARM_CPU(obj);
1018 
1019     cpu->dtb_compatible = "arm,arm946";
1020     set_feature(&cpu->env, ARM_FEATURE_V5);
1021     set_feature(&cpu->env, ARM_FEATURE_PMSA);
1022     set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1023     cpu->midr = 0x41059461;
1024     cpu->ctr = 0x0f004006;
1025     cpu->reset_sctlr = 0x00000078;
1026 }
1027 
1028 static void arm1026_initfn(Object *obj)
1029 {
1030     ARMCPU *cpu = ARM_CPU(obj);
1031 
1032     cpu->dtb_compatible = "arm,arm1026";
1033     set_feature(&cpu->env, ARM_FEATURE_V5);
1034     set_feature(&cpu->env, ARM_FEATURE_VFP);
1035     set_feature(&cpu->env, ARM_FEATURE_AUXCR);
1036     set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1037     set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN);
1038     set_feature(&cpu->env, ARM_FEATURE_JAZELLE);
1039     cpu->midr = 0x4106a262;
1040     cpu->reset_fpsid = 0x410110a0;
1041     cpu->ctr = 0x1dd20d2;
1042     cpu->reset_sctlr = 0x00090078;
1043     cpu->reset_auxcr = 1;
1044     {
1045         /* The 1026 had an IFAR at c6,c0,0,1 rather than the ARMv6 c6,c0,0,2 */
1046         ARMCPRegInfo ifar = {
1047             .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
1048             .access = PL1_RW,
1049             .fieldoffset = offsetof(CPUARMState, cp15.ifar_ns),
1050             .resetvalue = 0
1051         };
1052         define_one_arm_cp_reg(cpu, &ifar);
1053     }
1054 }
1055 
1056 static void arm1136_r2_initfn(Object *obj)
1057 {
1058     ARMCPU *cpu = ARM_CPU(obj);
1059     /* What QEMU calls "arm1136_r2" is actually the 1136 r0p2, i.e. an
1060      * older core than plain "arm1136". In particular this does not
1061      * have the v6K features.
1062      * These ID register values are correct for 1136 but may be wrong
1063      * for 1136_r2 (in particular r0p2 does not actually implement most
1064      * of the ID registers).
1065      */
1066 
1067     cpu->dtb_compatible = "arm,arm1136";
1068     set_feature(&cpu->env, ARM_FEATURE_V6);
1069     set_feature(&cpu->env, ARM_FEATURE_VFP);
1070     set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1071     set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG);
1072     set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
1073     cpu->midr = 0x4107b362;
1074     cpu->reset_fpsid = 0x410120b4;
1075     cpu->mvfr0 = 0x11111111;
1076     cpu->mvfr1 = 0x00000000;
1077     cpu->ctr = 0x1dd20d2;
1078     cpu->reset_sctlr = 0x00050078;
1079     cpu->id_pfr0 = 0x111;
1080     cpu->id_pfr1 = 0x1;
1081     cpu->id_dfr0 = 0x2;
1082     cpu->id_afr0 = 0x3;
1083     cpu->id_mmfr0 = 0x01130003;
1084     cpu->id_mmfr1 = 0x10030302;
1085     cpu->id_mmfr2 = 0x01222110;
1086     cpu->id_isar0 = 0x00140011;
1087     cpu->id_isar1 = 0x12002111;
1088     cpu->id_isar2 = 0x11231111;
1089     cpu->id_isar3 = 0x01102131;
1090     cpu->id_isar4 = 0x141;
1091     cpu->reset_auxcr = 7;
1092 }
1093 
1094 static void arm1136_initfn(Object *obj)
1095 {
1096     ARMCPU *cpu = ARM_CPU(obj);
1097 
1098     cpu->dtb_compatible = "arm,arm1136";
1099     set_feature(&cpu->env, ARM_FEATURE_V6K);
1100     set_feature(&cpu->env, ARM_FEATURE_V6);
1101     set_feature(&cpu->env, ARM_FEATURE_VFP);
1102     set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1103     set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG);
1104     set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
1105     cpu->midr = 0x4117b363;
1106     cpu->reset_fpsid = 0x410120b4;
1107     cpu->mvfr0 = 0x11111111;
1108     cpu->mvfr1 = 0x00000000;
1109     cpu->ctr = 0x1dd20d2;
1110     cpu->reset_sctlr = 0x00050078;
1111     cpu->id_pfr0 = 0x111;
1112     cpu->id_pfr1 = 0x1;
1113     cpu->id_dfr0 = 0x2;
1114     cpu->id_afr0 = 0x3;
1115     cpu->id_mmfr0 = 0x01130003;
1116     cpu->id_mmfr1 = 0x10030302;
1117     cpu->id_mmfr2 = 0x01222110;
1118     cpu->id_isar0 = 0x00140011;
1119     cpu->id_isar1 = 0x12002111;
1120     cpu->id_isar2 = 0x11231111;
1121     cpu->id_isar3 = 0x01102131;
1122     cpu->id_isar4 = 0x141;
1123     cpu->reset_auxcr = 7;
1124 }
1125 
1126 static void arm1176_initfn(Object *obj)
1127 {
1128     ARMCPU *cpu = ARM_CPU(obj);
1129 
1130     cpu->dtb_compatible = "arm,arm1176";
1131     set_feature(&cpu->env, ARM_FEATURE_V6K);
1132     set_feature(&cpu->env, ARM_FEATURE_VFP);
1133     set_feature(&cpu->env, ARM_FEATURE_VAPA);
1134     set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1135     set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG);
1136     set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
1137     set_feature(&cpu->env, ARM_FEATURE_EL3);
1138     cpu->midr = 0x410fb767;
1139     cpu->reset_fpsid = 0x410120b5;
1140     cpu->mvfr0 = 0x11111111;
1141     cpu->mvfr1 = 0x00000000;
1142     cpu->ctr = 0x1dd20d2;
1143     cpu->reset_sctlr = 0x00050078;
1144     cpu->id_pfr0 = 0x111;
1145     cpu->id_pfr1 = 0x11;
1146     cpu->id_dfr0 = 0x33;
1147     cpu->id_afr0 = 0;
1148     cpu->id_mmfr0 = 0x01130003;
1149     cpu->id_mmfr1 = 0x10030302;
1150     cpu->id_mmfr2 = 0x01222100;
1151     cpu->id_isar0 = 0x0140011;
1152     cpu->id_isar1 = 0x12002111;
1153     cpu->id_isar2 = 0x11231121;
1154     cpu->id_isar3 = 0x01102131;
1155     cpu->id_isar4 = 0x01141;
1156     cpu->reset_auxcr = 7;
1157 }
1158 
1159 static void arm11mpcore_initfn(Object *obj)
1160 {
1161     ARMCPU *cpu = ARM_CPU(obj);
1162 
1163     cpu->dtb_compatible = "arm,arm11mpcore";
1164     set_feature(&cpu->env, ARM_FEATURE_V6K);
1165     set_feature(&cpu->env, ARM_FEATURE_VFP);
1166     set_feature(&cpu->env, ARM_FEATURE_VAPA);
1167     set_feature(&cpu->env, ARM_FEATURE_MPIDR);
1168     set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1169     cpu->midr = 0x410fb022;
1170     cpu->reset_fpsid = 0x410120b4;
1171     cpu->mvfr0 = 0x11111111;
1172     cpu->mvfr1 = 0x00000000;
1173     cpu->ctr = 0x1d192992; /* 32K icache 32K dcache */
1174     cpu->id_pfr0 = 0x111;
1175     cpu->id_pfr1 = 0x1;
1176     cpu->id_dfr0 = 0;
1177     cpu->id_afr0 = 0x2;
1178     cpu->id_mmfr0 = 0x01100103;
1179     cpu->id_mmfr1 = 0x10020302;
1180     cpu->id_mmfr2 = 0x01222000;
1181     cpu->id_isar0 = 0x00100011;
1182     cpu->id_isar1 = 0x12002111;
1183     cpu->id_isar2 = 0x11221011;
1184     cpu->id_isar3 = 0x01102131;
1185     cpu->id_isar4 = 0x141;
1186     cpu->reset_auxcr = 1;
1187 }
1188 
1189 static void cortex_m3_initfn(Object *obj)
1190 {
1191     ARMCPU *cpu = ARM_CPU(obj);
1192     set_feature(&cpu->env, ARM_FEATURE_V7);
1193     set_feature(&cpu->env, ARM_FEATURE_M);
1194     cpu->midr = 0x410fc231;
1195     cpu->pmsav7_dregion = 8;
1196     cpu->id_pfr0 = 0x00000030;
1197     cpu->id_pfr1 = 0x00000200;
1198     cpu->id_dfr0 = 0x00100000;
1199     cpu->id_afr0 = 0x00000000;
1200     cpu->id_mmfr0 = 0x00000030;
1201     cpu->id_mmfr1 = 0x00000000;
1202     cpu->id_mmfr2 = 0x00000000;
1203     cpu->id_mmfr3 = 0x00000000;
1204     cpu->id_isar0 = 0x01141110;
1205     cpu->id_isar1 = 0x02111000;
1206     cpu->id_isar2 = 0x21112231;
1207     cpu->id_isar3 = 0x01111110;
1208     cpu->id_isar4 = 0x01310102;
1209     cpu->id_isar5 = 0x00000000;
1210 }
1211 
1212 static void cortex_m4_initfn(Object *obj)
1213 {
1214     ARMCPU *cpu = ARM_CPU(obj);
1215 
1216     set_feature(&cpu->env, ARM_FEATURE_V7);
1217     set_feature(&cpu->env, ARM_FEATURE_M);
1218     set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP);
1219     cpu->midr = 0x410fc240; /* r0p0 */
1220     cpu->pmsav7_dregion = 8;
1221     cpu->id_pfr0 = 0x00000030;
1222     cpu->id_pfr1 = 0x00000200;
1223     cpu->id_dfr0 = 0x00100000;
1224     cpu->id_afr0 = 0x00000000;
1225     cpu->id_mmfr0 = 0x00000030;
1226     cpu->id_mmfr1 = 0x00000000;
1227     cpu->id_mmfr2 = 0x00000000;
1228     cpu->id_mmfr3 = 0x00000000;
1229     cpu->id_isar0 = 0x01141110;
1230     cpu->id_isar1 = 0x02111000;
1231     cpu->id_isar2 = 0x21112231;
1232     cpu->id_isar3 = 0x01111110;
1233     cpu->id_isar4 = 0x01310102;
1234     cpu->id_isar5 = 0x00000000;
1235 }
1236 
1237 static void cortex_m33_initfn(Object *obj)
1238 {
1239     ARMCPU *cpu = ARM_CPU(obj);
1240 
1241     set_feature(&cpu->env, ARM_FEATURE_V8);
1242     set_feature(&cpu->env, ARM_FEATURE_M);
1243     set_feature(&cpu->env, ARM_FEATURE_M_SECURITY);
1244     set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP);
1245     cpu->midr = 0x410fd213; /* r0p3 */
1246     cpu->pmsav7_dregion = 16;
1247     cpu->sau_sregion = 8;
1248     cpu->id_pfr0 = 0x00000030;
1249     cpu->id_pfr1 = 0x00000210;
1250     cpu->id_dfr0 = 0x00200000;
1251     cpu->id_afr0 = 0x00000000;
1252     cpu->id_mmfr0 = 0x00101F40;
1253     cpu->id_mmfr1 = 0x00000000;
1254     cpu->id_mmfr2 = 0x01000000;
1255     cpu->id_mmfr3 = 0x00000000;
1256     cpu->id_isar0 = 0x01101110;
1257     cpu->id_isar1 = 0x02212000;
1258     cpu->id_isar2 = 0x20232232;
1259     cpu->id_isar3 = 0x01111131;
1260     cpu->id_isar4 = 0x01310132;
1261     cpu->id_isar5 = 0x00000000;
1262     cpu->clidr = 0x00000000;
1263     cpu->ctr = 0x8000c000;
1264 }
1265 
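/* Class init shared by all M-profile CPUs: install the v7M exception
 * entry path and the NVIC-driven interrupt acceptance check in place of
 * the A/R-profile handlers set up by the base class.
 */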
1266 static void arm_v7m_class_init(ObjectClass *oc, void *data)
1267 {
1268     CPUClass *cc = CPU_CLASS(oc);
1269 
1270 #ifndef CONFIG_USER_ONLY
1271     cc->do_interrupt = arm_v7m_cpu_do_interrupt;
1272 #endif
1273 
1274     cc->cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt;
1275 }
1276 
1277 static const ARMCPRegInfo cortexr5_cp_reginfo[] = {
1278     /* Dummy the TCM region regs for the moment */
1279     { .name = "ATCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
1280       .access = PL1_RW, .type = ARM_CP_CONST },
1281     { .name = "BTCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
1282       .access = PL1_RW, .type = ARM_CP_CONST },
1283     { .name = "DCACHE_INVAL", .cp = 15, .opc1 = 0, .crn = 15, .crm = 5,
1284       .opc2 = 0, .access = PL1_W, .type = ARM_CP_NOP },
1285     REGINFO_SENTINEL
1286 };
1287 
1288 static void cortex_r5_initfn(Object *obj)
1289 {
1290     ARMCPU *cpu = ARM_CPU(obj);
1291 
1292     set_feature(&cpu->env, ARM_FEATURE_V7);
1293     set_feature(&cpu->env, ARM_FEATURE_THUMB_DIV);
1294     set_feature(&cpu->env, ARM_FEATURE_ARM_DIV);
1295     set_feature(&cpu->env, ARM_FEATURE_V7MP);
1296     set_feature(&cpu->env, ARM_FEATURE_PMSA);
1297     cpu->midr = 0x411fc153; /* r1p3 */
1298     cpu->id_pfr0 = 0x0131;
1299     cpu->id_pfr1 = 0x001;
1300     cpu->id_dfr0 = 0x010400;
1301     cpu->id_afr0 = 0x0;
1302     cpu->id_mmfr0 = 0x0210030;
1303     cpu->id_mmfr1 = 0x00000000;
1304     cpu->id_mmfr2 = 0x01200000;
1305     cpu->id_mmfr3 = 0x0211;
1306     cpu->id_isar0 = 0x2101111;
1307     cpu->id_isar1 = 0x13112111;
1308     cpu->id_isar2 = 0x21232141;
1309     cpu->id_isar3 = 0x01112131;
1310     cpu->id_isar4 = 0x0010142;
1311     cpu->id_isar5 = 0x0;
1312     cpu->mp_is_up = true;
1313     cpu->pmsav7_dregion = 16;
1314     define_arm_cp_regs(cpu, cortexr5_cp_reginfo);
1315 }
1316 
1317 static const ARMCPRegInfo cortexa8_cp_reginfo[] = {
1318     { .name = "L2LOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 0,
1319       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
1320     { .name = "L2AUXCR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 2,
1321       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
1322     REGINFO_SENTINEL
1323 };
1324 
1325 static void cortex_a8_initfn(Object *obj)
1326 {
1327     ARMCPU *cpu = ARM_CPU(obj);
1328 
1329     cpu->dtb_compatible = "arm,cortex-a8";
1330     set_feature(&cpu->env, ARM_FEATURE_V7);
1331     set_feature(&cpu->env, ARM_FEATURE_VFP3);
1332     set_feature(&cpu->env, ARM_FEATURE_NEON);
1333     set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
1334     set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1335     set_feature(&cpu->env, ARM_FEATURE_EL3);
1336     cpu->midr = 0x410fc080;
1337     cpu->reset_fpsid = 0x410330c0;
1338     cpu->mvfr0 = 0x11110222;
1339     cpu->mvfr1 = 0x00011111;
1340     cpu->ctr = 0x82048004;
1341     cpu->reset_sctlr = 0x00c50078;
1342     cpu->id_pfr0 = 0x1031;
1343     cpu->id_pfr1 = 0x11;
1344     cpu->id_dfr0 = 0x400;
1345     cpu->id_afr0 = 0;
1346     cpu->id_mmfr0 = 0x31100003;
1347     cpu->id_mmfr1 = 0x20000000;
1348     cpu->id_mmfr2 = 0x01202000;
1349     cpu->id_mmfr3 = 0x11;
1350     cpu->id_isar0 = 0x00101111;
1351     cpu->id_isar1 = 0x12112111;
1352     cpu->id_isar2 = 0x21232031;
1353     cpu->id_isar3 = 0x11112131;
1354     cpu->id_isar4 = 0x00111142;
1355     cpu->dbgdidr = 0x15141000;
1356     cpu->clidr = (1 << 27) | (2 << 24) | 3;
1357     cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */
1358     cpu->ccsidr[1] = 0x2007e01a; /* 16k L1 icache. */
1359     cpu->ccsidr[2] = 0xf0000000; /* No L2 icache. */
1360     cpu->reset_auxcr = 2;
1361     define_arm_cp_regs(cpu, cortexa8_cp_reginfo);
1362 }
1363 
1364 static const ARMCPRegInfo cortexa9_cp_reginfo[] = {
1365     /* power_control should be set to maximum latency. Again, we default
1366      * it to 0 and rely on the private hook to set the real value.
1367      */
1368     { .name = "A9_PWRCTL", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
1369       .access = PL1_RW, .resetvalue = 0,
1370       .fieldoffset = offsetof(CPUARMState, cp15.c15_power_control) },
1371     { .name = "A9_DIAG", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 1,
1372       .access = PL1_RW, .resetvalue = 0,
1373       .fieldoffset = offsetof(CPUARMState, cp15.c15_diagnostic) },
1374     { .name = "A9_PWRDIAG", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 2,
1375       .access = PL1_RW, .resetvalue = 0,
1376       .fieldoffset = offsetof(CPUARMState, cp15.c15_power_diagnostic) },
1377     { .name = "NEONBUSY", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
1378       .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
1379     /* TLB lockdown control */
1380     { .name = "TLB_LOCKR", .cp = 15, .crn = 15, .crm = 4, .opc1 = 5, .opc2 = 2,
1381       .access = PL1_W, .resetvalue = 0, .type = ARM_CP_NOP },
1382     { .name = "TLB_LOCKW", .cp = 15, .crn = 15, .crm = 4, .opc1 = 5, .opc2 = 4,
1383       .access = PL1_W, .resetvalue = 0, .type = ARM_CP_NOP },
1384     { .name = "TLB_VA", .cp = 15, .crn = 15, .crm = 5, .opc1 = 5, .opc2 = 2,
1385       .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
1386     { .name = "TLB_PA", .cp = 15, .crn = 15, .crm = 6, .opc1 = 5, .opc2 = 2,
1387       .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
1388     { .name = "TLB_ATTR", .cp = 15, .crn = 15, .crm = 7, .opc1 = 5, .opc2 = 2,
1389       .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
1390     REGINFO_SENTINEL
1391 };
1392 
1393 static void cortex_a9_initfn(Object *obj)
1394 {
1395     ARMCPU *cpu = ARM_CPU(obj);
1396 
1397     cpu->dtb_compatible = "arm,cortex-a9";
1398     set_feature(&cpu->env, ARM_FEATURE_V7);
1399     set_feature(&cpu->env, ARM_FEATURE_VFP3);
1400     set_feature(&cpu->env, ARM_FEATURE_VFP_FP16);
1401     set_feature(&cpu->env, ARM_FEATURE_NEON);
1402     set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
1403     set_feature(&cpu->env, ARM_FEATURE_EL3);
1404     /* Note that A9 supports the MP extensions even for
1405      * A9UP and single-core A9MP (which are both different
1406      * and valid configurations; we don't model A9UP).
1407      */
1408     set_feature(&cpu->env, ARM_FEATURE_V7MP);
1409     set_feature(&cpu->env, ARM_FEATURE_CBAR);
1410     cpu->midr = 0x410fc090;
1411     cpu->reset_fpsid = 0x41033090;
1412     cpu->mvfr0 = 0x11110222;
1413     cpu->mvfr1 = 0x01111111;
1414     cpu->ctr = 0x80038003;
1415     cpu->reset_sctlr = 0x00c50078;
1416     cpu->id_pfr0 = 0x1031;
1417     cpu->id_pfr1 = 0x11;
1418     cpu->id_dfr0 = 0x000;
1419     cpu->id_afr0 = 0;
1420     cpu->id_mmfr0 = 0x00100103;
1421     cpu->id_mmfr1 = 0x20000000;
1422     cpu->id_mmfr2 = 0x01230000;
1423     cpu->id_mmfr3 = 0x00002111;
1424     cpu->id_isar0 = 0x00101111;
1425     cpu->id_isar1 = 0x13112111;
1426     cpu->id_isar2 = 0x21232041;
1427     cpu->id_isar3 = 0x11112131;
1428     cpu->id_isar4 = 0x00111142;
1429     cpu->dbgdidr = 0x35141000;
1430     cpu->clidr = (1 << 27) | (1 << 24) | 3;
1431     cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */
1432     cpu->ccsidr[1] = 0x200fe019; /* 16k L1 icache. */
1433     define_arm_cp_regs(cpu, cortexa9_cp_reginfo);
1434 }
1435 
1436 #ifndef CONFIG_USER_ONLY
1437 static uint64_t a15_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1438 {
1439     /* Linux wants the number of processors from here.
1440      * Might as well set the interrupt-controller bit too.
1441      */
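    /* For example, a 4-CPU configuration reads back 0x03800000: the
     * CPU-count field at bit 24 holds (smp_cpus - 1) == 3 and the
     * interrupt-controller bit (bit 23) is set.
     */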
1442     return ((smp_cpus - 1) << 24) | (1 << 23);
1443 }
1444 #endif
1445 
1446 static const ARMCPRegInfo cortexa15_cp_reginfo[] = {
1447 #ifndef CONFIG_USER_ONLY
1448     { .name = "L2CTLR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 2,
1449       .access = PL1_RW, .resetvalue = 0, .readfn = a15_l2ctlr_read,
1450       .writefn = arm_cp_write_ignore, },
1451 #endif
1452     { .name = "L2ECTLR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 3,
1453       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
1454     REGINFO_SENTINEL
1455 };
1456 
1457 static void cortex_a7_initfn(Object *obj)
1458 {
1459     ARMCPU *cpu = ARM_CPU(obj);
1460 
1461     cpu->dtb_compatible = "arm,cortex-a7";
1462     set_feature(&cpu->env, ARM_FEATURE_V7);
1463     set_feature(&cpu->env, ARM_FEATURE_VFP4);
1464     set_feature(&cpu->env, ARM_FEATURE_NEON);
1465     set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
1466     set_feature(&cpu->env, ARM_FEATURE_ARM_DIV);
1467     set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
1468     set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1469     set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
1470     set_feature(&cpu->env, ARM_FEATURE_LPAE);
1471     set_feature(&cpu->env, ARM_FEATURE_EL3);
1472     cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A7;
1473     cpu->midr = 0x410fc075;
1474     cpu->reset_fpsid = 0x41023075;
1475     cpu->mvfr0 = 0x10110222;
1476     cpu->mvfr1 = 0x11111111;
1477     cpu->ctr = 0x84448003;
1478     cpu->reset_sctlr = 0x00c50078;
1479     cpu->id_pfr0 = 0x00001131;
1480     cpu->id_pfr1 = 0x00011011;
1481     cpu->id_dfr0 = 0x02010555;
1482     cpu->pmceid0 = 0x00000000;
1483     cpu->pmceid1 = 0x00000000;
1484     cpu->id_afr0 = 0x00000000;
1485     cpu->id_mmfr0 = 0x10101105;
1486     cpu->id_mmfr1 = 0x40000000;
1487     cpu->id_mmfr2 = 0x01240000;
1488     cpu->id_mmfr3 = 0x02102211;
1489     cpu->id_isar0 = 0x01101110;
1490     cpu->id_isar1 = 0x13112111;
1491     cpu->id_isar2 = 0x21232041;
1492     cpu->id_isar3 = 0x11112131;
1493     cpu->id_isar4 = 0x10011142;
1494     cpu->dbgdidr = 0x3515f005;
1495     cpu->clidr = 0x0a200023;
1496     cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
1497     cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
1498     cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */
1499     define_arm_cp_regs(cpu, cortexa15_cp_reginfo); /* Same as A15 */
1500 }
1501 
1502 static void cortex_a15_initfn(Object *obj)
1503 {
1504     ARMCPU *cpu = ARM_CPU(obj);
1505 
1506     cpu->dtb_compatible = "arm,cortex-a15";
1507     set_feature(&cpu->env, ARM_FEATURE_V7);
1508     set_feature(&cpu->env, ARM_FEATURE_VFP4);
1509     set_feature(&cpu->env, ARM_FEATURE_NEON);
1510     set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
1511     set_feature(&cpu->env, ARM_FEATURE_ARM_DIV);
1512     set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
1513     set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1514     set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
1515     set_feature(&cpu->env, ARM_FEATURE_LPAE);
1516     set_feature(&cpu->env, ARM_FEATURE_EL3);
1517     cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A15;
1518     cpu->midr = 0x412fc0f1;
1519     cpu->reset_fpsid = 0x410430f0;
1520     cpu->mvfr0 = 0x10110222;
1521     cpu->mvfr1 = 0x11111111;
1522     cpu->ctr = 0x8444c004;
1523     cpu->reset_sctlr = 0x00c50078;
1524     cpu->id_pfr0 = 0x00001131;
1525     cpu->id_pfr1 = 0x00011011;
1526     cpu->id_dfr0 = 0x02010555;
1527     cpu->pmceid0 = 0x00000000;
1528     cpu->pmceid1 = 0x00000000;
1529     cpu->id_afr0 = 0x00000000;
1530     cpu->id_mmfr0 = 0x10201105;
1531     cpu->id_mmfr1 = 0x20000000;
1532     cpu->id_mmfr2 = 0x01240000;
1533     cpu->id_mmfr3 = 0x02102211;
1534     cpu->id_isar0 = 0x02101110;
1535     cpu->id_isar1 = 0x13112111;
1536     cpu->id_isar2 = 0x21232041;
1537     cpu->id_isar3 = 0x11112131;
1538     cpu->id_isar4 = 0x10011142;
1539     cpu->dbgdidr = 0x3515f021;
1540     cpu->clidr = 0x0a200023;
1541     cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
1542     cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
1543     cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */
1544     define_arm_cp_regs(cpu, cortexa15_cp_reginfo);
1545 }
1546 
1547 static void ti925t_initfn(Object *obj)
1548 {
1549     ARMCPU *cpu = ARM_CPU(obj);
1550     set_feature(&cpu->env, ARM_FEATURE_V4T);
1551     set_feature(&cpu->env, ARM_FEATURE_OMAPCP);
1552     cpu->midr = ARM_CPUID_TI925T;
1553     cpu->ctr = 0x5109149;
1554     cpu->reset_sctlr = 0x00000070;
1555 }
1556 
1557 static void sa1100_initfn(Object *obj)
1558 {
1559     ARMCPU *cpu = ARM_CPU(obj);
1560 
1561     cpu->dtb_compatible = "intel,sa1100";
1562     set_feature(&cpu->env, ARM_FEATURE_STRONGARM);
1563     set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1564     cpu->midr = 0x4401A11B;
1565     cpu->reset_sctlr = 0x00000070;
1566 }
1567 
1568 static void sa1110_initfn(Object *obj)
1569 {
1570     ARMCPU *cpu = ARM_CPU(obj);
1571     set_feature(&cpu->env, ARM_FEATURE_STRONGARM);
1572     set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1573     cpu->midr = 0x6901B119;
1574     cpu->reset_sctlr = 0x00000070;
1575 }
1576 
1577 static void pxa250_initfn(Object *obj)
1578 {
1579     ARMCPU *cpu = ARM_CPU(obj);
1580 
1581     cpu->dtb_compatible = "marvell,xscale";
1582     set_feature(&cpu->env, ARM_FEATURE_V5);
1583     set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1584     cpu->midr = 0x69052100;
1585     cpu->ctr = 0xd172172;
1586     cpu->reset_sctlr = 0x00000078;
1587 }
1588 
1589 static void pxa255_initfn(Object *obj)
1590 {
1591     ARMCPU *cpu = ARM_CPU(obj);
1592 
1593     cpu->dtb_compatible = "marvell,xscale";
1594     set_feature(&cpu->env, ARM_FEATURE_V5);
1595     set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1596     cpu->midr = 0x69052d00;
1597     cpu->ctr = 0xd172172;
1598     cpu->reset_sctlr = 0x00000078;
1599 }
1600 
1601 static void pxa260_initfn(Object *obj)
1602 {
1603     ARMCPU *cpu = ARM_CPU(obj);
1604 
1605     cpu->dtb_compatible = "marvell,xscale";
1606     set_feature(&cpu->env, ARM_FEATURE_V5);
1607     set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1608     cpu->midr = 0x69052903;
1609     cpu->ctr = 0xd172172;
1610     cpu->reset_sctlr = 0x00000078;
1611 }
1612 
1613 static void pxa261_initfn(Object *obj)
1614 {
1615     ARMCPU *cpu = ARM_CPU(obj);
1616 
1617     cpu->dtb_compatible = "marvell,xscale";
1618     set_feature(&cpu->env, ARM_FEATURE_V5);
1619     set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1620     cpu->midr = 0x69052d05;
1621     cpu->ctr = 0xd172172;
1622     cpu->reset_sctlr = 0x00000078;
1623 }
1624 
1625 static void pxa262_initfn(Object *obj)
1626 {
1627     ARMCPU *cpu = ARM_CPU(obj);
1628 
1629     cpu->dtb_compatible = "marvell,xscale";
1630     set_feature(&cpu->env, ARM_FEATURE_V5);
1631     set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1632     cpu->midr = 0x69052d06;
1633     cpu->ctr = 0xd172172;
1634     cpu->reset_sctlr = 0x00000078;
1635 }
1636 
1637 static void pxa270a0_initfn(Object *obj)
1638 {
1639     ARMCPU *cpu = ARM_CPU(obj);
1640 
1641     cpu->dtb_compatible = "marvell,xscale";
1642     set_feature(&cpu->env, ARM_FEATURE_V5);
1643     set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1644     set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
1645     cpu->midr = 0x69054110;
1646     cpu->ctr = 0xd172172;
1647     cpu->reset_sctlr = 0x00000078;
1648 }
1649 
1650 static void pxa270a1_initfn(Object *obj)
1651 {
1652     ARMCPU *cpu = ARM_CPU(obj);
1653 
1654     cpu->dtb_compatible = "marvell,xscale";
1655     set_feature(&cpu->env, ARM_FEATURE_V5);
1656     set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1657     set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
1658     cpu->midr = 0x69054111;
1659     cpu->ctr = 0xd172172;
1660     cpu->reset_sctlr = 0x00000078;
1661 }
1662 
1663 static void pxa270b0_initfn(Object *obj)
1664 {
1665     ARMCPU *cpu = ARM_CPU(obj);
1666 
1667     cpu->dtb_compatible = "marvell,xscale";
1668     set_feature(&cpu->env, ARM_FEATURE_V5);
1669     set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1670     set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
1671     cpu->midr = 0x69054112;
1672     cpu->ctr = 0xd172172;
1673     cpu->reset_sctlr = 0x00000078;
1674 }
1675 
1676 static void pxa270b1_initfn(Object *obj)
1677 {
1678     ARMCPU *cpu = ARM_CPU(obj);
1679 
1680     cpu->dtb_compatible = "marvell,xscale";
1681     set_feature(&cpu->env, ARM_FEATURE_V5);
1682     set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1683     set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
1684     cpu->midr = 0x69054113;
1685     cpu->ctr = 0xd172172;
1686     cpu->reset_sctlr = 0x00000078;
1687 }
1688 
1689 static void pxa270c0_initfn(Object *obj)
1690 {
1691     ARMCPU *cpu = ARM_CPU(obj);
1692 
1693     cpu->dtb_compatible = "marvell,xscale";
1694     set_feature(&cpu->env, ARM_FEATURE_V5);
1695     set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1696     set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
1697     cpu->midr = 0x69054114;
1698     cpu->ctr = 0xd172172;
1699     cpu->reset_sctlr = 0x00000078;
1700 }
1701 
1702 static void pxa270c5_initfn(Object *obj)
1703 {
1704     ARMCPU *cpu = ARM_CPU(obj);
1705 
1706     cpu->dtb_compatible = "marvell,xscale";
1707     set_feature(&cpu->env, ARM_FEATURE_V5);
1708     set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1709     set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
1710     cpu->midr = 0x69054117;
1711     cpu->ctr = 0xd172172;
1712     cpu->reset_sctlr = 0x00000078;
1713 }
1714 
1715 #ifndef TARGET_AARCH64
1716 /* -cpu max: if KVM is enabled, like -cpu host (best possible with this host);
1717  * otherwise, a CPU with as many features enabled as our emulation supports.
1718  * The version of '-cpu max' for qemu-system-aarch64 is defined in cpu64.c;
1719  * this version only needs to handle the 32-bit case.
1720  */
1721 static void arm_max_initfn(Object *obj)
1722 {
1723     ARMCPU *cpu = ARM_CPU(obj);
1724 
1725     if (kvm_enabled()) {
1726         kvm_arm_set_cpu_features_from_host(cpu);
1727     } else {
1728         cortex_a15_initfn(obj);
1729 #ifdef CONFIG_USER_ONLY
1730         /* We don't set these in system emulation mode for the moment,
1731          * since we don't correctly set the ID registers to advertise them.
1732          */
1733         set_feature(&cpu->env, ARM_FEATURE_V8);
1734         set_feature(&cpu->env, ARM_FEATURE_VFP4);
1735         set_feature(&cpu->env, ARM_FEATURE_NEON);
1736         set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
1737         set_feature(&cpu->env, ARM_FEATURE_V8_AES);
1738         set_feature(&cpu->env, ARM_FEATURE_V8_SHA1);
1739         set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
1740         set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
1741         set_feature(&cpu->env, ARM_FEATURE_CRC);
1742         set_feature(&cpu->env, ARM_FEATURE_V8_RDM);
1743         set_feature(&cpu->env, ARM_FEATURE_V8_FCMA);
1744 #endif
1745     }
1746 }
1747 #endif
1748 
1749 #endif /* !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64) */
1750 
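/* One entry per supported CPU model: the name is what the user passes to
 * -cpu, initfn configures the model's features and ID registers, and the
 * optional class_init lets a model override class-level behaviour (only
 * the M-profile cores use it here).
 */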
1751 typedef struct ARMCPUInfo {
1752     const char *name;
1753     void (*initfn)(Object *obj);
1754     void (*class_init)(ObjectClass *oc, void *data);
1755 } ARMCPUInfo;
1756 
1757 static const ARMCPUInfo arm_cpus[] = {
1758 #if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
1759     { .name = "arm926",      .initfn = arm926_initfn },
1760     { .name = "arm946",      .initfn = arm946_initfn },
1761     { .name = "arm1026",     .initfn = arm1026_initfn },
1762     /* What QEMU calls "arm1136-r2" is actually the 1136 r0p2, i.e. an
1763      * older core than plain "arm1136". In particular this does not
1764      * have the v6K features.
1765      */
1766     { .name = "arm1136-r2",  .initfn = arm1136_r2_initfn },
1767     { .name = "arm1136",     .initfn = arm1136_initfn },
1768     { .name = "arm1176",     .initfn = arm1176_initfn },
1769     { .name = "arm11mpcore", .initfn = arm11mpcore_initfn },
1770     { .name = "cortex-m3",   .initfn = cortex_m3_initfn,
1771                              .class_init = arm_v7m_class_init },
1772     { .name = "cortex-m4",   .initfn = cortex_m4_initfn,
1773                              .class_init = arm_v7m_class_init },
1774     { .name = "cortex-m33",  .initfn = cortex_m33_initfn,
1775                              .class_init = arm_v7m_class_init },
1776     { .name = "cortex-r5",   .initfn = cortex_r5_initfn },
1777     { .name = "cortex-a7",   .initfn = cortex_a7_initfn },
1778     { .name = "cortex-a8",   .initfn = cortex_a8_initfn },
1779     { .name = "cortex-a9",   .initfn = cortex_a9_initfn },
1780     { .name = "cortex-a15",  .initfn = cortex_a15_initfn },
1781     { .name = "ti925t",      .initfn = ti925t_initfn },
1782     { .name = "sa1100",      .initfn = sa1100_initfn },
1783     { .name = "sa1110",      .initfn = sa1110_initfn },
1784     { .name = "pxa250",      .initfn = pxa250_initfn },
1785     { .name = "pxa255",      .initfn = pxa255_initfn },
1786     { .name = "pxa260",      .initfn = pxa260_initfn },
1787     { .name = "pxa261",      .initfn = pxa261_initfn },
1788     { .name = "pxa262",      .initfn = pxa262_initfn },
1789     /* "pxa270" is an alias for "pxa270-a0" */
1790     { .name = "pxa270",      .initfn = pxa270a0_initfn },
1791     { .name = "pxa270-a0",   .initfn = pxa270a0_initfn },
1792     { .name = "pxa270-a1",   .initfn = pxa270a1_initfn },
1793     { .name = "pxa270-b0",   .initfn = pxa270b0_initfn },
1794     { .name = "pxa270-b1",   .initfn = pxa270b1_initfn },
1795     { .name = "pxa270-c0",   .initfn = pxa270c0_initfn },
1796     { .name = "pxa270-c5",   .initfn = pxa270c5_initfn },
1797 #ifndef TARGET_AARCH64
1798     { .name = "max",         .initfn = arm_max_initfn },
1799 #endif
1800 #ifdef CONFIG_USER_ONLY
1801     { .name = "any",         .initfn = arm_max_initfn },
1802 #endif
1803 #endif
1804     { .name = NULL }
1805 };
1806 
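/* QOM/qdev properties common to all ARM CPU models; these are normally
 * set by board or machine code before the CPU is realized.
 */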
1807 static Property arm_cpu_properties[] = {
1808     DEFINE_PROP_BOOL("start-powered-off", ARMCPU, start_powered_off, false),
1809     DEFINE_PROP_UINT32("psci-conduit", ARMCPU, psci_conduit, 0),
1810     DEFINE_PROP_UINT32("midr", ARMCPU, midr, 0),
1811     DEFINE_PROP_UINT64("mp-affinity", ARMCPU,
1812                         mp_affinity, ARM64_AFFINITY_INVALID),
1813     DEFINE_PROP_INT32("node-id", ARMCPU, node_id, CPU_UNSET_NUMA_NODE_ID),
1814     DEFINE_PROP_INT32("core-count", ARMCPU, core_count, -1),
1815     DEFINE_PROP_END_OF_LIST()
1816 };
1817 
1818 #ifdef CONFIG_USER_ONLY
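/* In user-mode emulation there is no MMU to model, so every fault is
 * simply reported back to the caller: record the faulting address and
 * raise a prefetch abort for instruction fetches (rw == 2) or a data
 * abort otherwise.
 */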
1819 static int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
1820                                     int rw, int mmu_idx)
1821 {
1822     ARMCPU *cpu = ARM_CPU(cs);
1823     CPUARMState *env = &cpu->env;
1824 
1825     env->exception.vaddress = address;
1826     if (rw == 2) {
1827         cs->exception_index = EXCP_PREFETCH_ABORT;
1828     } else {
1829         cs->exception_index = EXCP_DATA_ABORT;
1830     }
1831     return 1;
1832 }
1833 #endif
1834 
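/* Architecture string reported to gdb in the target description: cores
 * with the iwMMXt extension report "iwmmxt", everything else plain "arm".
 */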
1835 static gchar *arm_gdb_arch_name(CPUState *cs)
1836 {
1837     ARMCPU *cpu = ARM_CPU(cs);
1838     CPUARMState *env = &cpu->env;
1839 
1840     if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
1841         return g_strdup("iwmmxt");
1842     }
1843     return g_strdup("arm");
1844 }
1845 
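/* Hook up the generic CPUClass/DeviceClass callbacks. User-mode builds
 * only need the simple MMU fault handler above; system emulation builds
 * additionally wire up interrupt delivery, physical-address debug lookups,
 * migration state and ELF core dump notes.
 */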
1846 static void arm_cpu_class_init(ObjectClass *oc, void *data)
1847 {
1848     ARMCPUClass *acc = ARM_CPU_CLASS(oc);
1849     CPUClass *cc = CPU_CLASS(acc);
1850     DeviceClass *dc = DEVICE_CLASS(oc);
1851 
1852     device_class_set_parent_realize(dc, arm_cpu_realizefn,
1853                                     &acc->parent_realize);
1854     dc->props = arm_cpu_properties;
1855 
1856     acc->parent_reset = cc->reset;
1857     cc->reset = arm_cpu_reset;
1858 
1859     cc->class_by_name = arm_cpu_class_by_name;
1860     cc->has_work = arm_cpu_has_work;
1861     cc->cpu_exec_interrupt = arm_cpu_exec_interrupt;
1862     cc->dump_state = arm_cpu_dump_state;
1863     cc->set_pc = arm_cpu_set_pc;
1864     cc->gdb_read_register = arm_cpu_gdb_read_register;
1865     cc->gdb_write_register = arm_cpu_gdb_write_register;
1866 #ifdef CONFIG_USER_ONLY
1867     cc->handle_mmu_fault = arm_cpu_handle_mmu_fault;
1868 #else
1869     cc->do_interrupt = arm_cpu_do_interrupt;
1870     cc->do_unaligned_access = arm_cpu_do_unaligned_access;
1871     cc->do_transaction_failed = arm_cpu_do_transaction_failed;
1872     cc->get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug;
1873     cc->asidx_from_attrs = arm_asidx_from_attrs;
1874     cc->vmsd = &vmstate_arm_cpu;
1875     cc->virtio_is_big_endian = arm_cpu_virtio_is_big_endian;
1876     cc->write_elf64_note = arm_cpu_write_elf64_note;
1877     cc->write_elf32_note = arm_cpu_write_elf32_note;
1878 #endif
1879     cc->gdb_num_core_regs = 26;
1880     cc->gdb_core_xml_file = "arm-core.xml";
1881     cc->gdb_arch_name = arm_gdb_arch_name;
1882     cc->gdb_stop_before_watchpoint = true;
1883     cc->debug_excp_handler = arm_debug_excp_handler;
1884     cc->debug_check_watchpoint = arm_debug_check_watchpoint;
1885 #if !defined(CONFIG_USER_ONLY)
1886     cc->adjust_watchpoint_address = arm_adjust_watchpoint_address;
1887 #endif
1888 
1889     cc->disas_set_info = arm_disas_set_info;
1890 #ifdef CONFIG_TCG
1891     cc->tcg_initialize = arm_translate_init;
1892 #endif
1893 }
1894 
1895 #ifdef CONFIG_KVM
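/* The "host" CPU type is only built when KVM support is compiled in; it
 * takes its feature set from the host CPU, as probed by
 * kvm_arm_set_cpu_features_from_host().
 */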
1896 static void arm_host_initfn(Object *obj)
1897 {
1898     ARMCPU *cpu = ARM_CPU(obj);
1899 
1900     kvm_arm_set_cpu_features_from_host(cpu);
1901 }
1902 
1903 static const TypeInfo host_arm_cpu_type_info = {
1904     .name = TYPE_ARM_HOST_CPU,
1905 #ifdef TARGET_AARCH64
1906     .parent = TYPE_AARCH64_CPU,
1907 #else
1908     .parent = TYPE_ARM_CPU,
1909 #endif
1910     .instance_init = arm_host_initfn,
1911 };
1912 
1913 #endif
1914 
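/* Register one arm_cpus[] entry as a QOM type derived from the abstract
 * TYPE_ARM_CPU base; the type name is the model name with the
 * TYPE_ARM_CPU suffix appended, e.g. "cortex-a15-arm-cpu".
 */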
1915 static void cpu_register(const ARMCPUInfo *info)
1916 {
1917     TypeInfo type_info = {
1918         .parent = TYPE_ARM_CPU,
1919         .instance_size = sizeof(ARMCPU),
1920         .instance_init = info->initfn,
1921         .class_size = sizeof(ARMCPUClass),
1922         .class_init = info->class_init,
1923     };
1924 
1925     type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
1926     type_register(&type_info);
1927     g_free((void *)type_info.name);
1928 }
1929 
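/* Abstract base type shared by all ARM CPU models; it is never
 * instantiated directly and only provides the common instance/class
 * init and finalize hooks.
 */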
1930 static const TypeInfo arm_cpu_type_info = {
1931     .name = TYPE_ARM_CPU,
1932     .parent = TYPE_CPU,
1933     .instance_size = sizeof(ARMCPU),
1934     .instance_init = arm_cpu_initfn,
1935     .instance_post_init = arm_cpu_post_init,
1936     .instance_finalize = arm_cpu_finalizefn,
1937     .abstract = true,
1938     .class_size = sizeof(ARMCPUClass),
1939     .class_init = arm_cpu_class_init,
1940 };
1941 
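/* Interface type through which a board or SoC can supply an IDAU
 * (Implementation Defined Attribution Unit), used by v8M CPUs to
 * determine the security attribution of addresses.
 */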
1942 static const TypeInfo idau_interface_type_info = {
1943     .name = TYPE_IDAU_INTERFACE,
1944     .parent = TYPE_INTERFACE,
1945     .class_size = sizeof(IDAUInterfaceClass),
1946 };
1947 
1948 static void arm_cpu_register_types(void)
1949 {
1950     const ARMCPUInfo *info = arm_cpus;
1951 
1952     type_register_static(&arm_cpu_type_info);
1953     type_register_static(&idau_interface_type_info);
1954 
1955     while (info->name) {
1956         cpu_register(info);
1957         info++;
1958     }
1959 
1960 #ifdef CONFIG_KVM
1961     type_register_static(&host_arm_cpu_type_info);
1962 #endif
1963 }
1964 
1965 type_init(arm_cpu_register_types)
1966