xref: /qemu/target/arm/cpu64.c (revision 7bdd67a5)
1 /*
2  * QEMU AArch64 CPU
3  *
4  * Copyright (c) 2013 Linaro Ltd
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version 2
9  * of the License, or (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, see
18  * <http://www.gnu.org/licenses/gpl-2.0.html>
19  */
20 
21 #include "qemu/osdep.h"
22 #include "qapi/error.h"
23 #include "cpu.h"
24 #include "cpregs.h"
25 #include "qemu/module.h"
26 #include "sysemu/kvm.h"
27 #include "sysemu/hvf.h"
28 #include "kvm_arm.h"
29 #include "hvf_arm.h"
30 #include "qapi/visitor.h"
31 #include "hw/qdev-properties.h"
32 #include "internals.h"
33 
/*
 * Cortex-A35 TCG model.  The ID register values are transcribed from the
 * CPU's documentation; the "From B2.x"/"From C6.x" comments below refer
 * to sections of that document.
 */
static void aarch64_a35_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a35";
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);

    /* From B2.2 AArch64 identification registers. */
    cpu->midr = 0x411fd040;
    cpu->revidr = 0;
    cpu->ctr = 0x84448004;
    cpu->isar.id_pfr0 = 0x00000131;
    cpu->isar.id_pfr1 = 0x00011011;
    cpu->isar.id_dfr0 = 0x03010066;
    cpu->id_afr0 = 0;
    cpu->isar.id_mmfr0 = 0x10201105;
    cpu->isar.id_mmfr1 = 0x40000000;
    cpu->isar.id_mmfr2 = 0x01260000;
    cpu->isar.id_mmfr3 = 0x02102211;
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232042;
    cpu->isar.id_isar3 = 0x01112131;
    cpu->isar.id_isar4 = 0x00011142;
    cpu->isar.id_isar5 = 0x00011121;
    cpu->isar.id_aa64pfr0 = 0x00002222;
    cpu->isar.id_aa64pfr1 = 0;
    cpu->isar.id_aa64dfr0 = 0x10305106;
    cpu->isar.id_aa64dfr1 = 0;
    cpu->isar.id_aa64isar0 = 0x00011120;
    cpu->isar.id_aa64isar1 = 0;
    cpu->isar.id_aa64mmfr0 = 0x00101122;
    cpu->isar.id_aa64mmfr1 = 0;
    cpu->clidr = 0x0a200023;
    cpu->dcz_blocksize = 4; /* 64 bytes, matching the A53/A57 models below */

    /* From B2.4 AArch64 Virtual Memory control registers */
    cpu->reset_sctlr = 0x00c50838;

    /* From B2.10 AArch64 performance monitor registers */
    cpu->isar.reset_pmcr_el0 = 0x410a3000;

    /* From B2.29 Cache ID registers */
    cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */
    cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */
    cpu->ccsidr[2] = 0x703fe03a; /* 512KB L2 cache */

    /* From B3.5 VGIC Type register */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    cpu->gic_pribits = 5;

    /* From C6.4 Debug ID Register */
    cpu->isar.dbgdidr = 0x3516d000;
    /* From C6.5 Debug Device ID Register */
    cpu->isar.dbgdevid = 0x00110f13;
    /* From C6.6 Debug Device ID Register 1 */
    cpu->isar.dbgdevid1 = 0x2;

    /* From Cortex-A35 SIMD and Floating-point Support r1p0 */
    /* From 3.2 AArch32 register summary */
    cpu->reset_fpsid = 0x41034043;

    /* From 2.2 AArch64 register summary */
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x12111111;
    cpu->isar.mvfr2 = 0x00000043;

    /* These values are the same with A53/A57/A72. */
    define_cortex_a72_a57_a53_cp_reginfo(cpu);
}
113 
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
{
    /*
     * If any vector lengths are explicitly enabled with sve<N> properties,
     * then all other lengths are implicitly disabled.  If sve-max-vq is
     * specified then it is the same as explicitly enabling all lengths
     * up to and including the specified maximum, which means all larger
     * lengths will be implicitly disabled.  If no sve<N> properties
     * are enabled and sve-max-vq is not specified, then all lengths not
     * explicitly disabled will be enabled.  Additionally, all power-of-two
     * vector lengths less than the maximum enabled length will be
     * automatically enabled and all vector lengths larger than the largest
     * disabled power-of-two vector length will be automatically disabled.
     * Errors are generated if the user provided input that interferes with
     * any of the above.  Finally, if SVE is not disabled, then at least one
     * vector length must be enabled.
     */
    uint32_t vq_map = cpu->sve_vq.map;       /* lengths explicitly enabled */
    uint32_t vq_init = cpu->sve_vq.init;     /* lengths the user touched */
    uint32_t vq_supported;
    uint32_t vq_mask = 0;
    uint32_t tmp, vq, max_vq = 0;

    /*
     * CPU models specify a set of supported vector lengths which are
     * enabled by default.  Attempting to enable any vector length not set
     * in the supported bitmap results in an error.  When KVM is enabled we
     * fetch the supported bitmap from the host.
     */
    if (kvm_enabled()) {
        if (kvm_arm_sve_supported()) {
            cpu->sve_vq.supported = kvm_arm_sve_get_vls(CPU(cpu));
            vq_supported = cpu->sve_vq.supported;
        } else {
            /* No host SVE: only legal if SVE is off for this CPU too. */
            assert(!cpu_isar_feature(aa64_sve, cpu));
            vq_supported = 0;
        }
    } else {
        vq_supported = cpu->sve_vq.supported;
    }

    /*
     * Process explicit sve<N> properties.
     * From the properties, sve_vq_map<N> implies sve_vq_init<N>.
     * Check first for any sve<N> enabled.
     */
    if (vq_map != 0) {
        /* Highest set bit determines the provisional maximum. */
        max_vq = 32 - clz32(vq_map);
        vq_mask = MAKE_64BIT_MASK(0, max_vq);

        if (cpu->sve_max_vq && max_vq > cpu->sve_max_vq) {
            error_setg(errp, "cannot enable sve%d", max_vq * 128);
            error_append_hint(errp, "sve%d is larger than the maximum vector "
                              "length, sve-max-vq=%d (%d bits)\n",
                              max_vq * 128, cpu->sve_max_vq,
                              cpu->sve_max_vq * 128);
            return;
        }

        if (kvm_enabled()) {
            /*
             * For KVM we have to automatically enable all supported
             * uninitialized lengths, even when the smaller lengths are
             * not all powers-of-two.
             */
            vq_map |= vq_supported & ~vq_init & vq_mask;
        } else {
            /* Propagate enabled bits down through required powers-of-two. */
            vq_map |= SVE_VQ_POW2_MAP & ~vq_init & vq_mask;
        }
    } else if (cpu->sve_max_vq == 0) {
        /*
         * No explicit bits enabled, and no implicit bits from sve-max-vq.
         */
        if (!cpu_isar_feature(aa64_sve, cpu)) {
            /* SVE is disabled and so are all vector lengths.  Good. */
            return;
        }

        if (kvm_enabled()) {
            /* Disabling a supported length disables all larger lengths. */
            tmp = vq_init & vq_supported;
        } else {
            /* Disabling a power-of-two disables all larger lengths. */
            tmp = vq_init & SVE_VQ_POW2_MAP;
        }
        /*
         * vq is the smallest user-disabled length that matters; if there
         * is none, ctz32(0) == 32 and the clamp below selects ARM_MAX_VQ.
         */
        vq = ctz32(tmp) + 1;

        max_vq = vq <= ARM_MAX_VQ ? vq - 1 : ARM_MAX_VQ;
        vq_mask = MAKE_64BIT_MASK(0, max_vq);
        vq_map = vq_supported & ~vq_init & vq_mask;

        if (max_vq == 0 || vq_map == 0) {
            error_setg(errp, "cannot disable sve%d", vq * 128);
            error_append_hint(errp, "Disabling sve%d results in all "
                              "vector lengths being disabled.\n",
                              vq * 128);
            error_append_hint(errp, "With SVE enabled, at least one "
                              "vector length must be enabled.\n");
            return;
        }

        /* Recompute the maximum from the lengths actually left enabled. */
        max_vq = 32 - clz32(vq_map);
        vq_mask = MAKE_64BIT_MASK(0, max_vq);
    }

    /*
     * Process the sve-max-vq property.
     * Note that we know from the above that no bit above
     * sve-max-vq is currently set.
     */
    if (cpu->sve_max_vq != 0) {
        max_vq = cpu->sve_max_vq;
        vq_mask = MAKE_64BIT_MASK(0, max_vq);

        if (vq_init & ~vq_map & (1 << (max_vq - 1))) {
            /* The user explicitly disabled the length sve-max-vq names. */
            error_setg(errp, "cannot disable sve%d", max_vq * 128);
            error_append_hint(errp, "The maximum vector length must be "
                              "enabled, sve-max-vq=%d (%d bits)\n",
                              max_vq, max_vq * 128);
            return;
        }

        /* Set all bits not explicitly set within sve-max-vq. */
        vq_map |= ~vq_init & vq_mask;
    }

    /*
     * We should know what max-vq is now.  Also, as we're done
     * manipulating sve-vq-map, we ensure any bits above max-vq
     * are clear, just in case anybody looks.
     */
    assert(max_vq != 0);
    assert(vq_mask != 0);
    vq_map &= vq_mask;

    /* Ensure the set of lengths matches what is supported. */
    tmp = vq_map ^ (vq_supported & vq_mask);
    if (tmp) {
        /* vq is the largest length where enabled and supported disagree. */
        vq = 32 - clz32(tmp);
        if (vq_map & (1 << (vq - 1))) {
            /* Enabled but not supported. */
            if (cpu->sve_max_vq) {
                error_setg(errp, "cannot set sve-max-vq=%d", cpu->sve_max_vq);
                error_append_hint(errp, "This CPU does not support "
                                  "the vector length %d-bits.\n", vq * 128);
                error_append_hint(errp, "It may not be possible to use "
                                  "sve-max-vq with this CPU. Try "
                                  "using only sve<N> properties.\n");
            } else {
                error_setg(errp, "cannot enable sve%d", vq * 128);
                if (vq_supported) {
                    error_append_hint(errp, "This CPU does not support "
                                      "the vector length %d-bits.\n", vq * 128);
                } else {
                    error_append_hint(errp, "SVE not supported by KVM "
                                      "on this host\n");
                }
            }
            return;
        } else {
            /* Supported but disabled. */
            if (kvm_enabled()) {
                error_setg(errp, "cannot disable sve%d", vq * 128);
                error_append_hint(errp, "The KVM host requires all "
                                  "supported vector lengths smaller "
                                  "than %d bits to also be enabled.\n",
                                  max_vq * 128);
                return;
            } else {
                /* Ensure all required powers-of-two are enabled. */
                tmp = SVE_VQ_POW2_MAP & vq_mask & ~vq_map;
                if (tmp) {
                    vq = 32 - clz32(tmp);
                    error_setg(errp, "cannot disable sve%d", vq * 128);
                    error_append_hint(errp, "sve%d is required as it "
                                      "is a power-of-two length smaller "
                                      "than the maximum, sve%d\n",
                                      vq * 128, max_vq * 128);
                    return;
                }
            }
        }
    }

    /*
     * Now that we validated all our vector lengths, the only question
     * left to answer is if we even want SVE at all.
     */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        error_setg(errp, "cannot enable sve%d", max_vq * 128);
        error_append_hint(errp, "SVE must be enabled to enable vector "
                          "lengths.\n");
        error_append_hint(errp, "Add sve=on to the CPU property list.\n");
        return;
    }

    /* From now on sve_max_vq is the actual maximum supported length. */
    cpu->sve_max_vq = max_vq;
    cpu->sve_vq.map = vq_map;
}
312 
313 static void cpu_max_get_sve_max_vq(Object *obj, Visitor *v, const char *name,
314                                    void *opaque, Error **errp)
315 {
316     ARMCPU *cpu = ARM_CPU(obj);
317     uint32_t value;
318 
319     /* All vector lengths are disabled when SVE is off. */
320     if (!cpu_isar_feature(aa64_sve, cpu)) {
321         value = 0;
322     } else {
323         value = cpu->sve_max_vq;
324     }
325     visit_type_uint32(v, name, &value, errp);
326 }
327 
328 static void cpu_max_set_sve_max_vq(Object *obj, Visitor *v, const char *name,
329                                    void *opaque, Error **errp)
330 {
331     ARMCPU *cpu = ARM_CPU(obj);
332     uint32_t max_vq;
333 
334     if (!visit_type_uint32(v, name, &max_vq, errp)) {
335         return;
336     }
337 
338     if (kvm_enabled() && !kvm_arm_sve_supported()) {
339         error_setg(errp, "cannot set sve-max-vq");
340         error_append_hint(errp, "SVE not supported by KVM on this host\n");
341         return;
342     }
343 
344     if (max_vq == 0 || max_vq > ARM_MAX_VQ) {
345         error_setg(errp, "unsupported SVE vector length");
346         error_append_hint(errp, "Valid sve-max-vq in range [1-%d]\n",
347                           ARM_MAX_VQ);
348         return;
349     }
350 
351     cpu->sve_max_vq = max_vq;
352 }
353 
354 /*
355  * Note that cpu_arm_{get,set}_vq cannot use the simpler
356  * object_property_add_bool interface because they make use of the
357  * contents of "name" to determine which bit on which to operate.
358  */
359 static void cpu_arm_get_vq(Object *obj, Visitor *v, const char *name,
360                            void *opaque, Error **errp)
361 {
362     ARMCPU *cpu = ARM_CPU(obj);
363     ARMVQMap *vq_map = opaque;
364     uint32_t vq = atoi(&name[3]) / 128;
365     bool sve = vq_map == &cpu->sve_vq;
366     bool value;
367 
368     /* All vector lengths are disabled when feature is off. */
369     if (sve
370         ? !cpu_isar_feature(aa64_sve, cpu)
371         : !cpu_isar_feature(aa64_sme, cpu)) {
372         value = false;
373     } else {
374         value = extract32(vq_map->map, vq - 1, 1);
375     }
376     visit_type_bool(v, name, &value, errp);
377 }
378 
379 static void cpu_arm_set_vq(Object *obj, Visitor *v, const char *name,
380                            void *opaque, Error **errp)
381 {
382     ARMVQMap *vq_map = opaque;
383     uint32_t vq = atoi(&name[3]) / 128;
384     bool value;
385 
386     if (!visit_type_bool(v, name, &value, errp)) {
387         return;
388     }
389 
390     vq_map->map = deposit32(vq_map->map, vq - 1, 1, value);
391     vq_map->init |= 1 << (vq - 1);
392 }
393 
394 static bool cpu_arm_get_sve(Object *obj, Error **errp)
395 {
396     ARMCPU *cpu = ARM_CPU(obj);
397     return cpu_isar_feature(aa64_sve, cpu);
398 }
399 
400 static void cpu_arm_set_sve(Object *obj, bool value, Error **errp)
401 {
402     ARMCPU *cpu = ARM_CPU(obj);
403     uint64_t t;
404 
405     if (value && kvm_enabled() && !kvm_arm_sve_supported()) {
406         error_setg(errp, "'sve' feature not supported by KVM on this host");
407         return;
408     }
409 
410     t = cpu->isar.id_aa64pfr0;
411     t = FIELD_DP64(t, ID_AA64PFR0, SVE, value);
412     cpu->isar.id_aa64pfr0 = t;
413 }
414 
/*
 * Validate and finalize the set of SME vector lengths, the SME analogue
 * of arm_cpu_sve_finalize(): explicitly enabled sme<N> lengths win; with
 * none enabled, every supported length not explicitly disabled is used.
 */
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp)
{
    uint32_t vq_map = cpu->sme_vq.map;              /* explicitly enabled */
    uint32_t vq_init = cpu->sme_vq.init;            /* touched by the user */
    uint32_t vq_supported = cpu->sme_vq.supported;
    uint32_t vq;

    if (vq_map == 0) {
        if (!cpu_isar_feature(aa64_sme, cpu)) {
            /* SME disabled: clear its feature register as well. */
            cpu->isar.id_aa64smfr0 = 0;
            return;
        }

        /* TODO: KVM will require limitations via SMCR_EL2. */
        vq_map = vq_supported & ~vq_init;

        if (vq_map == 0) {
            /* Name the smallest supported length in the error message. */
            vq = ctz32(vq_supported) + 1;
            error_setg(errp, "cannot disable sme%d", vq * 128);
            error_append_hint(errp, "All SME vector lengths are disabled.\n");
            error_append_hint(errp, "With SME enabled, at least one "
                              "vector length must be enabled.\n");
            return;
        }
    } else {
        if (!cpu_isar_feature(aa64_sme, cpu)) {
            /* Name the largest explicitly-enabled length in the error. */
            vq = 32 - clz32(vq_map);
            error_setg(errp, "cannot enable sme%d", vq * 128);
            error_append_hint(errp, "SME must be enabled to enable "
                              "vector lengths.\n");
            error_append_hint(errp, "Add sme=on to the CPU property list.\n");
            return;
        }
        /* TODO: KVM will require limitations via SMCR_EL2. */
    }

    cpu->sme_vq.map = vq_map;
}
453 
454 static bool cpu_arm_get_sme(Object *obj, Error **errp)
455 {
456     ARMCPU *cpu = ARM_CPU(obj);
457     return cpu_isar_feature(aa64_sme, cpu);
458 }
459 
460 static void cpu_arm_set_sme(Object *obj, bool value, Error **errp)
461 {
462     ARMCPU *cpu = ARM_CPU(obj);
463     uint64_t t;
464 
465     t = cpu->isar.id_aa64pfr1;
466     t = FIELD_DP64(t, ID_AA64PFR1, SME, value);
467     cpu->isar.id_aa64pfr1 = t;
468 }
469 
470 static bool cpu_arm_get_sme_fa64(Object *obj, Error **errp)
471 {
472     ARMCPU *cpu = ARM_CPU(obj);
473     return cpu_isar_feature(aa64_sme, cpu) &&
474            cpu_isar_feature(aa64_sme_fa64, cpu);
475 }
476 
477 static void cpu_arm_set_sme_fa64(Object *obj, bool value, Error **errp)
478 {
479     ARMCPU *cpu = ARM_CPU(obj);
480     uint64_t t;
481 
482     t = cpu->isar.id_aa64smfr0;
483     t = FIELD_DP64(t, ID_AA64SMFR0, FA64, value);
484     cpu->isar.id_aa64smfr0 = t;
485 }
486 
487 #ifdef CONFIG_USER_ONLY
488 /* Mirror linux /proc/sys/abi/{sve,sme}_default_vector_length. */
static void cpu_arm_set_default_vec_len(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    uint32_t *ptr_default_vq = opaque;
    int32_t default_len, default_vq, remainder;

    if (!visit_type_int32(v, name, &default_len, errp)) {
        return;
    }

    /* Undocumented, but the kernel allows -1 to indicate "maximum". */
    if (default_len == -1) {
        *ptr_default_vq = ARM_MAX_VQ;
        return;
    }

    /* The property is a byte length; the stored value is in quadwords. */
    default_vq = default_len / 16;
    remainder = default_len % 16;

    /*
     * Note that the 512 max comes from include/uapi/asm/sve_context.h
     * and is the maximum architectural width of ZCR_ELx.LEN.
     */
    if (remainder || default_vq < 1 || default_vq > 512) {
        ARMCPU *cpu = ARM_CPU(obj);
        /* One setter serves both properties; report the right name. */
        const char *which =
            (ptr_default_vq == &cpu->sve_default_vq ? "sve" : "sme");

        error_setg(errp, "cannot set %s-default-vector-length", which);
        if (remainder) {
            error_append_hint(errp, "Vector length not a multiple of 16\n");
        } else if (default_vq < 1) {
            error_append_hint(errp, "Vector length smaller than 16\n");
        } else {
            error_append_hint(errp, "Vector length larger than %d\n",
                              512 * 16);
        }
        return;
    }

    *ptr_default_vq = default_vq;
}
532 
533 static void cpu_arm_get_default_vec_len(Object *obj, Visitor *v,
534                                         const char *name, void *opaque,
535                                         Error **errp)
536 {
537     uint32_t *ptr_default_vq = opaque;
538     int32_t value = *ptr_default_vq * 16;
539 
540     visit_type_int32(v, name, &value, errp);
541 }
542 #endif
543 
544 static void aarch64_add_sve_properties(Object *obj)
545 {
546     ARMCPU *cpu = ARM_CPU(obj);
547     uint32_t vq;
548 
549     object_property_add_bool(obj, "sve", cpu_arm_get_sve, cpu_arm_set_sve);
550 
551     for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
552         char name[8];
553         sprintf(name, "sve%d", vq * 128);
554         object_property_add(obj, name, "bool", cpu_arm_get_vq,
555                             cpu_arm_set_vq, NULL, &cpu->sve_vq);
556     }
557 
558 #ifdef CONFIG_USER_ONLY
559     /* Mirror linux /proc/sys/abi/sve_default_vector_length. */
560     object_property_add(obj, "sve-default-vector-length", "int32",
561                         cpu_arm_get_default_vec_len,
562                         cpu_arm_set_default_vec_len, NULL,
563                         &cpu->sve_default_vq);
564 #endif
565 }
566 
567 static void aarch64_add_sme_properties(Object *obj)
568 {
569     ARMCPU *cpu = ARM_CPU(obj);
570     uint32_t vq;
571 
572     object_property_add_bool(obj, "sme", cpu_arm_get_sme, cpu_arm_set_sme);
573     object_property_add_bool(obj, "sme_fa64", cpu_arm_get_sme_fa64,
574                              cpu_arm_set_sme_fa64);
575 
576     for (vq = 1; vq <= ARM_MAX_VQ; vq <<= 1) {
577         char name[8];
578         sprintf(name, "sme%d", vq * 128);
579         object_property_add(obj, name, "bool", cpu_arm_get_vq,
580                             cpu_arm_set_vq, NULL, &cpu->sme_vq);
581     }
582 
583 #ifdef CONFIG_USER_ONLY
584     /* Mirror linux /proc/sys/abi/sme_default_vector_length. */
585     object_property_add(obj, "sme-default-vector-length", "int32",
586                         cpu_arm_get_default_vec_len,
587                         cpu_arm_set_default_vec_len, NULL,
588                         &cpu->sme_default_vq);
589 #endif
590 }
591 
/*
 * Fold the "pauth"/"pauth-impdef" properties back into ID_AA64ISAR1:
 * APA/GPA are set for the architected algorithm, API/GPI for the
 * IMPDEF one; all four are cleared when pauth is disabled.
 */
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
{
    int arch_val = 0, impdef_val = 0;
    uint64_t t;

    /* Exit early if PAuth is enabled, and fall through to disable it */
    if ((kvm_enabled() || hvf_enabled()) && cpu->prop_pauth) {
        if (!cpu_isar_feature(aa64_pauth, cpu)) {
            error_setg(errp, "'pauth' feature not supported by %s on this host",
                       kvm_enabled() ? "KVM" : "hvf");
        }

        return;
    }

    /* TODO: Handle HaveEnhancedPAC, HaveEnhancedPAC2, HaveFPAC. */
    if (cpu->prop_pauth) {
        if (cpu->prop_pauth_impdef) {
            impdef_val = 1;
        } else {
            arch_val = 1;
        }
    } else if (cpu->prop_pauth_impdef) {
        /* pauth-impdef only selects an algorithm; it cannot enable PAuth. */
        error_setg(errp, "cannot enable pauth-impdef without pauth");
        error_append_hint(errp, "Add pauth=on to the CPU property list.\n");
    }

    t = cpu->isar.id_aa64isar1;
    t = FIELD_DP64(t, ID_AA64ISAR1, APA, arch_val);
    t = FIELD_DP64(t, ID_AA64ISAR1, GPA, arch_val);
    t = FIELD_DP64(t, ID_AA64ISAR1, API, impdef_val);
    t = FIELD_DP64(t, ID_AA64ISAR1, GPI, impdef_val);
    cpu->isar.id_aa64isar1 = t;
}
626 
/* "pauth" defaults to on; "pauth-impdef" selects the IMPDEF algorithm. */
static Property arm_cpu_pauth_property =
    DEFINE_PROP_BOOL("pauth", ARMCPU, prop_pauth, true);
static Property arm_cpu_pauth_impdef_property =
    DEFINE_PROP_BOOL("pauth-impdef", ARMCPU, prop_pauth_impdef, false);
631 
/* Register the pointer-authentication QOM properties on @obj. */
static void aarch64_add_pauth_properties(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    /* Default to PAUTH on, with the architected algorithm on TCG. */
    qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_property);
    if (kvm_enabled() || hvf_enabled()) {
        /*
         * Mirror PAuth support from the probed sysregs back into the
         * property for KVM or hvf. Is it just a bit backward? Yes it is!
         * Note that prop_pauth is true whether the host CPU supports the
         * architected QARMA5 algorithm or the IMPDEF one. We don't
         * provide the separate pauth-impdef property for KVM or hvf,
         * only for TCG.
         */
        cpu->prop_pauth = cpu_isar_feature(aa64_pauth, cpu);
    } else {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_impdef_property);
    }
}
652 
/* "lpa2" defaults to on; see arm_cpu_lpa2_finalize() for where it applies. */
static Property arm_cpu_lpa2_property =
    DEFINE_PROP_BOOL("lpa2", ARMCPU, prop_lpa2, true);
655 
/* Apply the "lpa2" property: advertise LPA2 in ID_AA64MMFR0's TGRAN fields. */
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp)
{
    uint64_t t;

    /*
     * We only install the property for tcg -cpu max; this is the
     * only situation in which the cpu field can be true.
     */
    if (!cpu->prop_lpa2) {
        return;
    }

    t = cpu->isar.id_aa64mmfr0;
    t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16, 2);   /* 16k pages w/ LPA2 */
    t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4, 1);    /*  4k pages w/ LPA2 */
    t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16_2, 3); /* 16k stage2 w/ LPA2 */
    t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4_2, 3);  /*  4k stage2 w/ LPA2 */
    cpu->isar.id_aa64mmfr0 = t;
}
675 
/*
 * Cortex-A57 model; also selectable as a KVM target (see kvm_target
 * below).  ID register values are the documented reset values.
 */
static void aarch64_a57_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a57";
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A57;
    cpu->midr = 0x411fd070;
    cpu->revidr = 0x00000000;
    cpu->reset_fpsid = 0x41034070;
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x12111111;
    cpu->isar.mvfr2 = 0x00000043;
    cpu->ctr = 0x8444c004;
    cpu->reset_sctlr = 0x00c50838;
    cpu->isar.id_pfr0 = 0x00000131;
    cpu->isar.id_pfr1 = 0x00011011;
    cpu->isar.id_dfr0 = 0x03010066;
    cpu->id_afr0 = 0x00000000;
    cpu->isar.id_mmfr0 = 0x10101105;
    cpu->isar.id_mmfr1 = 0x40000000;
    cpu->isar.id_mmfr2 = 0x01260000;
    cpu->isar.id_mmfr3 = 0x02102211;
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232042;
    cpu->isar.id_isar3 = 0x01112131;
    cpu->isar.id_isar4 = 0x00011142;
    cpu->isar.id_isar5 = 0x00011121;
    cpu->isar.id_isar6 = 0;
    cpu->isar.id_aa64pfr0 = 0x00002222;
    cpu->isar.id_aa64dfr0 = 0x10305106;
    cpu->isar.id_aa64isar0 = 0x00011120;
    cpu->isar.id_aa64mmfr0 = 0x00001124;
    cpu->isar.dbgdidr = 0x3516d000;
    cpu->isar.dbgdevid = 0x01110f13;
    cpu->isar.dbgdevid1 = 0x2;
    cpu->isar.reset_pmcr_el0 = 0x41013000;
    cpu->clidr = 0x0a200023;
    cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
    cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
    cpu->ccsidr[2] = 0x70ffe07a; /* 2048KB L2 cache */
    cpu->dcz_blocksize = 4; /* 64 bytes */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    cpu->gic_pribits = 5;
    define_cortex_a72_a57_a53_cp_reginfo(cpu);
}
732 
/*
 * Cortex-A53 model; also selectable as a KVM target (see kvm_target
 * below).  ID register values are the documented reset values.
 */
static void aarch64_a53_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a53";
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A53;
    cpu->midr = 0x410fd034;
    cpu->revidr = 0x00000000;
    cpu->reset_fpsid = 0x41034070;
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x12111111;
    cpu->isar.mvfr2 = 0x00000043;
    cpu->ctr = 0x84448004; /* L1Ip = VIPT */
    cpu->reset_sctlr = 0x00c50838;
    cpu->isar.id_pfr0 = 0x00000131;
    cpu->isar.id_pfr1 = 0x00011011;
    cpu->isar.id_dfr0 = 0x03010066;
    cpu->id_afr0 = 0x00000000;
    cpu->isar.id_mmfr0 = 0x10101105;
    cpu->isar.id_mmfr1 = 0x40000000;
    cpu->isar.id_mmfr2 = 0x01260000;
    cpu->isar.id_mmfr3 = 0x02102211;
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232042;
    cpu->isar.id_isar3 = 0x01112131;
    cpu->isar.id_isar4 = 0x00011142;
    cpu->isar.id_isar5 = 0x00011121;
    cpu->isar.id_isar6 = 0;
    cpu->isar.id_aa64pfr0 = 0x00002222;
    cpu->isar.id_aa64dfr0 = 0x10305106;
    cpu->isar.id_aa64isar0 = 0x00011120;
    cpu->isar.id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */
    cpu->isar.dbgdidr = 0x3516d000;
    cpu->isar.dbgdevid = 0x00110f13;
    cpu->isar.dbgdevid1 = 0x1;
    cpu->isar.reset_pmcr_el0 = 0x41033000;
    cpu->clidr = 0x0a200023;
    cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */
    cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */
    cpu->ccsidr[2] = 0x707fe07a; /* 1024KB L2 cache */
    cpu->dcz_blocksize = 4; /* 64 bytes */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    cpu->gic_pribits = 5;
    define_cortex_a72_a57_a53_cp_reginfo(cpu);
}
789 
/*
 * Cortex-A55 model.  Unlike the older models above, the ID registers
 * here are listed alphabetically per functional group (see the B2.4
 * note below); section references are to the CPU's documentation.
 */
static void aarch64_a55_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a55";
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);

    /* Ordered by B2.4 AArch64 registers by functional group */
    cpu->clidr = 0x82000023;
    cpu->ctr = 0x84448004; /* L1Ip = VIPT */
    cpu->dcz_blocksize = 4; /* 64 bytes */
    cpu->isar.id_aa64dfr0  = 0x0000000010305408ull;
    cpu->isar.id_aa64isar0 = 0x0000100010211120ull;
    cpu->isar.id_aa64isar1 = 0x0000000000100001ull;
    cpu->isar.id_aa64mmfr0 = 0x0000000000101122ull;
    cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull;
    cpu->isar.id_aa64mmfr2 = 0x0000000000001011ull;
    cpu->isar.id_aa64pfr0  = 0x0000000010112222ull;
    cpu->isar.id_aa64pfr1  = 0x0000000000000010ull;
    cpu->id_afr0       = 0x00000000;
    cpu->isar.id_dfr0  = 0x04010088;
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232042;
    cpu->isar.id_isar3 = 0x01112131;
    cpu->isar.id_isar4 = 0x00011142;
    cpu->isar.id_isar5 = 0x01011121;
    cpu->isar.id_isar6 = 0x00000010;
    cpu->isar.id_mmfr0 = 0x10201105;
    cpu->isar.id_mmfr1 = 0x40000000;
    cpu->isar.id_mmfr2 = 0x01260000;
    cpu->isar.id_mmfr3 = 0x02122211;
    cpu->isar.id_mmfr4 = 0x00021110;
    cpu->isar.id_pfr0  = 0x10010131;
    cpu->isar.id_pfr1  = 0x00011011;
    cpu->isar.id_pfr2  = 0x00000011;
    cpu->midr = 0x412FD050;          /* r2p0 */
    cpu->revidr = 0;

    /* From B2.23 CCSIDR_EL1 */
    cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */
    cpu->ccsidr[1] = 0x200fe01a; /* 32KB L1 icache */
    cpu->ccsidr[2] = 0x703fe07a; /* 512KB L2 cache */

    /* From B2.96 SCTLR_EL3 */
    cpu->reset_sctlr = 0x30c50838;

    /* From B4.45 ICH_VTR_EL2 */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    cpu->gic_pribits = 5;

    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x13211111;
    cpu->isar.mvfr2 = 0x00000043;

    /* From D5.4 AArch64 PMU register summary */
    cpu->isar.reset_pmcr_el0 = 0x410b3000;
}
857 
/*
 * Instance init for -cpu cortex-a72: set the published ID register
 * values for this core (presumably taken from the Cortex-A72 TRM --
 * confirm against the current revision when updating any value).
 */
static void aarch64_a72_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a72";
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    cpu->midr = 0x410fd083; /* r0p3, per the MIDR_EL1 encoding */
    cpu->revidr = 0x00000000;
    cpu->reset_fpsid = 0x41034080;
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x12111111;
    cpu->isar.mvfr2 = 0x00000043;
    cpu->ctr = 0x8444c004;
    cpu->reset_sctlr = 0x00c50838;
    cpu->isar.id_pfr0 = 0x00000131;
    cpu->isar.id_pfr1 = 0x00011011;
    cpu->isar.id_dfr0 = 0x03010066;
    cpu->id_afr0 = 0x00000000;
    cpu->isar.id_mmfr0 = 0x10201105;
    cpu->isar.id_mmfr1 = 0x40000000;
    cpu->isar.id_mmfr2 = 0x01260000;
    cpu->isar.id_mmfr3 = 0x02102211;
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232042;
    cpu->isar.id_isar3 = 0x01112131;
    cpu->isar.id_isar4 = 0x00011142;
    cpu->isar.id_isar5 = 0x00011121;
    cpu->isar.id_aa64pfr0 = 0x00002222;
    cpu->isar.id_aa64dfr0 = 0x10305106;
    cpu->isar.id_aa64isar0 = 0x00011120;
    cpu->isar.id_aa64mmfr0 = 0x00001124;
    cpu->isar.dbgdidr = 0x3516d000;
    cpu->isar.dbgdevid = 0x01110f13;
    cpu->isar.dbgdevid1 = 0x2;
    cpu->isar.reset_pmcr_el0 = 0x41023000;
    cpu->clidr = 0x0a200023;
    cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
    cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
    cpu->ccsidr[2] = 0x707fe07a; /* 1MB L2 cache */
    cpu->dcz_blocksize = 4; /* 64 bytes */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    cpu->gic_pribits = 5;
    /* Add the IMPDEF sysregs shared by the A72/A57/A53 family */
    define_cortex_a72_a57_a53_cp_reginfo(cpu);
}
912 
/*
 * Instance init for -cpu cortex-a76: set the published ID register
 * values for this core.  The section references in the comments below
 * appear to be to the Cortex-A76 TRM -- confirm against the current
 * revision when updating any value.
 */
static void aarch64_a76_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a76";
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);

    /* Ordered by B2.4 AArch64 registers by functional group */
    cpu->clidr = 0x82000023;
    cpu->ctr = 0x8444C004;
    cpu->dcz_blocksize = 4;
    cpu->isar.id_aa64dfr0  = 0x0000000010305408ull;
    cpu->isar.id_aa64isar0 = 0x0000100010211120ull;
    cpu->isar.id_aa64isar1 = 0x0000000000100001ull;
    cpu->isar.id_aa64mmfr0 = 0x0000000000101122ull;
    cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull;
    cpu->isar.id_aa64mmfr2 = 0x0000000000001011ull;
    cpu->isar.id_aa64pfr0  = 0x1100000010111112ull; /* GIC filled in later */
    cpu->isar.id_aa64pfr1  = 0x0000000000000010ull;
    cpu->id_afr0       = 0x00000000;
    cpu->isar.id_dfr0  = 0x04010088;
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232042;
    cpu->isar.id_isar3 = 0x01112131;
    cpu->isar.id_isar4 = 0x00010142;
    cpu->isar.id_isar5 = 0x01011121;
    cpu->isar.id_isar6 = 0x00000010;
    cpu->isar.id_mmfr0 = 0x10201105;
    cpu->isar.id_mmfr1 = 0x40000000;
    cpu->isar.id_mmfr2 = 0x01260000;
    cpu->isar.id_mmfr3 = 0x02122211;
    cpu->isar.id_mmfr4 = 0x00021110;
    cpu->isar.id_pfr0  = 0x10010131;
    cpu->isar.id_pfr1  = 0x00010000; /* GIC filled in later */
    cpu->isar.id_pfr2  = 0x00000011;
    cpu->midr = 0x414fd0b1;          /* r4p1 */
    cpu->revidr = 0;

    /* From B2.18 CCSIDR_EL1 */
    cpu->ccsidr[0] = 0x701fe01a; /* 64KB L1 dcache */
    cpu->ccsidr[1] = 0x201fe01a; /* 64KB L1 icache */
    cpu->ccsidr[2] = 0x707fe03a; /* 512KB L2 cache */

    /* From B2.93 SCTLR_EL3 */
    cpu->reset_sctlr = 0x30c50838;

    /* From B4.23 ICH_VTR_EL2 */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    cpu->gic_pribits = 5;

    /* From B5.1 AdvSIMD AArch64 register summary */
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x13211111;
    cpu->isar.mvfr2 = 0x00000043;

    /* From D5.1 AArch64 PMU register summary */
    cpu->isar.reset_pmcr_el0 = 0x410b3000;
}
981 
/*
 * Instance init for -cpu a64fx (Fujitsu A64FX): set the published ID
 * register values for this core.  Note the restricted set of supported
 * SVE vector lengths configured at the bottom.
 */
static void aarch64_a64fx_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,a64fx";
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    cpu->midr = 0x461f0010;
    cpu->revidr = 0x00000000;
    cpu->ctr = 0x86668006;
    cpu->reset_sctlr = 0x30000180;
    cpu->isar.id_aa64pfr0 =   0x0000000101111111; /* No RAS Extensions */
    cpu->isar.id_aa64pfr1 = 0x0000000000000000;
    cpu->isar.id_aa64dfr0 = 0x0000000010305408;
    cpu->isar.id_aa64dfr1 = 0x0000000000000000;
    cpu->id_aa64afr0 = 0x0000000000000000;
    cpu->id_aa64afr1 = 0x0000000000000000;
    cpu->isar.id_aa64mmfr0 = 0x0000000000001122;
    cpu->isar.id_aa64mmfr1 = 0x0000000011212100;
    cpu->isar.id_aa64mmfr2 = 0x0000000000001011;
    cpu->isar.id_aa64isar0 = 0x0000000010211120;
    cpu->isar.id_aa64isar1 = 0x0000000000010001;
    cpu->isar.id_aa64zfr0 = 0x0000000000000000;
    cpu->clidr = 0x0000000080000023;
    cpu->ccsidr[0] = 0x7007e01c; /* 64KB L1 dcache */
    cpu->ccsidr[1] = 0x2007e01c; /* 64KB L1 icache */
    cpu->ccsidr[2] = 0x70ffe07c; /* 8MB L2 cache */
    cpu->dcz_blocksize = 6; /* 256 bytes */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    cpu->gic_pribits = 5;

    /* The A64FX supports only 128, 256 and 512 bit vector lengths */
    aarch64_add_sve_properties(obj);
    cpu->sve_vq.supported = (1 << 0)  /* 128bit */
                          | (1 << 1)  /* 256bit */
                          | (1 << 3); /* 512bit */

    cpu->isar.reset_pmcr_el0 = 0x46014040;

    /* TODO:  Add A64FX specific HPC extension registers */
}
1030 
/*
 * IMPDEF system registers of the Neoverse N1.  All are modelled as
 * ARM_CP_CONST: reads return .resetvalue and writes have no effect,
 * which is sufficient for guests that merely probe or write them.
 */
static const ARMCPRegInfo neoverse_n1_cp_reginfo[] = {
    { .name = "ATCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ATCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 15, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ATCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 7, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ATCR_EL12", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 15, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AVTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 15, .crm = 7, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUACTLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUACTLR2_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUACTLR3_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * Report CPUCFR_EL1.SCU as 1, as we do not implement the DSU
     * (and in particular its system registers).
     */
    { .name = "CPUCFR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 0, .opc2 = 0,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 4 },
    { .name = "CPUECTLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0x961563010 },
    { .name = "CPUPCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUPMR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 3,
      .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUPOR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 2,
      .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUPSELR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUPWRCTLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 7,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ERXPFGCDN_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ERXPFGCTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ERXPFGF_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
};
1091 
/* Register the Neoverse N1 IMPDEF system registers (above) on @cpu. */
static void define_neoverse_n1_cp_reginfo(ARMCPU *cpu)
{
    define_arm_cp_regs(cpu, neoverse_n1_cp_reginfo);
}
1096 
/*
 * Instance init for -cpu neoverse-n1: set the published ID register
 * values for this core.  The section references in the comments below
 * appear to be to the Neoverse N1 TRM -- confirm against the current
 * revision when updating any value.
 */
static void aarch64_neoverse_n1_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,neoverse-n1";
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);

    /* Ordered by B2.4 AArch64 registers by functional group */
    cpu->clidr = 0x82000023;
    cpu->ctr = 0x8444c004;
    cpu->dcz_blocksize = 4;
    cpu->isar.id_aa64dfr0  = 0x0000000110305408ull;
    cpu->isar.id_aa64isar0 = 0x0000100010211120ull;
    cpu->isar.id_aa64isar1 = 0x0000000000100001ull;
    cpu->isar.id_aa64mmfr0 = 0x0000000000101125ull;
    cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull;
    cpu->isar.id_aa64mmfr2 = 0x0000000000001011ull;
    cpu->isar.id_aa64pfr0  = 0x1100000010111112ull; /* GIC filled in later */
    cpu->isar.id_aa64pfr1  = 0x0000000000000020ull;
    cpu->id_afr0       = 0x00000000;
    cpu->isar.id_dfr0  = 0x04010088;
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232042;
    cpu->isar.id_isar3 = 0x01112131;
    cpu->isar.id_isar4 = 0x00010142;
    cpu->isar.id_isar5 = 0x01011121;
    cpu->isar.id_isar6 = 0x00000010;
    cpu->isar.id_mmfr0 = 0x10201105;
    cpu->isar.id_mmfr1 = 0x40000000;
    cpu->isar.id_mmfr2 = 0x01260000;
    cpu->isar.id_mmfr3 = 0x02122211;
    cpu->isar.id_mmfr4 = 0x00021110;
    cpu->isar.id_pfr0  = 0x10010131;
    cpu->isar.id_pfr1  = 0x00010000; /* GIC filled in later */
    cpu->isar.id_pfr2  = 0x00000011;
    cpu->midr = 0x414fd0c1;          /* r4p1 */
    cpu->revidr = 0;

    /* From B2.23 CCSIDR_EL1 */
    cpu->ccsidr[0] = 0x701fe01a; /* 64KB L1 dcache */
    cpu->ccsidr[1] = 0x201fe01a; /* 64KB L1 icache */
    cpu->ccsidr[2] = 0x70ffe03a; /* 1MB L2 cache */

    /* From B2.98 SCTLR_EL3 */
    cpu->reset_sctlr = 0x30c50838;

    /* From B4.23 ICH_VTR_EL2 */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    cpu->gic_pribits = 5;

    /* From B5.1 AdvSIMD AArch64 register summary */
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x13211111;
    cpu->isar.mvfr2 = 0x00000043;

    /* From D5.1 AArch64 PMU register summary */
    cpu->isar.reset_pmcr_el0 = 0x410c3000;

    /* Add the N1's IMPDEF sysregs (constant implementations) */
    define_neoverse_n1_cp_reginfo(cpu);
}
1167 
/*
 * Instance init for -cpu host: mirror the host CPU's feature set via
 * the configured accelerator (KVM or HVF).  This type is only
 * registered when one of those accelerators is compiled in, so the
 * fallback branch is unreachable by construction.
 */
static void aarch64_host_initfn(Object *obj)
{
#if defined(CONFIG_KVM)
    ARMCPU *cpu = ARM_CPU(obj);
    kvm_arm_set_cpu_features_from_host(cpu);
    /*
     * Only add SVE/pauth properties when the host features include
     * AArch64 (KVM can also expose an AArch32-only configuration).
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        aarch64_add_sve_properties(obj);
        aarch64_add_pauth_properties(obj);
    }
#elif defined(CONFIG_HVF)
    ARMCPU *cpu = ARM_CPU(obj);
    hvf_arm_set_cpu_features_from_host(cpu);
    aarch64_add_pauth_properties(obj);
#else
    g_assert_not_reached();
#endif
}
1185 
/* -cpu max: if KVM is enabled, like -cpu host (best possible with this host);
 * otherwise, a CPU with as many features enabled as our emulation supports.
 * The version of '-cpu max' for qemu-system-arm is defined in cpu.c;
 * this only needs to handle 64 bits.
 */
static void aarch64_max_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);
    uint64_t t;
    uint32_t u;

    if (kvm_enabled() || hvf_enabled()) {
        /* With KVM or HVF, '-cpu max' is identical to '-cpu host' */
        aarch64_host_initfn(obj);
        return;
    }

    /* '-cpu max' for TCG: we currently do this as "A57 with extra things" */

    aarch64_a57_initfn(obj);

    /*
     * Reset MIDR so the guest doesn't mistake our 'max' CPU type for a real
     * one and try to apply errata workarounds or use impdef features we
     * don't provide.
     * An IMPLEMENTER field of 0 means "reserved for software use";
     * ARCHITECTURE must be 0xf indicating "v7 or later, check ID registers
     * to see which features are present";
     * the VARIANT, PARTNUM and REVISION fields are all implementation
     * defined and we choose to define PARTNUM just in case guest
     * code needs to distinguish this QEMU CPU from other software
     * implementations, though this shouldn't be needed.
     */
    t = FIELD_DP64(0, MIDR_EL1, IMPLEMENTER, 0);
    t = FIELD_DP64(t, MIDR_EL1, ARCHITECTURE, 0xf);
    t = FIELD_DP64(t, MIDR_EL1, PARTNUM, 'Q');
    t = FIELD_DP64(t, MIDR_EL1, VARIANT, 0);
    t = FIELD_DP64(t, MIDR_EL1, REVISION, 0);
    cpu->midr = t;

    /*
     * We're going to set FEAT_S2FWB, which mandates that CLIDR_EL1.{LoUU,LoUIS}
     * are zero.
     */
    u = cpu->clidr;
    u = FIELD_DP32(u, CLIDR_EL1, LOUIS, 0);
    u = FIELD_DP32(u, CLIDR_EL1, LOUU, 0);
    cpu->clidr = u;

    /*
     * Layer the optional architecture features supported by TCG on top
     * of the A57 baseline ID register values, one register at a time.
     */
    t = cpu->isar.id_aa64isar0;
    t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2);      /* FEAT_PMULL */
    t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1);     /* FEAT_SHA1 */
    t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2);     /* FEAT_SHA512 */
    t = FIELD_DP64(t, ID_AA64ISAR0, CRC32, 1);
    t = FIELD_DP64(t, ID_AA64ISAR0, ATOMIC, 2);   /* FEAT_LSE */
    t = FIELD_DP64(t, ID_AA64ISAR0, RDM, 1);      /* FEAT_RDM */
    t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 1);     /* FEAT_SHA3 */
    t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 1);      /* FEAT_SM3 */
    t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 1);      /* FEAT_SM4 */
    t = FIELD_DP64(t, ID_AA64ISAR0, DP, 1);       /* FEAT_DotProd */
    t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 1);      /* FEAT_FHM */
    t = FIELD_DP64(t, ID_AA64ISAR0, TS, 2);       /* FEAT_FlagM2 */
    t = FIELD_DP64(t, ID_AA64ISAR0, TLB, 2);      /* FEAT_TLBIRANGE */
    t = FIELD_DP64(t, ID_AA64ISAR0, RNDR, 1);     /* FEAT_RNG */
    cpu->isar.id_aa64isar0 = t;

    t = cpu->isar.id_aa64isar1;
    t = FIELD_DP64(t, ID_AA64ISAR1, DPB, 2);      /* FEAT_DPB2 */
    t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 1);    /* FEAT_JSCVT */
    t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1);     /* FEAT_FCMA */
    t = FIELD_DP64(t, ID_AA64ISAR1, LRCPC, 2);    /* FEAT_LRCPC2 */
    t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 1);  /* FEAT_FRINTTS */
    t = FIELD_DP64(t, ID_AA64ISAR1, SB, 1);       /* FEAT_SB */
    t = FIELD_DP64(t, ID_AA64ISAR1, SPECRES, 1);  /* FEAT_SPECRES */
    t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 1);     /* FEAT_BF16 */
    t = FIELD_DP64(t, ID_AA64ISAR1, DGH, 1);      /* FEAT_DGH */
    t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 1);     /* FEAT_I8MM */
    cpu->isar.id_aa64isar1 = t;

    t = cpu->isar.id_aa64pfr0;
    t = FIELD_DP64(t, ID_AA64PFR0, FP, 1);        /* FEAT_FP16 */
    t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1);   /* FEAT_FP16 */
    t = FIELD_DP64(t, ID_AA64PFR0, RAS, 2);       /* FEAT_RASv1p1 + FEAT_DoubleFault */
    t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
    t = FIELD_DP64(t, ID_AA64PFR0, SEL2, 1);      /* FEAT_SEL2 */
    t = FIELD_DP64(t, ID_AA64PFR0, DIT, 1);       /* FEAT_DIT */
    t = FIELD_DP64(t, ID_AA64PFR0, CSV2, 2);      /* FEAT_CSV2_2 */
    t = FIELD_DP64(t, ID_AA64PFR0, CSV3, 1);      /* FEAT_CSV3 */
    cpu->isar.id_aa64pfr0 = t;

    t = cpu->isar.id_aa64pfr1;
    t = FIELD_DP64(t, ID_AA64PFR1, BT, 1);        /* FEAT_BTI */
    t = FIELD_DP64(t, ID_AA64PFR1, SSBS, 2);      /* FEAT_SSBS2 */
    /*
     * Begin with full support for MTE. This will be downgraded to MTE=0
     * during realize if the board provides no tag memory, much like
     * we do for EL2 with the virtualization=on property.
     */
    t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3);       /* FEAT_MTE3 */
    t = FIELD_DP64(t, ID_AA64PFR1, RAS_FRAC, 0);  /* FEAT_RASv1p1 + FEAT_DoubleFault */
    t = FIELD_DP64(t, ID_AA64PFR1, SME, 1);       /* FEAT_SME */
    t = FIELD_DP64(t, ID_AA64PFR1, CSV2_FRAC, 0); /* FEAT_CSV2_2 */
    cpu->isar.id_aa64pfr1 = t;

    t = cpu->isar.id_aa64mmfr0;
    t = FIELD_DP64(t, ID_AA64MMFR0, PARANGE, 6); /* FEAT_LPA: 52 bits */
    t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16, 1);   /* 16k pages supported */
    t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16_2, 2); /* 16k stage2 supported */
    t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN64_2, 2); /* 64k stage2 supported */
    t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4_2, 2);  /*  4k stage2 supported */
    t = FIELD_DP64(t, ID_AA64MMFR0, FGT, 1);       /* FEAT_FGT */
    cpu->isar.id_aa64mmfr0 = t;

    t = cpu->isar.id_aa64mmfr1;
    t = FIELD_DP64(t, ID_AA64MMFR1, HAFDBS, 2);   /* FEAT_HAFDBS */
    t = FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2); /* FEAT_VMID16 */
    t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1);       /* FEAT_VHE */
    t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1);     /* FEAT_HPDS */
    t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1);       /* FEAT_LOR */
    t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 3);      /* FEAT_PAN3 */
    t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1);      /* FEAT_XNX */
    t = FIELD_DP64(t, ID_AA64MMFR1, ETS, 1);      /* FEAT_ETS */
    t = FIELD_DP64(t, ID_AA64MMFR1, HCX, 1);      /* FEAT_HCX */
    cpu->isar.id_aa64mmfr1 = t;

    t = cpu->isar.id_aa64mmfr2;
    t = FIELD_DP64(t, ID_AA64MMFR2, CNP, 1);      /* FEAT_TTCNP */
    t = FIELD_DP64(t, ID_AA64MMFR2, UAO, 1);      /* FEAT_UAO */
    t = FIELD_DP64(t, ID_AA64MMFR2, IESB, 1);     /* FEAT_IESB */
    t = FIELD_DP64(t, ID_AA64MMFR2, VARANGE, 1);  /* FEAT_LVA */
    t = FIELD_DP64(t, ID_AA64MMFR2, ST, 1);       /* FEAT_TTST */
    t = FIELD_DP64(t, ID_AA64MMFR2, IDS, 1);      /* FEAT_IDST */
    t = FIELD_DP64(t, ID_AA64MMFR2, FWB, 1);      /* FEAT_S2FWB */
    t = FIELD_DP64(t, ID_AA64MMFR2, TTL, 1);      /* FEAT_TTL */
    t = FIELD_DP64(t, ID_AA64MMFR2, BBM, 2);      /* FEAT_BBM at level 2 */
    t = FIELD_DP64(t, ID_AA64MMFR2, EVT, 2);      /* FEAT_EVT */
    t = FIELD_DP64(t, ID_AA64MMFR2, E0PD, 1);     /* FEAT_E0PD */
    cpu->isar.id_aa64mmfr2 = t;

    t = cpu->isar.id_aa64zfr0;
    t = FIELD_DP64(t, ID_AA64ZFR0, SVEVER, 1);
    t = FIELD_DP64(t, ID_AA64ZFR0, AES, 2);       /* FEAT_SVE_PMULL128 */
    t = FIELD_DP64(t, ID_AA64ZFR0, BITPERM, 1);   /* FEAT_SVE_BitPerm */
    t = FIELD_DP64(t, ID_AA64ZFR0, BFLOAT16, 1);  /* FEAT_BF16 */
    t = FIELD_DP64(t, ID_AA64ZFR0, SHA3, 1);      /* FEAT_SVE_SHA3 */
    t = FIELD_DP64(t, ID_AA64ZFR0, SM4, 1);       /* FEAT_SVE_SM4 */
    t = FIELD_DP64(t, ID_AA64ZFR0, I8MM, 1);      /* FEAT_I8MM */
    t = FIELD_DP64(t, ID_AA64ZFR0, F32MM, 1);     /* FEAT_F32MM */
    t = FIELD_DP64(t, ID_AA64ZFR0, F64MM, 1);     /* FEAT_F64MM */
    cpu->isar.id_aa64zfr0 = t;

    t = cpu->isar.id_aa64dfr0;
    t = FIELD_DP64(t, ID_AA64DFR0, DEBUGVER, 9);  /* FEAT_Debugv8p4 */
    t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 6);    /* FEAT_PMUv3p5 */
    cpu->isar.id_aa64dfr0 = t;

    t = cpu->isar.id_aa64smfr0;
    t = FIELD_DP64(t, ID_AA64SMFR0, F32F32, 1);   /* FEAT_SME */
    t = FIELD_DP64(t, ID_AA64SMFR0, B16F32, 1);   /* FEAT_SME */
    t = FIELD_DP64(t, ID_AA64SMFR0, F16F32, 1);   /* FEAT_SME */
    t = FIELD_DP64(t, ID_AA64SMFR0, I8I32, 0xf);  /* FEAT_SME */
    t = FIELD_DP64(t, ID_AA64SMFR0, F64F64, 1);   /* FEAT_SME_F64F64 */
    t = FIELD_DP64(t, ID_AA64SMFR0, I16I64, 0xf); /* FEAT_SME_I16I64 */
    t = FIELD_DP64(t, ID_AA64SMFR0, FA64, 1);     /* FEAT_SME_FA64 */
    cpu->isar.id_aa64smfr0 = t;

    /* Replicate the same data to the 32-bit id registers.  */
    aa32_max_features(cpu);

#ifdef CONFIG_USER_ONLY
    /*
     * For usermode -cpu max we can use a larger and more efficient DCZ
     * blocksize since we don't have to follow what the hardware does.
     */
    cpu->ctr = 0x80038003; /* 32 byte I and D cacheline size, VIPT icache */
    cpu->dcz_blocksize = 7; /*  512 bytes */
#endif

    /* Allow every SVE vector length up to the build-time maximum */
    cpu->sve_vq.supported = MAKE_64BIT_MASK(0, ARM_MAX_VQ);
    cpu->sme_vq.supported = SVE_VQ_POW2_MAP;

    aarch64_add_pauth_properties(obj);
    aarch64_add_sve_properties(obj);
    aarch64_add_sme_properties(obj);
    object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq,
                        cpu_max_set_sve_max_vq, NULL, NULL);
    qdev_property_add_static(DEVICE(obj), &arm_cpu_lpa2_property);
}
1374 
/*
 * Table of the AArch64 CPU models registered by this file; each entry's
 * initfn fills in the per-model ID registers.  "host" is only available
 * when an accelerator (KVM or HVF) is compiled in.
 */
static const ARMCPUInfo aarch64_cpus[] = {
    { .name = "cortex-a35",         .initfn = aarch64_a35_initfn },
    { .name = "cortex-a57",         .initfn = aarch64_a57_initfn },
    { .name = "cortex-a53",         .initfn = aarch64_a53_initfn },
    { .name = "cortex-a55",         .initfn = aarch64_a55_initfn },
    { .name = "cortex-a72",         .initfn = aarch64_a72_initfn },
    { .name = "cortex-a76",         .initfn = aarch64_a76_initfn },
    { .name = "a64fx",              .initfn = aarch64_a64fx_initfn },
    { .name = "neoverse-n1",        .initfn = aarch64_neoverse_n1_initfn },
    { .name = "max",                .initfn = aarch64_max_initfn },
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
    { .name = "host",               .initfn = aarch64_host_initfn },
#endif
};
1389 
1390 static bool aarch64_cpu_get_aarch64(Object *obj, Error **errp)
1391 {
1392     ARMCPU *cpu = ARM_CPU(obj);
1393 
1394     return arm_feature(&cpu->env, ARM_FEATURE_AARCH64);
1395 }
1396 
1397 static void aarch64_cpu_set_aarch64(Object *obj, bool value, Error **errp)
1398 {
1399     ARMCPU *cpu = ARM_CPU(obj);
1400 
1401     /* At this time, this property is only allowed if KVM is enabled.  This
1402      * restriction allows us to avoid fixing up functionality that assumes a
1403      * uniform execution state like do_interrupt.
1404      */
1405     if (value == false) {
1406         if (!kvm_enabled() || !kvm_arm_aarch32_supported()) {
1407             error_setg(errp, "'aarch64' feature cannot be disabled "
1408                              "unless KVM is enabled and 32-bit EL1 "
1409                              "is supported");
1410             return;
1411         }
1412         unset_feature(&cpu->env, ARM_FEATURE_AARCH64);
1413     } else {
1414         set_feature(&cpu->env, ARM_FEATURE_AARCH64);
1415     }
1416 }
1417 
/* Instance finalize: nothing to free beyond what parent classes handle. */
static void aarch64_cpu_finalizefn(Object *obj)
{
}
1421 
1422 static gchar *aarch64_gdb_arch_name(CPUState *cs)
1423 {
1424     return g_strdup("aarch64");
1425 }
1426 
1427 static void aarch64_cpu_class_init(ObjectClass *oc, void *data)
1428 {
1429     CPUClass *cc = CPU_CLASS(oc);
1430 
1431     cc->gdb_read_register = aarch64_cpu_gdb_read_register;
1432     cc->gdb_write_register = aarch64_cpu_gdb_write_register;
1433     cc->gdb_num_core_regs = 34;
1434     cc->gdb_core_xml_file = "aarch64-core.xml";
1435     cc->gdb_arch_name = aarch64_gdb_arch_name;
1436 
1437     object_class_property_add_bool(oc, "aarch64", aarch64_cpu_get_aarch64,
1438                                    aarch64_cpu_set_aarch64);
1439     object_class_property_set_description(oc, "aarch64",
1440                                           "Set on/off to enable/disable aarch64 "
1441                                           "execution state ");
1442 }
1443 
/*
 * Instance init shared by all registered AArch64 CPU types: run the
 * model-specific initfn recorded in the class, then the common ARM
 * post-init hook.
 */
static void aarch64_cpu_instance_init(Object *obj)
{
    ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj);

    acc->info->initfn(obj);
    arm_cpu_post_init(obj);
}
1451 
/*
 * Default class_init for registered CPU types: stash the ARMCPUInfo
 * (passed as class_data) so instance_init can find the model's initfn.
 */
static void cpu_register_class_init(ObjectClass *oc, void *data)
{
    ARMCPUClass *acc = ARM_CPU_CLASS(oc);

    acc->info = data;
}
1458 
1459 void aarch64_cpu_register(const ARMCPUInfo *info)
1460 {
1461     TypeInfo type_info = {
1462         .parent = TYPE_AARCH64_CPU,
1463         .instance_size = sizeof(ARMCPU),
1464         .instance_init = aarch64_cpu_instance_init,
1465         .class_size = sizeof(ARMCPUClass),
1466         .class_init = info->class_init ?: cpu_register_class_init,
1467         .class_data = (void *)info,
1468     };
1469 
1470     type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
1471     type_register(&type_info);
1472     g_free((void *)type_info.name);
1473 }
1474 
/*
 * Abstract base type all AArch64 CPU models inherit from; concrete
 * models are registered on top of it by aarch64_cpu_register().
 */
static const TypeInfo aarch64_cpu_type_info = {
    .name = TYPE_AARCH64_CPU,
    .parent = TYPE_ARM_CPU,
    .instance_size = sizeof(ARMCPU),
    .instance_finalize = aarch64_cpu_finalizefn,
    .abstract = true,
    .class_size = sizeof(AArch64CPUClass),
    .class_init = aarch64_cpu_class_init,
};
1484 
1485 static void aarch64_cpu_register_types(void)
1486 {
1487     size_t i;
1488 
1489     type_register_static(&aarch64_cpu_type_info);
1490 
1491     for (i = 0; i < ARRAY_SIZE(aarch64_cpus); ++i) {
1492         aarch64_cpu_register(&aarch64_cpus[i]);
1493     }
1494 }
1495 
1496 type_init(aarch64_cpu_register_types)
1497