#include "cpu.h"
#include "internals.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "exec/cpu_ldst.h"
#include "arm_ldst.h"

#ifndef CONFIG_USER_ONLY
static inline int get_phys_addr(CPUARMState *env, target_ulong address,
                                int access_type, int is_user,
                                hwaddr *phys_ptr, int *prot,
                                target_ulong *page_size);

/* Definitions for the PMCCNTR and PMCR registers */
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRE   0x1
#endif

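/* Direct accessors for the CPUARMState field named by ri->fieldoffset,
 * at the width the reginfo declares. These are the fallbacks used when a
 * register has no custom read/write hooks.
 */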
static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

bool write_cpustate_to_list(ARMCPU *cpu)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_MIGRATE) {
            continue;
        }
        cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
    }
    return ok;
}

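/* Load the (index,value) list back into cpu->env. Returns false if any
 * value could not be written back exactly (e.g. a read-only register or
 * a mismatched incoming migration value), so the caller can reject the
 * incoming state.
 */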
bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_MIGRATE) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}

static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & ARM_CP_NO_MIGRATE)) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

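/* g_list_foreach callback: count the registers that will be migrated,
 * so init_cpreg_list() can size the index and value arrays.
 */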
static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & ARM_CP_NO_MIGRATE)) {
        cpu->cpreg_array_len++;
    }
}

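/* Comparator for g_list_sort(): order keys by their 64-bit KVM register
 * ID, since init_cpreg_list() requires the tuple list sorted by that ID.
 */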
static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

static void cpreg_make_keylist(gpointer key, gpointer value, gpointer udata)
{
    GList **plist = udata;

    *plist = g_list_prepend(*plist, key);
}

void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys = NULL;
    int arraylen;

    g_hash_table_foreach(cpu->cp_regs, cpreg_make_keylist, &keys);

    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu), 1); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu), 1);
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_MPU)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu), 1);
    }
    raw_write(env, ri, value);
}

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush(CPU(cpu), 1);
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush(CPU(cpu), value == 0);
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    //struct uc_struct *uc = env->uc;
    // TODO: issue #642
    // tlb_flush(other_cpu, 1);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    //struct uc_struct *uc = env->uc;
    // TODO: issue #642
    // tlb_flush(other_cpu, value == 0);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    //struct uc_struct *uc = env->uc;
    // TODO: issue #642
    // tlb_flush(other_cpu, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    //struct uc_struct *uc = env->uc;
    // TODO: issue #642
    // tlb_flush(other_cpu, value & TARGET_PAGE_MASK);
}

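/* The reginfo tables below use positional initializers. As far as these
 * tables are concerned the field order is: name, cp, crn, crm, opc0,
 * opc1, opc2, state, type, access, opaque, resetvalue, fieldoffset,
 * then the accessfn/readfn/writefn/raw_readfn/raw_writefn/resetfn hooks;
 * omitted trailing fields default to zero/NULL.
 */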
static const ARMCPRegInfo cp_reginfo[] = {
    { "FCSEIDR", 15,13,0, 0,0,0, 0,
      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c13_fcse),
      NULL, NULL, fcse_write, NULL, raw_write, NULL, },
    { "CONTEXTIDR", 0,13,0, 3,0,1, ARM_CP_STATE_BOTH,
      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.contextidr_el1),
      NULL, NULL, contextidr_write, NULL, raw_write, NULL, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { "DACR", 15,3,CP_ANY, 0,CP_ANY,CP_ANY, 0,
      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c3),
      NULL, NULL, dacr_write, NULL, raw_write, NULL, },
    /* ??? This covers not just the impdef TLB lockdown registers but also
     * some v7VMSA registers relating to TEX remap, so it is overly broad.
     */
    { "TLB_LOCKDOWN", 15,10,CP_ANY, 0,CP_ANY,CP_ANY, 0,
      ARM_CP_NOP, PL1_RW, },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { "CACHEMAINT", 15,7,CP_ANY, 0,0,CP_ANY, 0,
      ARM_CP_NOP | ARM_CP_OVERRIDE, PL1_W, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { "WFI_v5", 15,7,8, 0,0,2, 0,
      ARM_CP_WFI, PL1_W, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { "WFI_v6", 15,7,0, 0,0,4, 0,
      ARM_CP_WFI, PL1_W, },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { "DLOCKDOWN", 15,9,0, 0,0,0, 0,
      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c9_data), },
    { "ILOCKDOWN", 15,9,0, 0,0,1, 0,
      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c9_insn), },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { "DUMMY", 15,0,0, 0,1,CP_ANY, 0,
      ARM_CP_CONST | ARM_CP_NO_MIGRATE, PL1_R, NULL, 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { "DBGDIDR", 14,0,0, 0,0,0, 0,
      ARM_CP_CONST, PL0_R, NULL, 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { "TLBIALL", 15,8,CP_ANY, 0,CP_ANY,0, 0,
      ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
      NULL, NULL, tlbiall_write, },
    { "TLBIMVA", 15,8,CP_ANY, 0,CP_ANY,1, 0,
      ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
      NULL, NULL, tlbimva_write, },
    { "TLBIASID", 15,8,CP_ANY, 0,CP_ANY,2, 0,
      ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
      NULL, NULL, tlbiasid_write, },
    { "TLBIMVAA", 15,8,CP_ANY, 0,CP_ANY,3, 0,
      ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
      NULL, NULL, tlbimvaa_write, },
    REGINFO_SENTINEL
};

static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1U << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1U << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!arm_feature(env, ARM_FEATURE_NEON) ||
                    !arm_feature(env, ARM_FEATURE_VFP3)) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }
    env->cp15.c1_coproc = value;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { "MVA_prefetch", 15,7,13, 0,0,1, 0,
      ARM_CP_NOP, PL1_W, },
    { "ISB", 15,7,5, 0,0,4, 0,
      ARM_CP_NOP, PL0_W, },
    { "DSB", 15,7,10, 0,0,4, 0,
      ARM_CP_NOP, PL0_W, },
    { "DMB", 15,7,10, 0,0,5, 0,
      ARM_CP_NOP, PL0_W, },
    { "IFAR", 15,6,0, 0,0,2, 0,
      0, PL1_RW, NULL, 0, offsetofhigh32(CPUARMState, cp15.far_el[1]), },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { "WFAR", 15,6,0, 0,0,1, 0,
      ARM_CP_CONST, PL1_RW, NULL, 0, },
    { "CPACR", 0,1,0, 3,0,2, ARM_CP_STATE_BOTH,
      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c1_coproc),
      NULL, NULL, cpacr_write },
    REGINFO_SENTINEL
};

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR.
     */
    if (arm_current_el(env) == 0 && !env->cp15.c9_pmuserenr) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

#ifndef CONFIG_USER_ONLY

static inline bool arm_ccnt_enabled(CPUARMState *env)
{
    /* This does not support checking the PMCCFILTR_EL0 register */

    if (!(env->cp15.c9_pmcr & PMCRE)) {
        return false;
    }

    return true;
}

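/* While the cycle counter is enabled, cp15.c15_ccnt holds
 * (total_ticks - CCNT) rather than CCNT itself. pmccntr_sync() converts
 * between the two representations, so callers bracket any change to the
 * counting state with one sync before and one after (see pmcr_write()
 * and pmccfiltr_write()).
 */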
void pmccntr_sync(CPUARMState *env)
{
    uint64_t temp_ticks;

    temp_ticks = muldiv64(qemu_clock_get_us(QEMU_CLOCK_VIRTUAL),
                          get_ticks_per_sec(), 1000000);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        temp_ticks /= 64;
    }

    if (arm_ccnt_enabled(env)) {
        env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
    }
}

static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmccntr_sync(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmccntr_sync(env);
}

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, do not change value */
        return env->cp15.c15_ccnt;
    }

    total_ticks = muldiv64(qemu_clock_get_us(QEMU_CLOCK_VIRTUAL),
                           get_ticks_per_sec(), 1000000);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    return total_ticks - env->cp15.c15_ccnt;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, set the absolute value */
        env->cp15.c15_ccnt = value;
        return;
    }

    total_ticks = muldiv64(qemu_clock_get_us(QEMU_CLOCK_VIRTUAL),
                           get_ticks_per_sec(), 1000000);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    env->cp15.c15_ccnt = total_ticks - value;
}

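/* AArch32 view of PMCCNTR: writes only replace the low 32 bits; the high
 * half of the 64-bit counter is preserved via read-modify-write.
 */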
static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}

#else /* CONFIG_USER_ONLY */

void pmccntr_sync(CPUARMState *env)
{
}

#endif

static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_sync(env);
    env->cp15.pmccfiltr_el0 = value & 0x7E000000;
    pmccntr_sync(env);
}

static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1U << 31);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1U << 31);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    env->cp15.c9_pmovsr &= ~value;
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    env->cp15.c9_pmxevtyper = value & 0xff;
}

static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->cp15.c9_pmuserenr = value & 1;
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= (1U << 31);
    env->cp15.c9_pminten |= value;
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1U << 31);
    env->cp15.c9_pminten &= ~value;
}

static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}

static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* We only mask off bits that are RES0 both for AArch64 and AArch32.
     * For bits that vary between AArch32/64, code needs to check the
     * current execution mode before directly using the feature bit.
     */
    uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists; the bit is UNK/SBZP when EL2 is
         * unavailable, so in QEMU we force it to zero in that case.
         */
        if (arm_feature(env, ARM_FEATURE_V7)) {
            valid_mask &= ~SCR_SMD;
        }
    }

    /* Clear all-context RES0 bits.  */
    value &= valid_mask;
    raw_write(env, ri, value);
}

static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    return cpu->ccsidr[env->cp15.c0_cssel];
}

static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}

static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t ret = 0;

    if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
        ret |= CPSR_I;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
        ret |= CPSR_F;
    }
    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}

static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { "NOP", 15,7,0, 0,0,4, 0,
      ARM_CP_NOP, PL1_W, },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow (although we don't actually implement any counters)
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { "PMCNTENSET", 15,9,12, 0,0,1, 0,
      ARM_CP_NO_MIGRATE, PL0_RW, NULL, 0, offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      pmreg_access, NULL, pmcntenset_write, NULL, raw_write },
    { "PMCNTENSET_EL0", 0,9,12, 3,3,1, ARM_CP_STATE_AA64,
      0, PL0_RW, NULL, 0, offsetof(CPUARMState, cp15.c9_pmcnten),
      pmreg_access, NULL, pmcntenset_write, NULL, raw_write },
    { "PMCNTENCLR", 15,9,12, 0,0,2, 0,
      ARM_CP_NO_MIGRATE, PL0_RW, NULL, 0, offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      pmreg_access, NULL, pmcntenclr_write, },
    { "PMCNTENCLR_EL0", 0,9,12, 3,3,2, ARM_CP_STATE_AA64,
      ARM_CP_NO_MIGRATE, PL0_RW, NULL, 0, offsetof(CPUARMState, cp15.c9_pmcnten),
      pmreg_access, NULL, pmcntenclr_write },
    { "PMOVSR", 15,9,12, 0,0,3, 0,
      0, PL0_RW, NULL, 0, offsetof(CPUARMState, cp15.c9_pmovsr),
      pmreg_access, NULL, pmovsr_write, NULL, raw_write },
    /* Unimplemented so WI. */
    { "PMSWINC", 15,9,12, 0,0,4, 0,
      ARM_CP_NOP, PL0_W, NULL, 0, 0,
      pmreg_access, },
    /* Since we don't implement any events, writing to PMSELR is UNPREDICTABLE.
     * We choose to RAZ/WI.
     */
    { "PMSELR", 15,9,12, 0,0,5, 0,
      ARM_CP_CONST, PL0_RW, NULL, 0, 0,
      pmreg_access },
#ifndef CONFIG_USER_ONLY
    { "PMCCNTR", 15,9,13, 0,0,0, 0,
      ARM_CP_IO, PL0_RW, NULL, 0, 0,
      pmreg_access, pmccntr_read, pmccntr_write32, },
    { "PMCCNTR_EL0", 0,9,13, 3,3,0, ARM_CP_STATE_AA64,
      ARM_CP_IO, PL0_RW, NULL, 0, 0,
      pmreg_access, pmccntr_read, pmccntr_write, },
#endif
    { "PMCCFILTR_EL0", 0,14,15, 3,3,7, ARM_CP_STATE_AA64,
      ARM_CP_IO, PL0_RW, NULL, 0, offsetof(CPUARMState, cp15.pmccfiltr_el0),
      pmreg_access, NULL, pmccfiltr_write, },
    { "PMXEVTYPER", 15,9,13, 0,0,1, 0,
      0, PL0_RW, NULL, 0, offsetof(CPUARMState, cp15.c9_pmxevtyper),
      pmreg_access, NULL, pmxevtyper_write, NULL, raw_write },
    /* Unimplemented, RAZ/WI. */
    { "PMXEVCNTR", 15,9,13, 0,0,2, 0,
      ARM_CP_CONST, PL0_RW, NULL, 0, 0,
      pmreg_access },
    { "PMUSERENR", 15,9,14, 0,0,0, 0,
      0, PL0_R | PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c9_pmuserenr),
      NULL, NULL, pmuserenr_write, NULL, raw_write },
    { "PMINTENSET", 15,9,14, 0,0,1, 0,
      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c9_pminten),
      NULL, NULL, pmintenset_write, NULL, raw_write },
    { "PMINTENCLR", 15,9,14, 0,0,2, 0,
      ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c9_pminten),
      NULL, NULL, pmintenclr_write, },
    { "VBAR", 0,12,0, 3,0,0, ARM_CP_STATE_BOTH,
      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.vbar_el[1]),
      NULL, NULL, vbar_write, },
    { "SCR", 15,1,1, 0,0,0, 0,
      0, PL1_RW, NULL, 0, offsetoflow32(CPUARMState, cp15.scr_el3),
      NULL, NULL, scr_write },
    { "CCSIDR", 0,0,0, 3,1,0, ARM_CP_STATE_BOTH,
      ARM_CP_NO_MIGRATE, PL1_R, NULL, 0, 0,
      NULL, ccsidr_read, },
    { "CSSELR", 0,0,0, 3,2,0, ARM_CP_STATE_BOTH,
      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c0_cssel),
      NULL, NULL, csselr_write, },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { "AIDR", 0,0,0, 3,1,7, ARM_CP_STATE_BOTH,
      ARM_CP_CONST, PL1_R, NULL, 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { "AFSR0_EL1", 0,5,1, 3,0,0, ARM_CP_STATE_BOTH,
      ARM_CP_CONST, PL1_RW, NULL, 0 },
    { "AFSR1_EL1", 0,5,1, 3,0,1, ARM_CP_STATE_BOTH,
      ARM_CP_CONST, PL1_RW, NULL, 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { "MAIR_EL1", 0,10,2, 3,0,0, ARM_CP_STATE_AA64,
      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.mair_el1), },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     * The override is necessary because of the overly-broad TLB_LOCKDOWN
     * definition.
     */
    { "MAIR0", 15,10,2, 0,0,0, ARM_CP_STATE_AA32,
      ARM_CP_OVERRIDE, PL1_RW, NULL, 0, offsetoflow32(CPUARMState, cp15.mair_el1),
      NULL, NULL, NULL, NULL, NULL, arm_cp_reset_ignore },
    { "MAIR1", 15,10,2, 0,0,1, ARM_CP_STATE_AA32,
      ARM_CP_OVERRIDE, PL1_RW, NULL, 0, offsetofhigh32(CPUARMState, cp15.mair_el1),
      NULL, NULL, NULL, NULL, NULL, arm_cp_reset_ignore },
    { "ISR_EL1", 0,12,1, 3,0,0, ARM_CP_STATE_BOTH,
      ARM_CP_NO_MIGRATE, PL1_R, NULL, 0, 0,
      NULL, isr_read },
    /* 32 bit ITLB invalidates */
    { "ITLBIALL", 15,8,5, 0,0,0, 0,
      ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
      NULL, NULL, tlbiall_write },
    { "ITLBIMVA", 15,8,5, 0,0,1, 0,
      ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
      NULL, NULL, tlbimva_write },
    { "ITLBIASID", 15,8,5, 0,0,2, 0,
      ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
      NULL, NULL, tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { "DTLBIALL", 15,8,6, 0,0,0, 0,
      ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
      NULL, NULL, tlbiall_write },
    { "DTLBIMVA", 15,8,6, 0,0,1, 0,
      ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
      NULL, NULL, tlbimva_write },
    { "DTLBIASID", 15,8,6, 0,0,2, 0,
      ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
      NULL, NULL, tlbiasid_write },
    /* 32 bit TLB invalidates */
    { "TLBIALL", 15,8,7, 0,0,0, 0,
      ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
      NULL, NULL, tlbiall_write },
    { "TLBIMVA", 15,8,7, 0,0,1, 0,
      ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
      NULL, NULL, tlbimva_write },
    { "TLBIASID", 15,8,7, 0,0,2, 0,
      ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
      NULL, NULL, tlbiasid_write },
    { "TLBIMVAA", 15,8,7, 0,0,3, 0,
      ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
      NULL, NULL, tlbimvaa_write },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { "TLBIALLIS", 15,8,3, 0,0,0, 0,
      ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
      NULL, NULL, tlbiall_is_write },
    { "TLBIMVAIS", 15,8,3, 0,0,1, 0,
      ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
      NULL, NULL, tlbimva_is_write },
    { "TLBIASIDIS", 15,8,3, 0,0,2, 0,
      ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
      NULL, NULL, tlbiasid_is_write },
    { "TLBIMVAAIS", 15,8,3, 0,0,3, 0,
      ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
      NULL, NULL, tlbimvaa_is_write },
    REGINFO_SENTINEL
};

static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { "TEECR", 14,0,0, 0,6,0, 0,
      0, PL1_RW, NULL, 0, offsetof(CPUARMState, teecr),
      NULL, NULL, teecr_write },
    { "TEEHBR", 14,1,0, 0,6,0, 0,
      0, PL0_RW, NULL, 0, offsetof(CPUARMState, teehbr),
      teehbr_access, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { "TPIDR_EL0", 0,13,0, 3,3,2, ARM_CP_STATE_AA64,
      0, PL0_RW, NULL, 0, offsetof(CPUARMState, cp15.tpidr_el0), },
    { "TPIDRURW", 15,13,0, 0,0,2, 0,
      0, PL0_RW, NULL, 0, offsetoflow32(CPUARMState, cp15.tpidr_el0),
      NULL, NULL, NULL, NULL, NULL, arm_cp_reset_ignore },
    { "TPIDRRO_EL0", 0,13,0, 3,3,3, ARM_CP_STATE_AA64,
      0, PL0_R | PL1_W, NULL, 0, offsetof(CPUARMState, cp15.tpidrro_el0) },
    { "TPIDRURO", 15,13,0, 0,0,3, 0,
      0, PL0_R | PL1_W, NULL, 0, offsetoflow32(CPUARMState, cp15.tpidrro_el0),
      NULL, NULL, NULL, NULL, NULL, arm_cp_reset_ignore },
    { "TPIDR_EL1", 0,13,0, 3,0,4, ARM_CP_STATE_BOTH,
      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.tpidr_el1), },
    REGINFO_SENTINEL
};

#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero */
    if (arm_current_el(env) == 0 && !extract32(env->cp15.c14_cntkctl, 0, 2)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx)
{
    /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
    if (arm_current_el(env) == 0 &&
        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx)
{
    /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
     * EL0[PV]TEN is zero.
     */
    if (arm_current_el(env) == 0 &&
        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri)
{
    return gt_counter_access(env, GTIMER_PHYS);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri)
{
    return gt_counter_access(env, GTIMER_VIRT);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_timer_access(env, GTIMER_PHYS);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_timer_access(env, GTIMER_VIRT);
}

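/* The system counter ticks once per GTIMER_SCALE nanoseconds of
 * QEMU_CLOCK_VIRTUAL time, matching the (10^9 / GTIMER_SCALE) Hz CNTFRQ
 * reset value used in generic_timer_cp_reginfo[] below.
 */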
static uint64_t gt_get_countervalue(CPUARMState *env)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
}

static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count >= gt->cval;
        uint64_t nexttick;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
        //qemu_set_irq(cpu->gt_timer_outputs[timeridx],
        //             (istatus && !(gt->ctl & 2)));
        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / GTIMER_SCALE) {
            nexttick = INT64_MAX / GTIMER_SCALE;
        }
        //timer_mod(cpu->gt_timer[timeridx], nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        //qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        //timer_del(cpu->gt_timer[timeridx]);
    }
}

static void gt_cnt_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
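    /* The counter value is derived directly from QEMU_CLOCK_VIRTUAL in
     * gt_get_countervalue(), so there is no per-CPU state to reset here.
     */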
}

static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}

static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    int timeridx = ri->opc1 & 1;

    env->cp15.c14_timer[timeridx].cval = value;
    //gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}

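/* TVAL is a signed 32-bit downcounter view of the 64-bit comparator:
 * reads return (CVAL - count); writes set CVAL = count + value.
 */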
static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    int timeridx = ri->crm & 1;

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      gt_get_countervalue(env));
}

static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    int timeridx = ri->crm & 1;

    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}

static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int timeridx = ri->crm & 1;
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        //qemu_set_irq(cpu->gt_timer_outputs[timeridx],
        //             (oldval & 4) && !(value & 2));
    }
}

void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { "CNTFRQ", 15,14,0, 0,0,0, 0,
      ARM_CP_NO_MIGRATE, PL1_RW | PL0_R, NULL, 0, offsetoflow32(CPUARMState, cp15.c14_cntfrq),
      gt_cntfrq_access, NULL, NULL, NULL, NULL, arm_cp_reset_ignore, },
    { "CNTFRQ_EL0", 0,14,0, 3,3,0, ARM_CP_STATE_AA64,
      0, PL1_RW | PL0_R, NULL, (1000 * 1000 * 1000) / GTIMER_SCALE, offsetof(CPUARMState, cp15.c14_cntfrq),
      gt_cntfrq_access, },
    /* overall control: mostly access permissions */
    { "CNTKCTL", 0,14,1, 3,0,0, ARM_CP_STATE_BOTH,
      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c14_cntkctl), },
    /* per-timer control */
    { "CNTP_CTL", 15,14,2, 0,0,1, 0,
      ARM_CP_IO | ARM_CP_NO_MIGRATE, PL1_RW | PL0_R, NULL, 0, offsetoflow32(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      gt_ptimer_access, NULL, gt_ctl_write, NULL, raw_write, arm_cp_reset_ignore, },
    { "CNTP_CTL_EL0", 0,14,2, 3,3,1, ARM_CP_STATE_AA64,
      ARM_CP_IO, PL1_RW | PL0_R, NULL, 0, offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      gt_ptimer_access, NULL, gt_ctl_write, NULL, raw_write, },
    { "CNTV_CTL", 15,14,3, 0,0,1, 0,
      ARM_CP_IO | ARM_CP_NO_MIGRATE, PL1_RW | PL0_R, NULL, 0, offsetoflow32(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      gt_vtimer_access, NULL, gt_ctl_write, NULL, raw_write, arm_cp_reset_ignore, },
    { "CNTV_CTL_EL0", 0,14,3, 3,3,1, ARM_CP_STATE_AA64,
      ARM_CP_IO, PL1_RW | PL0_R, NULL, 0, offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      gt_vtimer_access, NULL, gt_ctl_write, NULL, raw_write, },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { "CNTP_TVAL", 15,14,2, 0,0,0, 0,
      ARM_CP_NO_MIGRATE | ARM_CP_IO, PL1_RW | PL0_R, NULL, 0, 0,
      gt_ptimer_access, gt_tval_read, gt_tval_write, },
    { "CNTP_TVAL_EL0", 0,14,2, 3,3,0, ARM_CP_STATE_AA64,
      ARM_CP_NO_MIGRATE | ARM_CP_IO, PL1_RW | PL0_R, NULL, 0, 0,
      NULL, gt_tval_read, gt_tval_write, },
    { "CNTV_TVAL", 15,14,3, 0,0,0, 0,
      ARM_CP_NO_MIGRATE | ARM_CP_IO, PL1_RW | PL0_R, NULL, 0, 0,
      gt_vtimer_access, gt_tval_read, gt_tval_write, },
    { "CNTV_TVAL_EL0", 0,14,3, 3,3,0, ARM_CP_STATE_AA64,
      ARM_CP_NO_MIGRATE | ARM_CP_IO, PL1_RW | PL0_R, NULL, 0, 0,
      NULL, gt_tval_read, gt_tval_write, },
    /* The counter itself */
    { "CNTPCT", 15,0,14, 0,0,0, 0,
      ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO, PL0_R, NULL, 0, 0,
      gt_pct_access, gt_cnt_read, NULL, NULL, NULL, arm_cp_reset_ignore, },
    { "CNTPCT_EL0", 0,14,0, 3,3,1, ARM_CP_STATE_AA64,
      ARM_CP_NO_MIGRATE | ARM_CP_IO, PL0_R, NULL, 0, 0,
      gt_pct_access, gt_cnt_read, NULL, NULL, NULL, gt_cnt_reset, },
    { "CNTVCT", 15,0,14, 0,1,0, 0,
      ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO, PL0_R, NULL, 0, 0,
      gt_vct_access, gt_cnt_read, NULL, NULL, NULL, arm_cp_reset_ignore, },
    { "CNTVCT_EL0", 0,14,0, 3,3,2, ARM_CP_STATE_AA64,
      ARM_CP_NO_MIGRATE | ARM_CP_IO, PL0_R, NULL, 0, 0,
      gt_vct_access, gt_cnt_read, NULL, NULL, NULL, gt_cnt_reset, },
    /* Comparison value, indicating when the timer goes off */
    { "CNTP_CVAL", 15,0,14, 0,2,0, 0,
      ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_MIGRATE, PL1_RW | PL0_R, NULL, 0, offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      gt_ptimer_access, NULL, gt_cval_write, NULL, raw_write, arm_cp_reset_ignore, },
    { "CNTP_CVAL_EL0", 0,14,2, 3,3,2, ARM_CP_STATE_AA64,
      ARM_CP_IO, PL1_RW | PL0_R, NULL, 0, offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      gt_ptimer_access, NULL, gt_cval_write, NULL, raw_write, },
    { "CNTV_CVAL", 15,0,14, 0,3,0, 0,
      ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_MIGRATE, PL1_RW | PL0_R, NULL, 0, offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      gt_vtimer_access, NULL, gt_cval_write, NULL, raw_write, arm_cp_reset_ignore, },
    { "CNTV_CVAL_EL0", 0,14,3, 3,3,2, ARM_CP_STATE_AA64,
      ARM_CP_IO, PL1_RW | PL0_R, NULL, 0, offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      gt_vtimer_access, NULL, gt_cval_write, NULL, raw_write, },
    REGINFO_SENTINEL
};

#else
/* In user-mode none of the generic timer registers are accessible,
 * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs,
 * so instead just don't register any of them.
 */
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    REGINFO_SENTINEL
};

#endif

static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        raw_write(env, ri, value);
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        raw_write(env, ri, value & 0xfffff6ff);
    } else {
        raw_write(env, ri, value & 0xfffff1ff);
    }
}

#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */

static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    if (ri->opc2 & 4) {
        /* Other states are only available with TrustZone; in
         * a non-TZ implementation these registers don't exist
         * at all, which is an Uncategorized trap. This underdecoding
         * is safe because the reginfo is NO_MIGRATE.
         */
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret, is_user = ri->opc2 & 2;
    int access_type = ri->opc2 & 1;

    ret = get_phys_addr(env, value, access_type, is_user,
                        &phys_addr, &prot, &page_size);
    if (extended_addresses_enabled(env)) {
        /* ret is a DFSR/IFSR value for the long descriptor
         * translation table format, but with WnR always clear.
         * Convert it to a 64-bit PAR.
         */
        uint64_t par64 = (1 << 11); /* LPAE bit always set */
        if (ret == 0) {
            par64 |= phys_addr & ~0xfffULL;
            /* We don't set the ATTR or SH fields in the PAR. */
        } else {
            par64 |= 1; /* F */
            par64 |= (ret & 0x3f) << 1; /* FS */
            /* Note that S2WLK and FSTAGE are always zero, because we don't
             * implement virtualization and therefore there can't be a stage 2
             * fault.
             */
        }
        env->cp15.par_el1 = par64;
    } else {
        /* ret is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (ret == 0) {
            /* We do not set any attribute bits in the PAR */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                env->cp15.par_el1 = (phys_addr & 0xff000000) | 1 << 1;
            } else {
                env->cp15.par_el1 = phys_addr & 0xfffff000;
            }
        } else {
            env->cp15.par_el1 = ((ret & (1 << 10)) >> 5) |
                ((ret & (1 << 12)) >> 6) |
                ((ret & 0xf) << 1) | 1;
        }
    }
}
#endif

static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { "PAR", 15,7,4, 0,0,0, 0,
      0, PL1_RW, NULL, 0, offsetoflow32(CPUARMState, cp15.par_el1),
      NULL, NULL, par_write },
#ifndef CONFIG_USER_ONLY
    { "ATS", 15,7,8, 0,0,CP_ANY, 0,
      ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
      ats_access, NULL, ats_write },
#endif
    REGINFO_SENTINEL
};

/* Return basic MPU access permission bits.  */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format.  */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}

static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}

static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}

static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { "DATA_AP", 15,5,0, 0,0,0, 0,
      ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.pmsav5_data_ap),
      NULL, pmsav5_data_ap_read, pmsav5_data_ap_write, },
    { "INSN_AP", 15,5,0, 0,0,1, 0,
      ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      NULL, pmsav5_insn_ap_read, pmsav5_insn_ap_write, },
    { "DATA_EXT_AP", 15,5,0, 0,0,2, 0,
      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.pmsav5_data_ap), },
    { "INSN_EXT_AP", 15,5,0, 0,0,3, 0,
      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.pmsav5_insn_ap), },
    { "DCACHE_CFG", 15,2,0, 0,0,0, 0,
      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c2_data), },
    { "ICACHE_CFG", 15,2,0, 0,0,1, 0,
      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c2_insn), },
    /* Protection region base and size registers */
    { "946_PRBS0", 15,6,0, 0,0,CP_ANY, 0,
      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c6_region[0]) },
    { "946_PRBS1", 15,6,1, 0,0,CP_ANY, 0,
      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c6_region[1]) },
    { "946_PRBS2", 15,6,2, 0,0,CP_ANY, 0,
      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c6_region[2]) },
    { "946_PRBS3", 15,6,3, 0,0,CP_ANY, 0,
      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c6_region[3]) },
    { "946_PRBS4", 15,6,4, 0,0,CP_ANY, 0,
      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c6_region[4]) },
    { "946_PRBS5", 15,6,5, 0,0,CP_ANY, 0,
      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c6_region[5]) },
    { "946_PRBS6", 15,6,6, 0,0,CP_ANY, 0,
      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c6_region[6]) },
    { "946_PRBS7", 15,6,7, 0,0,CP_ANY, 0,
      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};

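/* TTBCR.N (bits [2:0]) selects the boundary between the TTBR0 and TTBR1
 * address ranges for short-descriptor walks; the derived c2_mask and
 * c2_base_mask values are consumed by get_phys_addr().
 */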
static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    int maskshift = extract32(value, 0, 3);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /* Note that we always calculate c2_mask and c2_base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TTBCR fields are used differently
     * and the c2_mask and c2_base_mask values are meaningless.
     */
    raw_write(env, ri, value);
    env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
1347 
vmsa_ttbcr_write(CPUARMState * env,const ARMCPRegInfo * ri,uint64_t value)1348 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1349                              uint64_t value)
1350 {
1351     ARMCPU *cpu = arm_env_get_cpu(env);
1352 
1353     if (arm_feature(env, ARM_FEATURE_LPAE)) {
1354         /* With LPAE the TTBCR could result in a change of ASID
1355          * via the TTBCR.A1 bit, so do a TLB flush.
1356          */
1357         tlb_flush(CPU(cpu), 1);
1358     }
1359     vmsa_ttbcr_raw_write(env, ri, value);
1360 }
1361 
1362 static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1363 {
1364     env->cp15.c2_base_mask = 0xffffc000u;
1365     raw_write(env, ri, 0);
1366     env->cp15.c2_mask = 0;
1367 }
1368 
1369 static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
1370                                uint64_t value)
1371 {
1372     ARMCPU *cpu = arm_env_get_cpu(env);
1373 
1374     /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
1375     tlb_flush(CPU(cpu), 1);
1376     raw_write(env, ri, value);
1377 }
1378 
1379 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1380                             uint64_t value)
1381 {
1382     /* 64 bit accesses to the TTBRs can change the ASID and so we
1383      * must flush the TLB.
1384      */
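    /* (In the LPAE and AArch64 formats the ASID field sits in the upper
     * half of the TTBR, out of reach of a 32-bit access.)
     */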
1385     if (cpreg_field_is_64bit(ri)) {
1386         ARMCPU *cpu = arm_env_get_cpu(env);
1387 
1388         tlb_flush(CPU(cpu), 1);
1389     }
1390     raw_write(env, ri, value);
1391 }
1392 
1393 static const ARMCPRegInfo vmsa_cp_reginfo[] = {
1394     { "DFSR", 15,5,0, 0,0,0, 0,
1395       ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, offsetoflow32(CPUARMState, cp15.esr_el[1]),
1396       NULL,NULL,NULL,NULL,NULL, arm_cp_reset_ignore, },
1397     { "IFSR", 15,5,0, 0,0,1, 0,
1398       0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.ifsr_el2), },
1399     { "ESR_EL1", 0,5,2, 3,0,0, ARM_CP_STATE_AA64,
1400       0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.esr_el[1]), },
1401     { "TTBR0_EL1", 0,2,0, 3,0,0, ARM_CP_STATE_BOTH,
1402       0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.ttbr0_el1),
1403       NULL, NULL, vmsa_ttbr_write, },
1404     { "TTBR1_EL1", 0,2,0, 3,0,1, ARM_CP_STATE_BOTH,
1405       0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.ttbr1_el1),
1406       NULL, NULL, vmsa_ttbr_write, },
1407     { "TCR_EL1", 0,2,0, 3,0,2, ARM_CP_STATE_AA64,
1408       0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c2_control),
1409       NULL, NULL,vmsa_tcr_el1_write, NULL,raw_write, vmsa_ttbcr_reset, },
1410     { "TTBCR", 15,2,0, 0,0,2, 0,
1411       ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, offsetoflow32(CPUARMState, cp15.c2_control),
1412       NULL, NULL, vmsa_ttbcr_write, NULL, vmsa_ttbcr_raw_write, arm_cp_reset_ignore, },
1413     /* 64-bit FAR; this entry also gives us the AArch32 DFAR */
1414     { "FAR_EL1", 0,6,0, 3,0,0, ARM_CP_STATE_BOTH,
1415       0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.far_el[1]), },
1416     REGINFO_SENTINEL
1417 };
1418 
1419 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
1420                                 uint64_t value)
1421 {
1422     env->cp15.c15_ticonfig = value & 0xe7;
1423     /* The OS_TYPE bit in this register changes the reported CPUID! */
1424     env->cp15.c0_cpuid = (value & (1 << 5)) ?
1425         ARM_CPUID_TI915T : ARM_CPUID_TI925T;
1426 }
1427 
1428 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
1429                                 uint64_t value)
1430 {
1431     env->cp15.c15_threadid = value & 0xffff;
1432 }
1433 
1434 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
1435                            uint64_t value)
1436 {
1437     /* Wait-for-interrupt (deprecated) */
1438     cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
1439 }
1440 
1441 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
1442                                   uint64_t value)
1443 {
1444     /* On OMAP there are registers indicating the max/min index of dcache lines
1445      * containing a dirty line; cache flush operations have to reset these.
1446      */
1447     env->cp15.c15_i_max = 0x000;
1448     env->cp15.c15_i_min = 0xff0;
1449 }
1450 
1451 static const ARMCPRegInfo omap_cp_reginfo[] = {
1452     { "DFSR", 15,5,CP_ANY, 0,CP_ANY,CP_ANY, 0,
1453       ARM_CP_OVERRIDE, PL1_RW, NULL, 0, offsetoflow32(CPUARMState, cp15.esr_el[1]), },
1454     { "", 15,15,0, 0,0,0, 0,
1455       ARM_CP_NOP, PL1_RW, NULL, 0, 0, },
1456     { "TICONFIG", 15,15,1, 0,0,0, 0,
1457       0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c15_ticonfig),
1458       NULL, NULL, omap_ticonfig_write },
1459     { "IMAX", 15,15,2, 0,0,0, 0,
1460       0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c15_i_max), },
1461     { "IMIN", 15,15,3, 0,0,0, 0,
1462       0, PL1_RW, NULL, 0xff0, offsetof(CPUARMState, cp15.c15_i_min) },
1463     { "THREADID", 15,15,4, 0,0,0, 0,
1464       0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c15_threadid),
1465       NULL, NULL, omap_threadid_write },
1466     { "TI925T_STATUS", 15,15,8, 0,0,0, 0,
1467       ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, 0,
1468       NULL, arm_cp_read_zero, omap_wfi_write, },
1469     /* TODO: Peripheral port remap register:
1470      * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
1471      * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
1472      * when MMU is off.
1473      */
1474     { "OMAP_CACHEMAINT", 15,7,CP_ANY, 0,0,CP_ANY, 0,
1475       ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
1476       NULL, NULL, omap_cachemaint_write },
1477     { "C9", 15,9,CP_ANY, 0,CP_ANY,CP_ANY, 0,
1478       ARM_CP_CONST | ARM_CP_OVERRIDE, PL1_RW, NULL, 0, 0, },
1479     REGINFO_SENTINEL
1480 };
1481 
1482 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
1483                               uint64_t value)
1484 {
1485     env->cp15.c15_cpar = value & 0x3fff;
1486 }
1487 
1488 static const ARMCPRegInfo xscale_cp_reginfo[] = {
1489     { "XSCALE_CPAR", 15,15,1, 0,0,0, 0,
1490       0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c15_cpar),
1491       NULL, NULL, xscale_cpar_write, },
1492     { "XSCALE_AUXCR", 15,1,0, 0,0,1, 0,
1493       0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c1_xscaleauxcr), },
1494     /* XScale specific cache-lockdown: since we have no cache we NOP these
1495      * and hope the guest does not really rely on cache behaviour.
1496      */
1497     { "XSCALE_LOCK_ICACHE_LINE", 15,9,1, 0,0,0, 0,
1498       ARM_CP_NOP, PL1_W },
1499     { "XSCALE_UNLOCK_ICACHE", 15,9,1, 0,0,1, 0,
1500       ARM_CP_NOP, PL1_W, },
1501     { "XSCALE_DCACHE_LOCK", 15,9,2, 0,0,0, 0,
1502       ARM_CP_NOP, PL1_RW },
1503     { "XSCALE_UNLOCK_DCACHE", 15,9,2, 0,0,1, 0,
1504       ARM_CP_NOP, PL1_W, },
1505     REGINFO_SENTINEL
1506 };
1507 
1508 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
1509     /* RAZ/WI the whole crn=15 space, when we don't have a more specific
1510      * implementation of this implementation-defined space.
1511      * Ideally this should eventually disappear in favour of actually
1512      * implementing the correct behaviour for all cores.
1513      */
1514     { "C15_IMPDEF", 15,15,CP_ANY, 0,CP_ANY,CP_ANY, 0,
1515       ARM_CP_CONST | ARM_CP_NO_MIGRATE | ARM_CP_OVERRIDE, PL1_RW, NULL, 0 },
1516     REGINFO_SENTINEL
1517 };
1518 
1519 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
1520     /* Cache status: RAZ because we have no cache so it's always clean */
1521     { "CDSR", 15,7,10, 0,0,6, 0,
1522       ARM_CP_CONST | ARM_CP_NO_MIGRATE, PL1_R, NULL, 0 },
1523     REGINFO_SENTINEL
1524 };
1525 
1526 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
1527     /* We never have a block transfer operation in progress */
1528     { "BXSR", 15,7,12, 0,0,4, 0,
1529       ARM_CP_CONST | ARM_CP_NO_MIGRATE, PL0_R, NULL, 0 },
1530     /* The cache ops themselves: these all NOP for QEMU */
1531     { "IICR", 15, 0,5, 0,0, 0, 0,
1532       ARM_CP_NOP|ARM_CP_64BIT, PL1_W },
1533     { "IDCR", 15, 0,6, 0,0, 0, 0,
1534       ARM_CP_NOP|ARM_CP_64BIT, PL1_W, },
1535     { "CDCR", 15, 0,12, 0,0, 0, 0,
1536       ARM_CP_NOP|ARM_CP_64BIT, PL0_W, },
1537     { "PIR", 15, 0,12, 0,1, 0, 0,
1538       ARM_CP_NOP|ARM_CP_64BIT, PL0_W, },
1539     { "PDR", 15, 0,12, 0,2, 0, 0,
1540       ARM_CP_NOP|ARM_CP_64BIT, PL0_W, },
1541     { "CIDCR", 15, 0,14, 0,0, 0, 0,
1542       ARM_CP_NOP|ARM_CP_64BIT, PL1_W, },
1543     REGINFO_SENTINEL
1544 };
1545 
1546 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
1547     /* The cache test-and-clean instructions always return (1 << 30)
1548      * to indicate that there are no dirty cache lines.
1549      */
1550     { "TC_DCACHE", 15,7,10, 0,0,3, 0,
1551       ARM_CP_CONST | ARM_CP_NO_MIGRATE, PL0_R, NULL, (1 << 30) },
1552     { "TCI_DCACHE", 15,7,14, 0,0,3, 0,
1553       ARM_CP_CONST | ARM_CP_NO_MIGRATE, PL0_R, NULL, (1 << 30) },
1554     REGINFO_SENTINEL
1555 };
1556 
1557 static const ARMCPRegInfo strongarm_cp_reginfo[] = {
1558     /* Ignore ReadBuffer accesses */
1559     { "C9_READBUFFER", 15,9,CP_ANY, 0,CP_ANY,CP_ANY, 0,
1560       ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, },
1561     REGINFO_SENTINEL
1562 };
1563 
1564 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1565 {
1566     CPUState *cs = CPU(arm_env_get_cpu(env));
1567     uint32_t mpidr = cs->cpu_index;
1568     /* We don't support setting cluster ID ([8..11]) (known as Aff1
1569      * in later ARM ARM versions), or any of the higher affinity level fields,
1570      * so these bits always RAZ.
1571      */
1572     if (arm_feature(env, ARM_FEATURE_V7MP)) {
1573         mpidr |= (1U << 31);
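        /* Bit 31 (the M bit) reads as 1 when the MP extensions are
         * implemented.
         */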
1574         /* Cores which are uniprocessor (non-coherent)
1575          * but still implement the MP extensions set
1576          * bit 30. (For instance, A9UP.) However we do
1577          * not currently model any of those cores.
1578          */
1579     }
1580     return mpidr;
1581 }
1582 
1583 static const ARMCPRegInfo mpidr_cp_reginfo[] = {
1584     { "MPIDR", 0,0,0, 3,0,5, ARM_CP_STATE_BOTH,
1585       ARM_CP_NO_MIGRATE, PL1_R, NULL, 0, 0,
1586       NULL, mpidr_read, },
1587     REGINFO_SENTINEL
1588 };
1589 
1590 static const ARMCPRegInfo lpae_cp_reginfo[] = {
1591     /* NOP AMAIR0/1: the override is because these clash with the rather
1592      * broadly specified TLB_LOCKDOWN entry in the generic cp_reginfo.
1593      */
1594     { "AMAIR0", 0,10,3, 3,0,0, ARM_CP_STATE_BOTH,
1595       ARM_CP_CONST | ARM_CP_OVERRIDE, PL1_RW, NULL, 0 },
1596     /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
1597     { "AMAIR1", 15,10,3, 0,0,1, 0,
1598       ARM_CP_CONST | ARM_CP_OVERRIDE, PL1_RW, NULL, 0 },
1599     { "PAR", 15, 0,7, 0,0, 0, 0,
1600       ARM_CP_64BIT, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.par_el1), },
1601     { "TTBR0", 15, 0,2, 0,0, 0, 0,
1602       ARM_CP_64BIT | ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.ttbr0_el1),
1603       NULL, NULL, vmsa_ttbr_write, NULL,NULL, arm_cp_reset_ignore },
1604     { "TTBR1", 15, 0,2, 0,1, 0, 0,
1605       ARM_CP_64BIT | ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.ttbr1_el1),
1606       NULL, NULL, vmsa_ttbr_write, NULL,NULL, arm_cp_reset_ignore },
1607     REGINFO_SENTINEL
1608 };
1609 
1610 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1611 {
1612     return vfp_get_fpcr(env);
1613 }
1614 
1615 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1616                             uint64_t value)
1617 {
1618     vfp_set_fpcr(env, value);
1619 }
1620 
1621 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1622 {
1623     return vfp_get_fpsr(env);
1624 }
1625 
1626 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1627                             uint64_t value)
1628 {
1629     vfp_set_fpsr(env, value);
1630 }
1631 
1632 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri)
1633 {
1634     if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) {
1635         return CP_ACCESS_TRAP;
1636     }
1637     return CP_ACCESS_OK;
1638 }
1639 
1640 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
1641                             uint64_t value)
1642 {
1643     env->daif = value & PSTATE_DAIF;
1644 }
1645 
1646 static CPAccessResult aa64_cacheop_access(CPUARMState *env,
1647                                           const ARMCPRegInfo *ri)
1648 {
1649     /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
1650      * SCTLR_EL1.UCI is set.
1651      */
1652     if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCI)) {
1653         return CP_ACCESS_TRAP;
1654     }
1655     return CP_ACCESS_OK;
1656 }
1657 
1658 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
1659  * Page D4-1736 (DDI0487A.b)
1660  */
1661 
1662 static void tlbi_aa64_va_write(CPUARMState *env, const ARMCPRegInfo *ri,
1663                                uint64_t value)
1664 {
1665     /* Invalidate by VA (AArch64 version) */
1666     ARMCPU *cpu = arm_env_get_cpu(env);
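    /* The Xt operand carries VA[55:12] in bits [43:0]; shifting left by 12
     * and sign-extending from bit 55 recovers the full page address.
     */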
1667     uint64_t pageaddr = sextract64(value << 12, 0, 56);
1668 
1669     tlb_flush_page(CPU(cpu), pageaddr);
1670 }
1671 
1672 static void tlbi_aa64_vaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
1673                                 uint64_t value)
1674 {
1675     /* Invalidate by VA, all ASIDs (AArch64 version) */
1676     ARMCPU *cpu = arm_env_get_cpu(env);
1677     uint64_t pageaddr = sextract64(value << 12, 0, 56);
1678 
1679     tlb_flush_page(CPU(cpu), pageaddr);
1680 }
1681 
1682 static void tlbi_aa64_asid_write(CPUARMState *env, const ARMCPRegInfo *ri,
1683                                  uint64_t value)
1684 {
1685     /* Invalidate by ASID (AArch64 version) */
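    /* QEMU's TLB is not tagged with ASIDs, so invalidating by ASID falls
     * back to flushing the whole TLB.
     */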
1686     ARMCPU *cpu = arm_env_get_cpu(env);
1687     int asid = extract64(value, 48, 16);
1688     tlb_flush(CPU(cpu), asid == 0);
1689 }
1690 
1691 static void tlbi_aa64_va_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
1692                                   uint64_t value)
1693 {
1694     //uint64_t pageaddr = sextract64(value << 12, 0, 56);
1695     //struct uc_struct *uc = env->uc;
1696     // TODO: issue #642
1697     // tlb_flush(other_cpu, pageaddr);
1698 }
1699 
1700 static void tlbi_aa64_vaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
1701                                   uint64_t value)
1702 {
1703     //uint64_t pageaddr = sextract64(value << 12, 0, 56);
1704     //struct uc_struct *uc = env->uc;
1705     // TODO: issue #642
1706     // tlb_flush(other_cpu, pageaddr);
1707 }
1708 
1709 static void tlbi_aa64_asid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
1710                                   uint64_t value)
1711 {
1712     //int asid = extract64(value, 48, 16);
1713     //struct uc_struct *uc = env->uc;
1714     // TODO: issue #642
1715     // tlb_flush(other_cpu, asid == 0);
1716 }
1717 
1718 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri)
1719 {
1720     /* We don't implement EL2, so the only control on DC ZVA is the
1721      * bit in the SCTLR which can prohibit access for EL0.
1722      */
1723     if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_DZE)) {
1724         return CP_ACCESS_TRAP;
1725     }
1726     return CP_ACCESS_OK;
1727 }
1728 
1729 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
1730 {
1731     ARMCPU *cpu = arm_env_get_cpu(env);
1732     int dzp_bit = 1 << 4;
1733 
1734     /* DZP indicates whether DC ZVA access is allowed */
1735     if (aa64_zva_access(env, NULL) == CP_ACCESS_OK) {
1736         dzp_bit = 0;
1737     }
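    /* The low four bits advertise log2 of the block size in words; e.g.
     * dcz_blocksize == 4 means a 64-byte DC ZVA block.
     */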
1738     return cpu->dcz_blocksize | dzp_bit;
1739 }
1740 
1741 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri)
1742 {
1743     if (!(env->pstate & PSTATE_SP)) {
1744         /* Access to SP_EL0 is undefined if it's being used as
1745          * the stack pointer.
1746          */
1747         return CP_ACCESS_TRAP_UNCATEGORIZED;
1748     }
1749     return CP_ACCESS_OK;
1750 }
1751 
1752 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
1753 {
1754     return env->pstate & PSTATE_SP;
1755 }
1756 
1757 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
1758 {
1759     update_spsel(env, val);
1760 }
1761 
1762 static const ARMCPRegInfo v8_cp_reginfo[] = {
1763     /* Minimal set of EL0-visible registers. This will need to be expanded
1764      * significantly for system emulation of AArch64 CPUs.
1765      */
1766     { "NZCV", 0,4,2, 3,3,0, ARM_CP_STATE_AA64,
1767       ARM_CP_NZCV, PL0_RW,  },
1768     { "DAIF", 0,4,2, 3,3,1, ARM_CP_STATE_AA64,
1769       ARM_CP_NO_MIGRATE, PL0_RW, NULL, 0, offsetof(CPUARMState, daif),
1770       aa64_daif_access, NULL, aa64_daif_write, NULL,NULL, arm_cp_reset_ignore },
1771     { "FPCR", 0,4,4, 3,3,0, ARM_CP_STATE_AA64,
1772       0, PL0_RW, NULL, 0, 0,
1773       NULL, aa64_fpcr_read, aa64_fpcr_write },
1774     { "FPSR", 0,4,4, 3,3,1, ARM_CP_STATE_AA64,
1775       0, PL0_RW, NULL, 0, 0,
1776       NULL, aa64_fpsr_read, aa64_fpsr_write },
1777     { "DCZID_EL0", 0,0,0, 3,3,7, ARM_CP_STATE_AA64,
1778       ARM_CP_NO_MIGRATE, PL0_R, NULL, 0, 0,
1779       NULL, aa64_dczid_read },
1780     { "DC_ZVA", 0,7,4, 1,3,1, ARM_CP_STATE_AA64,
1781       ARM_CP_DC_ZVA, PL0_W, NULL, 0, 0,
1782 #ifndef CONFIG_USER_ONLY
1783       /* Avoid overhead of an access check that always passes in user-mode */
1784       aa64_zva_access,
1785 #endif
1786     },
1787     { "CURRENTEL", 0,4,2, 3,0,2, ARM_CP_STATE_AA64,
1788       ARM_CP_CURRENTEL, PL1_R, },
1789     /* Cache ops: all NOPs since we don't emulate caches */
1790     { "IC_IALLUIS", 0,7,1, 1,0,0, ARM_CP_STATE_AA64,
1791       ARM_CP_NOP, PL1_W, },
1792     { "IC_IALLU", 0,7,5, 1,0,0, ARM_CP_STATE_AA64,
1793       ARM_CP_NOP, PL1_W, },
1794     { "IC_IVAU", 0,7,5, 1,3,1, ARM_CP_STATE_AA64,
1795       ARM_CP_NOP, PL0_W, NULL, 0, 0,
1796       aa64_cacheop_access },
1797     { "DC_IVAC", 0,7,6, 1,0,1, ARM_CP_STATE_AA64,
1798       ARM_CP_NOP, PL1_W, },
1799     { "DC_ISW", 0,7,6, 1,0,2, ARM_CP_STATE_AA64,
1800       ARM_CP_NOP, PL1_W, },
1801     { "DC_CVAC", 0,7,10, 1,3,1, ARM_CP_STATE_AA64,
1802       ARM_CP_NOP, PL0_W, NULL, 0, 0,
1803       aa64_cacheop_access },
1804     { "DC_CSW", 0,7,10, 1,0,2, ARM_CP_STATE_AA64,
1805       ARM_CP_NOP, PL1_W, },
1806     { "DC_CVAU", 0,7,11, 1,3,1, ARM_CP_STATE_AA64,
1807       ARM_CP_NOP, PL0_W, NULL, 0, 0,
1808       aa64_cacheop_access },
1809     { "DC_CIVAC", 0,7,14, 1,3,1, ARM_CP_STATE_AA64,
1810       ARM_CP_NOP, PL0_W,  NULL, 0, 0,
1811       aa64_cacheop_access },
1812     { "DC_CISW", 0,7,14, 1,0,2, ARM_CP_STATE_AA64,
1813       ARM_CP_NOP, PL1_W,  },
1814     /* TLBI operations */
1815     { "TLBI_VMALLE1IS", 0,8,3, 1,0,0, ARM_CP_STATE_AA64,
1816       ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
1817       NULL, NULL, tlbiall_is_write },
1818     { "TLBI_VAE1IS", 0,8,3, 1,0,1, ARM_CP_STATE_AA64,
1819       ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
1820       NULL, NULL, tlbi_aa64_va_is_write },
1821     { "TLBI_ASIDE1IS", 0,8,3, 1,0,2, ARM_CP_STATE_AA64,
1822       ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
1823       NULL, NULL, tlbi_aa64_asid_is_write },
1824     { "TLBI_VAAE1IS", 0,8,3, 1,0,3, ARM_CP_STATE_AA64,
1825       ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
1826       NULL, NULL, tlbi_aa64_vaa_is_write },
1827     { "TLBI_VALE1IS", 0,8,3, 1,0,5, ARM_CP_STATE_AA64,
1828       ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
1829       NULL, NULL, tlbi_aa64_va_is_write },
1830     { "TLBI_VAALE1IS", 0,8,3, 1,0,7, ARM_CP_STATE_AA64,
1831       ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
1832       NULL, NULL, tlbi_aa64_vaa_is_write },
1833     { "TLBI_VMALLE1", 0,8,7, 1,0,0, ARM_CP_STATE_AA64,
1834       ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
1835       NULL, NULL, tlbiall_write },
1836     { "TLBI_VAE1", 0,8,7, 1,0,1, ARM_CP_STATE_AA64,
1837       ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
1838       NULL, NULL, tlbi_aa64_va_write },
1839     { "TLBI_ASIDE1", 0,8,7, 1,0,2, ARM_CP_STATE_AA64,
1840       ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
1841       NULL, NULL, tlbi_aa64_asid_write },
1842     { "TLBI_VAAE1", 0,8,7, 1,0,3, ARM_CP_STATE_AA64,
1843       ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
1844       NULL, NULL, tlbi_aa64_vaa_write },
1845     { "TLBI_VALE1", 0,8,7, 1,0,5, ARM_CP_STATE_AA64,
1846       ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
1847       NULL, NULL, tlbi_aa64_va_write },
1848     { "TLBI_VAALE1", 0,8,7, 1,0,7, ARM_CP_STATE_AA64,
1849       ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
1850       NULL, NULL, tlbi_aa64_vaa_write },
1851 #ifndef CONFIG_USER_ONLY
1852     /* 64 bit address translation operations */
1853     { "AT_S1E1R", 0,7,8, 1,0,0, ARM_CP_STATE_AA64,
1854       ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
1855       NULL, NULL, ats_write },
1856     { "AT_S1E1W", 0,7,8, 1,0,1, ARM_CP_STATE_AA64,
1857       ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
1858       NULL, NULL, ats_write },
1859     { "AT_S1E0R", 0,7,8, 1,0,2, ARM_CP_STATE_AA64,
1860       ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
1861       NULL, NULL, ats_write },
1862     { "AT_S1E0W", 0,7,8, 1,0,3, ARM_CP_STATE_AA64,
1863       ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
1864       NULL, NULL, ats_write },
1865 #endif
1866     /* TLB invalidate last level of translation table walk */
1867     { "TLBIMVALIS", 15,8,3, 0,0,5, 0,
1868       ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
1869       NULL, NULL, tlbimva_is_write },
1870     { "TLBIMVAALIS", 15,8,3, 0,0,7, 0,
1871       ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
1872       NULL, NULL, tlbimvaa_is_write },
1873     { "TLBIMVAL", 15,8,7, 0,0,5, 0,
1874       ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
1875       NULL, NULL, tlbimva_write },
1876     { "TLBIMVAAL", 15,8,7, 0,0,7, 0,
1877       ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0,
1878       NULL, NULL, tlbimvaa_write },
1879     /* 32 bit cache operations */
1880     { "ICIALLUIS", 15,7,1, 0,0,0, 0,
1881       ARM_CP_NOP, PL1_W },
1882     { "BPIALLUIS", 15,7,1, 0,0,6, 0,
1883       ARM_CP_NOP, PL1_W },
1884     { "ICIALLU", 15,7,5, 0,0,0, 0,
1885       ARM_CP_NOP, PL1_W },
1886     { "ICIMVAU", 15,7,5, 0,0,1, 0,
1887       ARM_CP_NOP, PL1_W },
1888     { "BPIALL", 15,7,5, 0,0,6, 0,
1889       ARM_CP_NOP, PL1_W },
1890     { "BPIMVA", 15,7,5, 0,0,7, 0,
1891       ARM_CP_NOP, PL1_W },
1892     { "DCIMVAC", 15,7,6, 0,0,1, 0,
1893       ARM_CP_NOP, PL1_W },
1894     { "DCISW", 15,7,6, 0,0,2, 0,
1895       ARM_CP_NOP, PL1_W },
1896     { "DCCMVAC", 15,7,10, 0,0,1, 0,
1897       ARM_CP_NOP, PL1_W },
1898     { "DCCSW", 15,7,10, 0,0,2, 0,
1899       ARM_CP_NOP, PL1_W },
1900     { "DCCMVAU", 15,7,11, 0,0,1, 0,
1901       ARM_CP_NOP, PL1_W },
1902     { "DCCIMVAC", 15,7,14, 0,0,1, 0,
1903       ARM_CP_NOP, PL1_W },
1904     { "DCCISW", 15,7,14, 0,0,2, 0,
1905       ARM_CP_NOP, PL1_W },
1906     /* MMU Domain access control / MPU write buffer control */
1907     { "DACR", 15,3,0, 0,0,0, 0,
1908       0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c3),
1909       NULL, NULL,dacr_write, NULL,raw_write, },
1910     { "ELR_EL1", 0,4,0, 3,0,1, ARM_CP_STATE_AA64,
1911       ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, offsetof(CPUARMState, elr_el[1]) },
1912     { "SPSR_EL1", 0,4,0, 3,0,0, ARM_CP_STATE_AA64,
1913       ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, offsetof(CPUARMState, banked_spsr[0]) },
1914     /* We rely on the access checks not allowing the guest to write to the
1915      * state field when SPSel indicates that it's being used as the stack
1916      * pointer.
1917      */
1918     { "SP_EL0", 0,4,1, 3,0,0, ARM_CP_STATE_AA64,
1919       ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, offsetof(CPUARMState, sp_el[0]),
1920       sp_el0_access, },
1921     { "SPSel", 0,4,2, 3,0,0, ARM_CP_STATE_AA64,
1922       ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, 0,
1923       NULL, spsel_read, spsel_write },
1924     REGINFO_SENTINEL
1925 };
1926 
1927 /* Used to describe the behaviour of EL2 regs when EL2 does not exist.  */
1928 static const ARMCPRegInfo v8_el3_no_el2_cp_reginfo[] = {
1929     { "VBAR_EL2", 0,12,0, 3,4,0, ARM_CP_STATE_AA64,
1930       0, PL2_RW, NULL, 0, 0,
1931       NULL, arm_cp_read_zero, arm_cp_write_ignore },
1932     { "HCR_EL2", 0,1,1, 3,4,0, ARM_CP_STATE_AA64,
1933       ARM_CP_NO_MIGRATE, PL2_RW, NULL, 0, 0,
1934       NULL, arm_cp_read_zero, arm_cp_write_ignore },
1935     REGINFO_SENTINEL
1936 };
1937 
1938 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1939 {
1940     ARMCPU *cpu = arm_env_get_cpu(env);
1941     uint64_t valid_mask = HCR_MASK;
1942 
1943     if (arm_feature(env, ARM_FEATURE_EL3)) {
1944         valid_mask &= ~HCR_HCD;
1945     } else {
1946         valid_mask &= ~HCR_TSC;
1947     }
1948 
1949     /* Clear RES0 bits.  */
1950     value &= valid_mask;
1951 
1952     /* These bits change the MMU setup:
1953      * HCR_VM enables stage 2 translation
1954      * HCR_PTW forbids certain page-table setups
1955      * HCR_DC Disables stage1 and enables stage2 translation
1956      */
1957     if ((raw_read(env, ri) ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
1958         tlb_flush(CPU(cpu), 1);
1959     }
1960     raw_write(env, ri, value);
1961 }
1962 
1963 static const ARMCPRegInfo v8_el2_cp_reginfo[] = {
1964     { "HCR_EL2", 0,1,1, 3,4,0, ARM_CP_STATE_AA64,
1965       0, PL2_RW, NULL, 0, offsetof(CPUARMState, cp15.hcr_el2),
1966       NULL, NULL, hcr_write },
1967     { "ELR_EL2", 0,4,0, 3,4,1, ARM_CP_STATE_AA64,
1968       ARM_CP_NO_MIGRATE, PL2_RW, NULL, 0, offsetof(CPUARMState, elr_el[2]) },
1969     { "ESR_EL2", 0,5,2, 3,4,0, ARM_CP_STATE_AA64,
1970       ARM_CP_NO_MIGRATE, PL2_RW, NULL, 0, offsetof(CPUARMState, cp15.esr_el[2]) },
1971     { "FAR_EL2", 0,6,0, 3,4,0, ARM_CP_STATE_AA64,
1972       0, PL2_RW, NULL, 0, offsetof(CPUARMState, cp15.far_el[2]) },
1973     { "SPSR_EL2", 0,4,0, 3,4,0, ARM_CP_STATE_AA64,
1974       ARM_CP_NO_MIGRATE, PL2_RW, NULL, 0, offsetof(CPUARMState, banked_spsr[6]) },
1975     { "VBAR_EL2", 0,12,0, 3,4,0, ARM_CP_STATE_AA64,
1976       0, PL2_RW, NULL, 0, offsetof(CPUARMState, cp15.vbar_el[2]),
1977       NULL, NULL, vbar_write, },
1978     REGINFO_SENTINEL
1979 };
1980 
1981 static const ARMCPRegInfo v8_el3_cp_reginfo[] = {
1982     { "ELR_EL3", 0,4,0, 3,6,1, ARM_CP_STATE_AA64,
1983       ARM_CP_NO_MIGRATE, PL3_RW, NULL, 0, offsetof(CPUARMState, elr_el[3]) },
1984     { "ESR_EL3", 0,5,2, 3,6,0, ARM_CP_STATE_AA64,
1985       ARM_CP_NO_MIGRATE, PL3_RW, NULL, 0, offsetof(CPUARMState, cp15.esr_el[3]) },
1986     { "FAR_EL3", 0,6,0, 3,6,0, ARM_CP_STATE_AA64,
1987       0, PL3_RW, NULL, 0, offsetof(CPUARMState, cp15.far_el[3]) },
1988     { "SPSR_EL3", 0,4,0, 3,6,0, ARM_CP_STATE_AA64,
1989       ARM_CP_NO_MIGRATE, PL3_RW, NULL, 0, offsetof(CPUARMState, banked_spsr[7]) },
1990     { "VBAR_EL3", 0,12,0, 3,6,0, ARM_CP_STATE_AA64,
1991       0, PL3_RW, NULL, 0, offsetof(CPUARMState, cp15.vbar_el[3]),
1992       NULL, NULL, vbar_write, },
1993     { "SCR_EL3", 0,1,1, 3,6,0, ARM_CP_STATE_AA64,
1994       ARM_CP_NO_MIGRATE, PL3_RW, NULL, 0, offsetof(CPUARMState, cp15.scr_el3),
1995       NULL, NULL, scr_write },
1996     REGINFO_SENTINEL
1997 };
1998 
1999 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2000                         uint64_t value)
2001 {
2002     ARMCPU *cpu = arm_env_get_cpu(env);
2003 
2004     if (raw_read(env, ri) == value) {
2005         /* Skip the TLB flush if nothing actually changed; Linux likes
2006          * to do a lot of pointless SCTLR writes.
2007          */
2008         return;
2009     }
2010 
2011     raw_write(env, ri, value);
2012     /* ??? Lots of these bits are not implemented.  */
2013     /* This may enable/disable the MMU, so do a TLB flush.  */
2014     tlb_flush(CPU(cpu), 1);
2015 }
2016 
2017 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri)
2018 {
2019     /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
2020      * but the AArch32 CTR has its own reginfo struct)
2021      */
2022     if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCT)) {
2023         return CP_ACCESS_TRAP;
2024     }
2025     return CP_ACCESS_OK;
2026 }
2027 
2028 static const ARMCPRegInfo debug_cp_reginfo[] = {
2029     /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
2030      * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
2031      * unlike DBGDRAR it is never accessible from EL0.
2032      * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
2033      * accessor.
2034      */
2035     { "DBGDRAR", 14,1,0, 0,0,0, 0,
2036       ARM_CP_CONST, PL0_R, NULL, 0 },
2037     { "MDRAR_EL1", 0,1,0, 2,0,0, ARM_CP_STATE_AA64,
2038       ARM_CP_CONST, PL1_R, NULL, 0 },
2039     { "DBGDSAR", 14,2,0, 0,0,0, 0,
2040       ARM_CP_CONST, PL0_R, NULL, 0 },
2041     /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
2042     { "MDSCR_EL1", 14,0,2, 2,0,2, ARM_CP_STATE_BOTH,
2043       0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.mdscr_el1), },
2044     /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
2045      * We don't implement the configurable EL0 access.
2046      */
2047     { "MDCCSR_EL0", 14,0,1, 2,0,0, ARM_CP_STATE_BOTH,
2048       ARM_CP_NO_MIGRATE, PL1_R, NULL, 0, offsetof(CPUARMState, cp15.mdscr_el1),
2049       NULL,NULL,NULL,NULL,NULL, arm_cp_reset_ignore },
2050     /* We define a dummy WI OSLAR_EL1, because Linux writes to it. */
2051     { "OSLAR_EL1", 14,1,0, 2,0,4, ARM_CP_STATE_BOTH,
2052       ARM_CP_NOP, PL1_W, },
2053     /* Dummy OSDLR_EL1: 32-bit Linux will read this */
2054     { "OSDLR_EL1", 14,1,3, 2,0,4, ARM_CP_STATE_BOTH,
2055       ARM_CP_NOP, PL1_RW, },
2056     /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
2057      * implement vector catch debug events yet.
2058      */
2059     { "DBGVCR", 14,0,7, 0,0,0, 0,
2060       ARM_CP_NOP, PL1_RW, },
2061     REGINFO_SENTINEL
2062 };
2063 
2064 static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
2065     /* 64 bit access versions of the (dummy) debug registers */
2066     { "DBGDRAR", 14, 0,1, 0,0, 0, 0,
2067       ARM_CP_CONST|ARM_CP_64BIT, PL0_R, NULL, 0 },
2068     { "DBGDSAR", 14, 0,2, 0,0, 0, 0,
2069       ARM_CP_CONST|ARM_CP_64BIT, PL0_R, NULL, 0 },
2070     REGINFO_SENTINEL
2071 };
2072 
2073 void hw_watchpoint_update(ARMCPU *cpu, int n)
2074 {
2075     CPUARMState *env = &cpu->env;
2076     vaddr len = 0;
2077     vaddr wvr = env->cp15.dbgwvr[n];
2078     uint64_t wcr = env->cp15.dbgwcr[n];
2079     int mask;
2080     int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
2081 
2082     if (env->cpu_watchpoint[n]) {
2083         cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
2084         env->cpu_watchpoint[n] = NULL;
2085     }
2086 
2087     if (!extract64(wcr, 0, 1)) {
2088         /* E bit clear: watchpoint disabled */
2089         return;
2090     }
2091 
2092     switch (extract64(wcr, 3, 2)) {
2093     case 0:
2094         /* LSC 00 is reserved and must behave as if the wp is disabled */
2095         return;
2096     case 1:
2097         flags |= BP_MEM_READ;
2098         break;
2099     case 2:
2100         flags |= BP_MEM_WRITE;
2101         break;
2102     case 3:
2103         flags |= BP_MEM_ACCESS;
2104         break;
2105     }
2106 
2107     /* Attempts to use both MASK and BAS fields simultaneously are
2108      * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
2109      * thus generating a watchpoint for every byte in the masked region.
2110      */
2111     mask = extract64(wcr, 24, 4);
2112     if (mask == 1 || mask == 2) {
2113         /* Reserved values of MASK; we must act as if the mask value was
2114          * some non-reserved value, or as if the watchpoint were disabled.
2115          * We choose the latter.
2116          */
2117         return;
2118     } else if (mask) {
2119         /* Watchpoint covers an aligned area up to 2GB in size */
2120         len = 1ULL << mask;
2121         /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
2122          * whether the watchpoint fires when the unmasked bits match; we opt
2123          * to generate the exceptions.
2124          */
2125         wvr &= ~(len - 1);
2126     } else {
2127         /* Watchpoint covers bytes defined by the byte address select bits */
2128         int bas = extract64(wcr, 5, 8);
2129         int basstart;
2130 
2131         if (bas == 0) {
2132             /* This must act as if the watchpoint is disabled */
2133             return;
2134         }
2135 
2136         if (extract64(wvr, 2, 1)) {
2137             /* Deprecated case of an only 4-aligned address. BAS[7:4] are
2138              * ignored, and BAS[3:0] define which bytes to watch.
2139              */
2140             bas &= 0xf;
2141         }
2142         /* The BAS bits are supposed to be programmed to indicate a contiguous
2143          * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
2144          * we fire for each byte in the word/doubleword addressed by the WVR.
2145          * We choose to ignore any non-zero bits after the first range of 1s.
2146          */
2147         basstart = ctz32(bas);
2148         len = cto32(bas >> (basstart & 0x1f));
2149         wvr += basstart;
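        /* For example, bas == 0b00111100 yields basstart == 2 and len == 4:
         * a watchpoint on the four bytes starting at wvr + 2.
         */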
2150     }
2151 
2152     cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
2153                           &env->cpu_watchpoint[n]);
2154 }
2155 
2156 void hw_watchpoint_update_all(ARMCPU *cpu)
2157 {
2158     int i;
2159     CPUARMState *env = &cpu->env;
2160 
2161     /* Completely clear out existing QEMU watchpoints and our array, to
2162      * avoid possible stale entries following migration load.
2163      */
2164     cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
2165     memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
2166 
2167     for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
2168         hw_watchpoint_update(cpu, i);
2169     }
2170 }
2171 
2172 static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2173                          uint64_t value)
2174 {
2175     ARMCPU *cpu = arm_env_get_cpu(env);
2176     int i = ri->crm;
2177 
2178     /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
2179      * register reads and behaves as if values written are sign extended.
2180      * Bits [1:0] are RES0.
2181      */
2182     value = sextract64(value, 0, 49) & ~3ULL;
2183 
2184     raw_write(env, ri, value);
2185     hw_watchpoint_update(cpu, i);
2186 }
2187 
2188 static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2189                          uint64_t value)
2190 {
2191     ARMCPU *cpu = arm_env_get_cpu(env);
2192     int i = ri->crm;
2193 
2194     raw_write(env, ri, value);
2195     hw_watchpoint_update(cpu, i);
2196 }
2197 
2198 void hw_breakpoint_update(ARMCPU *cpu, int n)
2199 {
2200     CPUARMState *env = &cpu->env;
2201     uint64_t bvr = env->cp15.dbgbvr[n];
2202     uint64_t bcr = env->cp15.dbgbcr[n];
2203     vaddr addr;
2204     int bt;
2205     int flags = BP_CPU;
2206 
2207     if (env->cpu_breakpoint[n]) {
2208         cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
2209         env->cpu_breakpoint[n] = NULL;
2210     }
2211 
2212     if (!extract64(bcr, 0, 1)) {
2213         /* E bit clear: breakpoint disabled */
2214         return;
2215     }
2216 
2217     bt = extract64(bcr, 20, 4);
2218 
2219     switch (bt) {
2220     case 4: /* unlinked address mismatch (reserved if AArch64) */
2221     case 5: /* linked address mismatch (reserved if AArch64) */
2222         qemu_log_mask(LOG_UNIMP,
2223                       "arm: address mismatch breakpoint types not implemented");
2224         return;
2225     case 0: /* unlinked address match */
2226     case 1: /* linked address match */
2227     {
2228         /* Bits [63:49] are hardwired to the value of bit [48]; that is,
2229          * we behave as if the register was sign extended. Bits [1:0] are
2230          * RES0. The BAS field is used to allow setting breakpoints on 16
2231          * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
2232          * a bp will fire if the addresses covered by the bp and the addresses
2233          * covered by the insn overlap but the insn doesn't start at the
2234          * start of the bp address range. We choose to require the insn and
2235          * the bp to have the same address. The constraints on writing to
2236          * BAS enforced in dbgbcr_write mean we have only four cases:
2237          *  0b0000  => no breakpoint
2238          *  0b0011  => breakpoint on addr
2239          *  0b1100  => breakpoint on addr + 2
2240          *  0b1111  => breakpoint on addr
2241          * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
2242          */
2243         int bas = extract64(bcr, 5, 4);
2244         addr = sextract64(bvr, 0, 49) & ~3ULL;
2245         if (bas == 0) {
2246             return;
2247         }
2248         if (bas == 0xc) {
2249             addr += 2;
2250         }
2251         break;
2252     }
2253     case 2: /* unlinked context ID match */
2254     case 8: /* unlinked VMID match (reserved if no EL2) */
2255     case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
2256         qemu_log_mask(LOG_UNIMP,
2257                       "arm: unlinked context breakpoint types not implemented");
2258         return;
2259     case 9: /* linked VMID match (reserved if no EL2) */
2260     case 11: /* linked context ID and VMID match (reserved if no EL2) */
2261     case 3: /* linked context ID match */
2262     default:
2263         /* We must generate no events for Linked context matches (unless
2264          * they are linked to by some other bp/wp, which is handled in
2265          * updates for the linking bp/wp). We choose to also generate no events
2266          * for reserved values.
2267          */
2268         return;
2269     }
2270 
2271     cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
2272 }
2273 
2274 void hw_breakpoint_update_all(ARMCPU *cpu)
2275 {
2276     int i;
2277     CPUARMState *env = &cpu->env;
2278 
2279     /* Completely clear out existing QEMU breakpoints and our array, to
2280      * avoid possible stale entries following migration load.
2281      */
2282     cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
2283     memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));
2284 
2285     for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
2286         hw_breakpoint_update(cpu, i);
2287     }
2288 }
2289 
2290 static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2291                          uint64_t value)
2292 {
2293     ARMCPU *cpu = arm_env_get_cpu(env);
2294     int i = ri->crm;
2295 
2296     raw_write(env, ri, value);
2297     hw_breakpoint_update(cpu, i);
2298 }
2299 
2300 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2301                          uint64_t value)
2302 {
2303     ARMCPU *cpu = arm_env_get_cpu(env);
2304     int i = ri->crm;
2305 
2306     /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
2307      * copy of BAS[0].
2308      */
2309     value = deposit64(value, 6, 1, extract64(value, 5, 1));
2310     value = deposit64(value, 8, 1, extract64(value, 7, 1));
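    /* This forces BAS to one of the four useful values 0b0000, 0b0011,
     * 0b1100 or 0b1111; e.g. a guest write of 0b0101 is stored as 0b1111.
     */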
2311 
2312     raw_write(env, ri, value);
2313     hw_breakpoint_update(cpu, i);
2314 }
2315 
2316 static void define_debug_regs(ARMCPU *cpu)
2317 {
2318     /* Define v7 and v8 architectural debug registers.
2319      * These are just dummy implementations for now.
2320      */
2321     int i;
2322     int wrps, brps, ctx_cmps;
2323     ARMCPRegInfo dbgdidr = {
2324         "DBGDIDR", 14,0,0, 0,0,0, 0,
2325         ARM_CP_CONST, PL0_R, NULL, cpu->dbgdidr,
2326     };
2327 
2328     /* Note that all these register fields hold "number of Xs minus 1". */
2329     brps = extract32(cpu->dbgdidr, 24, 4);
2330     wrps = extract32(cpu->dbgdidr, 28, 4);
2331     ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
2332 
2333     assert(ctx_cmps <= brps);
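    /* Context-matching comparators are a subset of the breakpoint
     * comparators, so there can never be more of them than of breakpoints.
     */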
2334 
2335     /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
2336      * of the debug registers such as number of breakpoints;
2337      * check that if they both exist then they agree.
2338      */
2339     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
2340         assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
2341         assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
2342         assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
2343     }
2344 
2345     define_one_arm_cp_reg(cpu, &dbgdidr);
2346     define_arm_cp_regs(cpu, debug_cp_reginfo);
2347 
2348     if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
2349         define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
2350     }
2351 
2352     for (i = 0; i < brps + 1; i++) {
2353         ARMCPRegInfo dbgregs[] = {
2354             { "DBGBVR", 14,0,i, 2,0,4,ARM_CP_STATE_BOTH,
2355               0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.dbgbvr[i]),
2356               NULL, NULL,dbgbvr_write, NULL,raw_write
2357             },
2358             { "DBGBCR", 14,0,i, 2,0,5, ARM_CP_STATE_BOTH,
2359               0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.dbgbcr[i]),
2360               NULL, NULL,dbgbcr_write, NULL,raw_write
2361             },
2362             REGINFO_SENTINEL
2363         };
2364         define_arm_cp_regs(cpu, dbgregs);
2365     }
2366 
2367     for (i = 0; i < wrps + 1; i++) {
2368         ARMCPRegInfo dbgregs[] = {
2369             { "DBGWVR", 14,0,i, 2,0,6, ARM_CP_STATE_BOTH,
2370               0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.dbgwvr[i]),
2371               NULL, NULL,dbgwvr_write, NULL,raw_write
2372             },
2373             { "DBGWCR", 14,0,i, 2,0,7, ARM_CP_STATE_BOTH,
2374               0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.dbgwcr[i]),
2375               NULL, NULL,dbgwcr_write, NULL,raw_write
2376             },
2377             REGINFO_SENTINEL
2378         };
2379         define_arm_cp_regs(cpu, dbgregs);
2380     }
2381 }
2382 
2383 void register_cp_regs_for_features(ARMCPU *cpu)
2384 {
2385     /* Register all the coprocessor registers based on feature bits */
2386     CPUARMState *env = &cpu->env;
2387     if (arm_feature(env, ARM_FEATURE_M)) {
2388         /* M profile has no coprocessor registers */
2389         return;
2390     }
2391 
2392     define_arm_cp_regs(cpu, cp_reginfo);
2393     if (!arm_feature(env, ARM_FEATURE_V8)) {
2394         /* Must go early as it is full of wildcards that may be
2395          * overridden by later definitions.
2396          */
2397         define_arm_cp_regs(cpu, not_v8_cp_reginfo);
2398     }
2399 
2400     if (arm_feature(env, ARM_FEATURE_V6)) {
2401         /* The ID registers all have impdef reset values */
2402         ARMCPRegInfo v6_idregs[] = {
2403             { "ID_PFR0", 0,0,1, 3,0,0, ARM_CP_STATE_BOTH,
2404               ARM_CP_CONST, PL1_R, NULL, cpu->id_pfr0 },
2405             { "ID_PFR1", 0,0,1, 3,0,1, ARM_CP_STATE_BOTH,
2406               ARM_CP_CONST, PL1_R, NULL, cpu->id_pfr1 },
2407             { "ID_DFR0", 0,0,1, 3,0,2, ARM_CP_STATE_BOTH,
2408               ARM_CP_CONST, PL1_R, NULL, cpu->id_dfr0 },
2409             { "ID_AFR0", 0,0,1, 3,0,3, ARM_CP_STATE_BOTH,
2410               ARM_CP_CONST, PL1_R, NULL, cpu->id_afr0 },
2411             { "ID_MMFR0", 0,0,1, 3,0,4, ARM_CP_STATE_BOTH,
2412               ARM_CP_CONST, PL1_R, NULL, cpu->id_mmfr0 },
2413             { "ID_MMFR1", 0,0,1, 3,0,5, ARM_CP_STATE_BOTH,
2414               ARM_CP_CONST, PL1_R, NULL, cpu->id_mmfr1 },
2415             { "ID_MMFR2", 0,0,1, 3,0,6, ARM_CP_STATE_BOTH,
2416               ARM_CP_CONST, PL1_R, NULL, cpu->id_mmfr2 },
2417             { "ID_MMFR3", 0,0,1, 3,0,7, ARM_CP_STATE_BOTH,
2418               ARM_CP_CONST, PL1_R, NULL, cpu->id_mmfr3 },
2419             { "ID_ISAR0", 0,0,2, 3,0,0, ARM_CP_STATE_BOTH,
2420               ARM_CP_CONST, PL1_R, NULL, cpu->id_isar0 },
2421             { "ID_ISAR1", 0,0,2, 3,0,1, ARM_CP_STATE_BOTH,
2422               ARM_CP_CONST, PL1_R, NULL, cpu->id_isar1 },
2423             { "ID_ISAR2", 0,0,2, 3,0,2, ARM_CP_STATE_BOTH,
2424               ARM_CP_CONST, PL1_R, NULL, cpu->id_isar2 },
2425             { "ID_ISAR3", 0,0,2, 3,0,3, ARM_CP_STATE_BOTH,
2426               ARM_CP_CONST, PL1_R, NULL, cpu->id_isar3 },
2427             { "ID_ISAR4", 0,0,2, 3,0,4, ARM_CP_STATE_BOTH,
2428               ARM_CP_CONST, PL1_R, NULL, cpu->id_isar4 },
2429             { "ID_ISAR5", 0,0,2, 3,0,5, ARM_CP_STATE_BOTH,
2430               ARM_CP_CONST, PL1_R, NULL, cpu->id_isar5 },
2431             /* 6..7 are as yet unallocated and must RAZ */
2432             { "ID_ISAR6", 15,0,2, 0,0,6, 0,
2433               ARM_CP_CONST, PL1_R, NULL, 0 },
2434             { "ID_ISAR7", 15,0,2, 0,0,7, 0,
2435               ARM_CP_CONST, PL1_R, NULL, 0 },
2436             REGINFO_SENTINEL
2437         };
2438         define_arm_cp_regs(cpu, v6_idregs);
2439         define_arm_cp_regs(cpu, v6_cp_reginfo);
2440     } else {
2441         define_arm_cp_regs(cpu, not_v6_cp_reginfo);
2442     }
2443     if (arm_feature(env, ARM_FEATURE_V6K)) {
2444         define_arm_cp_regs(cpu, v6k_cp_reginfo);
2445     }
2446     if (arm_feature(env, ARM_FEATURE_V7MP)) {
2447         define_arm_cp_regs(cpu, v7mp_cp_reginfo);
2448     }
2449     if (arm_feature(env, ARM_FEATURE_V7)) {
2450         ARMCPRegInfo clidr = {
2451             "CLIDR", 0,0,0, 3,1,1, ARM_CP_STATE_BOTH,
2452             ARM_CP_CONST, PL1_R, NULL, cpu->clidr
2453         };
2454         /* v7 performance monitor control register: same implementor
2455          * field as main ID register, and we implement only the cycle
2456          * count register.
2457          */
2458 #ifndef CONFIG_USER_ONLY
2459         ARMCPRegInfo pmcr = {
2460             "PMCR", 15,9,12, 0,0,0, 0,
2461             ARM_CP_IO | ARM_CP_NO_MIGRATE, PL0_RW, NULL, 0, offsetoflow32(CPUARMState, cp15.c9_pmcr),
2462             pmreg_access, NULL,pmcr_write, NULL,raw_write,
2463         };
2464         ARMCPRegInfo pmcr64 = {
2465             "PMCR_EL0", 0,9,12, 3,3,0, ARM_CP_STATE_AA64,
2466             ARM_CP_IO, PL0_RW, NULL, cpu->midr & 0xff000000, offsetof(CPUARMState, cp15.c9_pmcr),
2467             pmreg_access, NULL,pmcr_write, NULL,raw_write,
2468         };
2469         define_one_arm_cp_reg(cpu, &pmcr);
2470         define_one_arm_cp_reg(cpu, &pmcr64);
2471 #endif
2472         define_one_arm_cp_reg(cpu, &clidr);
2473         define_arm_cp_regs(cpu, v7_cp_reginfo);
2474         define_debug_regs(cpu);
2475     } else {
2476         define_arm_cp_regs(cpu, not_v7_cp_reginfo);
2477     }
2478     if (arm_feature(env, ARM_FEATURE_V8)) {
2479         /* AArch64 ID registers, which all have impdef reset values */
2480         ARMCPRegInfo v8_idregs[] = {
2481             { "ID_AA64PFR0_EL1", 0,0,4, 3,0,0, ARM_CP_STATE_AA64,
2482               ARM_CP_CONST, PL1_R, NULL, cpu->id_aa64pfr0 },
2483             { "ID_AA64PFR1_EL1", 0,0,4, 3,0,1, ARM_CP_STATE_AA64,
2484               ARM_CP_CONST, PL1_R, NULL, cpu->id_aa64pfr1},
2485             { "ID_AA64DFR0_EL1", 0,0,5, 3,0,0, ARM_CP_STATE_AA64,
2486               ARM_CP_CONST, PL1_R, NULL,
2487               /* We mask out the PMUVer field, because we don't currently
2488                * implement the PMU. Not advertising it prevents the guest
2489                * from trying to use it and getting UNDEFs on registers we
2490                * don't implement.
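               * (PMUVer is ID_AA64DFR0_EL1[11:8], hence the ~0xf00 mask.)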
2491                */
2492               cpu->id_aa64dfr0 & ~0xf00 },
2493             { "ID_AA64DFR1_EL1", 0,0,5, 3,0,1, ARM_CP_STATE_AA64,
2494               ARM_CP_CONST, PL1_R, NULL, cpu->id_aa64dfr1 },
2495             { "ID_AA64AFR0_EL1", 0,0,5, 3,0,4, ARM_CP_STATE_AA64,
2496               ARM_CP_CONST, PL1_R, NULL, cpu->id_aa64afr0 },
2497             { "ID_AA64AFR1_EL1", 0,0,5, 3,0,5, ARM_CP_STATE_AA64,
2498               ARM_CP_CONST, PL1_R, NULL, cpu->id_aa64afr1 },
2499             { "ID_AA64ISAR0_EL1", 0,0,6, 3,0,0, ARM_CP_STATE_AA64,
2500               ARM_CP_CONST, PL1_R, NULL, cpu->id_aa64isar0 },
2501             { "ID_AA64ISAR1_EL1", 0,0,6, 3,0,1, ARM_CP_STATE_AA64,
2502               ARM_CP_CONST, PL1_R, NULL, cpu->id_aa64isar1 },
2503             { "ID_AA64MMFR0_EL1", 0,0,7, 3,0,0, ARM_CP_STATE_AA64,
2504               ARM_CP_CONST, PL1_R, NULL, cpu->id_aa64mmfr0 },
2505             { "ID_AA64MMFR1_EL1", 0,0,7, 3,0,1, ARM_CP_STATE_AA64,
2506               ARM_CP_CONST, PL1_R, NULL, cpu->id_aa64mmfr1 },
2507             { "MVFR0_EL1", 0,0,3, 3,0,0, ARM_CP_STATE_AA64,
2508               ARM_CP_CONST, PL1_R, NULL, cpu->mvfr0 },
2509             { "MVFR1_EL1", 0,0,3, 3,0,1, ARM_CP_STATE_AA64,
2510               ARM_CP_CONST, PL1_R, NULL, cpu->mvfr1 },
2511             { "MVFR2_EL1", 0,0,3, 3,0,2, ARM_CP_STATE_AA64,
2512               ARM_CP_CONST, PL1_R, NULL, cpu->mvfr2 },
2513             REGINFO_SENTINEL
2514         };
2515         ARMCPRegInfo rvbar = {
2516             "RVBAR_EL1", 0,12,0, 3,0,2, ARM_CP_STATE_AA64,
2517             ARM_CP_CONST, PL1_R, NULL, cpu->rvbar
2518         };
2519         define_one_arm_cp_reg(cpu, &rvbar);
2520         define_arm_cp_regs(cpu, v8_idregs);
2521         define_arm_cp_regs(cpu, v8_cp_reginfo);
2522     }
2523     if (arm_feature(env, ARM_FEATURE_EL2)) {
2524         define_arm_cp_regs(cpu, v8_el2_cp_reginfo);
2525     } else {
2526         /* If EL2 is missing but higher ELs are enabled, we need to
2527          * register the no_el2 reginfos.
2528          */
2529         if (arm_feature(env, ARM_FEATURE_EL3)) {
2530             define_arm_cp_regs(cpu, v8_el3_no_el2_cp_reginfo);
2531         }
2532     }
2533     if (arm_feature(env, ARM_FEATURE_EL3)) {
2534         define_arm_cp_regs(cpu, v8_el3_cp_reginfo);
2535     }
2536     if (arm_feature(env, ARM_FEATURE_MPU)) {
2537         /* These are the MPU registers prior to PMSAv6. Any new
2538          * PMSA core later than the ARM946 will require that we
2539          * implement the PMSAv6 or PMSAv7 registers, which are
2540          * completely different.
2541          */
2542         assert(!arm_feature(env, ARM_FEATURE_V6));
2543         define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
2544     } else {
2545         define_arm_cp_regs(cpu, vmsa_cp_reginfo);
2546     }
2547     if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
2548         define_arm_cp_regs(cpu, t2ee_cp_reginfo);
2549     }
2550     if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
2551         define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
2552     }
2553     if (arm_feature(env, ARM_FEATURE_VAPA)) {
2554         define_arm_cp_regs(cpu, vapa_cp_reginfo);
2555     }
2556     if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
2557         define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
2558     }
2559     if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
2560         define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
2561     }
2562     if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
2563         define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
2564     }
2565     if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
2566         define_arm_cp_regs(cpu, omap_cp_reginfo);
2567     }
2568     if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
2569         define_arm_cp_regs(cpu, strongarm_cp_reginfo);
2570     }
2571     if (arm_feature(env, ARM_FEATURE_XSCALE)) {
2572         define_arm_cp_regs(cpu, xscale_cp_reginfo);
2573     }
2574     if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
2575         define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
2576     }
2577     if (arm_feature(env, ARM_FEATURE_LPAE)) {
2578         define_arm_cp_regs(cpu, lpae_cp_reginfo);
2579     }
2580     /* Slightly awkwardly, the OMAP and StrongARM cores need all of
2581      * cp15 crn=0 to be writes-ignored, whereas for other cores they should
2582      * be read-only (ie write causes UNDEF exception).
2583      */
2584     {
2585         ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
2586             /* Pre-v8 MIDR space.
2587              * Note that the MIDR isn't a simple constant register because
2588              * of the TI925 behaviour where writes to another register can
2589              * cause the MIDR value to change.
2590              *
2591              * Unimplemented registers in the c15 0 0 0 space default to
2592              * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
2593              * and friends override accordingly.
2594              */
2595             { "MIDR", 15,0,0, 0,0,CP_ANY, 0,
2596               ARM_CP_OVERRIDE, PL1_R, NULL, cpu->midr, offsetof(CPUARMState, cp15.c0_cpuid),
2597               NULL, NULL,arm_cp_write_ignore, NULL,raw_write, },
2598             /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
2599             { "DUMMY",
2600               15,0,3, 0,0,CP_ANY, 0,
2601               ARM_CP_CONST, PL1_R, NULL, 0 },
2602             { "DUMMY",
2603               15,0,4, 0,0,CP_ANY, 0,
2604               ARM_CP_CONST, PL1_R, NULL, 0 },
2605             { "DUMMY",
2606               15,0,5, 0,0,CP_ANY, 0,
2607               ARM_CP_CONST, PL1_R, NULL, 0 },
2608             { "DUMMY",
2609               15,0,6, 0,0,CP_ANY, 0,
2610               ARM_CP_CONST, PL1_R, NULL, 0 },
2611             { "DUMMY",
2612               15,0,7, 0,0,CP_ANY, 0,
2613               ARM_CP_CONST, PL1_R, NULL, 0 },
2614             REGINFO_SENTINEL
2615         };
2616         ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
2617             /* v8 MIDR -- the wildcard isn't necessary, and nor is the
2618              * variable-MIDR TI925 behaviour. Instead we have a single
2619              * (strictly speaking IMPDEF) alias of the MIDR, REVIDR.
2620              */
2621             { "MIDR_EL1", 0,0,0, 3,0,0, ARM_CP_STATE_BOTH,
2622               ARM_CP_CONST, PL1_R, NULL, cpu->midr },
2623             { "REVIDR_EL1", 0,0,0, 3,0,6, ARM_CP_STATE_BOTH,
2624               ARM_CP_CONST, PL1_R, NULL, cpu->midr },
2625             REGINFO_SENTINEL
2626         };
2627         ARMCPRegInfo id_cp_reginfo[] = {
2628             /* These are common to v8 and pre-v8 */
2629             { "CTR", 15,0,0, 0,0,1, 0,
2630               ARM_CP_CONST, PL1_R, NULL, cpu->ctr },
2631             { "CTR_EL0", 0,0,0, 3,3,1, ARM_CP_STATE_AA64,
2632               ARM_CP_CONST, PL0_R, NULL, cpu->ctr, 0,
2633               ctr_el0_access, },
2634             /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
2635             { "TCMTR", 15,0,0, 0,0,2, 0,
2636               ARM_CP_CONST, PL1_R, NULL, 0 },
2637             { "TLBTR", 15,0,0, 0,0,3, 0,
2638               ARM_CP_CONST, PL1_R, NULL, 0 },
2639             REGINFO_SENTINEL
2640         };
2641         ARMCPRegInfo crn0_wi_reginfo = {
2642             "CRN0_WI", 15,0,CP_ANY, 0,CP_ANY,CP_ANY, 0,
2643             ARM_CP_NOP | ARM_CP_OVERRIDE, PL1_W,
2644         };
2645         if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
2646             arm_feature(env, ARM_FEATURE_STRONGARM)) {
2647             ARMCPRegInfo *r;
2648             /* Register the blanket "writes ignored" value first to cover the
2649              * whole space. Then update the specific ID registers to allow write
2650              * access, so that they ignore writes rather than causing them to
2651              * UNDEF.
2652              */
2653             define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
2654             for (r = id_pre_v8_midr_cp_reginfo;
2655                  r->type != ARM_CP_SENTINEL; r++) {
2656                 r->access = PL1_RW;
2657             }
2658             for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
2659                 r->access = PL1_RW;
2660             }
2661         }
2662         if (arm_feature(env, ARM_FEATURE_V8)) {
2663             define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
2664         } else {
2665             define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
2666         }
2667         define_arm_cp_regs(cpu, id_cp_reginfo);
2668     }
2669 
2670     if (arm_feature(env, ARM_FEATURE_MPIDR)) {
2671         define_arm_cp_regs(cpu, mpidr_cp_reginfo);
2672     }
2673 
2674     if (arm_feature(env, ARM_FEATURE_AUXCR)) {
2675         ARMCPRegInfo auxcr = {
2676             "ACTLR_EL1", 0,1,0, 3,0,1, ARM_CP_STATE_BOTH,
2677             ARM_CP_CONST, PL1_RW, NULL, cpu->reset_auxcr
2678         };
2679         define_one_arm_cp_reg(cpu, &auxcr);
2680     }
2681 
2682     if (arm_feature(env, ARM_FEATURE_CBAR)) {
2683         if (arm_feature(env, ARM_FEATURE_AARCH64)) {
2684             /* 32 bit view is [31:18] 0...0 [43:32]. */
2685             uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
2686                 | extract64(cpu->reset_cbar, 32, 12);
2687             ARMCPRegInfo cbar_reginfo[] = {
2688                 { "CBAR", 15,15,0, 0,4,0, 0,
2689                   ARM_CP_CONST, PL1_R, NULL, cpu->reset_cbar },
2690                 { "CBAR_EL1", 0,15,3, 3,1,0, ARM_CP_STATE_AA64,
2691                   ARM_CP_CONST, PL1_R, NULL, cbar32 },
2692                 REGINFO_SENTINEL
2693             };
2694             /* We don't implement a r/w 64 bit CBAR currently */
2695             assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
2696             define_arm_cp_regs(cpu, cbar_reginfo);
2697         } else {
2698             ARMCPRegInfo cbar = {
2699                 "CBAR", 15,15,0, 0,4,0, 0,
2700                 0, PL1_R|PL3_W, NULL, cpu->reset_cbar, offsetof(CPUARMState, cp15.c15_config_base_address)
2701             };
2702             if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
2703                 cbar.access = PL1_R;
2704                 cbar.fieldoffset = 0;
2705                 cbar.type = ARM_CP_CONST;
2706             }
2707             define_one_arm_cp_reg(cpu, &cbar);
2708         }
2709     }
2710 
2711     /* Generic registers whose values depend on the implementation */
2712     {
2713         ARMCPRegInfo sctlr = {
2714             "SCTLR", 0,1,0, 3,0,0, ARM_CP_STATE_BOTH,
2715             0, PL1_RW, NULL, cpu->reset_sctlr, offsetof(CPUARMState, cp15.c1_sys),
2716             NULL, NULL,sctlr_write, NULL,raw_write,
2717         };
2718         if (arm_feature(env, ARM_FEATURE_XSCALE)) {
2719             /* Normally we would always end the TB on an SCTLR write, but Linux
2720              * arch/arm/mach-pxa/sleep.S expects two instructions following
2721              * an MMU enable to execute from cache.  Imitate this behaviour.
2722              */
2723             sctlr.type |= ARM_CP_SUPPRESS_TB_END;
2724         }
2725         define_one_arm_cp_reg(cpu, &sctlr);
2726     }
2727 }
2728 
2729 ARMCPU *cpu_arm_init(struct uc_struct *uc, const char *cpu_model)
2730 {
2731     return ARM_CPU(uc, cpu_generic_init(uc, TYPE_ARM_CPU, cpu_model));
2732 }
2733 
2734 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
2735 {
2736 #if 0
2737     CPUState *cs = CPU(cpu);
2738     CPUARMState *env = &cpu->env;
2739 
2740     if (arm_feature(env, ARM_FEATURE_AARCH64)) {
2741         gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
2742                                  aarch64_fpu_gdb_set_reg,
2743                                  34, "aarch64-fpu.xml", 0);
2744     } else if (arm_feature(env, ARM_FEATURE_NEON)) {
2745         gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
2746                                  51, "arm-neon.xml", 0);
2747     } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
2748         gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
2749                                  35, "arm-vfp3.xml", 0);
2750     } else if (arm_feature(env, ARM_FEATURE_VFP)) {
2751         gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
2752                                  19, "arm-vfp.xml", 0);
2753     }
2754 #endif
2755 }
2756 
2757 /* Sort alphabetically by type name, except for "any". */
2758 #if 0
2759 static void arm_cpu_list_entry(gpointer data, gpointer user_data)
2760 {
2761     ObjectClass *oc = data;
2762     CPUListState *s = user_data;
2763     const char *typename;
2764     char *name;
2765 
2766     typename = object_class_get_name(oc);
2767     name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
2768     (*s->cpu_fprintf)(s->file, "  %s\n",
2769                       name);
2770     g_free(name);
2771 }
2772 #endif
2773 
2774 void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2775 {
2776 #if 0
2777     CPUListState s = {
2778         .file = f,
2779         .cpu_fprintf = cpu_fprintf,
2780     };
2781     GSList *list;
2782 
2783     list = object_class_get_list(TYPE_ARM_CPU, false);
2784     list = g_slist_sort(list, arm_cpu_list_compare);
2785     (*cpu_fprintf)(f, "Available CPUs:\n");
2786     g_slist_foreach(list, arm_cpu_list_entry, &s);
2787     g_slist_free(list);
2788 #ifdef CONFIG_KVM
2789     /* The 'host' CPU type is dynamically registered only if KVM is
2790      * enabled, so we have to special-case it here:
2791      */
2792     (*cpu_fprintf)(f, "  host (only available in KVM mode)\n");
2793 #endif
2794 #endif
2795 }
2796 
2797 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
2798                                    void *opaque, int state,
2799                                    int crm, int opc1, int opc2)
2800 {
2801     /* Private utility function for define_one_arm_cp_reg_with_opaque():
2802      * add a single reginfo struct to the hash table.
2803      */
2804     uint32_t *key = g_new(uint32_t, 1);
2805     ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
2806     int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
2807     if (r->state == ARM_CP_STATE_BOTH && state == ARM_CP_STATE_AA32) {
2808         /* The AArch32 view of a shared register sees the lower 32 bits
2809          * of a 64 bit backing field. It is not migratable as the AArch64
2810          * view handles that. AArch64 also handles reset.
2811          * We assume it is a cp15 register if the .cp field is left unset.
2812          */
2813         if (r2->cp == 0) {
2814             r2->cp = 15;
2815         }
2816         r2->type |= ARM_CP_NO_MIGRATE;
2817         r2->resetfn = arm_cp_reset_ignore;
2818 #ifdef HOST_WORDS_BIGENDIAN
2819         if (r2->fieldoffset) {
2820             r2->fieldoffset += sizeof(uint32_t);
2821         }
2822 #endif
2823     }
2824     if (state == ARM_CP_STATE_AA64) {
2825         /* To allow abbreviation of ARMCPRegInfo
2826          * definitions, we treat cp == 0 as equivalent to
2827          * the value for "standard guest-visible sysreg".
2828          * STATE_BOTH definitions are also always "standard
2829          * sysreg" in their AArch64 view (the .cp value may
2830          * be non-zero for the benefit of the AArch32 view).
2831          */
2832         if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
2833             r2->cp = CP_REG_ARM64_SYSREG_CP;
2834         }
2835         *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
2836                                   r2->opc0, opc1, opc2);
2837     } else {
2838         *key = ENCODE_CP_REG(r2->cp, is64, r2->crn, crm, opc1, opc2);
2839     }
2840     if (opaque) {
2841         r2->opaque = opaque;
2842     }
2843     /* reginfo passed to helpers is correct for the actual access,
2844      * and is never ARM_CP_STATE_BOTH:
2845      */
2846     r2->state = state;
2847     /* Make sure reginfo passed to helpers for wildcarded regs
2848      * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
2849      */
2850     r2->crm = crm;
2851     r2->opc1 = opc1;
2852     r2->opc2 = opc2;
2853     /* By convention, for wildcarded registers only the first
2854      * entry is used for migration; the others are marked as
2855      * NO_MIGRATE so we don't try to transfer the register
2856      * multiple times. Special registers (ie NOP/WFI) are
2857      * never migratable.
2858      */
2859     if ((r->type & ARM_CP_SPECIAL) ||
2860         ((r->crm == CP_ANY) && crm != 0) ||
2861         ((r->opc1 == CP_ANY) && opc1 != 0) ||
2862         ((r->opc2 == CP_ANY) && opc2 != 0)) {
2863         r2->type |= ARM_CP_NO_MIGRATE;
2864     }
2865 
2866     /* Overriding of an existing definition must be explicitly
2867      * requested.
2868      */
2869     if (!(r->type & ARM_CP_OVERRIDE)) {
2870         ARMCPRegInfo *oldreg;
2871         oldreg = g_hash_table_lookup(cpu->cp_regs, key);
2872         if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
2873             fprintf(stderr, "Register redefined: cp=%d %d bit "
2874                     "crn=%d crm=%d opc1=%d opc2=%d, "
2875                     "was %s, now %s\n", r2->cp, 32 + 32 * is64,
2876                     r2->crn, r2->crm, r2->opc1, r2->opc2,
2877                     oldreg->name, r2->name);
2878             g_assert_not_reached();
2879         }
2880     }
2881     g_hash_table_insert(cpu->cp_regs, key, r2);
2882 }
2883 
2884 
2885 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
2886                                        const ARMCPRegInfo *r, void *opaque)
2887 {
2888     /* Define implementations of coprocessor registers.
2889      * We store these in a hashtable because typically
2890      * there are fewer than 150 registers in a space which
2891      * is 16*16*16*8*8 = 262144 in size.
2892      * Wildcarding is supported for the crm, opc1 and opc2 fields.
2893      * If a register is defined twice then the second definition is
2894      * used, so this can be used to define some generic registers and
2895      * then override them with implementation specific variations.
2896      * At least one of the original and the second definition should
2897      * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
2898      * against accidental use.
2899      *
2900      * The state field defines whether the register is to be
2901      * visible in the AArch32 or AArch64 execution state. If the
2902      * state is set to ARM_CP_STATE_BOTH then we synthesise a
2903      * reginfo structure for the AArch32 view, which sees the lower
2904      * 32 bits of the 64 bit register.
2905      *
2906      * Only registers visible in AArch64 may set r->opc0; opc0 cannot
2907      * be wildcarded. AArch64 registers are always considered to be 64
2908      * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
2909      * the register, if any.
2910      */
2911     int crm, opc1, opc2, state;
2912     int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
2913     int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
2914     int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
2915     int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
2916     int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
2917     int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
2918     /* 64 bit registers have only CRm and Opc1 fields */
2919     assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
2920     /* op0 only exists in the AArch64 encodings */
2921     assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
2922     /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
2923     assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
2924     /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
2925      * encodes a minimum access level for the register. We roll this
2926      * runtime check into our general permission check code, so check
2927      * here that the reginfo's specified permissions are strict enough
2928      * to encompass the generic architectural permission check.
2929      */
2930     if (r->state != ARM_CP_STATE_AA32) {
2931         int mask = 0;
2932         switch (r->opc1) {
2933         case 0: case 1: case 2:
2934             /* min_EL EL1 */
2935             mask = PL1_RW;
2936             break;
2937         case 3:
2938             /* min_EL EL0 */
2939             mask = PL0_RW;
2940             break;
2941         case 4:
2942             /* min_EL EL2 */
2943             mask = PL2_RW;
2944             break;
2945         case 5:
2946             /* unallocated encoding, so not possible */
2947             assert(false);
2948             break;
2949         case 6:
2950             /* min_EL EL3 */
2951             mask = PL3_RW;
2952             break;
2953         case 7:
2954             /* min_EL EL1, secure mode only (we don't check the latter) */
2955             mask = PL1_RW;
2956             break;
2957         default:
2958             /* broken reginfo with out-of-range opc1 */
2959             assert(false);
2960             break;
2961         }
2962         /* assert our permissions are not too lax (stricter is fine) */
2963         assert((r->access & ~mask) == 0);
2964     }
2965 
2966     /* Check that the register definition has enough info to handle
2967      * reads and writes if they are permitted.
2968      */
2969     if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
2970         if (r->access & PL3_R) {
2971             assert(r->fieldoffset || r->readfn);
2972         }
2973         if (r->access & PL3_W) {
2974             assert(r->fieldoffset || r->writefn);
2975         }
2976     }
2977     /* Bad type field probably means missing sentinel at end of reg list */
2978     assert(cptype_valid(r->type));
2979     for (crm = crmmin; crm <= crmmax; crm++) {
2980         for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
2981             for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
2982                 for (state = ARM_CP_STATE_AA32;
2983                      state <= ARM_CP_STATE_AA64; state++) {
2984                     if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
2985                         continue;
2986                     }
2987                     add_cpreg_to_hashtable(cpu, r, opaque, state,
2988                                            crm, opc1, opc2);
2989                 }
2990             }
2991         }
2992     }
2993 }
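
/* Usage sketch (illustrative only; not compiled): a single reginfo with
 * a wildcarded field expands into one hash-table entry per legal value.
 * The hypothetical register below, with opc2 == CP_ANY, would produce
 * eight entries via the loops above, all but the opc2 == 0 entry marked
 * ARM_CP_NO_MIGRATE. Field order is assumed to follow the positional
 * initializers used elsewhere in this file.
 */
#if 0
ARMCPRegInfo example_raz_reginfo[] = {
    /* Hypothetical read-as-zero register: cp15, crn=13, crm=0,
     * opc0=0, opc1=0, opc2 wildcarded.
     */
    { "EXAMPLE_RAZ", 15,13,0, 0,0,CP_ANY, 0,
      ARM_CP_CONST, PL1_R, NULL, 0 },
    REGINFO_SENTINEL
};
define_arm_cp_regs(cpu, example_raz_reginfo);
#endif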
2994 
2995 void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
2996                                     const ARMCPRegInfo *regs, void *opaque)
2997 {
2998     /* Define a whole list of registers */
2999     const ARMCPRegInfo *r;
3000     for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
3001         define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
3002     }
3003 }
3004 
3005 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
3006 {
3007     return g_hash_table_lookup(cpregs, &encoded_cp);
3008 }
3009 
3010 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
3011                          uint64_t value)
3012 {
3013     /* Helper coprocessor write function for write-ignore registers */
3014 }
3015 
3016 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
3017 {
3018     /* Helper coprocessor read function for read-as-zero registers */
3019     return 0;
3020 }
3021 
3022 void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
3023 {
3024     /* Helper coprocessor reset function for do-nothing-on-reset registers */
3025 }
3026 
3027 static int bad_mode_switch(CPUARMState *env, int mode)
3028 {
3029     /* Return true if it is not valid for us to switch to
3030      * this CPU mode (ie all the UNPREDICTABLE cases in
3031      * the ARM ARM CPSRWriteByInstr pseudocode).
3032      */
3033     switch (mode) {
3034     case ARM_CPU_MODE_USR:
3035     case ARM_CPU_MODE_SYS:
3036     case ARM_CPU_MODE_SVC:
3037     case ARM_CPU_MODE_ABT:
3038     case ARM_CPU_MODE_UND:
3039     case ARM_CPU_MODE_IRQ:
3040     case ARM_CPU_MODE_FIQ:
3041         return 0;
3042     case ARM_CPU_MODE_MON:
3043         return !arm_is_secure(env);
3044     default:
3045         return 1;
3046     }
3047 }
3048 
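/* For reference, the CPSR bit layout assembled by cpsr_read() and
 * consumed by cpsr_write() below is:
 *   N=31 Z=30 C=29 V=28 Q=27 IT[1:0]=26:25 GE[3:0]=19:16
 *   IT[7:2]=15:10 A=8 I=7 F=6 T=5 M[4:0]=4:0
 * (env->condexec_bits holds IT[7:0], with IT[1:0] in its low two bits.)
 */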
3049 uint32_t cpsr_read(CPUARMState *env)
3050 {
3051     int ZF;
3052     ZF = (env->ZF == 0);
3053     return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
3054         (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
3055         | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
3056         | ((env->condexec_bits & 0xfc) << 8)
3057         | (env->GE << 16) | (env->daif & CPSR_AIF);
3058 }
3059 
3060 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
3061 {
3062     if (mask & CPSR_NZCV) {
3063         env->ZF = (~val) & CPSR_Z;
3064         env->NF = val;
3065         env->CF = (val >> 29) & 1;
3066         env->VF = (val << 3) & 0x80000000;
3067     }
3068     if (mask & CPSR_Q)
3069         env->QF = ((val & CPSR_Q) != 0);
3070     if (mask & CPSR_T)
3071         env->thumb = ((val & CPSR_T) != 0);
3072     if (mask & CPSR_IT_0_1) {
3073         env->condexec_bits &= ~3;
3074         env->condexec_bits |= (val >> 25) & 3;
3075     }
3076     if (mask & CPSR_IT_2_7) {
3077         env->condexec_bits &= 3;
3078         env->condexec_bits |= (val >> 8) & 0xfc;
3079     }
3080     if (mask & CPSR_GE) {
3081         env->GE = (val >> 16) & 0xf;
3082     }
3083 
3084     env->daif &= ~(CPSR_AIF & mask);
3085     env->daif |= val & CPSR_AIF & mask;
3086 
3087     if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
3088         if (bad_mode_switch(env, val & CPSR_M)) {
3089             /* Attempt to switch to an invalid mode: this is UNPREDICTABLE.
3090              * We choose to ignore the attempt and leave the CPSR M field
3091              * untouched.
3092              */
3093             mask &= ~CPSR_M;
3094         } else {
3095             switch_mode(env, val & CPSR_M);
3096         }
3097     }
3098     mask &= ~CACHED_CPSR_BITS;
3099     env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
3100 }
3101 
3102 /* Sign/zero extend */
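/* Worked example: sxtb16(0x00800080) sign-extends bytes 0 and 2 into
 * halfwords, giving 0xff80ff80; uxtb16(0x00800080) zero-extends the
 * same bytes, giving 0x00800080.
 */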
3103 uint32_t HELPER(sxtb16)(uint32_t x)
3104 {
3105     uint32_t res;
3106     res = (uint16_t)(int8_t)x;
3107     res |= (uint32_t)(int8_t)(x >> 16) << 16;
3108     return res;
3109 }
3110 
3111 uint32_t HELPER(uxtb16)(uint32_t x)
3112 {
3113     uint32_t res;
3114     res = (uint16_t)(uint8_t)x;
3115     res |= (uint32_t)(uint8_t)(x >> 16) << 16;
3116     return res;
3117 }
3118 
3119 uint32_t HELPER(clz_arm)(uint32_t x)
3120 {
3121     return clz32(x);
3122 }
3123 
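/* The ARM SDIV/UDIV instructions define division by zero to return 0
 * (when not configured to trap) and INT_MIN / -1 to return INT_MIN
 * (the result truncated to 32 bits); both cases are undefined behaviour
 * in C, so they are checked explicitly below.
 */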
3124 int32_t HELPER(sdiv)(int32_t num, int32_t den)
3125 {
3126     if (den == 0)
3127       return 0;
3128     if (num == INT_MIN && den == -1)
3129       return INT_MIN;
3130     return num / den;
3131 }
3132 
3133 uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
3134 {
3135     if (den == 0)
3136       return 0;
3137     return num / den;
3138 }
3139 
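/* Bit reversal as a butterfly network: reverse the byte order, then
 * swap the nibbles within each byte, then reverse the bit order within
 * each nibble (the 8/4/2/1 masks each move one bit of a nibble straight
 * to its mirrored position). E.g. rbit(0x00000001) == 0x80000000.
 */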
3140 uint32_t HELPER(rbit)(uint32_t x)
3141 {
3142     x =  ((x & 0xff000000) >> 24)
3143        | ((x & 0x00ff0000) >> 8)
3144        | ((x & 0x0000ff00) << 8)
3145        | ((x & 0x000000ff) << 24);
3146     x =  ((x & 0xf0f0f0f0) >> 4)
3147        | ((x & 0x0f0f0f0f) << 4);
3148     x =  ((x & 0x88888888) >> 3)
3149        | ((x & 0x44444444) >> 1)
3150        | ((x & 0x22222222) << 1)
3151        | ((x & 0x11111111) << 3);
3152     return x;
3153 }
3154 
3155 #if defined(CONFIG_USER_ONLY)
3156 
3157 int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
3158                              int mmu_idx)
3159 {
3160     ARMCPU *cpu = ARM_CPU(NULL, cs);
3161     CPUARMState *env = &cpu->env;
3162 
3163     env->exception.vaddress = address;
3164     if (rw == 2) {
3165         cs->exception_index = EXCP_PREFETCH_ABORT;
3166     } else {
3167         cs->exception_index = EXCP_DATA_ABORT;
3168     }
3169     return 1;
3170 }
3171 
3172 /* These should probably raise undefined insn exceptions.  */
3173 void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
3174 {
3175     ARMCPU *cpu = arm_env_get_cpu(env);
3176 
3177     cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
3178 }
3179 
3180 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
3181 {
3182     ARMCPU *cpu = arm_env_get_cpu(env);
3183 
3184     cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
3185     return 0;
3186 }
3187 
3188 void switch_mode(CPUARMState *env, int mode)
3189 {
3190     ARMCPU *cpu = arm_env_get_cpu(env);
3191 
3192     if (mode != ARM_CPU_MODE_USR) {
3193         cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
3194     }
3195 }
3196 
3197 void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
3198 {
3199     ARMCPU *cpu = arm_env_get_cpu(env);
3200 
3201     cpu_abort(CPU(cpu), "banked r13 write\n");
3202 }
3203 
3204 uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
3205 {
3206     ARMCPU *cpu = arm_env_get_cpu(env);
3207 
3208     cpu_abort(CPU(cpu), "banked r13 read\n");
3209     return 0;
3210 }
3211 
3212 unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx)
3213 {
3214     return 1;
3215 }
3216 
3217 #else
3218 
3219 /* Map CPU modes onto saved register banks.  */
3220 int bank_number(int mode)
3221 {
3222     switch (mode) {
3223     default:
3224     case ARM_CPU_MODE_USR:
3225     case ARM_CPU_MODE_SYS:
3226         return 0;
3227     case ARM_CPU_MODE_SVC:
3228         return 1;
3229     case ARM_CPU_MODE_ABT:
3230         return 2;
3231     case ARM_CPU_MODE_UND:
3232         return 3;
3233     case ARM_CPU_MODE_IRQ:
3234         return 4;
3235     case ARM_CPU_MODE_FIQ:
3236         return 5;
3237     case ARM_CPU_MODE_HYP:
3238         return 6;
3239     case ARM_CPU_MODE_MON:
3240         return 7;
3241     }
3242     //hw_error("bank number requested for bad CPSR mode value 0x%x\n", mode);
3243 }
3244 
3245 void switch_mode(CPUARMState *env, int mode)
3246 {
3247     int old_mode;
3248     int i;
3249 
3250     old_mode = env->uncached_cpsr & CPSR_M;
3251     if (mode == old_mode)
3252         return;
3253 
3254     if (old_mode == ARM_CPU_MODE_FIQ) {
3255         memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
3256         memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
3257     } else if (mode == ARM_CPU_MODE_FIQ) {
3258         memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
3259         memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
3260     }
3261 
3262     i = bank_number(old_mode);
3263     env->banked_r13[i] = env->regs[13];
3264     env->banked_r14[i] = env->regs[14];
3265     env->banked_spsr[i] = env->spsr;
3266 
3267     i = bank_number(mode);
3268     env->regs[13] = env->banked_r13[i];
3269     env->regs[14] = env->banked_r14[i];
3270     env->spsr = env->banked_spsr[i];
3271 }
3272 
3273 /*
3274  * Determine the target EL for a given exception type.
3275  */
3276 unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx)
3277 {
3278     CPUARMState *env = cs->env_ptr;
3279     unsigned int cur_el = arm_current_el(env);
3280     unsigned int target_el;
3281     /* FIXME: Use actual secure state.  */
3282     bool secure = false;
3283 
3284     if (!env->aarch64) {
3285         /* TODO: Add EL2 and 3 exception handling for AArch32.  */
3286         return 1;
3287     }
3288 
3289     switch (excp_idx) {
3290     case EXCP_HVC:
3291     case EXCP_HYP_TRAP:
3292         target_el = 2;
3293         break;
3294     case EXCP_SMC:
3295         target_el = 3;
3296         break;
3297     case EXCP_FIQ:
3298     case EXCP_IRQ:
3299     {
3300         const uint64_t hcr_mask = excp_idx == EXCP_FIQ ? HCR_FMO : HCR_IMO;
3301         const uint32_t scr_mask = excp_idx == EXCP_FIQ ? SCR_FIQ : SCR_IRQ;
3302 
3303         target_el = 1;
3304         if (!secure && (env->cp15.hcr_el2 & hcr_mask)) {
3305             target_el = 2;
3306         }
3307         if (env->cp15.scr_el3 & scr_mask) {
3308             target_el = 3;
3309         }
3310         break;
3311     }
3312     case EXCP_VIRQ:
3313     case EXCP_VFIQ:
3314         target_el = 1;
3315         break;
3316     default:
3317         target_el = MAX(cur_el, 1);
3318         break;
3319     }
3320     return target_el;
3321 }
3322 
3323 static void v7m_push(CPUARMState *env, uint32_t val)
3324 {
3325     CPUState *cs = CPU(arm_env_get_cpu(env));
3326 
3327     env->regs[13] -= 4;
3328     stl_phys(cs->as, env->regs[13], val);
3329 }
3330 
3331 static uint32_t v7m_pop(CPUARMState *env)
3332 {
3333     CPUState *cs = CPU(arm_env_get_cpu(env));
3334     uint32_t val;
3335 
3336     val = ldl_phys(cs->as, env->regs[13]);
3337     env->regs[13] += 4;
3338     return val;
3339 }
3340 
3341 /* Switch to V7M main or process stack pointer.  */
3342 static void switch_v7m_sp(CPUARMState *env, int process)
3343 {
3344     uint32_t tmp;
3345     if (env->v7m.current_sp != process) {
3346         tmp = env->v7m.other_sp;
3347         env->v7m.other_sp = env->regs[13];
3348         env->regs[13] = tmp;
3349         env->v7m.current_sp = process;
3350     }
3351 }
3352 
3353 static void do_v7m_exception_exit(CPUARMState *env)
3354 {
3355     uint32_t type;
3356     uint32_t xpsr;
3357 
3358     type = env->regs[15];
3359     //if (env->v7m.exception != 0)
3360     //    armv7m_nvic_complete_irq(env->nvic, env->v7m.exception);
3361 
3362     /* Switch to the target stack.  */
3363     switch_v7m_sp(env, (type & 4) != 0);
3364     /* Pop registers.  */
3365     env->regs[0] = v7m_pop(env);
3366     env->regs[1] = v7m_pop(env);
3367     env->regs[2] = v7m_pop(env);
3368     env->regs[3] = v7m_pop(env);
3369     env->regs[12] = v7m_pop(env);
3370     env->regs[14] = v7m_pop(env);
3371     env->regs[15] = v7m_pop(env);
3372     xpsr = v7m_pop(env);
3373     xpsr_write(env, xpsr, 0xfffffdff);
3374     /* Undo stack alignment.  */
3375     if (xpsr & 0x200)
3376         env->regs[13] |= 4;
3377     /* ??? The exception return type specifies Thread/Handler mode.  However
3378        this is also implied by the xPSR value. Not sure what to do
3379        if there is a mismatch.  */
3380     /* ??? Likewise for mismatches between the CONTROL register and the stack
3381        pointer.  */
3382 }
3383 
3384 void arm_v7m_cpu_do_interrupt(CPUState *cs)
3385 {
3386     CPUARMState *env = cs->env_ptr;
3387     uint32_t xpsr = xpsr_read(env);
3388     uint32_t lr;
3389     uint32_t addr;
3390 
3391     arm_log_exception(cs->exception_index);
3392 
3393     lr = 0xfffffff1;
3394     if (env->v7m.current_sp)
3395         lr |= 4;
3396     if (env->v7m.exception == 0)
3397         lr |= 8;
3398 
3399     /* For exceptions we just mark as pending on the NVIC, and let that
3400        handle it.  */
3401     /* TODO: Need to escalate if the current priority is higher than the
3402        one we're raising.  */
3403     switch (cs->exception_index) {
3404     case EXCP_UDEF:
3405         //armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
3406         return;
3407     case EXCP_SWI:
3408         /* The PC already points to the next instruction.  */
3409         //armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
3410         return;
3411     case EXCP_PREFETCH_ABORT:
3412     case EXCP_DATA_ABORT:
3413         /* TODO: if we implemented the MPU registers, this is where we
3414          * should set the MMFAR, etc from exception.fsr and exception.vaddress.
3415          */
3416         //armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
3417         return;
3418     case EXCP_BKPT:
3419 #if 0
3420         if (semihosting_enabled) {
3421             int nr;
3422             nr = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff;
3423             if (nr == 0xab) {
3424                 env->regs[15] += 2;
3425                 env->regs[0] = do_arm_semihosting(env);
3426                 qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
3427                 return;
3428             }
3429         }
3430 #endif
3431         //armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
3432         return;
3433     case EXCP_IRQ:
3434         //env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic);
3435         break;
3436     case EXCP_EXCEPTION_EXIT:
3437         do_v7m_exception_exit(env);
3438         return;
3439     default:
3440         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
3441         return; /* Never happens.  Keep compiler happy.  */
3442     }
3443 
3444     /* Align stack pointer.  */
3445     /* ??? Should only do this if Configuration Control Register
3446        STACKALIGN bit is set.  */
3447     if (env->regs[13] & 4) {
3448         env->regs[13] -= 4;
3449         xpsr |= 0x200;
3450     }
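    /* The v7-M hardware-stacked exception frame, from the final (lowest)
     * SP upwards, is: r0, r1, r2, r3, r12, lr, return address, xPSR.
     * Pushing in the reverse order below produces exactly that layout.
     */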
3451     /* Switch to the handler mode.  */
3452     v7m_push(env, xpsr);
3453     v7m_push(env, env->regs[15]);
3454     v7m_push(env, env->regs[14]);
3455     v7m_push(env, env->regs[12]);
3456     v7m_push(env, env->regs[3]);
3457     v7m_push(env, env->regs[2]);
3458     v7m_push(env, env->regs[1]);
3459     v7m_push(env, env->regs[0]);
3460     switch_v7m_sp(env, 0);
3461     /* Clear IT bits */
3462     env->condexec_bits = 0;
3463     env->regs[14] = lr;
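    /* Fetch the handler address from the vector table. Bit 0 of a v7-M
     * vector entry is the Thumb bit; v7-M only executes Thumb code, so
     * it is expected to be set.
     */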
3464     addr = ldl_phys(cs->as, env->v7m.vecbase + env->v7m.exception * 4);
3465     env->regs[15] = addr & 0xfffffffe;
3466     env->thumb = addr & 1;
3467 }
3468 
3469 /* Handle a CPU exception.  */
3470 void arm_cpu_do_interrupt(CPUState *cs)
3471 {
3472     CPUARMState *env = cs->env_ptr;
3473     ARMCPU *cpu = ARM_CPU(env->uc, cs);
3474     uint32_t addr;
3475     uint32_t mask;
3476     int new_mode;
3477     uint32_t offset;
3478     uint32_t moe;
3479 
3480     assert(!IS_M(env));
3481 
3482     arm_log_exception(cs->exception_index);
3483 
3484     if (arm_is_psci_call(cpu, cs->exception_index)) {
3485         arm_handle_psci_call(cpu);
3486         qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
3487         return;
3488     }
3489 
3490     /* If this is a debug exception we must update the DBGDSCR.MOE bits */
3491     switch (env->exception.syndrome >> ARM_EL_EC_SHIFT) {
3492     case EC_BREAKPOINT:
3493     case EC_BREAKPOINT_SAME_EL:
3494         moe = 1;
3495         break;
3496     case EC_WATCHPOINT:
3497     case EC_WATCHPOINT_SAME_EL:
3498         moe = 10;
3499         break;
3500     case EC_AA32_BKPT:
3501         moe = 3;
3502         break;
3503     case EC_VECTORCATCH:
3504         moe = 5;
3505         break;
3506     default:
3507         moe = 0;
3508         break;
3509     }
3510 
3511     if (moe) {
3512         env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
3513     }
3514 
3515     /* TODO: Vectored interrupt controller.  */
3516     switch (cs->exception_index) {
3517     case EXCP_UDEF:
3518         new_mode = ARM_CPU_MODE_UND;
3519         addr = 0x04;
3520         mask = CPSR_I;
3521         if (env->thumb)
3522             offset = 2;
3523         else
3524             offset = 4;
3525         break;
3526     case EXCP_SWI:
3527 #if 0
3528         if (semihosting_enabled) {
3529             /* Check for semihosting interrupt.  */
3530             if (env->thumb) {
3531                 mask = arm_lduw_code(env, env->regs[15] - 2, env->bswap_code)
3532                     & 0xff;
3533             } else {
3534                 mask = arm_ldl_code(env, env->regs[15] - 4, env->bswap_code)
3535                     & 0xffffff;
3536             }
3537             /* Only intercept calls from privileged modes, to provide some
3538                semblance of security.  */
3539             if (((mask == 0x123456 && !env->thumb)
3540                     || (mask == 0xab && env->thumb))
3541                   && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
3542                 env->regs[0] = do_arm_semihosting(env);
3543                 qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
3544                 return;
3545             }
3546         }
3547 #endif
3548         new_mode = ARM_CPU_MODE_SVC;
3549         addr = 0x08;
3550         mask = CPSR_I;
3551         /* The PC already points to the next instruction.  */
3552         offset = 0;
3553         break;
3554     case EXCP_BKPT:
3555 #if 0
3556         /* See if this is a semihosting syscall.  */
3557         if (env->thumb && semihosting_enabled) {
3558             mask = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff;
3559             if (mask == 0xab
3560                   && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
3561                 env->regs[15] += 2;
3562                 env->regs[0] = do_arm_semihosting(env);
3563                 qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
3564                 return;
3565             }
3566         }
3567 #endif
3568         env->exception.fsr = 2;
3569         /* Fall through to prefetch abort.  */
3570     case EXCP_PREFETCH_ABORT:
3571         env->cp15.ifsr_el2 = env->exception.fsr;
3572         env->cp15.far_el[1] = deposit64(env->cp15.far_el[1], 32, 32,
3573                                         env->exception.vaddress);
3574         qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
3575                       env->cp15.ifsr_el2, (uint32_t)env->exception.vaddress);
3576         new_mode = ARM_CPU_MODE_ABT;
3577         addr = 0x0c;
3578         mask = CPSR_A | CPSR_I;
3579         offset = 4;
3580         break;
3581     case EXCP_DATA_ABORT:
3582         env->cp15.esr_el[1] = env->exception.fsr;
3583         env->cp15.far_el[1] = deposit64(env->cp15.far_el[1], 0, 32,
3584                                         env->exception.vaddress);
3585         qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
3586                       (uint32_t)env->cp15.esr_el[1],
3587                       (uint32_t)env->exception.vaddress);
3588         new_mode = ARM_CPU_MODE_ABT;
3589         addr = 0x10;
3590         mask = CPSR_A | CPSR_I;
3591         offset = 8;
3592         break;
3593     case EXCP_IRQ:
3594         new_mode = ARM_CPU_MODE_IRQ;
3595         addr = 0x18;
3596         /* Disable IRQ and imprecise data aborts.  */
3597         mask = CPSR_A | CPSR_I;
3598         offset = 4;
3599         break;
3600     case EXCP_FIQ:
3601         new_mode = ARM_CPU_MODE_FIQ;
3602         addr = 0x1c;
3603         /* Disable FIQ, IRQ and imprecise data aborts.  */
3604         mask = CPSR_A | CPSR_I | CPSR_F;
3605         offset = 4;
3606         break;
3607     case EXCP_SMC:
3608         new_mode = ARM_CPU_MODE_MON;
3609         addr = 0x08;
3610         mask = CPSR_A | CPSR_I | CPSR_F;
3611         offset = 0;
3612         break;
3613     default:
3614         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
3615         return; /* Never happens.  Keep compiler happy.  */
3616     }
3617     /* High vectors.  */
3618     if (env->cp15.c1_sys & SCTLR_V) {
3619         /* when enabled, base address cannot be remapped.  */
3620         addr += 0xffff0000;
3621     } else {
3622         /* ARM v7 architectures provide a vector base address register to remap
3623          * the interrupt vector table.
3624      * This register is only honoured in non-monitor mode, and has a
3625      * secure and a non-secure copy. Since the CPU always operates
3626      * non-secure and is never in monitor mode, this feature is always active.
3627          * Note: only bits 31:5 are valid.
3628          */
3629         addr += env->cp15.vbar_el[1];
3630     }
3631 
3632     if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
3633         env->cp15.scr_el3 &= ~SCR_NS;
3634     }
3635 
3636     switch_mode (env, new_mode);
3637     /* For exceptions taken to AArch32 we must clear the SS bit in both
3638      * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
3639      */
3640     env->uncached_cpsr &= ~PSTATE_SS;
3641     env->spsr = cpsr_read(env);
3642     /* Clear IT bits.  */
3643     env->condexec_bits = 0;
3644     /* Switch to the new mode, and to the correct instruction set.  */
3645     env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
3646     env->daif |= mask;
3647     /* this is a lie, as there was no c1_sys on V4T/V5, but who cares
3648      * and we should just guard the thumb mode on V4 */
3649     if (arm_feature(env, ARM_FEATURE_V4T)) {
3650         env->thumb = (env->cp15.c1_sys & SCTLR_TE) != 0;
3651     }
3652     env->regs[14] = env->regs[15] + offset;
3653     env->regs[15] = addr;
3654     cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
3655 }
3656 
3657 /* Check section/page access permissions.
3658    Returns the page protection flags, or zero if the access is not
3659    permitted.  */
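/* For reference, the short-descriptor AP encodings decoded below:
 *   0: no access on v7 (S/R-dependent read-only on earlier cores)
 *   1: privileged read/write          2: priv read/write, user read-only
 *   3: full read/write                4: reserved
 *   5: privileged read-only           6: read-only
 *   7: read-only (v6K and later)
 */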
3660 static inline int check_ap(CPUARMState *env, int ap, int domain_prot,
3661                            int access_type, int is_user)
3662 {
3663     int prot_ro;
3664 
3665     if (domain_prot == 3) {
3666         return PAGE_READ | PAGE_WRITE;
3667     }
3668 
3669     if (access_type == 1)
3670         prot_ro = 0;
3671     else
3672         prot_ro = PAGE_READ;
3673 
3674     switch (ap) {
3675     case 0:
3676         if (arm_feature(env, ARM_FEATURE_V7)) {
3677             return 0;
3678         }
3679         if (access_type == 1)
3680             return 0;
3681         switch (env->cp15.c1_sys & (SCTLR_S | SCTLR_R)) {
3682         case SCTLR_S:
3683             return is_user ? 0 : PAGE_READ;
3684         case SCTLR_R:
3685             return PAGE_READ;
3686         default:
3687             return 0;
3688         }
3689     case 1:
3690         return is_user ? 0 : PAGE_READ | PAGE_WRITE;
3691     case 2:
3692         if (is_user)
3693             return prot_ro;
3694         else
3695             return PAGE_READ | PAGE_WRITE;
3696     case 3:
3697         return PAGE_READ | PAGE_WRITE;
3698     case 4: /* Reserved.  */
3699         return 0;
3700     case 5:
3701         return is_user ? 0 : prot_ro;
3702     case 6:
3703         return prot_ro;
3704     case 7:
3705         if (!arm_feature(env, ARM_FEATURE_V6K))
3706             return 0;
3707         return prot_ro;
3708     default:
3709         abort();
3710     }
3711 }
3712 
3713 static bool get_level1_table_address(CPUARMState *env, uint32_t *table,
3714                                          uint32_t address)
3715 {
3716     if (address & env->cp15.c2_mask) {
3717         if ((env->cp15.c2_control & TTBCR_PD1)) {
3718             /* Translation table walk disabled for TTBR1 */
3719             return false;
3720         }
3721         *table = env->cp15.ttbr1_el1 & 0xffffc000;
3722     } else {
3723         if ((env->cp15.c2_control & TTBCR_PD0)) {
3724             /* Translation table walk disabled for TTBR0 */
3725             return false;
3726         }
3727         *table = env->cp15.ttbr0_el1 & env->cp15.c2_base_mask;
3728     }
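    /* Each of the 4096 level 1 descriptors maps 1MB, so the table index
     * is address bits [31:20] scaled by the 4-byte descriptor size:
     * (address >> 20) << 2, i.e. (address >> 18) & 0x3ffc.
     */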
3729     *table |= (address >> 18) & 0x3ffc;
3730     return true;
3731 }
3732 
3733 static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
3734                             int is_user, hwaddr *phys_ptr,
3735                             int *prot, target_ulong *page_size)
3736 {
3737     CPUState *cs = CPU(arm_env_get_cpu(env));
3738     int code;
3739     uint32_t table;
3740     uint32_t desc;
3741     int type;
3742     int ap;
3743     int domain = 0;
3744     int domain_prot;
3745     hwaddr phys_addr;
3746 
3747     /* Pagetable walk.  */
3748     /* Lookup l1 descriptor.  */
3749     if (!get_level1_table_address(env, &table, address)) {
3750         /* Section translation fault if page walk is disabled by PD0 or PD1 */
3751         code = 5;
3752         goto do_fault;
3753     }
3754     desc = ldl_phys(cs->as, table);
3755     type = (desc & 3);
3756     domain = (desc >> 5) & 0x0f;
3757     domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
3758     if (type == 0) {
3759         /* Section translation fault.  */
3760         code = 5;
3761         goto do_fault;
3762     }
3763     if (domain_prot == 0 || domain_prot == 2) {
3764         if (type == 2)
3765             code = 9; /* Section domain fault.  */
3766         else
3767             code = 11; /* Page domain fault.  */
3768         goto do_fault;
3769     }
3770     if (type == 2) {
3771         /* 1Mb section.  */
3772         phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
3773         ap = (desc >> 10) & 3;
3774         code = 13;
3775         *page_size = 1024 * 1024;
3776     } else {
3777         /* Lookup l2 entry.  */
3778         if (type == 1) {
3779             /* Coarse pagetable.  */
3780             table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
3781         } else {
3782             /* Fine pagetable.  */
3783             table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
3784         }
3785         desc = ldl_phys(cs->as, table);
3786         switch (desc & 3) {
3787         case 0: /* Page translation fault.  */
3788             code = 7;
3789             goto do_fault;
3790         case 1: /* 64k page.  */
3791             phys_addr = (desc & 0xffff0000) | (address & 0xffff);
3792             ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
3793             *page_size = 0x10000;
3794             break;
3795         case 2: /* 4k page.  */
3796             phys_addr = (desc & 0xfffff000) | (address & 0xfff);
3797             ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
3798             *page_size = 0x1000;
3799             break;
3800         case 3: /* 1k page.  */
3801             if (type == 1) {
3802                 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
3803                     phys_addr = (desc & 0xfffff000) | (address & 0xfff);
3804                 } else {
3805                     /* Page translation fault.  */
3806                     code = 7;
3807                     goto do_fault;
3808                 }
3809             } else {
3810                 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
3811             }
3812             ap = (desc >> 4) & 3;
3813             *page_size = 0x400;
3814             break;
3815         default:
3816             /* Never happens, but compiler isn't smart enough to tell.  */
3817             abort();
3818         }
3819         code = 15;
3820     }
3821     *prot = check_ap(env, ap, domain_prot, access_type, is_user);
3822     if (!*prot) {
3823         /* Access permission fault.  */
3824         goto do_fault;
3825     }
3826     *prot |= PAGE_EXEC;
3827     *phys_ptr = phys_addr;
3828     return 0;
3829 do_fault:
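    /* Short-descriptor FSR format: fault status in bits [3:0], the
     * faulting domain in bits [7:4].
     */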
3830     return code | (domain << 4);
3831 }
3832 
3833 static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
3834                             int is_user, hwaddr *phys_ptr,
3835                             int *prot, target_ulong *page_size)
3836 {
3837     CPUState *cs = CPU(arm_env_get_cpu(env));
3838     int code;
3839     uint32_t table;
3840     uint32_t desc;
3841     uint32_t xn;
3842     uint32_t pxn = 0;
3843     int type;
3844     int ap;
3845     int domain = 0;
3846     int domain_prot;
3847     hwaddr phys_addr;
3848 
3849     /* Pagetable walk.  */
3850     /* Lookup l1 descriptor.  */
3851     if (!get_level1_table_address(env, &table, address)) {
3852         /* Section translation fault if page walk is disabled by PD0 or PD1 */
3853         code = 5;
3854         goto do_fault;
3855     }
3856     desc = ldl_phys(cs->as, table);
3857     type = (desc & 3);
3858     if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
3859         /* Section translation fault, or attempt to use the encoding
3860          * which is Reserved on implementations without PXN.
3861          */
3862         code = 5;
3863         goto do_fault;
3864     }
3865     if ((type == 1) || !(desc & (1 << 18))) {
3866         /* Page or Section.  */
3867         domain = (desc >> 5) & 0x0f;
3868     }
3869     domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
3870     if (domain_prot == 0 || domain_prot == 2) {
3871         if (type != 1) {
3872             code = 9; /* Section domain fault.  */
3873         } else {
3874             code = 11; /* Page domain fault.  */
3875         }
3876         goto do_fault;
3877     }
3878     if (type != 1) {
3879         if (desc & (1 << 18)) {
3880             /* Supersection.  */
3881             phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
3882             *page_size = 0x1000000;
3883         } else {
3884             /* Section.  */
3885             phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
3886             *page_size = 0x100000;
3887         }
3888         ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
3889         xn = desc & (1 << 4);
3890         pxn = desc & 1;
3891         code = 13;
3892     } else {
3893         if (arm_feature(env, ARM_FEATURE_PXN)) {
3894             pxn = (desc >> 2) & 1;
3895         }
3896         /* Lookup l2 entry.  */
3897         table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
3898         desc = ldl_phys(cs->as, table);
3899         ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
3900         switch (desc & 3) {
3901         case 0: /* Page translation fault.  */
3902             code = 7;
3903             goto do_fault;
3904         case 1: /* 64k page.  */
3905             phys_addr = (desc & 0xffff0000) | (address & 0xffff);
3906             xn = desc & (1 << 15);
3907             *page_size = 0x10000;
3908             break;
3909         case 2: case 3: /* 4k page.  */
3910             phys_addr = (desc & 0xfffff000) | (address & 0xfff);
3911             xn = desc & 1;
3912             *page_size = 0x1000;
3913             break;
3914         default:
3915             /* Never happens, but compiler isn't smart enough to tell.  */
3916             abort();
3917         }
3918         code = 15;
3919     }
3920     if (domain_prot == 3) {
3921         *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
3922     } else {
3923         if (pxn && !is_user) {
3924             xn = 1;
3925         }
3926         if (xn && access_type == 2)
3927             goto do_fault;
3928 
3929         /* The simplified model uses AP[0] as an access control bit.  */
3930         if ((env->cp15.c1_sys & SCTLR_AFE) && (ap & 1) == 0) {
3931             /* Access flag fault.  */
3932             code = (code == 15) ? 6 : 3;
3933             goto do_fault;
3934         }
3935         *prot = check_ap(env, ap, domain_prot, access_type, is_user);
3936         if (!*prot) {
3937             /* Access permission fault.  */
3938             goto do_fault;
3939         }
3940         if (!xn) {
3941             *prot |= PAGE_EXEC;
3942         }
3943     }
3944     *phys_ptr = phys_addr;
3945     return 0;
3946 do_fault:
3947     return code | (domain << 4);
3948 }
3949 
3950 /* Fault type for long-descriptor MMU fault reporting; this corresponds
3951  * to bits [5..2] in the STATUS field in long-format DFSR/IFSR.
3952  */
3953 typedef enum {
3954     translation_fault = 1,
3955     access_fault = 2,
3956     permission_fault = 3,
3957 } MMUFaultType;
3958 
3959 static int get_phys_addr_lpae(CPUARMState *env, target_ulong address,
3960                               int access_type, int is_user,
3961                               hwaddr *phys_ptr, int *prot,
3962                               target_ulong *page_size_ptr)
3963 {
3964     CPUState *cs = CPU(arm_env_get_cpu(env));
3965     /* Read an LPAE long-descriptor translation table. */
3966     MMUFaultType fault_type = translation_fault;
3967     uint32_t level = 1;
3968     uint32_t epd;
3969     int32_t tsz;
3970     uint32_t tg;
3971     uint64_t ttbr;
3972     int ttbr_select;
3973     hwaddr descaddr, descmask;
3974     uint32_t tableattrs;
3975     target_ulong page_size;
3976     uint32_t attrs;
3977     int32_t granule_sz = 9;
3978     int32_t va_size = 32;
3979     int32_t tbi = 0;
3980     uint32_t t0sz;
3981     uint32_t t1sz;
3982 
3983     if (arm_el_is_aa64(env, 1)) {
3984         va_size = 64;
3985         if (extract64(address, 55, 1))
3986             tbi = extract64(env->cp15.c2_control, 38, 1);
3987         else
3988             tbi = extract64(env->cp15.c2_control, 37, 1);
3989         tbi *= 8;
3990     }
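    /* tbi is now either 0 or 8: the number of top address bits ignored
     * in the TTBR0/TTBR1 region checks below when AArch64 top-byte-ignore
     * is enabled for this half of the address space.
     */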
3991 
3992     /* Determine whether this address is in the region controlled by
3993      * TTBR0 or TTBR1 (or if it is in neither region and should fault).
3994      * This is a Non-secure PL0/1 stage 1 translation, so controlled by
3995      * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
3996      */
3997     t0sz = extract32(env->cp15.c2_control, 0, 6);
3998     if (arm_el_is_aa64(env, 1)) {
3999         t0sz = MIN(t0sz, 39);
4000         t0sz = MAX(t0sz, 16);
4001     }
4002     t1sz = extract32(env->cp15.c2_control, 16, 6);
4003     if (arm_el_is_aa64(env, 1)) {
4004         t1sz = MIN(t1sz, 39);
4005         t1sz = MAX(t1sz, 16);
4006     }
4007     if (t0sz && !extract64(address, va_size - t0sz, t0sz - tbi)) {
4008         /* there is a ttbr0 region and we are in it (high bits all zero) */
4009         ttbr_select = 0;
4010     } else if (t1sz && !extract64(~address, va_size - t1sz, t1sz - tbi)) {
4011         /* there is a ttbr1 region and we are in it (high bits all one) */
4012         ttbr_select = 1;
4013     } else if (!t0sz) {
4014         /* ttbr0 region is "everything not in the ttbr1 region" */
4015         ttbr_select = 0;
4016     } else if (!t1sz) {
4017         /* ttbr1 region is "everything not in the ttbr0 region" */
4018         ttbr_select = 1;
4019     } else {
4020         /* in the gap between the two regions, this is a Translation fault */
4021         fault_type = translation_fault;
4022         goto do_fault;
4023     }
4024 
4025     /* Note that QEMU ignores shareability and cacheability attributes,
4026      * so we don't need to do anything with the SH, ORGN, IRGN fields
4027      * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
4028      * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
4029      * implement any ASID-like capability so we can ignore it (instead
4030      * we will always flush the TLB any time the ASID is changed).
4031      */
4032     if (ttbr_select == 0) {
4033         ttbr = env->cp15.ttbr0_el1;
4034         epd = extract32(env->cp15.c2_control, 7, 1);
4035         tsz = t0sz;
4036 
4037         tg = extract32(env->cp15.c2_control, 14, 2);
4038         if (tg == 1) { /* 64KB pages */
4039             granule_sz = 13;
4040         }
4041         if (tg == 2) { /* 16KB pages */
4042             granule_sz = 11;
4043         }
4044     } else {
4045         ttbr = env->cp15.ttbr1_el1;
4046         epd = extract32(env->cp15.c2_control, 23, 1);
4047         tsz = t1sz;
4048 
4049         tg = extract32(env->cp15.c2_control, 30, 2);
4050         if (tg == 3)  { /* 64KB pages */
4051             granule_sz = 13;
4052         }
4053         if (tg == 1) { /* 16KB pages */
4054             granule_sz = 11;
4055         }
4056     }
4057 
4058     if (epd) {
4059         /* Translation table walk disabled => Translation fault on TLB miss */
4060         goto do_fault;
4061     }
4062 
4063     /* The starting level depends on the virtual address size (which can be
4064      * up to 48 bits) and the translation granule size. It indicates the number
4065      * of strides (granule_sz bits at a time) needed to consume the bits
4066      * of the input address. In the pseudocode this is:
4067      *  level = 4 - RoundUp((inputsize - grainsize) / stride)
4068      * where their 'inputsize' is our 'va_size - tsz', 'grainsize' is
4069      * our 'granule_sz + 3' and 'stride' is our 'granule_sz'.
4070      * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
4071      *     = 4 - (va_size - tsz - granule_sz - 3 + granule_sz - 1) / granule_sz
4072      *     = 4 - (va_size - tsz - 4) / granule_sz;
4073      */
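    /* For example, with 4KB granules (granule_sz == 9) a 32-bit LPAE
     * walk (va_size == 32, tsz == 0) starts at level 4 - 28/9 == 1,
     * while a 48-bit AArch64 VA (va_size == 64, tsz == 16) starts at
     * level 4 - 44/9 == 0.
     */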
4074     level = 4 - (va_size - tsz - 4) / granule_sz;
4075 
4076     /* Clear the vaddr bits which aren't part of the within-region address,
4077      * so that we don't have to special case things when calculating the
4078      * first descriptor address.
4079      */
4080     if (tsz) {
4081         address &= (1ULL << (va_size - tsz)) - 1;
4082     }
4083 
4084     descmask = (1ULL << (granule_sz + 3)) - 1;
4085 
4086     /* Now we can extract the actual base address from the TTBR */
4087     descaddr = extract64(ttbr, 0, 48);
4088     descaddr &= ~((1ULL << (va_size - tsz - (granule_sz * (4 - level)))) - 1);
4089 
4090     tableattrs = 0;
4091     for (;;) {
4092         uint64_t descriptor;
4093 
4094         descaddr |= (address >> (granule_sz * (4 - level))) & descmask;
4095         descaddr &= ~7ULL;
4096         descriptor = ldq_phys(cs->as, descaddr);
4097         if (!(descriptor & 1) ||
4098             (!(descriptor & 2) && (level == 3))) {
4099             /* Invalid, or the Reserved level 3 encoding */
4100             goto do_fault;
4101         }
4102         descaddr = descriptor & 0xfffffff000ULL;
4103 
4104         if ((descriptor & 2) && (level < 3)) {
4105             /* Table entry. The top five bits are attributes which  may
4106              * propagate down through lower levels of the table (and
4107              * which are all arranged so that 0 means "no effect", so
4108              * we can gather them up by ORing in the bits at each level).
4109              */
4110             tableattrs |= extract64(descriptor, 59, 5);
4111             level++;
4112             continue;
4113         }
4114         /* Block entry at level 1 or 2, or page entry at level 3.
4115          * These are basically the same thing, although the number
4116          * of bits we pull in from the vaddr varies.
4117          */
4118         page_size = (1ULL << ((granule_sz * (4 - level)) + 3));
4119         descaddr |= (address & (page_size - 1));
4120         /* Extract attributes from the descriptor and merge with table attrs */
4121         attrs = extract64(descriptor, 2, 10)
4122             | (extract64(descriptor, 52, 12) << 10);
4123         attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
4124         attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */
4125         /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
4126          * means "force PL1 access only", which means forcing AP[1] to 0.
4127          */
4128         if (extract32(tableattrs, 2, 1)) {
4129             attrs &= ~(1 << 4);
4130         }
4131         /* Since we're always in the Non-secure state, NSTable is ignored. */
4132         break;
4133     }
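    /* Index extraction example (illustrative): with a 4KB granule
     * (granule_sz == 9, descmask == 0xfff) at level 3, the OR at the top
     * of the loop adds in (address >> 9) & 0xfff, i.e. VA bits [20:12]
     * land in bits [11:3] with junk from VA bits [11:9] in bits [2:0];
     * the following &= ~7ULL clears that junk, so descaddr addresses the
     * 8-byte descriptor for index VA[20:12].
     */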
4134     /* Here descaddr is the final physical address, and attributes
4135      * are all in attrs.
4136      */
4137     fault_type = access_fault;
4138     if ((attrs & (1 << 8)) == 0) {
4139         /* Access flag */
4140         goto do_fault;
4141     }
4142     fault_type = permission_fault;
4143     if (is_user && !(attrs & (1 << 4))) {
4144         /* Unprivileged access not enabled */
4145         goto do_fault;
4146     }
4147     *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
4148     if ((arm_feature(env, ARM_FEATURE_V8) && is_user && (attrs & (1 << 12))) ||
4149         (!arm_feature(env, ARM_FEATURE_V8) && (attrs & (1 << 12))) ||
4150         (!is_user && (attrs & (1 << 11)))) {
4151         /* XN/UXN or PXN. Since we only implement EL0/EL1 we unconditionally
4152          * treat XN/UXN as UXN for v8.
4153          */
4154         if (access_type == 2) {
4155             goto do_fault;
4156         }
4157         *prot &= ~PAGE_EXEC;
4158     }
4159     if (attrs & (1 << 5)) {
4160         /* Write access forbidden */
4161         if (access_type == 1) {
4162             goto do_fault;
4163         }
4164         *prot &= ~PAGE_WRITE;
4165     }
4166 
4167     *phys_ptr = descaddr;
4168     *page_size_ptr = page_size;
4169     return 0;
4170 
4171 do_fault:
4172     /* Long-descriptor format IFSR/DFSR value */
4173     return (1 << 9) | (fault_type << 2) | level;
4174 }
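
/* Encoding example (illustrative, assuming the MMUFaultType values defined
 * earlier in this file, where permission_fault == 3): a permission fault at
 * level 2 returns (1 << 9) | (3 << 2) | 2 == 0x20e, i.e. long-descriptor
 * FSR status 0b001110, "permission fault, level 2", plus the LPAE
 * format bit.
 */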
4175 
4176 static int get_phys_addr_mpu(CPUARMState *env, uint32_t address,
4177                              int access_type, int is_user,
4178                              hwaddr *phys_ptr, int *prot)
4179 {
4180     int n;
4181     uint32_t mask;
4182     uint32_t base;
4183 
4184     *phys_ptr = address;
4185     for (n = 7; n >= 0; n--) {
4186         base = env->cp15.c6_region[n];
4187         if ((base & 1) == 0)
4188             continue;
4189         mask = 1 << ((base >> 1) & 0x1f);
4190         /* Keep this shift separate from the above to avoid an
4191            (undefined) << 32.  */
4192         mask = (mask << 1) - 1;
4193         if (((base ^ address) & ~mask) == 0)
4194             break;
4195     }
4196     if (n < 0)
4197         return 2;
4198 
4199     if (access_type == 2) {
4200         mask = env->cp15.pmsav5_insn_ap;
4201     } else {
4202         mask = env->cp15.pmsav5_data_ap;
4203     }
4204     mask = (mask >> (n * 4)) & 0xf;
4205     switch (mask) {
4206     case 0:
4207         return 1;
4208     case 1:
4209         if (is_user)
4210             return 1;
4211         *prot = PAGE_READ | PAGE_WRITE;
4212         break;
4213     case 2:
4214         *prot = PAGE_READ;
4215         if (!is_user)
4216             *prot |= PAGE_WRITE;
4217         break;
4218     case 3:
4219         *prot = PAGE_READ | PAGE_WRITE;
4220         break;
4221     case 5:
4222         if (is_user)
4223             return 1;
4224         *prot = PAGE_READ;
4225         break;
4226     case 6:
4227         *prot = PAGE_READ;
4228         break;
4229     default:
4230         /* Bad permission.  */
4231         return 1;
4232     }
4233     *prot |= PAGE_EXEC;
4234     return 0;
4235 }
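
/* Region decode example (illustrative): a c6_region value of 0x0000003f
 * has bit 0 set (region enabled) and size field ((0x3f >> 1) & 0x1f) == 31,
 * so mask becomes ((1u << 31) << 1) - 1 == 0xffffffff and the region spans
 * the whole 4GB address space; a size field of 11 yields mask == 0xfff,
 * a 4KB region.
 */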
4236 
4237 /* get_phys_addr - get the physical address for this virtual address
4238  *
4239  * Find the physical address corresponding to the given virtual address,
4240  * by doing a translation table walk on MMU based systems or using the
4241  * MPU state on MPU based systems.
4242  *
4243  * Returns 0 if the translation was successful. Otherwise, phys_ptr,
4244  * prot and page_size are not filled in, and the return value provides
4245  * information on why the translation aborted, in the format of a
4246  * DFSR/IFSR fault register, with the following caveats:
4247  *  * we honour the short vs long DFSR format differences.
4248  *  * the WnR bit is never set (the caller must do this).
4249  *  * for MPU based systems we don't bother to return a full FSR format
4250  *    value.
4251  *
4252  * @env: CPUARMState
4253  * @address: virtual address to get physical address for
4254  * @access_type: 0 for read, 1 for write, 2 for execute
4255  * @is_user: 0 for privileged access, 1 for user
4256  * @phys_ptr: set to the physical address corresponding to the virtual address
4257  * @prot: set to the permissions for the page containing phys_ptr
4258  * @page_size: set to the size of the page containing phys_ptr
4259  */
4260 static inline int get_phys_addr(CPUARMState *env, target_ulong address,
4261                                 int access_type, int is_user,
4262                                 hwaddr *phys_ptr, int *prot,
4263                                 target_ulong *page_size)
4264 {
4265     /* Fast Context Switch Extension.  */
4266     if (address < 0x02000000)
4267         address += env->cp15.c13_fcse;
4268 
4269     if ((env->cp15.c1_sys & SCTLR_M) == 0) {
4270         /* MMU/MPU disabled.  */
4271         *phys_ptr = address;
4272         *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
4273         *page_size = TARGET_PAGE_SIZE;
4274         return 0;
4275     } else if (arm_feature(env, ARM_FEATURE_MPU)) {
4276         *page_size = TARGET_PAGE_SIZE;
4277         return get_phys_addr_mpu(env, address, access_type, is_user,
4278                                  phys_ptr, prot);
4279     } else if (extended_addresses_enabled(env)) {
4280         return get_phys_addr_lpae(env, address, access_type, is_user, phys_ptr,
4281                                   prot, page_size);
4282     } else if (env->cp15.c1_sys & SCTLR_XP) {
4283         return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
4284                                 prot, page_size);
4285     } else {
4286         return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
4287                                 prot, page_size);
4288     }
4289 }
4290 
4291 int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
4292                              int access_type, int mmu_idx)
4293 {
4294     CPUARMState *env = cs->env_ptr;
4295     hwaddr phys_addr;
4296     target_ulong page_size;
4297     int prot;
4298     int ret, is_user;
4299     uint32_t syn;
4300     bool same_el = (arm_current_el(env) != 0);
4301 
4302     is_user = mmu_idx == MMU_USER_IDX;
4303     ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot,
4304                         &page_size);
4305     if (ret == 0) {
4306         /* Map a single [sub]page.  */
4307         phys_addr &= TARGET_PAGE_MASK;
4308         address &= TARGET_PAGE_MASK;
4309         tlb_set_page(cs, address, phys_addr, prot, mmu_idx, page_size);
4310         return 0;
4311     }
4312 
4313     /* AArch64 syndrome does not have an LPAE bit */
4314     syn = ret & ~(1 << 9);
4315 
4316     /* For insn and data aborts we assume there is no instruction syndrome
4317      * information; this is always true for exceptions reported to EL1.
4318      */
4319     if (access_type == 2) {
4320         syn = syn_insn_abort(same_el, 0, 0, syn);
4321         cs->exception_index = EXCP_PREFETCH_ABORT;
4322     } else {
4323         syn = syn_data_abort(same_el, 0, 0, 0, access_type == 1, syn);
4324         if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6)) {
4325             ret |= (1 << 11);
4326         }
4327         cs->exception_index = EXCP_DATA_ABORT;
4328     }
4329 
4330     env->exception.syndrome = syn;
4331     env->exception.vaddress = address;
4332     env->exception.fsr = ret;
4333     return 1;
4334 }
4335 
4336 hwaddr arm_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
4337 {
4338     ARMCPU *cpu = ARM_CPU(NULL, cs);
4339     hwaddr phys_addr;
4340     target_ulong page_size;
4341     int prot;
4342     int ret;
4343 
4344     ret = get_phys_addr(&cpu->env, addr, 0, 0, &phys_addr, &prot, &page_size);
4345 
4346     if (ret != 0) {
4347         return -1;
4348     }
4349 
4350     return phys_addr;
4351 }
4352 
4353 void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
4354 {
4355     if ((env->uncached_cpsr & CPSR_M) == mode) {
4356         env->regs[13] = val;
4357     } else {
4358         env->banked_r13[bank_number(mode)] = val;
4359     }
4360 }
4361 
4362 uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
4363 {
4364     if ((env->uncached_cpsr & CPSR_M) == mode) {
4365         return env->regs[13];
4366     } else {
4367         return env->banked_r13[bank_number(mode)];
4368     }
4369 }
4370 
4371 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
4372 {
4373     ARMCPU *cpu = arm_env_get_cpu(env);
4374 
4375     switch (reg) {
4376     case 0: /* APSR */
4377         return xpsr_read(env) & 0xf8000000;
4378     case 1: /* IAPSR */
4379         return xpsr_read(env) & 0xf80001ff;
4380     case 2: /* EAPSR */
4381         return xpsr_read(env) & 0xff00fc00;
4382     case 3: /* xPSR */
4383         return xpsr_read(env) & 0xff00fdff;
4384     case 5: /* IPSR */
4385         return xpsr_read(env) & 0x000001ff;
4386     case 6: /* EPSR */
4387         return xpsr_read(env) & 0x0700fc00;
4388     case 7: /* IEPSR */
4389         return xpsr_read(env) & 0x0700edff;
4390     case 8: /* MSP */
4391         return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
4392     case 9: /* PSP */
4393         return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
4394     case 16: /* PRIMASK */
4395         return (env->daif & PSTATE_I) != 0;
4396     case 17: /* BASEPRI */
4397     case 18: /* BASEPRI_MAX */
4398         return env->v7m.basepri;
4399     case 19: /* FAULTMASK */
4400         return (env->daif & PSTATE_F) != 0;
4401     case 20: /* CONTROL */
4402         return env->v7m.control;
4403     default:
4404         /* ??? For debugging only.  */
4405         cpu_abort(CPU(cpu), "Unimplemented system register read (%d)\n", reg);
4406         return 0;
4407     }
4408 }
4409 
4410 void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
4411 {
4412     ARMCPU *cpu = arm_env_get_cpu(env);
4413 
4414     switch (reg) {
4415     case 0: /* APSR */
4416         xpsr_write(env, val, 0xf8000000);
4417         break;
4418     case 1: /* IAPSR */
4419         xpsr_write(env, val, 0xf8000000);
4420         break;
4421     case 2: /* EAPSR */
4422         xpsr_write(env, val, 0xfe00fc00);
4423         break;
4424     case 3: /* xPSR */
4425         xpsr_write(env, val, 0xfe00fc00);
4426         break;
4427     case 5: /* IPSR */
4428         /* IPSR bits are readonly.  */
4429         break;
4430     case 6: /* EPSR */
4431         xpsr_write(env, val, 0x0600fc00);
4432         break;
4433     case 7: /* IEPSR */
4434         xpsr_write(env, val, 0x0600fc00);
4435         break;
4436     case 8: /* MSP */
4437         if (env->v7m.current_sp)
4438             env->v7m.other_sp = val;
4439         else
4440             env->regs[13] = val;
4441         break;
4442     case 9: /* PSP */
4443         if (env->v7m.current_sp)
4444             env->regs[13] = val;
4445         else
4446             env->v7m.other_sp = val;
4447         break;
4448     case 16: /* PRIMASK */
4449         if (val & 1) {
4450             env->daif |= PSTATE_I;
4451         } else {
4452             env->daif &= ~PSTATE_I;
4453         }
4454         break;
4455     case 17: /* BASEPRI */
4456         env->v7m.basepri = val & 0xff;
4457         break;
4458     case 18: /* BASEPRI_MAX */
4459         val &= 0xff;
4460         if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
4461             env->v7m.basepri = val;
4462         break;
4463     case 19: /* FAULTMASK */
4464         if (val & 1) {
4465             env->daif |= PSTATE_F;
4466         } else {
4467             env->daif &= ~PSTATE_F;
4468         }
4469         break;
4470     case 20: /* CONTROL */
4471         env->v7m.control = val & 3;
4472         switch_v7m_sp(env, (val & 2) != 0);
4473         break;
4474     default:
4475         /* ??? For debugging only.  */
4476         cpu_abort(CPU(cpu), "Unimplemented system register write (%d)\n", reg);
4477         return;
4478     }
4479 }
4480 
4481 #endif
4482 
4483 void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
4484 {
4485     /* Implement DC ZVA, which zeroes a fixed-length block of memory.
4486      * Note that we do not implement the (architecturally mandated)
4487      * alignment fault for attempts to use this on Device memory
4488      * (which matches the usual QEMU behaviour of not implementing either
4489      * alignment faults or any memory attribute handling).
4490      */
4491 
4492     ARMCPU *cpu = arm_env_get_cpu(env);
4493     uint64_t blocklen = 4 << cpu->dcz_blocksize;
4494     uint64_t vaddr = vaddr_in & ~(blocklen - 1);
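
    /* Alignment example (illustrative): with dcz_blocksize == 4, blocklen
     * is 4 << 4 == 64 bytes, so vaddr_in == 0x1234 rounds down to
     * vaddr == 0x1200 and the 64 bytes 0x1200..0x123f are zeroed.
     */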
4495 
4496 #ifndef CONFIG_USER_ONLY
4497     {
4498         /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
4499          * the block size so we might have to do more than one TLB lookup.
4500          * We know that in fact for any v8 CPU the page size is at least 4K
4501          * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
4502          * 1K as an artefact of legacy v5 subpage support being present in the
4503          * same QEMU executable.
4504          */
4505 
4506         int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
4507         /* MSVC doesn't allow non-constant array sizes, so we work out the
4508          * size the VLA would have.  With TARGET_PAGE_SIZE == 1024 and
4509          * blocklen == 64:
4510          *   maxidx = (blocklen + TARGET_PAGE_SIZE - 1) / TARGET_PAGE_SIZE
4511          *          = (64 + 1024 - 1) / 1024 = 1
4512          */
4513 #ifdef _MSC_VER
4514         void *hostaddr[1];
4515 #else
4516         void *hostaddr[maxidx];
4517 #endif
4518         int try, i;
4519 
4520         for (try = 0; try < 2; try++) {
4521 
4522             for (i = 0; i < maxidx; i++) {
4523                 hostaddr[i] = tlb_vaddr_to_host(env,
4524                                                 vaddr + TARGET_PAGE_SIZE * i,
4525                                                 1, cpu_mmu_index(env));
4526                 if (!hostaddr[i]) {
4527                     break;
4528                 }
4529             }
4530             if (i == maxidx) {
4531                 /* If it's all in the TLB it's fair game for just writing to;
4532                  * we know we don't need to update dirty status, etc.
4533                  */
4534                 for (i = 0; i < maxidx - 1; i++) {
4535                     memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
4536                 }
4537                 memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
4538                 return;
4539             }
4540             /* OK, try a store and see if we can populate the tlb. This
4541              * might cause an exception if the memory isn't writable,
4542              * in which case we will longjmp out of here. We must for
4543              * this purpose use the actual register value passed to us
4544              * so that we get the fault address right.
4545              */
4546             helper_ret_stb_mmu(env, vaddr_in, 0, cpu_mmu_index(env), GETRA());
4547             /* Now we can populate the other TLB entries, if any */
4548             for (i = 0; i < maxidx; i++) {
4549                 uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
4550                 if (va != (vaddr_in & TARGET_PAGE_MASK)) {
4551                     helper_ret_stb_mmu(env, va, 0, cpu_mmu_index(env), GETRA());
4552                 }
4553             }
4554         }
4555 
4556         /* Slow path (probably attempt to do this to an I/O device or
4557          * similar, or clearing of a block of code we have translations
4558          * cached for). Just do a series of byte writes as the architecture
4559          * demands. It's not worth trying to use a cpu_physical_memory_map(),
4560          * memset(), unmap() sequence here because:
4561          *  + we'd need to account for the blocksize being larger than a page
4562          *  + the direct-RAM access case is almost always going to be dealt
4563          *    with in the fastpath code above, so there's no speed benefit
4564          *  + we would have to deal with the map returning NULL because the
4565          *    bounce buffer was in use
4566          */
4567         for (i = 0; i < blocklen; i++) {
4568             helper_ret_stb_mmu(env, vaddr + i, 0, cpu_mmu_index(env), GETRA());
4569         }
4570     }
4571 #else
4572     memset(g2h(vaddr), 0, blocklen);
4573 #endif
4574 }
4575 
4576 /* Note that signed overflow is undefined in C.  The following routines are
4577    careful to use unsigned types where modulo arithmetic is required.
4578    Failure to do so _will_ break on newer gcc.  */
4579 
4580 /* Signed saturating arithmetic.  */
4581 
4582 /* Perform 16-bit signed saturating addition.  */
4583 static inline uint16_t add16_sat(uint16_t a, uint16_t b)
4584 {
4585     uint16_t res;
4586 
4587     res = a + b;
4588     if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
4589         if (a & 0x8000)
4590             res = 0x8000;
4591         else
4592             res = 0x7fff;
4593     }
4594     return res;
4595 }
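
/* Worked example (illustrative): add16_sat(0x7000, 0x2000) computes
 * res == 0x9000, whose sign differs from a's while a and b agree in sign,
 * so it saturates to 0x7fff; add16_sat(0x8000, 0xffff), i.e.
 * -32768 + -1, saturates the other way, to 0x8000.
 */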
4596 
4597 /* Perform 8-bit signed saturating addition.  */
4598 static inline uint8_t add8_sat(uint8_t a, uint8_t b)
4599 {
4600     uint8_t res;
4601 
4602     res = a + b;
4603     if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
4604         if (a & 0x80)
4605             res = 0x80;
4606         else
4607             res = 0x7f;
4608     }
4609     return res;
4610 }
4611 
4612 /* Perform 16-bit signed saturating subtraction.  */
4613 static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
4614 {
4615     uint16_t res;
4616 
4617     res = a - b;
4618     if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
4619         if (a & 0x8000)
4620             res = 0x8000;
4621         else
4622             res = 0x7fff;
4623     }
4624     return res;
4625 }
4626 
4627 /* Perform 8-bit signed saturating subtraction.  */
4628 static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
4629 {
4630     uint8_t res;
4631 
4632     res = a - b;
4633     if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
4634         if (a & 0x80)
4635             res = 0x80;
4636         else
4637             res = 0x7f;
4638     }
4639     return res;
4640 }
4641 
4642 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
4643 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
4644 #define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
4645 #define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
4646 #define PFX q
4647 
4648 #include "op_addsub.h"
4649 
4650 /* Unsigned saturating arithmetic.  */
4651 static inline uint16_t add16_usat(uint16_t a, uint16_t b)
4652 {
4653     uint16_t res;
4654     res = a + b;
4655     if (res < a)
4656         res = 0xffff;
4657     return res;
4658 }
4659 
4660 static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
4661 {
4662     if (a > b)
4663         return a - b;
4664     else
4665         return 0;
4666 }
4667 
4668 static inline uint8_t add8_usat(uint8_t a, uint8_t b)
4669 {
4670     uint8_t res;
4671     res = a + b;
4672     if (res < a)
4673         res = 0xff;
4674     return res;
4675 }
4676 
4677 static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
4678 {
4679     if (a > b)
4680         return a - b;
4681     else
4682         return 0;
4683 }
4684 
4685 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
4686 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
4687 #define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
4688 #define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
4689 #define PFX uq
4690 
4691 #include "op_addsub.h"
4692 
4693 /* Signed modulo arithmetic.  */
4694 #define SARITH16(a, b, n, op) do { \
4695     int32_t sum; \
4696     sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
4697     RESULT(sum, n, 16); \
4698     if (sum >= 0) \
4699         ge |= 3 << (n * 2); \
4700     } while(0)
4701 
4702 #define SARITH8(a, b, n, op) do { \
4703     int32_t sum; \
4704     sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
4705     RESULT(sum, n, 8); \
4706     if (sum >= 0) \
4707         ge |= 1 << n; \
4708     } while(0)
4709 
4710 
4711 #define ADD16(a, b, n) SARITH16(a, b, n, +)
4712 #define SUB16(a, b, n) SARITH16(a, b, n, -)
4713 #define ADD8(a, b, n)  SARITH8(a, b, n, +)
4714 #define SUB8(a, b, n)  SARITH8(a, b, n, -)
4715 #define PFX s
4716 #define ARITH_GE
4717 
4718 #include "op_addsub.h"
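
/* GE-flag example for the signed ops above (illustrative): for SADD16 on
 * halfword lane 0, 0x7000 + 0x1000 gives sum == 32768 >= 0, so ge |= 3;
 * 0x8000 + 0x8000 (-32768 + -32768) gives sum == -65536 < 0, so lane 0's
 * GE bits stay clear.
 */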
4719 
4720 /* Unsigned modulo arithmetic.  */
4721 #define ADD16(a, b, n) do { \
4722     uint32_t sum; \
4723     sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
4724     RESULT(sum, n, 16); \
4725     if ((sum >> 16) == 1) \
4726         ge |= 3 << (n * 2); \
4727     } while(0)
4728 
4729 #define ADD8(a, b, n) do { \
4730     uint32_t sum; \
4731     sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
4732     RESULT(sum, n, 8); \
4733     if ((sum >> 8) == 1) \
4734         ge |= 1 << n; \
4735     } while(0)
4736 
4737 #define SUB16(a, b, n) do { \
4738     uint32_t sum; \
4739     sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
4740     RESULT(sum, n, 16); \
4741     if ((sum >> 16) == 0) \
4742         ge |= 3 << (n * 2); \
4743     } while(0)
4744 
4745 #define SUB8(a, b, n) do { \
4746     uint32_t sum; \
4747     sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
4748     RESULT(sum, n, 8); \
4749     if ((sum >> 8) == 0) \
4750         ge |= 1 << n; \
4751     } while(0)
4752 
4753 #define PFX u
4754 #define ARITH_GE
4755 
4756 #include "op_addsub.h"
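
/* For the unsigned forms the GE bits record carry/borrow (illustrative):
 * UADD16 of 0xffff + 0x0001 gives sum == 0x10000 with (sum >> 16) == 1,
 * setting the lane's GE bits; USUB16 of 0x0001 - 0x0002 wraps, so
 * (sum >> 16) != 0 (a borrow occurred) and the bits stay clear.
 */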
4757 
4758 /* Halved signed arithmetic.  */
4759 #define ADD16(a, b, n) \
4760   RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
4761 #define SUB16(a, b, n) \
4762   RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
4763 #define ADD8(a, b, n) \
4764   RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
4765 #define SUB8(a, b, n) \
4766   RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
4767 #define PFX sh
4768 
4769 #include "op_addsub.h"
4770 
4771 /* Halved unsigned arithmetic.  */
4772 #define ADD16(a, b, n) \
4773   RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
4774 #define SUB16(a, b, n) \
4775   RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
4776 #define ADD8(a, b, n) \
4777   RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
4778 #define SUB8(a, b, n) \
4779   RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
4780 #define PFX uh
4781 
4782 #include "op_addsub.h"
4783 
4784 static inline uint8_t do_usad(uint8_t a, uint8_t b)
4785 {
4786     if (a > b)
4787         return a - b;
4788     else
4789         return b - a;
4790 }
4791 
4792 /* Unsigned sum of absolute byte differences.  */
4793 uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
4794 {
4795     uint32_t sum;
4796     sum = do_usad(a, b);
4797     sum += do_usad(a >> 8, b >> 8);
4798     sum += do_usad(a >> 16, b >> 16);
4799     sum += do_usad(a >> 24, b >> 24);
4800     return sum;
4801 }
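
/* Worked example (illustrative): usad8(0x01020304, 0x04030201) sums, low
 * byte first, |0x04 - 0x01| + |0x03 - 0x02| + |0x02 - 0x03| +
 * |0x01 - 0x04| == 3 + 1 + 1 + 3 == 8.
 */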
4802 
4803 /* For ARMv6 SEL instruction.  */
4804 uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
4805 {
4806     uint32_t mask;
4807 
4808     mask = 0;
4809     if (flags & 1)
4810         mask |= 0xff;
4811     if (flags & 2)
4812         mask |= 0xff00;
4813     if (flags & 4)
4814         mask |= 0xff0000;
4815     if (flags & 8)
4816         mask |= 0xff000000;
4817     return (a & mask) | (b & ~mask);
4818 }
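
/* Example (illustrative): SEL with GE flags 0b0101 builds
 * mask == 0x00ff00ff, so bytes 0 and 2 of the result are taken from a and
 * bytes 1 and 3 from b.
 */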
4819 
4820 /* VFP support.  We follow the convention used for VFP instructions:
4821    Single precision routines have a "s" suffix, double precision a
4822    "d" suffix.  */
4823 
4824 /* Convert host exception flags to vfp form.  */
4825 static inline int vfp_exceptbits_from_host(int host_bits)
4826 {
4827     int target_bits = 0;
4828 
4829     if (host_bits & float_flag_invalid)
4830         target_bits |= 1;
4831     if (host_bits & float_flag_divbyzero)
4832         target_bits |= 2;
4833     if (host_bits & float_flag_overflow)
4834         target_bits |= 4;
4835     if (host_bits & (float_flag_underflow | float_flag_output_denormal))
4836         target_bits |= 8;
4837     if (host_bits & float_flag_inexact)
4838         target_bits |= 0x10;
4839     if (host_bits & float_flag_input_denormal)
4840         target_bits |= 0x80;
4841     return target_bits;
4842 }
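
/* For reference (not from the original source): the target bits above are
 * the FPSCR cumulative exception flags IOC (bit 0), DZC (bit 1), OFC
 * (bit 2), UFC (bit 3), IXC (bit 4) and IDC (bit 7), hence the 0x10 and
 * 0x80 constants.
 */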
4843 
4844 uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
4845 {
4846     int i;
4847     uint32_t fpscr;
4848 
4849     fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
4850             | (env->vfp.vec_len << 16)
4851             | (env->vfp.vec_stride << 20);
4852     i = get_float_exception_flags(&env->vfp.fp_status);
4853     i |= get_float_exception_flags(&env->vfp.standard_fp_status);
4854     fpscr |= vfp_exceptbits_from_host(i);
4855     return fpscr;
4856 }
4857 
4858 uint32_t vfp_get_fpscr(CPUARMState *env)
4859 {
4860     return HELPER(vfp_get_fpscr)(env);
4861 }
4862 
4863 /* Convert vfp exception flags to target form.  */
4864 static inline int vfp_exceptbits_to_host(int target_bits)
4865 {
4866     int host_bits = 0;
4867 
4868     if (target_bits & 1)
4869         host_bits |= float_flag_invalid;
4870     if (target_bits & 2)
4871         host_bits |= float_flag_divbyzero;
4872     if (target_bits & 4)
4873         host_bits |= float_flag_overflow;
4874     if (target_bits & 8)
4875         host_bits |= float_flag_underflow;
4876     if (target_bits & 0x10)
4877         host_bits |= float_flag_inexact;
4878     if (target_bits & 0x80)
4879         host_bits |= float_flag_input_denormal;
4880     return host_bits;
4881 }
4882 
4883 void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
4884 {
4885     int i;
4886     uint32_t changed;
4887 
4888     changed = env->vfp.xregs[ARM_VFP_FPSCR];
4889     env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
4890     env->vfp.vec_len = (val >> 16) & 7;
4891     env->vfp.vec_stride = (val >> 20) & 3;
4892 
4893     changed ^= val;
4894     if (changed & (3 << 22)) {
4895         i = (val >> 22) & 3;
4896         switch (i) {
4897         case FPROUNDING_TIEEVEN:
4898             i = float_round_nearest_even;
4899             break;
4900         case FPROUNDING_POSINF:
4901             i = float_round_up;
4902             break;
4903         case FPROUNDING_NEGINF:
4904             i = float_round_down;
4905             break;
4906         case FPROUNDING_ZERO:
4907             i = float_round_to_zero;
4908             break;
4909         }
4910         set_float_rounding_mode(i, &env->vfp.fp_status);
4911     }
4912     if (changed & (1 << 24)) {
4913         set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
4914         set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
4915     }
4916     if (changed & (1 << 25))
4917         set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);
4918 
4919     i = vfp_exceptbits_to_host(val);
4920     set_float_exception_flags(i, &env->vfp.fp_status);
4921     set_float_exception_flags(0, &env->vfp.standard_fp_status);
4922 }
4923 
4924 void vfp_set_fpscr(CPUARMState *env, uint32_t val)
4925 {
4926     HELPER(vfp_set_fpscr)(env, val);
4927 }
4928 
4929 #define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
4930 
4931 #define VFP_BINOP(name) \
4932 float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
4933 { \
4934     float_status *fpst = fpstp; \
4935     return float32_ ## name(a, b, fpst); \
4936 } \
4937 float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
4938 { \
4939     float_status *fpst = fpstp; \
4940     return float64_ ## name(a, b, fpst); \
4941 }
4942 VFP_BINOP(add)
4943 VFP_BINOP(sub)
4944 VFP_BINOP(mul)
4945 VFP_BINOP(div)
4946 VFP_BINOP(min)
4947 VFP_BINOP(max)
4948 VFP_BINOP(minnum)
4949 VFP_BINOP(maxnum)
4950 #undef VFP_BINOP
4951 
4952 float32 VFP_HELPER(neg, s)(float32 a)
4953 {
4954     return float32_chs(a);
4955 }
4956 
4957 float64 VFP_HELPER(neg, d)(float64 a)
4958 {
4959     return float64_chs(a);
4960 }
4961 
4962 float32 VFP_HELPER(abs, s)(float32 a)
4963 {
4964     return float32_abs(a);
4965 }
4966 
4967 float64 VFP_HELPER(abs, d)(float64 a)
4968 {
4969     return float64_abs(a);
4970 }
4971 
4972 float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
4973 {
4974     return float32_sqrt(a, &env->vfp.fp_status);
4975 }
4976 
4977 float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
4978 {
4979     return float64_sqrt(a, &env->vfp.fp_status);
4980 }
4981 
4982 /* XXX: check quiet/signaling case */
4983 #define DO_VFP_cmp(p, type) \
4984 void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
4985 { \
4986     uint32_t flags; \
4987     switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
4988     case 0: flags = 0x6; break; \
4989     case -1: flags = 0x8; break; \
4990     case 1: flags = 0x2; break; \
4991     default: case 2: flags = 0x3; break; \
4992     } \
4993     env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
4994         | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
4995 } \
4996 void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
4997 { \
4998     uint32_t flags; \
4999     switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
5000     case 0: flags = 0x6; break; \
5001     case -1: flags = 0x8; break; \
5002     case 1: flags = 0x2; break; \
5003     default: case 2: flags = 0x3; break; \
5004     } \
5005     env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
5006         | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
5007 }
5008 DO_VFP_cmp(s, float32)
5009 DO_VFP_cmp(d, float64)
5010 #undef DO_VFP_cmp
5011 
5012 /* Integer to float and float to integer conversions */
5013 
5014 #define CONV_ITOF(name, fsz, sign) \
5015     float##fsz HELPER(name)(uint32_t x, void *fpstp) \
5016 { \
5017     float_status *fpst = fpstp; \
5018     return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
5019 }
5020 
5021 #define CONV_FTOI(name, fsz, sign, round) \
5022 uint32_t HELPER(name)(float##fsz x, void *fpstp) \
5023 { \
5024     float_status *fpst = fpstp; \
5025     if (float##fsz##_is_any_nan(x)) { \
5026         float_raise(float_flag_invalid, fpst); \
5027         return 0; \
5028     } \
5029     return float##fsz##_to_##sign##int32##round(x, fpst); \
5030 }
5031 
5032 #define FLOAT_CONVS(name, p, fsz, sign) \
5033 CONV_ITOF(vfp_##name##to##p, fsz, sign) \
5034 CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
5035 CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)
5036 
5037 FLOAT_CONVS(si, s, 32, )
5038 FLOAT_CONVS(si, d, 64, )
5039 FLOAT_CONVS(ui, s, 32, u)
5040 FLOAT_CONVS(ui, d, 64, u)
5041 
5042 #undef CONV_ITOF
5043 #undef CONV_FTOI
5044 #undef FLOAT_CONVS
5045 
5046 /* floating point conversion */
5047 float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
5048 {
5049     float64 r = float32_to_float64(x, &env->vfp.fp_status);
5050     /* ARM requires that S<->D conversion of any kind of NaN generates
5051      * a quiet NaN by forcing the most significant frac bit to 1.
5052      */
5053     return float64_maybe_silence_nan(r);
5054 }
5055 
5056 float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
5057 {
5058     float32 r = float64_to_float32(x, &env->vfp.fp_status);
5059     /* ARM requires that S<->D conversion of any kind of NaN generates
5060      * a quiet NaN by forcing the most significant frac bit to 1.
5061      */
5062     return float32_maybe_silence_nan(r);
5063 }
5064 
5065 /* VFP3 fixed point conversion.  */
5066 #define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
5067 float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t  x, uint32_t shift, \
5068                                      void *fpstp) \
5069 { \
5070     float_status *fpst = fpstp; \
5071     float##fsz tmp; \
5072     tmp = itype##_to_##float##fsz(x, fpst); \
5073     return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
5074 }
5075 
5076 /* Notice that we want only input-denormal exception flags from the
5077  * scalbn operation: the other possible flags (overflow+inexact if
5078  * we overflow to infinity, output-denormal) aren't correct for the
5079  * complete scale-and-convert operation.
5080  */
5081 #define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, round) \
5082 uint##isz##_t HELPER(vfp_to##name##p##round)(float##fsz x, \
5083                                              uint32_t shift, \
5084                                              void *fpstp) \
5085 { \
5086     float_status *fpst = fpstp; \
5087     int old_exc_flags = get_float_exception_flags(fpst); \
5088     float##fsz tmp; \
5089     if (float##fsz##_is_any_nan(x)) { \
5090         float_raise(float_flag_invalid, fpst); \
5091         return 0; \
5092     } \
5093     tmp = float##fsz##_scalbn(x, shift, fpst); \
5094     old_exc_flags |= get_float_exception_flags(fpst) \
5095         & float_flag_input_denormal; \
5096     set_float_exception_flags(old_exc_flags, fpst); \
5097     return float##fsz##_to_##itype##round(tmp, fpst); \
5098 }
5099 
5100 #define VFP_CONV_FIX(name, p, fsz, isz, itype)                   \
5101 VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype)                     \
5102 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, _round_to_zero) \
5103 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )
5104 
5105 #define VFP_CONV_FIX_A64(name, p, fsz, isz, itype)               \
5106 VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype)                     \
5107 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )
5108 
5109 VFP_CONV_FIX(sh, d, 64, 64, int16)
5110 VFP_CONV_FIX(sl, d, 64, 64, int32)
5111 VFP_CONV_FIX_A64(sq, d, 64, 64, int64)
5112 VFP_CONV_FIX(uh, d, 64, 64, uint16)
5113 VFP_CONV_FIX(ul, d, 64, 64, uint32)
5114 VFP_CONV_FIX_A64(uq, d, 64, 64, uint64)
5115 VFP_CONV_FIX(sh, s, 32, 32, int16)
5116 VFP_CONV_FIX(sl, s, 32, 32, int32)
5117 VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
5118 VFP_CONV_FIX(uh, s, 32, 32, uint16)
5119 VFP_CONV_FIX(ul, s, 32, 32, uint32)
5120 VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)
5121 #undef VFP_CONV_FIX
5122 #undef VFP_CONV_FIX_FLOAT
5123 #undef VFP_CONV_FLOAT_FIX_ROUND
5124 
5125 /* Set the current fp rounding mode and return the old one.
5126  * The argument is a softfloat float_round_ value.
5127  */
5128 uint32_t HELPER(set_rmode)(uint32_t rmode, CPUARMState *env)
5129 {
5130     float_status *fp_status = &env->vfp.fp_status;
5131 
5132     uint32_t prev_rmode = get_float_rounding_mode(fp_status);
5133     set_float_rounding_mode(rmode, fp_status);
5134 
5135     return prev_rmode;
5136 }
5137 
5138 /* Set the current fp rounding mode in the standard fp status and return
5139  * the old one. This is for NEON instructions that need to change the
5140  * rounding mode but wish to use the standard FPSCR values for everything
5141  * else. Always set the rounding mode back to the correct value after
5142  * modifying it.
5143  * The argument is a softfloat float_round_ value.
5144  */
5145 uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
5146 {
5147     float_status *fp_status = &env->vfp.standard_fp_status;
5148 
5149     uint32_t prev_rmode = get_float_rounding_mode(fp_status);
5150     set_float_rounding_mode(rmode, fp_status);
5151 
5152     return prev_rmode;
5153 }
5154 
5155 /* Half precision conversions.  */
5156 static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s)
5157 {
5158     int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
5159     float32 r = float16_to_float32(make_float16(a), ieee, s);
5160     if (ieee) {
5161         return float32_maybe_silence_nan(r);
5162     }
5163     return r;
5164 }
5165 
5166 static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s)
5167 {
5168     int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
5169     float16 r = float32_to_float16(a, ieee, s);
5170     if (ieee) {
5171         r = float16_maybe_silence_nan(r);
5172     }
5173     return float16_val(r);
5174 }
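
/* Note (illustrative): the 'ieee' flag above tests FPSCR bit 26, the AHP
 * bit.  When it is clear the IEEE 754-2008 half-precision format is used;
 * when set, the ARM alternative format (no infinities or NaNs) applies.
 */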
5175 
5176 float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
5177 {
5178     return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
5179 }
5180 
5181 uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
5182 {
5183     return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
5184 }
5185 
5186 float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
5187 {
5188     return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
5189 }
5190 
5191 uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
5192 {
5193     return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
5194 }
5195 
5196 float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, CPUARMState *env)
5197 {
5198     int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
5199     float64 r = float16_to_float64(make_float16(a), ieee, &env->vfp.fp_status);
5200     if (ieee) {
5201         return float64_maybe_silence_nan(r);
5202     }
5203     return r;
5204 }
5205 
5206 uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, CPUARMState *env)
5207 {
5208     int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
5209     float16 r = float64_to_float16(a, ieee, &env->vfp.fp_status);
5210     if (ieee) {
5211         r = float16_maybe_silence_nan(r);
5212     }
5213     return float16_val(r);
5214 }
5215 
5216 #define float32_two make_float32(0x40000000)
5217 #define float32_three make_float32(0x40400000)
5218 #define float32_one_point_five make_float32(0x3fc00000)
5219 
5220 float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
5221 {
5222     float_status *s = &env->vfp.standard_fp_status;
5223     if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
5224         (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
5225         if (!(float32_is_zero(a) || float32_is_zero(b))) {
5226             float_raise(float_flag_input_denormal, s);
5227         }
5228         return float32_two;
5229     }
5230     return float32_sub(float32_two, float32_mul(a, b, s), s);
5231 }
5232 
5233 float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
5234 {
5235     float_status *s = &env->vfp.standard_fp_status;
5236     float32 product;
5237     if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
5238         (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
5239         if (!(float32_is_zero(a) || float32_is_zero(b))) {
5240             float_raise(float_flag_input_denormal, s);
5241         }
5242         return float32_one_point_five;
5243     }
5244     product = float32_mul(a, b, s);
5245     return float32_div(float32_sub(float32_three, product, s), float32_two, s);
5246 }
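
/* Illustrative note: these helpers supply the Newton-Raphson refinement
 * factors.  For a reciprocal, x' = x * (2.0 - d * x), so VRECPS returns
 * (2.0 - a * b); for a reciprocal square root, x' = x * (3.0 - d * x * x)
 * / 2.0, so VRSQRTS returns (3.0 - a * b) / 2.0.
 */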
5247 
5248 /* NEON helpers.  */
5249 
5250 /* Constants 256 and 512 are used in some helpers; we avoid relying on
5251  * int->float conversions at run-time.  */
5252 #define float64_256 make_float64(0x4070000000000000LL)
5253 #define float64_512 make_float64(0x4080000000000000LL)
5254 #define float32_maxnorm make_float32(0x7f7fffff)
5255 #define float64_maxnorm make_float64(0x7fefffffffffffffLL)
5256 
5257 /* Reciprocal functions
5258  *
5259  * The algorithm that must be used to calculate the estimate
5260  * is specified by the ARM ARM, see FPRecipEstimate()
5261  */
5262 
5263 static float64 recip_estimate(float64 a, float_status *real_fp_status)
5264 {
5265     /* These calculations mustn't set any fp exception flags,
5266      * so we use a local copy of the fp_status.
5267      */
5268     float_status dummy_status = *real_fp_status;
5269     float_status *s = &dummy_status;
5270     /* q = (int)(a * 512.0) */
5271     float64 q = float64_mul(float64_512, a, s);
5272     int64_t q_int = float64_to_int64_round_to_zero(q, s);
5273 
5274     /* r = 1.0 / (((double)q + 0.5) / 512.0) */
5275     q = int64_to_float64(q_int, s);
5276     q = float64_add(q, float64_half, s);
5277     q = float64_div(q, float64_512, s);
5278     q = float64_div(float64_one, q, s);
5279 
5280     /* s = (int)(256.0 * r + 0.5) */
5281     q = float64_mul(q, float64_256, s);
5282     q = float64_add(q, float64_half, s);
5283     q_int = float64_to_int64_round_to_zero(q, s);
5284 
5285     /* return (double)s / 256.0 */
5286     return float64_div(int64_to_float64(q_int, s), float64_256, s);
5287 }
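
/* Worked example (illustrative): for a == 0.5, q_int == 256, so
 * r == 1.0 / (256.5 / 512.0) ~= 1.99610 and (int)(256.0 * r + 0.5) == 511,
 * giving an estimate of 511 / 256.0 == 1.99609375.
 */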
5288 
5289 /* Common wrapper to call recip_estimate */
5290 static float64 call_recip_estimate(float64 num, int off, float_status *fpst)
5291 {
5292     uint64_t val64 = float64_val(num);
5293     uint64_t frac = extract64(val64, 0, 52);
5294     int64_t exp = extract64(val64, 52, 11);
5295     uint64_t sbit;
5296     float64 scaled, estimate;
5297 
5298     /* Generate the scaled number for the estimate function */
5299     if (exp == 0) {
5300         if (extract64(frac, 51, 1) == 0) {
5301             exp = -1;
5302             frac = extract64(frac, 0, 50) << 2;
5303         } else {
5304             frac = extract64(frac, 0, 51) << 1;
5305         }
5306     }
5307 
5308     /* scaled = '0' : '01111111110' : fraction<51:44> : Zeros(44); */
5309     scaled = make_float64((0x3feULL << 52)
5310                           | extract64(frac, 44, 8) << 44);
5311 
5312     estimate = recip_estimate(scaled, fpst);
5313 
5314     /* Build new result */
5315     val64 = float64_val(estimate);
5316     sbit = 0x8000000000000000ULL & val64;
5317     exp = off - exp;
5318     frac = extract64(val64, 0, 52);
5319 
5320     if (exp == 0) {
5321         frac = 1ULL << 51 | extract64(frac, 1, 51);
5322     } else if (exp == -1) {
5323         frac = 1ULL << 50 | extract64(frac, 2, 50);
5324         exp = 0;
5325     }
5326 
5327     return make_float64(sbit | (exp << 52) | frac);
5328 }
5329 
5330 static bool round_to_inf(float_status *fpst, bool sign_bit)
5331 {
5332     switch (fpst->float_rounding_mode) {
5333     case float_round_nearest_even: /* Round to Nearest */
5334         return true;
5335     case float_round_up: /* Round to +Inf */
5336         return !sign_bit;
5337     case float_round_down: /* Round to -Inf */
5338         return sign_bit;
5339     case float_round_to_zero: /* Round to Zero */
5340         return false;
5341     default:
5342         break;
5343     }
5344 
5345     g_assert_not_reached();
5346     return false;
5347 }
5348 
5349 float32 HELPER(recpe_f32)(float32 input, void *fpstp)
5350 {
5351     float_status *fpst = fpstp;
5352     float32 f32 = float32_squash_input_denormal(input, fpst);
5353     uint32_t f32_val = float32_val(f32);
5354     uint32_t f32_sbit = 0x80000000ULL & f32_val;
5355     int32_t f32_exp = extract32(f32_val, 23, 8);
5356     uint32_t f32_frac = extract32(f32_val, 0, 23);
5357     float64 f64, r64;
5358     uint64_t r64_val;
5359     int64_t r64_exp;
5360     uint64_t r64_frac;
5361 
5362     if (float32_is_any_nan(f32)) {
5363         float32 nan = f32;
5364         if (float32_is_signaling_nan(f32)) {
5365             float_raise(float_flag_invalid, fpst);
5366             nan = float32_maybe_silence_nan(f32);
5367         }
5368         if (fpst->default_nan_mode) {
5369             nan = float32_default_nan;
5370         }
5371         return nan;
5372     } else if (float32_is_infinity(f32)) {
5373         return float32_set_sign(float32_zero, float32_is_neg(f32));
5374     } else if (float32_is_zero(f32)) {
5375         float_raise(float_flag_divbyzero, fpst);
5376         return float32_set_sign(float32_infinity, float32_is_neg(f32));
5377     } else if ((f32_val & ~(1ULL << 31)) < (1ULL << 21)) {
5378         /* Abs(value) < 2.0^-128 */
5379         float_raise(float_flag_overflow | float_flag_inexact, fpst);
5380         if (round_to_inf(fpst, f32_sbit)) {
5381             return float32_set_sign(float32_infinity, float32_is_neg(f32));
5382         } else {
5383             return float32_set_sign(float32_maxnorm, float32_is_neg(f32));
5384         }
5385     } else if (f32_exp >= 253 && fpst->flush_to_zero) {
5386         float_raise(float_flag_underflow, fpst);
5387         return float32_set_sign(float32_zero, float32_is_neg(f32));
5388     }
5389 
5390 
5391     f64 = make_float64(((int64_t)(f32_exp) << 52) | (int64_t)(f32_frac) << 29);
5392     r64 = call_recip_estimate(f64, 253, fpst);
5393     r64_val = float64_val(r64);
5394     r64_exp = extract64(r64_val, 52, 11);
5395     r64_frac = extract64(r64_val, 0, 52);
5396 
5397     /* result = sign : result_exp<7:0> : fraction<51:29>; */
5398     return make_float32(f32_sbit |
5399                         (r64_exp & 0xff) << 23 |
5400                         extract64(r64_frac, 29, 24));
5401 }
5402 
5403 float64 HELPER(recpe_f64)(float64 input, void *fpstp)
5404 {
5405     float_status *fpst = fpstp;
5406     float64 f64 = float64_squash_input_denormal(input, fpst);
5407     uint64_t f64_val = float64_val(f64);
5408     uint64_t f64_sbit = 0x8000000000000000ULL & f64_val;
5409     int64_t f64_exp = extract64(f64_val, 52, 11);
5410     float64 r64;
5411     uint64_t r64_val;
5412     int64_t r64_exp;
5413     uint64_t r64_frac;
5414 
5415     /* Deal with any special cases */
5416     if (float64_is_any_nan(f64)) {
5417         float64 nan = f64;
5418         if (float64_is_signaling_nan(f64)) {
5419             float_raise(float_flag_invalid, fpst);
5420             nan = float64_maybe_silence_nan(f64);
5421         }
5422         if (fpst->default_nan_mode) {
5423             nan = float64_default_nan;
5424         }
5425         return nan;
5426     } else if (float64_is_infinity(f64)) {
5427         return float64_set_sign(float64_zero, float64_is_neg(f64));
5428     } else if (float64_is_zero(f64)) {
5429         float_raise(float_flag_divbyzero, fpst);
5430         return float64_set_sign(float64_infinity, float64_is_neg(f64));
5431     } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) {
5432         /* Abs(value) < 2.0^-1024 */
5433         float_raise(float_flag_overflow | float_flag_inexact, fpst);
5434         if (round_to_inf(fpst, f64_sbit)) {
5435             return float64_set_sign(float64_infinity, float64_is_neg(f64));
5436         } else {
5437             return float64_set_sign(float64_maxnorm, float64_is_neg(f64));
5438         }
5439     } else if (f64_exp >= 1023 && fpst->flush_to_zero) {
5440         float_raise(float_flag_underflow, fpst);
5441         return float64_set_sign(float64_zero, float64_is_neg(f64));
5442     }
5443 
5444     r64 = call_recip_estimate(f64, 2045, fpst);
5445     r64_val = float64_val(r64);
5446     r64_exp = extract64(r64_val, 52, 11);
5447     r64_frac = extract64(r64_val, 0, 52);
5448 
5449     /* result = sign : result_exp<10:0> : fraction<51:0> */
5450     return make_float64(f64_sbit |
5451                         ((r64_exp & 0x7ff) << 52) |
5452                         r64_frac);
5453 }
5454 
5455 /* The algorithm that must be used to calculate the estimate
5456  * is specified by the ARM ARM.
5457  */
5458 static float64 recip_sqrt_estimate(float64 a, float_status *real_fp_status)
5459 {
5460     /* These calculations mustn't set any fp exception flags,
5461      * so we use a local copy of the fp_status.
5462      */
5463     float_status dummy_status = *real_fp_status;
5464     float_status *s = &dummy_status;
5465     float64 q;
5466     int64_t q_int;
5467 
5468     if (float64_lt(a, float64_half, s)) {
5469         /* range 0.25 <= a < 0.5 */
5470 
5471         /* a in units of 1/512 rounded down */
5472         /* q0 = (int)(a * 512.0);  */
5473         q = float64_mul(float64_512, a, s);
5474         q_int = float64_to_int64_round_to_zero(q, s);
5475 
5476         /* reciprocal root r */
5477         /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0);  */
5478         q = int64_to_float64(q_int, s);
5479         q = float64_add(q, float64_half, s);
5480         q = float64_div(q, float64_512, s);
5481         q = float64_sqrt(q, s);
5482         q = float64_div(float64_one, q, s);
5483     } else {
5484         /* range 0.5 <= a < 1.0 */
5485 
5486         int64_t q_int;
5487 
5488         /* a in units of 1/256 rounded down */
5489         /* q1 = (int)(a * 256.0); */
5490         q = float64_mul(float64_256, a, s);
5491         q_int = float64_to_int64_round_to_zero(q, s);
5492 
5493         /* reciprocal root r */
5494         /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */
5495         q = int64_to_float64(q_int, s);
5496         q = float64_add(q, float64_half, s);
5497         q = float64_div(q, float64_256, s);
5498         q = float64_sqrt(q, s);
5499         q = float64_div(float64_one, q, s);
5500     }
5501     /* r in units of 1/256 rounded to nearest */
5502     /* s = (int)(256.0 * r + 0.5); */
5503 
5504     q = float64_mul(q, float64_256, s);
5505     q = float64_add(q, float64_half, s);
5506     q_int = float64_to_int64_round_to_zero(q, s);
5507 
5508     /* return (double)s / 256.0;*/
5509     return float64_div(int64_to_float64(q_int, s), float64_256, s);
5510 }
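
/* Worked example (illustrative): for a == 0.5 the second branch applies:
 * q_int == 128, r == 1.0 / sqrt(128.5 / 256.0) ~= 1.41144, and the
 * returned estimate is 361 / 256.0 == 1.41015625 (the exact value of
 * 1/sqrt(0.5) is ~1.41421).
 */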
5511 
5512 float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
5513 {
5514     float_status *s = fpstp;
5515     float32 f32 = float32_squash_input_denormal(input, s);
5516     uint32_t val = float32_val(f32);
5517     uint32_t f32_sbit = 0x80000000 & val;
5518     int32_t f32_exp = extract32(val, 23, 8);
5519     uint32_t f32_frac = extract32(val, 0, 23);
5520     uint64_t f64_frac;
5521     uint64_t val64;
5522     int result_exp;
5523     float64 f64;
5524 
5525     if (float32_is_any_nan(f32)) {
5526         float32 nan = f32;
5527         if (float32_is_signaling_nan(f32)) {
5528             float_raise(float_flag_invalid, s);
5529             nan = float32_maybe_silence_nan(f32);
5530         }
5531         if (s->default_nan_mode) {
5532             nan = float32_default_nan;
5533         }
5534         return nan;
5535     } else if (float32_is_zero(f32)) {
5536         float_raise(float_flag_divbyzero, s);
5537         return float32_set_sign(float32_infinity, float32_is_neg(f32));
5538     } else if (float32_is_neg(f32)) {
5539         float_raise(float_flag_invalid, s);
5540         return float32_default_nan;
5541     } else if (float32_is_infinity(f32)) {
5542         return float32_zero;
5543     }
5544 
5545     /* Scale and normalize to a double-precision value between 0.25 and 1.0,
5546      * preserving the parity of the exponent.  */
5547 
5548     f64_frac = ((uint64_t) f32_frac) << 29;
5549     if (f32_exp == 0) {
5550         while (extract64(f64_frac, 51, 1) == 0) {
5551             f64_frac = f64_frac << 1;
5552             f32_exp = f32_exp - 1;
5553         }
5554         f64_frac = extract64(f64_frac, 0, 51) << 1;
5555     }
5556 
5557     if (extract64(f32_exp, 0, 1) == 0) {
5558         f64 = make_float64(((uint64_t) f32_sbit) << 32
5559                            | (0x3feULL << 52)
5560                            | f64_frac);
5561     } else {
5562         f64 = make_float64(((uint64_t) f32_sbit) << 32
5563                            | (0x3fdULL << 52)
5564                            | f64_frac);
5565     }
5566 
5567     result_exp = (380 - f32_exp) / 2;
5568 
5569     f64 = recip_sqrt_estimate(f64, s);
5570 
5571     val64 = float64_val(f64);
5572 
5573     val = ((result_exp & 0xff) << 23)
5574         | ((val64 >> 29)  & 0x7fffff);
5575     return make_float32(val);
5576 }
5577 
5578 float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
5579 {
5580     float_status *s = fpstp;
5581     float64 f64 = float64_squash_input_denormal(input, s);
5582     uint64_t val = float64_val(f64);
5583     uint64_t f64_sbit = 0x8000000000000000ULL & val;
5584     int64_t f64_exp = extract64(val, 52, 11);
5585     uint64_t f64_frac = extract64(val, 0, 52);
5586     int64_t result_exp;
5587     uint64_t result_frac;
5588 
5589     if (float64_is_any_nan(f64)) {
5590         float64 nan = f64;
5591         if (float64_is_signaling_nan(f64)) {
5592             float_raise(float_flag_invalid, s);
5593             nan = float64_maybe_silence_nan(f64);
5594         }
5595         if (s->default_nan_mode) {
5596             nan = float64_default_nan;
5597         }
5598         return nan;
5599     } else if (float64_is_zero(f64)) {
5600         float_raise(float_flag_divbyzero, s);
5601         return float64_set_sign(float64_infinity, float64_is_neg(f64));
5602     } else if (float64_is_neg(f64)) {
5603         float_raise(float_flag_invalid, s);
5604         return float64_default_nan;
5605     } else if (float64_is_infinity(f64)) {
5606         return float64_zero;
5607     }
5608 
5609     /* Scale and normalize to a double-precision value between 0.25 and 1.0,
5610      * preserving the parity of the exponent.  */
5611 
5612     if (f64_exp == 0) {
5613         while (extract64(f64_frac, 51, 1) == 0) {
5614             f64_frac = f64_frac << 1;
5615             f64_exp = f64_exp - 1;
5616         }
5617         f64_frac = extract64(f64_frac, 0, 51) << 1;
5618     }
5619 
5620     if (extract64(f64_exp, 0, 1) == 0) {
5621         f64 = make_float64(f64_sbit
5622                            | (0x3feULL << 52)
5623                            | f64_frac);
5624     } else {
5625         f64 = make_float64(f64_sbit
5626                            | (0x3fdULL << 52)
5627                            | f64_frac);
5628     }
5629 
5630     result_exp = (3068 - f64_exp) / 2;
5631 
5632     f64 = recip_sqrt_estimate(f64, s);
5633 
5634     result_frac = extract64(float64_val(f64), 0, 52);
5635 
5636     return make_float64(f64_sbit |
5637                         ((result_exp & 0x7ff) << 52) |
5638                         result_frac);
5639 }
5640 
5641 uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp)
5642 {
5643     float_status *s = fpstp;
5644     float64 f64;
5645 
5646     if ((a & 0x80000000) == 0) {
5647         return 0xffffffff;
5648     }
5649 
5650     f64 = make_float64((0x3feULL << 52)
5651                        | ((int64_t)(a & 0x7fffffff) << 21));
5652 
5653     f64 = recip_estimate(f64, s);
5654 
5655     return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
5656 }
5657 
5658 uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp)
5659 {
5660     float_status *fpst = fpstp;
5661     float64 f64;
5662 
    if ((a & 0xc0000000) == 0) {
        return 0xffffffff;
    }

    if (a & 0x80000000) {
        f64 = make_float64((0x3feULL << 52)
                           | ((uint64_t)(a & 0x7fffffff) << 21));
    } else { /* bits 31-30 == '01' */
        f64 = make_float64((0x3fdULL << 52)
                           | ((uint64_t)(a & 0x3fffffff) << 22));
    }

    f64 = recip_sqrt_estimate(f64, fpst);

    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
}

/* VFPv4 fused multiply-accumulate */
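/* The zero 'flags' argument requests a plain fused multiply-add: no
 * operand negation, and a single rounding of (a * b) + c.
 */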
float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float32_muladd(a, b, c, 0, fpst);
}

float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float64_muladd(a, b, c, 0, fpst);
}

/* ARMv8 round to integral */
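/* The _exact variants implement FRINTX, which is permitted to signal the
 * Inexact exception; the plain variants below suppress it, as required
 * for the other FRINT* instructions.
 */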
float32 HELPER(rints_exact)(float32 x, void *fp_status)
{
    return float32_round_to_int(x, fp_status);
}

float64 HELPER(rintd_exact)(float64 x, void *fp_status)
{
    return float64_round_to_int(x, fp_status);
}

float32 HELPER(rints)(float32 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float32 ret;

    ret = float32_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}

float64 HELPER(rintd)(float64 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float64 ret;

    ret = float64_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}


/* Convert ARM rounding mode to softfloat */
int arm_rmode_to_sf(int rmode)
{
    switch (rmode) {
    case FPROUNDING_TIEAWAY:
        rmode = float_round_ties_away;
        break;
    case FPROUNDING_ODD:
        /* FIXME: add support for ODD (TIEAWAY is handled above) */
        qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
                      rmode);
        /* fall through: treat as round-to-nearest-even */
    case FPROUNDING_TIEEVEN:
    default:
        rmode = float_round_nearest_even;
        break;
    case FPROUNDING_POSINF:
        rmode = float_round_up;
        break;
    case FPROUNDING_NEGINF:
        rmode = float_round_down;
        break;
    case FPROUNDING_ZERO:
        rmode = float_round_to_zero;
        break;
    }
    return rmode;
}
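
/* A typical caller pattern (illustrative sketch): install the converted
 * rounding mode on the float_status before a directed-rounding operation
 * and restore the previous mode afterwards:
 *
 *     int prev = get_float_rounding_mode(fpst);
 *     set_float_rounding_mode(arm_rmode_to_sf(rmode), fpst);
 *     ...
 *     set_float_rounding_mode(prev, fpst);
 */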

/* CRC helpers.
 * The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint32_t HELPER(crc32_arm)(uint32_t acc, uint32_t val, uint32_t bytes)
{
#if 0   /* FIXME */
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement.  */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
#endif
    /* Placeholder while the zlib-based implementation above is disabled. */
    return 0;
}

uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement.  */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
