/*
 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the Open Source and Linux Lab nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/address-spaces.h"
#include "qemu/timer.h"

void xtensa_cpu_do_unaligned_access(CPUState *cs,
        vaddr addr, MMUAccessType access_type,
        int mmu_idx, uintptr_t retaddr)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;

    if (xtensa_option_enabled(env->config, XTENSA_OPTION_UNALIGNED_EXCEPTION) &&
            !xtensa_option_enabled(env->config, XTENSA_OPTION_HW_ALIGNMENT)) {
        cpu_restore_state(CPU(cpu), retaddr);
        HELPER(exception_cause_vaddr)(env,
                env->pc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
    }
}

void tlb_fill(CPUState *cs, target_ulong vaddr, MMUAccessType access_type,
              int mmu_idx, uintptr_t retaddr)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    int ret = xtensa_get_physical_addr(env, true, vaddr, access_type, mmu_idx,
            &paddr, &page_size, &access);

    qemu_log_mask(CPU_LOG_MMU, "%s(%08x, %d, %d) -> %08x, ret = %d\n",
                  __func__, vaddr, access_type, mmu_idx, paddr, ret);

    if (ret == 0) {
        tlb_set_page(cs,
                     vaddr & TARGET_PAGE_MASK,
                     paddr & TARGET_PAGE_MASK,
                     access, mmu_idx, page_size);
    } else {
        cpu_restore_state(cs, retaddr);
        HELPER(exception_cause_vaddr)(env, env->pc, ret, vaddr);
    }
}

void xtensa_cpu_do_unassigned_access(CPUState *cs, hwaddr addr,
                                     bool is_write, bool is_exec, int opaque,
                                     unsigned size)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;

    HELPER(exception_cause_vaddr)(env, env->pc,
                                  is_exec ?
                                  INSTR_PIF_ADDR_ERROR_CAUSE :
                                  LOAD_STORE_PIF_ADDR_ERROR_CAUSE,
                                  is_exec ? addr : cs->mem_io_vaddr);
}

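/*
 * Translate vaddr (access type 2, i.e. instruction fetch, ring 0) without
 * updating the QEMU TLB and invalidate any translated block at the resulting
 * physical address.  Used when registers that affect translation of existing
 * code (LBEG/LEND, IBREAKA) change.
 */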
static void tb_invalidate_virtual_addr(CPUXtensaState *env, uint32_t vaddr)
{
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    int ret = xtensa_get_physical_addr(env, false, vaddr, 2, 0,
            &paddr, &page_size, &access);
    if (ret == 0) {
        tb_invalidate_phys_addr(&address_space_memory, paddr);
    }
}

void HELPER(exception)(CPUXtensaState *env, uint32_t excp)
{
    CPUState *cs = CPU(xtensa_env_get_cpu(env));

    cs->exception_index = excp;
    if (excp == EXCP_YIELD) {
        env->yield_needed = 0;
    }
    if (excp == EXCP_DEBUG) {
        env->exception_taken = 0;
    }
    cpu_loop_exit(cs);
}

void HELPER(exception_cause)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
{
    uint32_t vector;

    env->pc = pc;
    if (env->sregs[PS] & PS_EXCM) {
        if (env->config->ndepc) {
            env->sregs[DEPC] = pc;
        } else {
            env->sregs[EPC1] = pc;
        }
        vector = EXC_DOUBLE;
    } else {
        env->sregs[EPC1] = pc;
        vector = (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
    }

    env->sregs[EXCCAUSE] = cause;
    env->sregs[PS] |= PS_EXCM;

    HELPER(exception)(env, vector);
}

void HELPER(exception_cause_vaddr)(CPUXtensaState *env,
        uint32_t pc, uint32_t cause, uint32_t vaddr)
{
    env->sregs[EXCVADDR] = vaddr;
    HELPER(exception_cause)(env, pc, cause);
}

void debug_exception_env(CPUXtensaState *env, uint32_t cause)
{
    if (xtensa_get_cintlevel(env) < env->config->debug_level) {
        HELPER(debug_exception)(env, env->pc, cause);
    }
}

void HELPER(debug_exception)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
{
    unsigned level = env->config->debug_level;

    env->pc = pc;
    env->sregs[DEBUGCAUSE] = cause;
    env->sregs[EPC1 + level - 1] = pc;
    env->sregs[EPS2 + level - 2] = env->sregs[PS];
    env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) | PS_EXCM |
        (level << PS_INTLEVEL_SHIFT);
    HELPER(exception)(env, EXC_DEBUG);
}

static void copy_window_from_phys(CPUXtensaState *env,
        uint32_t window, uint32_t phys, uint32_t n)
{
    assert(phys < env->config->nareg);
    if (phys + n <= env->config->nareg) {
        memcpy(env->regs + window, env->phys_regs + phys,
                n * sizeof(uint32_t));
    } else {
        uint32_t n1 = env->config->nareg - phys;
        memcpy(env->regs + window, env->phys_regs + phys,
                n1 * sizeof(uint32_t));
        memcpy(env->regs + window + n1, env->phys_regs,
                (n - n1) * sizeof(uint32_t));
    }
}

static void copy_phys_from_window(CPUXtensaState *env,
        uint32_t phys, uint32_t window, uint32_t n)
{
    assert(phys < env->config->nareg);
    if (phys + n <= env->config->nareg) {
        memcpy(env->phys_regs + phys, env->regs + window,
                n * sizeof(uint32_t));
    } else {
        uint32_t n1 = env->config->nareg - phys;
        memcpy(env->phys_regs + phys, env->regs + window,
                n1 * sizeof(uint32_t));
        memcpy(env->phys_regs, env->regs + window + n1,
                (n - n1) * sizeof(uint32_t));
    }
}

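/*
 * WINDOW_BASE counts 4-register panes of the physical register file.
 * windowbase_bound() wraps a pane index modulo NAREG / 4 (a power of two);
 * windowstart_bit() returns the corresponding WINDOW_START bit mask.
 */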
static inline unsigned windowbase_bound(unsigned a, const CPUXtensaState *env)
{
    return a & (env->config->nareg / 4 - 1);
}

static inline unsigned windowstart_bit(unsigned a, const CPUXtensaState *env)
{
    return 1 << windowbase_bound(a, env);
}

void xtensa_sync_window_from_phys(CPUXtensaState *env)
{
    copy_window_from_phys(env, 0, env->sregs[WINDOW_BASE] * 4, 16);
}

void xtensa_sync_phys_from_window(CPUXtensaState *env)
{
    copy_phys_from_window(env, env->sregs[WINDOW_BASE] * 4, 0, 16);
}

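/*
 * Move the live register window to an absolute pane position: flush the
 * current a0..a15 back into the physical register file, update WINDOW_BASE
 * (wrapped by windowbase_bound()) and reload a0..a15 from the new position.
 */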
static void rotate_window_abs(CPUXtensaState *env, uint32_t position)
{
    xtensa_sync_phys_from_window(env);
    env->sregs[WINDOW_BASE] = windowbase_bound(position, env);
    xtensa_sync_window_from_phys(env);
}

static void rotate_window(CPUXtensaState *env, uint32_t delta)
{
    rotate_window_abs(env, env->sregs[WINDOW_BASE] + delta);
}

void HELPER(wsr_windowbase)(CPUXtensaState *env, uint32_t v)
{
    rotate_window_abs(env, v);
}

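/*
 * ENTRY instruction helper.  With window overflow disabled or inside an
 * exception (PS.WOE clear or PS.EXCM set), or with s > 3, this is an illegal
 * instruction.  Otherwise, if any of the CALLINC panes above the current
 * window is still live in WINDOW_START, a window overflow exception is
 * raised via window_check(); otherwise the callee stack pointer is set to
 * a[s] - (imm << 3), the window is rotated by CALLINC and the new frame is
 * marked in WINDOW_START.
 */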
void HELPER(entry)(CPUXtensaState *env, uint32_t pc, uint32_t s, uint32_t imm)
{
    int callinc = (env->sregs[PS] & PS_CALLINC) >> PS_CALLINC_SHIFT;
    if (s > 3 || ((env->sregs[PS] & (PS_WOE | PS_EXCM)) ^ PS_WOE) != 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "Illegal entry instruction(pc = %08x), PS = %08x\n",
                      pc, env->sregs[PS]);
        HELPER(exception_cause)(env, pc, ILLEGAL_INSTRUCTION_CAUSE);
    } else {
        uint32_t windowstart = xtensa_replicate_windowstart(env) >>
            (env->sregs[WINDOW_BASE] + 1);

        if (windowstart & ((1 << callinc) - 1)) {
            HELPER(window_check)(env, pc, callinc);
        }
        env->regs[(callinc << 2) | (s & 3)] = env->regs[s] - (imm << 3);
        rotate_window(env, callinc);
        env->sregs[WINDOW_START] |=
            windowstart_bit(env->sregs[WINDOW_BASE], env);
    }
}

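/*
 * Window overflow dispatch: rotate to the closest live frame above the
 * current window, record the old WINDOW_BASE in PS.OWB, set PS.EXCM and
 * EPC1, and raise WindowOverflow4/8/12 depending on how far away the next
 * live frame is (i.e. on the size of the frame being spilled).
 */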
void HELPER(window_check)(CPUXtensaState *env, uint32_t pc, uint32_t w)
{
    uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
    uint32_t windowstart = xtensa_replicate_windowstart(env) >>
        (env->sregs[WINDOW_BASE] + 1);
    uint32_t n = ctz32(windowstart) + 1;

    assert(n <= w);

    rotate_window(env, n);
    env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) |
        (windowbase << PS_OWB_SHIFT) | PS_EXCM;
    env->sregs[EPC1] = env->pc = pc;

    switch (ctz32(windowstart >> n)) {
    case 0:
        HELPER(exception)(env, EXC_WINDOW_OVERFLOW4);
        break;
    case 1:
        HELPER(exception)(env, EXC_WINDOW_OVERFLOW8);
        break;
    default:
        HELPER(exception)(env, EXC_WINDOW_OVERFLOW12);
        break;
    }
}

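/*
 * RETW/RETW.N helper.  n (a0 bits 31:30) is the caller's call increment and
 * m is the distance to the closest live frame below the current window; the
 * combination is validated together with PS.WOE/PS.EXCM.  On success the
 * window is rotated back by n: if the caller's frame is no longer live in
 * WINDOW_START a WindowUnderflow4/8/12 exception is raised, otherwise the
 * returning frame's WINDOW_START bit is cleared.  Returns the target PC,
 * built from the top two bits of the current PC and a0 bits 29:0.
 */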
uint32_t HELPER(retw)(CPUXtensaState *env, uint32_t pc)
{
    int n = (env->regs[0] >> 30) & 0x3;
    int m = 0;
    uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
    uint32_t windowstart = env->sregs[WINDOW_START];
    uint32_t ret_pc = 0;

    if (windowstart & windowstart_bit(windowbase - 1, env)) {
        m = 1;
    } else if (windowstart & windowstart_bit(windowbase - 2, env)) {
        m = 2;
    } else if (windowstart & windowstart_bit(windowbase - 3, env)) {
        m = 3;
    }

    if (n == 0 || (m != 0 && m != n) ||
            ((env->sregs[PS] & (PS_WOE | PS_EXCM)) ^ PS_WOE) != 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "Illegal retw instruction(pc = %08x), "
                      "PS = %08x, m = %d, n = %d\n",
                      pc, env->sregs[PS], m, n);
        HELPER(exception_cause)(env, pc, ILLEGAL_INSTRUCTION_CAUSE);
    } else {
        int owb = windowbase;

        ret_pc = (pc & 0xc0000000) | (env->regs[0] & 0x3fffffff);

        rotate_window(env, -n);
        if (windowstart & windowstart_bit(env->sregs[WINDOW_BASE], env)) {
            env->sregs[WINDOW_START] &= ~windowstart_bit(owb, env);
        } else {
            /* window underflow */
            env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) |
                (windowbase << PS_OWB_SHIFT) | PS_EXCM;
            env->sregs[EPC1] = env->pc = pc;

            if (n == 1) {
                HELPER(exception)(env, EXC_WINDOW_UNDERFLOW4);
            } else if (n == 2) {
                HELPER(exception)(env, EXC_WINDOW_UNDERFLOW8);
            } else if (n == 3) {
                HELPER(exception)(env, EXC_WINDOW_UNDERFLOW12);
            }
        }
    }
    return ret_pc;
}

void HELPER(rotw)(CPUXtensaState *env, uint32_t imm4)
{
    rotate_window(env, imm4);
}

void HELPER(restore_owb)(CPUXtensaState *env)
{
    rotate_window_abs(env, (env->sregs[PS] & PS_OWB) >> PS_OWB_SHIFT);
}

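/*
 * MOVSP helper: raise an exception with ALLOCA_CAUSE when none of the three
 * panes below the current window is live in WINDOW_START, i.e. when the
 * caller's frame has been spilled.
 */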
void HELPER(movsp)(CPUXtensaState *env, uint32_t pc)
{
    if ((env->sregs[WINDOW_START] &
            (windowstart_bit(env->sregs[WINDOW_BASE] - 3, env) |
             windowstart_bit(env->sregs[WINDOW_BASE] - 2, env) |
             windowstart_bit(env->sregs[WINDOW_BASE] - 1, env))) == 0) {
        HELPER(exception_cause)(env, pc, ALLOCA_CAUSE);
    }
}

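/*
 * The zero-overhead loop end is resolved at translation time, so writes to
 * LBEG or LEND must invalidate the translated block ending at the old (and,
 * for LEND, the new) LEND - 1.
 */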
void HELPER(wsr_lbeg)(CPUXtensaState *env, uint32_t v)
{
    if (env->sregs[LBEG] != v) {
        tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
        env->sregs[LBEG] = v;
    }
}

void HELPER(wsr_lend)(CPUXtensaState *env, uint32_t v)
{
    if (env->sregs[LEND] != v) {
        tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
        env->sregs[LEND] = v;
        tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
    }
}

void HELPER(dump_state)(CPUXtensaState *env)
{
    XtensaCPU *cpu = xtensa_env_get_cpu(env);

    cpu_dump_state(CPU(cpu), stderr, fprintf, 0);
}

void HELPER(waiti)(CPUXtensaState *env, uint32_t pc, uint32_t intlevel)
{
    CPUState *cpu;

    env->pc = pc;
    env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) |
        (intlevel << PS_INTLEVEL_SHIFT);
    check_interrupts(env);
    if (env->pending_irq_level) {
        cpu_loop_exit(CPU(xtensa_env_get_cpu(env)));
        return;
    }

    cpu = CPU(xtensa_env_get_cpu(env));
    cpu->halted = 1;
    HELPER(exception)(env, EXCP_HLT);
}

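/*
 * Derive CCOUNT from the virtual clock: CCOUNT advances at clock_freq_khz
 * relative to time_base, offset by ccount_base (which absorbs direct writes
 * to CCOUNT).  ccount_time remembers the time of the last update for the
 * CCOMPARE timer calculations.
 */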
void HELPER(update_ccount)(CPUXtensaState *env)
{
    uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    env->ccount_time = now;
    env->sregs[CCOUNT] = env->ccount_base +
        (uint32_t)((now - env->time_base) *
                   env->config->clock_freq_khz / 1000000);
}

void HELPER(wsr_ccount)(CPUXtensaState *env, uint32_t v)
{
    int i;

    HELPER(update_ccount)(env);
    env->ccount_base += v - env->sregs[CCOUNT];
    for (i = 0; i < env->config->nccompare; ++i) {
        HELPER(update_ccompare)(env, i);
    }
}

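/*
 * Reprogram the CCOMPARE[i] timer: dcc is the number of CCOUNT ticks until
 * the next match (a full 2^32 period when CCOMPARE[i] == CCOUNT), converted
 * to nanoseconds using the configured core frequency.
 */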
void HELPER(update_ccompare)(CPUXtensaState *env, uint32_t i)
{
    uint64_t dcc;

    HELPER(update_ccount)(env);
    dcc = (uint64_t)(env->sregs[CCOMPARE + i] - env->sregs[CCOUNT] - 1) + 1;
    timer_mod(env->ccompare[i].timer,
              env->ccount_time + (dcc * 1000000) / env->config->clock_freq_khz);
    env->yield_needed = 1;
}

void HELPER(check_interrupts)(CPUXtensaState *env)
{
    check_interrupts(env);
}

void HELPER(itlb_hit_test)(CPUXtensaState *env, uint32_t vaddr)
{
    get_page_addr_code(env, vaddr);
}

/*!
 * Check vaddr accessibility/cache attributes and raise an exception if
 * specified by the ATOMCTL SR.
 *
 * Note: local memory exclusion is not implemented
 */
void HELPER(check_atomctl)(CPUXtensaState *env, uint32_t pc, uint32_t vaddr)
{
    uint32_t paddr, page_size, access;
    uint32_t atomctl = env->sregs[ATOMCTL];
    int rc = xtensa_get_physical_addr(env, true, vaddr, 1,
            xtensa_get_cring(env), &paddr, &page_size, &access);

    /*
     * s32c1i never causes LOAD_PROHIBITED_CAUSE exceptions,
     * see opcode description in the ISA
     */
    if (rc == 0 &&
            (access & (PAGE_READ | PAGE_WRITE)) != (PAGE_READ | PAGE_WRITE)) {
        rc = STORE_PROHIBITED_CAUSE;
    }

    if (rc) {
        HELPER(exception_cause_vaddr)(env, pc, rc, vaddr);
    }

    /*
     * When data cache is not configured use ATOMCTL bypass field.
     * See ISA, 4.3.12.4 The Atomic Operation Control Register (ATOMCTL)
     * under the Conditional Store Option.
     */
    if (!xtensa_option_enabled(env->config, XTENSA_OPTION_DCACHE)) {
        access = PAGE_CACHE_BYPASS;
    }

    switch (access & PAGE_CACHE_MASK) {
    case PAGE_CACHE_WB:
        atomctl >>= 2;
        /* fall through */
    case PAGE_CACHE_WT:
        atomctl >>= 2;
        /* fall through */
    case PAGE_CACHE_BYPASS:
        if ((atomctl & 0x3) == 0) {
            HELPER(exception_cause_vaddr)(env, pc,
                    LOAD_STORE_ERROR_CAUSE, vaddr);
        }
        break;

    case PAGE_CACHE_ISOLATE:
        HELPER(exception_cause_vaddr)(env, pc,
                LOAD_STORE_ERROR_CAUSE, vaddr);
        break;

    default:
        break;
    }
}

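/*
 * MEMCTL writes: clamp the instruction/data cache "use ways" and the data
 * cache "allocate ways" fields to the configured number of cache ways, then
 * keep only the bits this core actually implements.
 */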
void HELPER(wsr_memctl)(CPUXtensaState *env, uint32_t v)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_ICACHE)) {
        if (extract32(v, MEMCTL_IUSEWAYS_SHIFT, MEMCTL_IUSEWAYS_LEN) >
            env->config->icache_ways) {
            /* deposit32() is pure; assign the result back so the clamp
             * actually takes effect */
            v = deposit32(v, MEMCTL_IUSEWAYS_SHIFT, MEMCTL_IUSEWAYS_LEN,
                          env->config->icache_ways);
        }
    }
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_DCACHE)) {
        if (extract32(v, MEMCTL_DUSEWAYS_SHIFT, MEMCTL_DUSEWAYS_LEN) >
            env->config->dcache_ways) {
            v = deposit32(v, MEMCTL_DUSEWAYS_SHIFT, MEMCTL_DUSEWAYS_LEN,
                          env->config->dcache_ways);
        }
        if (extract32(v, MEMCTL_DALLOCWAYS_SHIFT, MEMCTL_DALLOCWAYS_LEN) >
            env->config->dcache_ways) {
            v = deposit32(v, MEMCTL_DALLOCWAYS_SHIFT, MEMCTL_DALLOCWAYS_LEN,
                          env->config->dcache_ways);
        }
    }
    env->sregs[MEMCTL] = v & env->config->memctl_mask;
}

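/*
 * RASID holds the ASIDs of rings 0..3, one per byte; the ring 0 ASID is
 * forced to 1.  Changing any ASID remaps existing MMU mappings, so the
 * whole QEMU TLB is flushed.
 */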
void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v)
{
    XtensaCPU *cpu = xtensa_env_get_cpu(env);

    v = (v & 0xffffff00) | 0x1;
    if (v != env->sregs[RASID]) {
        env->sregs[RASID] = v;
        tlb_flush(CPU(cpu));
    }
}

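/*
 * Page size field for the variable-page-size TLB ways (4..6), taken from
 * DTLBCFG/ITLBCFG.  Fixed-size ways return 0.
 */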
static uint32_t get_page_size(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    uint32_t tlbcfg = env->sregs[dtlb ? DTLBCFG : ITLBCFG];

    switch (way) {
    case 4:
        return (tlbcfg >> 16) & 0x3;

    case 5:
        return (tlbcfg >> 20) & 0x1;

    case 6:
        return (tlbcfg >> 24) & 0x1;

    default:
        return 0;
    }
}

/*!
 * Get bit mask for the virtual address bits translated by the TLB way
 */
uint32_t xtensa_tlb_get_addr_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        switch (way) {
        case 4:
            return 0xfff00000 << get_page_size(env, dtlb, way) * 2;

        case 5:
            if (varway56) {
                return 0xf8000000 << get_page_size(env, dtlb, way);
            } else {
                return 0xf8000000;
            }

        case 6:
            if (varway56) {
                return 0xf0000000 << (1 - get_page_size(env, dtlb, way));
            } else {
                return 0xf0000000;
            }

        default:
            return 0xfffff000;
        }
    } else {
        return REGION_PAGE_MASK;
    }
}

/*!
 * Get bit mask for the 'VPN without index' field.
 * See ISA, 4.6.5.6, data format for RxTLB0
 */
static uint32_t get_vpn_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    if (way < 4) {
        bool is32 = (dtlb ?
                env->config->dtlb.nrefillentries :
                env->config->itlb.nrefillentries) == 32;
        return is32 ? 0xffff8000 : 0xffffc000;
    } else if (way == 4) {
        return xtensa_tlb_get_addr_mask(env, dtlb, way) << 2;
    } else if (way <= 6) {
        uint32_t mask = xtensa_tlb_get_addr_mask(env, dtlb, way);
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        if (varway56) {
            return mask << (way == 5 ? 2 : 3);
        } else {
            return mask << 1;
        }
    } else {
        return 0xfffff000;
    }
}

/*!
 * Split virtual address into VPN (with index) and entry index
 * for the given TLB way
 */
void split_tlb_entry_spec_way(const CPUXtensaState *env, uint32_t v, bool dtlb,
        uint32_t *vpn, uint32_t wi, uint32_t *ei)
{
    bool varway56 = dtlb ?
        env->config->dtlb.varway56 :
        env->config->itlb.varway56;

    if (!dtlb) {
        wi &= 7;
    }

    if (wi < 4) {
        bool is32 = (dtlb ?
                env->config->dtlb.nrefillentries :
                env->config->itlb.nrefillentries) == 32;
        *ei = (v >> 12) & (is32 ? 0x7 : 0x3);
    } else {
        switch (wi) {
        case 4:
            {
                uint32_t eibase = 20 + get_page_size(env, dtlb, wi) * 2;
                *ei = (v >> eibase) & 0x3;
            }
            break;

        case 5:
            if (varway56) {
                uint32_t eibase = 27 + get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x3;
            } else {
                *ei = (v >> 27) & 0x1;
            }
            break;

        case 6:
            if (varway56) {
                uint32_t eibase = 29 - get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x7;
            } else {
                *ei = (v >> 28) & 0x1;
            }
            break;

        default:
            *ei = 0;
            break;
        }
    }
    *vpn = v & xtensa_tlb_get_addr_mask(env, dtlb, wi);
}

/*!
 * Split TLB address into TLB way, entry index and VPN (with index).
 * See ISA, 4.6.5.5 - 4.6.5.8 for the TLB addressing format
 */
static void split_tlb_entry_spec(CPUXtensaState *env, uint32_t v, bool dtlb,
        uint32_t *vpn, uint32_t *wi, uint32_t *ei)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        *wi = v & (dtlb ? 0xf : 0x7);
        split_tlb_entry_spec_way(env, v, dtlb, vpn, *wi, ei);
    } else {
        *vpn = v & REGION_PAGE_MASK;
        *wi = 0;
        *ei = (v >> 29) & 0x7;
    }
}

static xtensa_tlb_entry *get_tlb_entry(CPUXtensaState *env,
        uint32_t v, bool dtlb, uint32_t *pwi)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;

    split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
    if (pwi) {
        *pwi = wi;
    }
    return xtensa_tlb_get_entry(env, dtlb, wi, ei);
}

uint32_t HELPER(rtlb0)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
        return (entry->vaddr & get_vpn_mask(env, dtlb, wi)) | entry->asid;
    } else {
        return v & REGION_PAGE_MASK;
    }
}

uint32_t HELPER(rtlb1)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, NULL);
    return entry->paddr | entry->attr;
}

void HELPER(itlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
        if (entry->variable && entry->asid) {
            tlb_flush_page(CPU(xtensa_env_get_cpu(env)), entry->vaddr);
            entry->asid = 0;
        }
    }
}

uint32_t HELPER(ptlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        uint32_t ei;
        uint8_t ring;
        int res = xtensa_tlb_lookup(env, v, dtlb, &wi, &ei, &ring);

        switch (res) {
        case 0:
            if (ring >= xtensa_get_ring(env)) {
                return (v & 0xfffff000) | wi | (dtlb ? 0x10 : 0x8);
            }
            break;

        case INST_TLB_MULTI_HIT_CAUSE:
        case LOAD_STORE_TLB_MULTI_HIT_CAUSE:
            HELPER(exception_cause_vaddr)(env, env->pc, res, v);
            break;
        }
        return 0;
    } else {
        return (v & REGION_PAGE_MASK) | 0x1;
    }
}

void xtensa_tlb_set_entry_mmu(const CPUXtensaState *env,
        xtensa_tlb_entry *entry, bool dtlb,
        unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
{
    entry->vaddr = vpn;
    entry->paddr = pte & xtensa_tlb_get_addr_mask(env, dtlb, wi);
    entry->asid = (env->sregs[RASID] >> ((pte >> 1) & 0x18)) & 0xff;
    entry->attr = pte & 0xf;
}

void xtensa_tlb_set_entry(CPUXtensaState *env, bool dtlb,
        unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
{
    XtensaCPU *cpu = xtensa_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    xtensa_tlb_entry *entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);

    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        if (entry->variable) {
            if (entry->asid) {
                tlb_flush_page(cs, entry->vaddr);
            }
            xtensa_tlb_set_entry_mmu(env, entry, dtlb, wi, ei, vpn, pte);
            tlb_flush_page(cs, entry->vaddr);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR, "%s %d, %d, %d trying to set immutable entry\n",
                          __func__, dtlb, wi, ei);
        }
    } else {
        tlb_flush_page(cs, entry->vaddr);
        if (xtensa_option_enabled(env->config,
                    XTENSA_OPTION_REGION_TRANSLATION)) {
            entry->paddr = pte & REGION_PAGE_MASK;
        }
        entry->attr = pte & 0xf;
    }
}

void HELPER(wtlb)(CPUXtensaState *env, uint32_t p, uint32_t v, uint32_t dtlb)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;
    split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
    xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, p);
}


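/*
 * Instruction breakpoints are resolved at translation time, so the TB at
 * IBREAKA[i] must be invalidated whenever bit i of IBREAKENABLE toggles;
 * wsr_ibreaka below does the same when an enabled IBREAKA[i] is moved.
 */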
void HELPER(wsr_ibreakenable)(CPUXtensaState *env, uint32_t v)
{
    uint32_t change = v ^ env->sregs[IBREAKENABLE];
    unsigned i;

    for (i = 0; i < env->config->nibreak; ++i) {
        if (change & (1 << i)) {
            tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
        }
    }
    env->sregs[IBREAKENABLE] = v & ((1 << env->config->nibreak) - 1);
}

void HELPER(wsr_ibreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
{
    if (env->sregs[IBREAKENABLE] & (1 << i) && env->sregs[IBREAKA + i] != v) {
        tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
        tb_invalidate_virtual_addr(env, v);
    }
    env->sregs[IBREAKA + i] = v;
}

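/*
 * Model DBREAKA/DBREAKC[i] with a QEMU watchpoint: DBREAKC supplies the
 * address mask and the load/store enable bits.  A non-contiguous mask is
 * reported and truncated after its first zero bit.
 */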
static void set_dbreak(CPUXtensaState *env, unsigned i, uint32_t dbreaka,
        uint32_t dbreakc)
{
    CPUState *cs = CPU(xtensa_env_get_cpu(env));
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
    uint32_t mask = dbreakc | ~DBREAKC_MASK;

    if (env->cpu_watchpoint[i]) {
        cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]);
    }
    if (dbreakc & DBREAKC_SB) {
        flags |= BP_MEM_WRITE;
    }
    if (dbreakc & DBREAKC_LB) {
        flags |= BP_MEM_READ;
    }
    /* contiguous mask after inversion is one less than some power of 2 */
    if ((~mask + 1) & ~mask) {
        qemu_log_mask(LOG_GUEST_ERROR, "DBREAKC mask is not contiguous: 0x%08x\n", dbreakc);
        /* cut mask after the first zero bit */
        mask = 0xffffffff << (32 - clo32(mask));
    }
    if (cpu_watchpoint_insert(cs, dbreaka & mask, ~mask + 1,
            flags, &env->cpu_watchpoint[i])) {
        env->cpu_watchpoint[i] = NULL;
        qemu_log_mask(LOG_GUEST_ERROR, "Failed to set data breakpoint at 0x%08x/%d\n",
                      dbreaka & mask, ~mask + 1);
    }
}

void HELPER(wsr_dbreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
{
    uint32_t dbreakc = env->sregs[DBREAKC + i];

    if ((dbreakc & DBREAKC_SB_LB) &&
            env->sregs[DBREAKA + i] != v) {
        set_dbreak(env, i, v, dbreakc);
    }
    env->sregs[DBREAKA + i] = v;
}

void HELPER(wsr_dbreakc)(CPUXtensaState *env, uint32_t i, uint32_t v)
{
    if ((env->sregs[DBREAKC + i] ^ v) & (DBREAKC_SB_LB | DBREAKC_MASK)) {
        if (v & DBREAKC_SB_LB) {
            set_dbreak(env, i, env->sregs[DBREAKA + i], v);
        } else {
            if (env->cpu_watchpoint[i]) {
                CPUState *cs = CPU(xtensa_env_get_cpu(env));

                cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]);
                env->cpu_watchpoint[i] = NULL;
            }
        }
    }
    env->sregs[DBREAKC + i] = v;
}

void HELPER(wur_fcr)(CPUXtensaState *env, uint32_t v)
{
    static const int rounding_mode[] = {
        float_round_nearest_even,
        float_round_to_zero,
        float_round_up,
        float_round_down,
    };

    env->uregs[FCR] = v & 0xfffff07f;
    set_float_rounding_mode(rounding_mode[v & 3], &env->fp_status);
}

float32 HELPER(abs_s)(float32 v)
{
    return float32_abs(v);
}

float32 HELPER(neg_s)(float32 v)
{
    return float32_chs(v);
}

float32 HELPER(add_s)(CPUXtensaState *env, float32 a, float32 b)
{
    return float32_add(a, b, &env->fp_status);
}

float32 HELPER(sub_s)(CPUXtensaState *env, float32 a, float32 b)
{
    return float32_sub(a, b, &env->fp_status);
}

float32 HELPER(mul_s)(CPUXtensaState *env, float32 a, float32 b)
{
    return float32_mul(a, b, &env->fp_status);
}

float32 HELPER(madd_s)(CPUXtensaState *env, float32 a, float32 b, float32 c)
{
    return float32_muladd(b, c, a, 0,
            &env->fp_status);
}

float32 HELPER(msub_s)(CPUXtensaState *env, float32 a, float32 b, float32 c)
{
    return float32_muladd(b, c, a, float_muladd_negate_product,
            &env->fp_status);
}

uint32_t HELPER(ftoi)(float32 v, uint32_t rounding_mode, uint32_t scale)
{
    float_status fp_status = {0};

    set_float_rounding_mode(rounding_mode, &fp_status);
    return float32_to_int32(
            float32_scalbn(v, scale, &fp_status), &fp_status);
}

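/*
 * Float-to-unsigned conversion.  Negative (non-NaN) inputs take the signed
 * conversion path, so the result is the two's-complement encoding of the
 * signed conversion rather than the saturated unsigned value.
 */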
uint32_t HELPER(ftoui)(float32 v, uint32_t rounding_mode, uint32_t scale)
{
    float_status fp_status = {0};
    float32 res;

    set_float_rounding_mode(rounding_mode, &fp_status);

    res = float32_scalbn(v, scale, &fp_status);

    if (float32_is_neg(v) && !float32_is_any_nan(v)) {
        return float32_to_int32(res, &fp_status);
    } else {
        return float32_to_uint32(res, &fp_status);
    }
}

float32 HELPER(itof)(CPUXtensaState *env, uint32_t v, uint32_t scale)
{
    return float32_scalbn(int32_to_float32(v, &env->fp_status),
            (int32_t)scale, &env->fp_status);
}

float32 HELPER(uitof)(CPUXtensaState *env, uint32_t v, uint32_t scale)
{
    return float32_scalbn(uint32_to_float32(v, &env->fp_status),
            (int32_t)scale, &env->fp_status);
}

static inline void set_br(CPUXtensaState *env, bool v, uint32_t br)
{
    if (v) {
        env->sregs[BR] |= br;
    } else {
        env->sregs[BR] &= ~br;
    }
}

void HELPER(un_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    set_br(env, float32_unordered_quiet(a, b, &env->fp_status), br);
}

void HELPER(oeq_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    set_br(env, float32_eq_quiet(a, b, &env->fp_status), br);
}

void HELPER(ueq_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    int v = float32_compare_quiet(a, b, &env->fp_status);
    set_br(env, v == float_relation_equal || v == float_relation_unordered, br);
}

void HELPER(olt_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    set_br(env, float32_lt_quiet(a, b, &env->fp_status), br);
}

void HELPER(ult_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    int v = float32_compare_quiet(a, b, &env->fp_status);
    set_br(env, v == float_relation_less || v == float_relation_unordered, br);
}

void HELPER(ole_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    set_br(env, float32_le_quiet(a, b, &env->fp_status), br);
}

void HELPER(ule_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    int v = float32_compare_quiet(a, b, &env->fp_status);
    set_br(env, v != float_relation_greater, br);
}

uint32_t HELPER(rer)(CPUXtensaState *env, uint32_t addr)
{
    return address_space_ldl(env->address_space_er, addr,
                             (MemTxAttrs){0}, NULL);
}

void HELPER(wer)(CPUXtensaState *env, uint32_t data, uint32_t addr)
{
    address_space_stl(env->address_space_er, addr, data,
                      (MemTxAttrs){0}, NULL);
}
1029