/*
 * MicroBlaze helper routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias <edgar.iglesias@gmail.com>
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "exec/log.h"

#if defined(CONFIG_USER_ONLY)

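/*
 * With CONFIG_USER_ONLY there is no interrupt controller or MMU to model:
 * taking an "interrupt" only clears the pending exception and the lwx/swx
 * reservation, and mirrors the pc into r14 (the register that normally
 * holds the interrupt return address).
 */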
void mb_cpu_do_interrupt(CPUState *cs)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;

    cs->exception_index = -1;
    env->res_addr = RES_ADDR_NONE;
    env->regs[14] = env->pc;
}

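/*
 * In user-only mode a TLB fill can never succeed: every faulting access is
 * reported with exception index 0xaa, which the linux-user cpu loop is
 * expected to turn into a guest SIGSEGV.
 */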
bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                     MMUAccessType access_type, int mmu_idx,
                     bool probe, uintptr_t retaddr)
{
    cs->exception_index = 0xaa;
    cpu_loop_exit_restore(cs, retaddr);
}

#else /* !CONFIG_USER_ONLY */

bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                     MMUAccessType access_type, int mmu_idx,
                     bool probe, uintptr_t retaddr)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    MicroBlazeMMULookup lu;
    unsigned int hit;
    int prot;

    if (mmu_idx == MMU_NOMMU_IDX) {
        /* MMU disabled or not available. */
        address &= TARGET_PAGE_MASK;
        prot = PAGE_BITS;
        tlb_set_page(cs, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }

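    /*
     * With the MMU enabled, walk the software TLB model.  A hit gives back
     * the physical page and protection bits to install with tlb_set_page();
     * a miss falls through to the fault path below.
     */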
    hit = mmu_translate(cpu, &lu, address, access_type, mmu_idx);
    if (likely(hit)) {
        uint32_t vaddr = address & TARGET_PAGE_MASK;
        uint32_t paddr = lu.paddr + vaddr - lu.vaddr;

        qemu_log_mask(CPU_LOG_MMU, "MMU map mmu=%d v=%x p=%x prot=%x\n",
                      mmu_idx, vaddr, paddr, lu.prot);
        tlb_set_page(cs, vaddr, paddr, lu.prot, mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }

    /* TLB miss. */
    if (probe) {
        return false;
    }

    qemu_log_mask(CPU_LOG_MMU, "mmu=%d miss v=%" VADDR_PRIx "\n",
                  mmu_idx, address);

    env->ear = address;
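    /*
     * ESR exception cause codes: 16/17 are the data/instruction storage
     * (protection) exceptions, 18/19 the data/instruction TLB misses;
     * bit 10 flags the faulting access as a store.
     */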
    switch (lu.err) {
    case ERR_PROT:
        env->esr = access_type == MMU_INST_FETCH ? 17 : 16;
        env->esr |= (access_type == MMU_DATA_STORE) << 10;
        break;
    case ERR_MISS:
        env->esr = access_type == MMU_INST_FETCH ? 19 : 18;
        env->esr |= (access_type == MMU_DATA_STORE) << 10;
        break;
    default:
        abort();
    }

    if (cs->exception_index == EXCP_MMU) {
        cpu_abort(cs, "recursive faults\n");
    }

    /* TLB miss. */
    cs->exception_index = EXCP_MMU;
    cpu_loop_exit_restore(cs, retaddr);
}

void mb_cpu_do_interrupt(CPUState *cs)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    uint32_t t, msr = mb_cpu_read_msr(env);
    bool set_esr;

    /* IMM flag cannot propagate across a branch and into the dslot. */
    assert((env->iflags & (D_FLAG | IMM_FLAG)) != (D_FLAG | IMM_FLAG));
    /* BIMM flag cannot be set without D_FLAG. */
    assert((env->iflags & (D_FLAG | BIMM_FLAG)) != BIMM_FLAG);
    /* RTI flags are private to translate. */
    assert(!(env->iflags & (DRTI_FLAG | DRTE_FLAG | DRTB_FLAG)));

    switch (cs->exception_index) {
    case EXCP_HW_EXCP:
        if (!(cpu->cfg.pvr_regs[0] & PVR0_USE_EXC_MASK)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Exception raised on system without exceptions!\n");
            return;
        }

        qemu_log_mask(CPU_LOG_INT,
                      "INT: HWE at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);

        /* Exception breaks branch + dslot sequence? */
        set_esr = true;
        env->esr &= ~D_FLAG;
        if (env->iflags & D_FLAG) {
            env->esr |= D_FLAG;
            env->btr = env->btarget;
        }

        /* Exception in progress. */
        msr |= MSR_EIP;
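        /*
         * r17 is the exception link register (returned through by rted);
         * control continues at the hardware exception vector at
         * base_vectors + 0x20.
         */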
        env->regs[17] = env->pc + 4;
        env->pc = cpu->cfg.base_vectors + 0x20;
        break;

    case EXCP_MMU:
        qemu_log_mask(CPU_LOG_INT,
                      "INT: MMU at pc=%08x msr=%08x "
                      "ear=%" PRIx64 " iflags=%x\n",
                      env->pc, msr, env->ear, env->iflags);

        /* Exception breaks branch + dslot sequence? */
        set_esr = true;
        env->esr &= ~D_FLAG;
        if (env->iflags & D_FLAG) {
            env->esr |= D_FLAG;
            env->btr = env->btarget;
            /* Reexecute the branch. */
            env->regs[17] = env->pc - (env->iflags & BIMM_FLAG ? 8 : 4);
        } else if (env->iflags & IMM_FLAG) {
            /* Reexecute the imm. */
            env->regs[17] = env->pc - 4;
        } else {
            env->regs[17] = env->pc;
        }

        /* Exception in progress. */
        msr |= MSR_EIP;
        env->pc = cpu->cfg.base_vectors + 0x20;
        break;

    case EXCP_IRQ:
        assert(!(msr & (MSR_EIP | MSR_BIP)));
        assert(msr & MSR_IE);
        assert(!(env->iflags & (D_FLAG | IMM_FLAG)));

        qemu_log_mask(CPU_LOG_INT,
                      "INT: DEV at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);
        set_esr = false;

        /* Disable interrupts. */
        msr &= ~MSR_IE;
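        /*
         * r14 is the interrupt link register (returned through by rtid);
         * the interrupt vector sits at base_vectors + 0x10.
         */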
        env->regs[14] = env->pc;
        env->pc = cpu->cfg.base_vectors + 0x10;
        break;

    case EXCP_HW_BREAK:
        assert(!(env->iflags & (D_FLAG | IMM_FLAG)));

        qemu_log_mask(CPU_LOG_INT,
                      "INT: BRK at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);
        set_esr = false;

        /* Break in progress. */
        msr |= MSR_BIP;
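        /*
         * r16 is the break link register (returned through by rtbd);
         * the break vector sits at base_vectors + 0x18.
         */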
        env->regs[16] = env->pc;
        env->pc = cpu->cfg.base_vectors + 0x18;
        break;

    default:
        cpu_abort(cs, "unhandled exception type=%d\n", cs->exception_index);
        /* not reached */
    }

    /* Save previous mode, disable mmu, disable user-mode. */
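    /*
     * MSR[VM]/MSR[UM] are copied one bit left into MSR[VMS]/MSR[UMS] so
     * the rtid/rtbd/rted return instructions can restore them later.
     */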
    t = (msr & (MSR_VM | MSR_UM)) << 1;
    msr &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM);
    msr |= t;
    mb_cpu_write_msr(env, msr);

    env->res_addr = RES_ADDR_NONE;
    env->iflags = 0;

    if (!set_esr) {
        qemu_log_mask(CPU_LOG_INT,
                      " to pc=%08x msr=%08x\n", env->pc, msr);
    } else if (env->esr & D_FLAG) {
        qemu_log_mask(CPU_LOG_INT,
                      " to pc=%08x msr=%08x esr=%04x btr=%08x\n",
                      env->pc, msr, env->esr, env->btr);
    } else {
        qemu_log_mask(CPU_LOG_INT,
                      " to pc=%08x msr=%08x esr=%04x\n",
                      env->pc, msr, env->esr);
    }
}

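/*
 * Debug translation used for gdbstub and monitor accesses: it walks the
 * same MMU model but never raises a fault, returning 0 when no mapping
 * is found.
 */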
hwaddr mb_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    target_ulong vaddr, paddr = 0;
    MicroBlazeMMULookup lu;
    int mmu_idx = cpu_mmu_index(env, false);
    unsigned int hit;

    if (mmu_idx != MMU_NOMMU_IDX) {
        hit = mmu_translate(cpu, &lu, addr, 0, 0);
        if (hit) {
            vaddr = addr & TARGET_PAGE_MASK;
            paddr = lu.paddr + vaddr - lu.vaddr;
        } else {
            paddr = 0; /* No mapping found; report 0 to the debugger. */
        }
    } else {
        paddr = addr & TARGET_PAGE_MASK;
    }

    return paddr;
}
#endif

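/*
 * Hard interrupts are only taken when MSR[IE] is set, no exception or
 * break is already in progress, and the CPU is not in the middle of a
 * delay-slot or imm-prefix sequence.
 */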
bool mb_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;

    if ((interrupt_request & CPU_INTERRUPT_HARD)
        && (env->msr & MSR_IE)
        && !(env->msr & (MSR_EIP | MSR_BIP))
        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
        cs->exception_index = EXCP_IRQ;
        mb_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}

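/*
 * TCG unaligned-access hook: invoked when a load or store violates its
 * alignment requirement.  It raises the MicroBlaze unaligned data
 * hardware exception with the details the translator recorded in iflags.
 */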
void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                MMUAccessType access_type,
                                int mmu_idx, uintptr_t retaddr)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    uint32_t esr, iflags;

    /* Recover the pc and iflags from the corresponding insn_start. */
    cpu_restore_state(cs, retaddr, true);
    iflags = cpu->env.iflags;

    qemu_log_mask(CPU_LOG_INT,
                  "Unaligned access addr=" TARGET_FMT_lx " pc=%x iflags=%x\n",
                  (target_ulong)addr, cpu->env.pc, iflags);

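    /*
     * ESR_ESS_FLAG in iflags means the translator recorded the
     * exception-specific status bits for this insn (register, store flag
     * and access size, as far as this model tracks them); fold them into
     * the ESR so the guest's unaligned handler can fix up the access.
     */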
    esr = ESR_EC_UNALIGNED_DATA;
    if (likely(iflags & ESR_ESS_FLAG)) {
        esr |= iflags & ESR_ESS_MASK;
    } else {
        qemu_log_mask(LOG_UNIMP, "Unaligned access without ESR_ESS_FLAG\n");
    }

    cpu->env.ear = addr;
    cpu->env.esr = esr;
    cs->exception_index = EXCP_HW_EXCP;
    cpu_loop_exit(cs);
}