1 /*
2 * PowerPC emulation helpers for QEMU.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "cpu.h"
21 #include "hw/ppc/ppc.h"
22 #include "exec/helper-proto.h"
23 #include "exec/exec-all.h"
24 #include "qemu/log.h"
25 #include "qemu/main-loop.h"
26
27 /*****************************************************************************/
28 /* SPR accesses */
29
helper_load_tbl(CPUPPCState * env)30 target_ulong helper_load_tbl(CPUPPCState *env)
31 {
32 return (target_ulong)cpu_ppc_load_tbl(env);
33 }
34
helper_load_tbu(CPUPPCState * env)35 target_ulong helper_load_tbu(CPUPPCState *env)
36 {
37 return cpu_ppc_load_tbu(env);
38 }
39
helper_load_atbl(CPUPPCState * env)40 target_ulong helper_load_atbl(CPUPPCState *env)
41 {
42 return (target_ulong)cpu_ppc_load_atbl(env);
43 }
44
helper_load_atbu(CPUPPCState * env)45 target_ulong helper_load_atbu(CPUPPCState *env)
46 {
47 return cpu_ppc_load_atbu(env);
48 }
49
helper_load_vtb(CPUPPCState * env)50 target_ulong helper_load_vtb(CPUPPCState *env)
51 {
52 return cpu_ppc_load_vtb(env);
53 }
54
55 #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
helper_load_purr(CPUPPCState * env)56 target_ulong helper_load_purr(CPUPPCState *env)
57 {
58 return (target_ulong)cpu_ppc_load_purr(env);
59 }
60
/*
 * mtspr helper: write PURR.  When several threads share one LPAR
 * (POWERPC_FLAG_SMT_1LPAR), the write is mirrored to every sibling
 * thread of the core so they all observe the same value.
 */
void helper_store_purr(CPUPPCState *env, target_ulong val)
{
    CPUState *cs = env_cpu(env);

    if (cs->nr_threads != 1 && (env->flags & POWERPC_FLAG_SMT_1LPAR)) {
        CPUState *ccs;

        THREAD_SIBLING_FOREACH(cs, ccs) {
            cpu_ppc_store_purr(&POWERPC_CPU(ccs)->env, val);
        }
    } else {
        cpu_ppc_store_purr(env, val);
    }
}
77 #endif
78
79 #if !defined(CONFIG_USER_ONLY)
/*
 * mtspr helper: write the lower Time Base (TBL).  When several threads
 * share one LPAR (POWERPC_FLAG_SMT_1LPAR), the write is mirrored to every
 * sibling thread of the core so the core-wide TB stays consistent.
 */
void helper_store_tbl(CPUPPCState *env, target_ulong val)
{
    CPUState *cs = env_cpu(env);

    if (cs->nr_threads != 1 && (env->flags & POWERPC_FLAG_SMT_1LPAR)) {
        CPUState *ccs;

        THREAD_SIBLING_FOREACH(cs, ccs) {
            cpu_ppc_store_tbl(&POWERPC_CPU(ccs)->env, val);
        }
    } else {
        cpu_ppc_store_tbl(env, val);
    }
}
96
/*
 * mtspr helper: write the upper Time Base (TBU).  When several threads
 * share one LPAR (POWERPC_FLAG_SMT_1LPAR), the write is mirrored to every
 * sibling thread of the core.
 */
void helper_store_tbu(CPUPPCState *env, target_ulong val)
{
    CPUState *cs = env_cpu(env);

    if (cs->nr_threads != 1 && (env->flags & POWERPC_FLAG_SMT_1LPAR)) {
        CPUState *ccs;

        THREAD_SIBLING_FOREACH(cs, ccs) {
            cpu_ppc_store_tbu(&POWERPC_CPU(ccs)->env, val);
        }
    } else {
        cpu_ppc_store_tbu(env, val);
    }
}
113
/* mtspr helper: write the lower Alternate Time Base (ATBL). */
void helper_store_atbl(CPUPPCState *env, target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}
118
/* mtspr helper: write the upper Alternate Time Base (ATBU). */
void helper_store_atbu(CPUPPCState *env, target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}
123
helper_load_decr(CPUPPCState * env)124 target_ulong helper_load_decr(CPUPPCState *env)
125 {
126 return cpu_ppc_load_decr(env);
127 }
128
/* mtspr helper: write the Decrementer (DECR). */
void helper_store_decr(CPUPPCState *env, target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}
133
helper_load_hdecr(CPUPPCState * env)134 target_ulong helper_load_hdecr(CPUPPCState *env)
135 {
136 return cpu_ppc_load_hdecr(env);
137 }
138
/*
 * mtspr helper: write the Hypervisor Decrementer (HDECR).  When several
 * threads share one LPAR (POWERPC_FLAG_SMT_1LPAR), the write is mirrored
 * to every sibling thread of the core.
 */
void helper_store_hdecr(CPUPPCState *env, target_ulong val)
{
    CPUState *cs = env_cpu(env);

    if (cs->nr_threads != 1 && (env->flags & POWERPC_FLAG_SMT_1LPAR)) {
        CPUState *ccs;

        THREAD_SIBLING_FOREACH(cs, ccs) {
            cpu_ppc_store_hdecr(&POWERPC_CPU(ccs)->env, val);
        }
    } else {
        cpu_ppc_store_hdecr(env, val);
    }
}
155
/*
 * mtspr helper: write the Virtual Time Base (VTB).  When several threads
 * share one LPAR (POWERPC_FLAG_SMT_1LPAR), the write is mirrored to every
 * sibling thread of the core.
 */
void helper_store_vtb(CPUPPCState *env, target_ulong val)
{
    CPUState *cs = env_cpu(env);

    if (cs->nr_threads != 1 && (env->flags & POWERPC_FLAG_SMT_1LPAR)) {
        CPUState *ccs;

        THREAD_SIBLING_FOREACH(cs, ccs) {
            cpu_ppc_store_vtb(&POWERPC_CPU(ccs)->env, val);
        }
    } else {
        cpu_ppc_store_vtb(env, val);
    }
}
172
/*
 * mtspr helper: write TBU40 (upper 40 bits of the Time Base).  When
 * several threads share one LPAR (POWERPC_FLAG_SMT_1LPAR), the write is
 * mirrored to every sibling thread of the core.
 */
void helper_store_tbu40(CPUPPCState *env, target_ulong val)
{
    CPUState *cs = env_cpu(env);

    if (cs->nr_threads != 1 && (env->flags & POWERPC_FLAG_SMT_1LPAR)) {
        CPUState *ccs;

        THREAD_SIBLING_FOREACH(cs, ccs) {
            cpu_ppc_store_tbu40(&POWERPC_CPU(ccs)->env, val);
        }
    } else {
        cpu_ppc_store_tbu40(env, val);
    }
}
189
helper_load_40x_pit(CPUPPCState * env)190 target_ulong helper_load_40x_pit(CPUPPCState *env)
191 {
192 return load_40x_pit(env);
193 }
194
/* mtspr helper: write the PowerPC 40x Programmable Interval Timer (PIT). */
void helper_store_40x_pit(CPUPPCState *env, target_ulong val)
{
    store_40x_pit(env, val);
}
199
/* mtspr helper: write the PowerPC 40x Timer Control Register (TCR). */
void helper_store_40x_tcr(CPUPPCState *env, target_ulong val)
{
    store_40x_tcr(env, val);
}
204
/* mtspr helper: write the PowerPC 40x Timer Status Register (TSR). */
void helper_store_40x_tsr(CPUPPCState *env, target_ulong val)
{
    store_40x_tsr(env, val);
}
209
/* mtspr helper: write the BookE Timer Control Register (TCR). */
void helper_store_booke_tcr(CPUPPCState *env, target_ulong val)
{
    store_booke_tcr(env, val);
}
214
/* mtspr helper: write the BookE Timer Status Register (TSR). */
void helper_store_booke_tsr(CPUPPCState *env, target_ulong val)
{
    store_booke_tsr(env, val);
}
219
220 #if defined(TARGET_PPC64)
221 /*
222 * POWER processor Timebase Facility
223 */
224
225 /*
226 * The TBST is the timebase state machine, which is a per-core machine that
227 * is used to synchronize the core TB with the ChipTOD. States 3,4,5 are
228 * not used in POWER8/9/10.
229 *
230 * The state machine gets driven by writes to TFMR SPR from the core, and
231 * by signals from the ChipTOD. The state machine table for common
232 * transitions is as follows (according to hardware specs, not necessarily
233 * this implementation):
234 *
235 * | Cur | Event | New |
236 * +----------------+----------------------------------+-----+
237 * | 0 RESET | TFMR |= LOAD_TOD_MOD | 1 |
238 * | 1 SEND_TOD_MOD | "immediate transition" | 2 |
239 * | 2 NOT_SET | mttbu/mttbu40/mttbl | 2 |
240 * | 2 NOT_SET | TFMR |= MOVE_CHIP_TOD_TO_TB | 6 |
241 * | 6 SYNC_WAIT | "sync pulse from ChipTOD" | 7 |
242 * | 7 GET_TOD | ChipTOD xscom MOVE_TOD_TO_TB_REG | 8 |
243 * | 8 TB_RUNNING | mttbu/mttbu40 | 8 |
244 * | 8 TB_RUNNING | TFMR |= LOAD_TOD_MOD | 1 |
245 * | 8 TB_RUNNING | mttbl | 9 |
246 * | 9 TB_ERROR | TFMR |= CLEAR_TB_ERRORS | 0 |
247 *
248 * - LOAD_TOD_MOD will also move states 2,6 to state 1, omitted from table
249 * because it's not a typical init flow.
250 *
251 * - The ERROR state can be entered from most/all other states on invalid
252 * states (e.g., if some TFMR control bit is set from a state where it's
253 * not listed to cause a transition away from), omitted to avoid clutter.
254 *
255 * Note: mttbl causes a timebase error because this inevitably causes
256 * ticks to be lost and TB to become unsynchronized, whereas TB can be
257 * adjusted using mttbu* without losing ticks. mttbl behaviour is not
258 * modelled.
259 *
260 * Note: the TB state machine does not actually cause any real TB adjustment!
261 * TB starts out synchronized across all vCPUs (hardware threads) in
 *       QEMU, so for now the purpose of the TBST and ChipTOD model is simply
263 * to step through firmware initialisation sequences.
264 */
/* Extract the current TBST (timebase state machine state) from a TFMR value. */
static unsigned int tfmr_get_tb_state(uint64_t tfmr)
{
    uint64_t encoded = tfmr & TFMR_TBST_ENCODED;

    /* Shift the encoded-state field down to bit 0 (IBM bit numbering). */
    return (unsigned int)(encoded >> (63 - 31));
}
269
/*
 * Return @tfmr updated for a transition of the timebase state machine to
 * @tbst: the current encoded state is demoted into the "last state" field,
 * the new state is installed, and TB_VALID tracks whether the new state is
 * TB_RUNNING.
 */
static uint64_t tfmr_new_tb_state(uint64_t tfmr, unsigned int tbst)
{
    /*
     * The ENCODED and LAST fields are disjoint: save the current state
     * into the last-state field, then overwrite the encoded state.
     */
    tfmr = (tfmr & ~TFMR_TBST_LAST) | ((tfmr & TFMR_TBST_ENCODED) >> 4);
    tfmr = (tfmr & ~TFMR_TBST_ENCODED) | ((uint64_t)tbst << (63 - 31));

    /* The TB is only reported valid while the machine is in TB_RUNNING. */
    if (tbst == TBST_TB_RUNNING) {
        tfmr |= TFMR_TB_VALID;
    } else {
        tfmr &= ~TFMR_TB_VALID;
    }

    return tfmr;
}
285
/*
 * Store a TFMR value, mirroring it to every sibling thread of the core
 * when the CPU has more than one thread (TFMR is kept identical across
 * the threads of a core in this model).
 */
static void write_tfmr(CPUPPCState *env, target_ulong val)
{
    CPUState *cs = env_cpu(env);

    if (cs->nr_threads != 1) {
        CPUState *ccs;

        THREAD_SIBLING_FOREACH(cs, ccs) {
            POWERPC_CPU(ccs)->env.spr[SPR_TFMR] = val;
        }
    } else {
        env->spr[SPR_TFMR] = val;
    }
}
300
/*
 * Advance the timebase state machine by one step.
 *
 * Called from the TFMR mfspr path.  Counts down the software timers that
 * model the ChipTOD sync pulse and the latency of state transitions, and
 * performs the transitions requested by the TFMR control bits once the
 * state timer expires.  Any TFMR change is broadcast to sibling threads
 * via write_tfmr().
 */
static void tb_state_machine_step(CPUPPCState *env)
{
    uint64_t tfmr = env->spr[SPR_TFMR];
    unsigned int tbst = tfmr_get_tb_state(tfmr);

    /* Nothing to do outside ECLIPZ mode, or once stuck in TB_ERROR. */
    if (!(tfmr & TFMR_TB_ECLIPZ) || tbst == TBST_TB_ERROR) {
        return;
    }

    /* Model the ChipTOD sync pulse with a countdown; when it reaches
     * zero, report that a sync occurred. */
    if (env->pnv_tod_tbst.tb_sync_pulse_timer) {
        env->pnv_tod_tbst.tb_sync_pulse_timer--;
    } else {
        tfmr |= TFMR_TB_SYNC_OCCURED;
        write_tfmr(env, tfmr);
    }

    /* State transitions are delayed until the state timer runs down. */
    if (env->pnv_tod_tbst.tb_state_timer) {
        env->pnv_tod_tbst.tb_state_timer--;
        return;
    }

    if (tfmr & TFMR_LOAD_TOD_MOD) {
        tfmr &= ~TFMR_LOAD_TOD_MOD;
        if (tbst == TBST_GET_TOD) {
            /* LOAD_TOD_MOD from GET_TOD is invalid: raise a firmware error. */
            tfmr = tfmr_new_tb_state(tfmr, TBST_TB_ERROR);
            tfmr |= TFMR_FIRMWARE_CONTROL_ERROR;
        } else {
            tfmr = tfmr_new_tb_state(tfmr, TBST_SEND_TOD_MOD);
            /* State seems to transition immediately */
            tfmr = tfmr_new_tb_state(tfmr, TBST_NOT_SET);
        }
    } else if (tfmr & TFMR_MOVE_CHIP_TOD_TO_TB) {
        if (tbst == TBST_SYNC_WAIT) {
            tfmr = tfmr_new_tb_state(tfmr, TBST_GET_TOD);
            env->pnv_tod_tbst.tb_state_timer = 3;
        } else if (tbst == TBST_GET_TOD) {
            if (env->pnv_tod_tbst.tod_sent_to_tb) {
                /* The ChipTOD model has delivered the TOD: TB now runs. */
                tfmr = tfmr_new_tb_state(tfmr, TBST_TB_RUNNING);
                tfmr &= ~TFMR_MOVE_CHIP_TOD_TO_TB;
                env->pnv_tod_tbst.tb_ready_for_tod = 0;
                env->pnv_tod_tbst.tod_sent_to_tb = 0;
            }
        } else {
            qemu_log_mask(LOG_GUEST_ERROR, "TFMR error: MOVE_CHIP_TOD_TO_TB "
                          "state machine in invalid state 0x%x\n", tbst);
            tfmr = tfmr_new_tb_state(tfmr, TBST_TB_ERROR);
            tfmr |= TFMR_FIRMWARE_CONTROL_ERROR;
            env->pnv_tod_tbst.tb_ready_for_tod = 0;
        }
    }

    write_tfmr(env, tfmr);
}
354
helper_load_tfmr(CPUPPCState * env)355 target_ulong helper_load_tfmr(CPUPPCState *env)
356 {
357 tb_state_machine_step(env);
358
359 return env->spr[SPR_TFMR] | TFMR_TB_ECLIPZ;
360 }
361
/*
 * mtspr helper for TFMR: update the control bits, apply the clear-on-write
 * semantics, and perform the timebase state machine transitions described
 * in the table above.  The resulting TFMR is broadcast to all sibling
 * threads via write_tfmr().
 */
void helper_store_tfmr(CPUPPCState *env, target_ulong val)
{
    uint64_t tfmr = env->spr[SPR_TFMR];
    uint64_t clear_on_write;
    unsigned int tbst = tfmr_get_tb_state(tfmr);

    if (!(val & TFMR_TB_ECLIPZ)) {
        /* Only ECLIPZ mode is modelled; wipe the state fields. */
        qemu_log_mask(LOG_UNIMP, "TFMR non-ECLIPZ mode not implemented\n");
        tfmr &= ~TFMR_TBST_ENCODED;
        tfmr &= ~TFMR_TBST_LAST;
        goto out;
    }

    /* Update control bits */
    tfmr = (tfmr & ~TFMR_CONTROL_MASK) | (val & TFMR_CONTROL_MASK);

    /* Several bits are clear-on-write, only one is implemented so far */
    clear_on_write = val & TFMR_FIRMWARE_CONTROL_ERROR;
    tfmr &= ~clear_on_write;

    /*
     * mtspr always clears this. The sync pulse timer makes it come back
     * after the second mfspr.
     */
    tfmr &= ~TFMR_TB_SYNC_OCCURED;
    env->pnv_tod_tbst.tb_sync_pulse_timer = 1;

    /* Only thread 0 may drive the state machine in this model. */
    if (ppc_cpu_tir(env_archcpu(env)) != 0 &&
        (val & (TFMR_LOAD_TOD_MOD | TFMR_MOVE_CHIP_TOD_TO_TB))) {
        qemu_log_mask(LOG_UNIMP, "TFMR timebase state machine can only be "
                      "driven by thread 0\n");
        goto out;
    }

    if (((tfmr | val) & (TFMR_LOAD_TOD_MOD | TFMR_MOVE_CHIP_TOD_TO_TB)) ==
        (TFMR_LOAD_TOD_MOD | TFMR_MOVE_CHIP_TOD_TO_TB)) {
        /* The two initialisation requests are mutually exclusive. */
        qemu_log_mask(LOG_GUEST_ERROR, "TFMR error: LOAD_TOD_MOD and "
                      "MOVE_CHIP_TOD_TO_TB both set\n");
        tfmr = tfmr_new_tb_state(tfmr, TBST_TB_ERROR);
        tfmr |= TFMR_FIRMWARE_CONTROL_ERROR;
        env->pnv_tod_tbst.tb_ready_for_tod = 0;
        goto out;
    }

    if (tfmr & TFMR_CLEAR_TB_ERRORS) {
        /*
         * Workbook says TFMR_CLEAR_TB_ERRORS should be written twice.
         * This is not simulated/required here.
         */
        tfmr = tfmr_new_tb_state(tfmr, TBST_RESET);
        tfmr &= ~TFMR_CLEAR_TB_ERRORS;
        tfmr &= ~TFMR_LOAD_TOD_MOD;
        tfmr &= ~TFMR_MOVE_CHIP_TOD_TO_TB;
        tfmr &= ~TFMR_FIRMWARE_CONTROL_ERROR; /* XXX: should this be cleared? */
        env->pnv_tod_tbst.tb_ready_for_tod = 0;
        env->pnv_tod_tbst.tod_sent_to_tb = 0;
        goto out;
    }

    if (tbst == TBST_TB_ERROR) {
        qemu_log_mask(LOG_GUEST_ERROR, "TFMR error: mtspr TFMR in TB_ERROR"
                      " state\n");
        tfmr |= TFMR_FIRMWARE_CONTROL_ERROR;
        /*
         * Fix: this previously used a bare "return", which discarded the
         * FIRMWARE_CONTROL_ERROR bit set just above (and all other TFMR
         * updates) because write_tfmr() was never reached.  Fall through
         * to "out" so the error is actually recorded in TFMR.
         */
        goto out;
    }

    if (tfmr & TFMR_LOAD_TOD_MOD) {
        /* Wait for an arbitrary 3 mfspr until the next state transition. */
        env->pnv_tod_tbst.tb_state_timer = 3;
    } else if (tfmr & TFMR_MOVE_CHIP_TOD_TO_TB) {
        if (tbst == TBST_NOT_SET) {
            tfmr = tfmr_new_tb_state(tfmr, TBST_SYNC_WAIT);
            env->pnv_tod_tbst.tb_ready_for_tod = 1;
            env->pnv_tod_tbst.tb_state_timer = 3; /* arbitrary */
        } else {
            qemu_log_mask(LOG_GUEST_ERROR, "TFMR error: MOVE_CHIP_TOD_TO_TB "
                          "not in TB not set state 0x%x\n",
                          tbst);
            tfmr = tfmr_new_tb_state(tfmr, TBST_TB_ERROR);
            tfmr |= TFMR_FIRMWARE_CONTROL_ERROR;
            env->pnv_tod_tbst.tb_ready_for_tod = 0;
        }
    }

out:
    write_tfmr(env, tfmr);
}
449 #endif
450
451 /*****************************************************************************/
452 /* Embedded PowerPC specific helpers */
453
454 /* XXX: to be improved to check access rights when in user-mode */
/*
 * Read a Device Control Register (DCR).  Raises a program interrupt
 * (invalid instruction) when no DCR environment exists or when the DCR
 * read fails; raise_exception_err_ra() does not return, so the guard
 * clauses below terminate execution of this helper.
 */
target_ulong helper_load_dcr(CPUPPCState *env, target_ulong dcrn)
{
    uint32_t val = 0;
    int ret;

    if (unlikely(env->dcr_env == NULL)) {
        qemu_log_mask(LOG_GUEST_ERROR, "No DCR environment\n");
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL |
                               POWERPC_EXCP_INVAL_INVAL, GETPC());
    }

    /* DCR accesses touch device state, so take the BQL around them. */
    bql_lock();
    ret = ppc_dcr_read(env->dcr_env, (uint32_t)dcrn, &val);
    bql_unlock();

    if (unlikely(ret != 0)) {
        qemu_log_mask(LOG_GUEST_ERROR, "DCR read error %d %03x\n",
                      (uint32_t)dcrn, (uint32_t)dcrn);
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL |
                               POWERPC_EXCP_INVAL_INVAL, GETPC());
    }

    return val;
}
480
/*
 * Write a Device Control Register (DCR).  Raises a program interrupt
 * (invalid instruction) when no DCR environment exists or when the DCR
 * write fails; raise_exception_err_ra() does not return, so the guard
 * clauses below terminate execution of this helper.
 */
void helper_store_dcr(CPUPPCState *env, target_ulong dcrn, target_ulong val)
{
    int ret;

    if (unlikely(env->dcr_env == NULL)) {
        qemu_log_mask(LOG_GUEST_ERROR, "No DCR environment\n");
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL |
                               POWERPC_EXCP_INVAL_INVAL, GETPC());
    }

    /* DCR accesses touch device state, so take the BQL around them. */
    bql_lock();
    ret = ppc_dcr_write(env->dcr_env, (uint32_t)dcrn, (uint32_t)val);
    bql_unlock();

    if (unlikely(ret != 0)) {
        qemu_log_mask(LOG_GUEST_ERROR, "DCR write error %d %03x\n",
                      (uint32_t)dcrn, (uint32_t)dcrn);
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL |
                               POWERPC_EXCP_INVAL_INVAL, GETPC());
    }
}
502 #endif
503