1 /* $Id: sparc-misc.c,v 1.14 2010/06/05 16:17:19 fredette Exp $ */
2
3 /* ic/sparc/sparc-misc.c - miscellaneous things for the SPARC emulator: */
4
5 /*
6 * Copyright (c) 2005 Matt Fredette
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by Matt Fredette.
20 * 4. The name of the author may not be used to endorse or promote products
21 * derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
27 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
29 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
32 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 * POSSIBILITY OF SUCH DAMAGE.
34 */
35
36 /* includes: */
37 #include "sparc-impl.h"
38
39 _TME_RCSID("$Id: sparc-misc.c,v 1.14 2010/06/05 16:17:19 fredette Exp $");
40
41 #include "sparc-bus-auto.c"
42
/* our bus signal handler: */
/* called by the bus side of a connection to report an edge on a bus
   signal.  this only records the change in the IC's external state
   and wakes the sparc thread; the sparc thread does the real
   processing in its external check.  returns TME_OK: */
static int
_tme_sparc_bus_signal(struct tme_bus_connection *conn_bus, unsigned int signal)
{
  struct tme_sparc *ic;
  unsigned int level;

  /* recover our IC: */
  ic = conn_bus->tme_bus_connection.tme_connection_element->tme_element_private;

  /* get the level.  this must be an edge: */
  assert (signal & TME_BUS_SIGNAL_EDGE);
  /* strip the edge bit, then separate the signal number from the
     level bits: */
  level = signal - TME_BUS_SIGNAL_EDGE;
  signal = TME_BUS_SIGNAL_WHICH(signal);
  level ^= signal;

  /* lock the external mutex: */
  tme_mutex_lock(&ic->tme_sparc_external_mutex);

  /* if the signal is asserted: */
  if (level == TME_BUS_SIGNAL_LEVEL_ASSERTED) {

    /* update the asserted flags for these signals: */
    /* BG (bus grant) is the expected common case; any signal that is
       neither BG nor RESET is treated as HALT: */
    if (__tme_predict_true(signal == TME_BUS_SIGNAL_BG)) {
      tme_memory_atomic_write_flag(&ic->tme_sparc_external_bg_asserted, TRUE);
    }
    else if (signal == TME_BUS_SIGNAL_RESET) {
      tme_memory_atomic_write_flag(&ic->tme_sparc_external_reset_asserted, TRUE);
    }
    else {
      tme_memory_atomic_write_flag(&ic->tme_sparc_external_halt_asserted, TRUE);
    }
  }

  /* otherwise, the signal must be negated: */
  else {
    assert (level == TME_BUS_SIGNAL_LEVEL_NEGATED);

    /* update the asserted or negated flags for these signals: */
    /* NB: for RESET and HALT a negation sets a separate "negated"
       flag rather than clearing the "asserted" flag; the consumer
       (see tme_sparc32_external_check) clears both flags when it
       handles the negation: */
    if (__tme_predict_true(signal == TME_BUS_SIGNAL_BG)) {
      tme_memory_atomic_write_flag(&ic->tme_sparc_external_bg_asserted, FALSE);
    }
    else if (signal == TME_BUS_SIGNAL_RESET) {
      tme_memory_atomic_write_flag(&ic->tme_sparc_external_reset_negated, TRUE);
    }
    else {
      tme_memory_atomic_write_flag(&ic->tme_sparc_external_halt_negated, TRUE);
    }
  }

  /* write the external flag before any earlier signal flag write: */
  /* (the write-before-write barrier guarantees that a reader who sees
     the external flag set also sees the specific signal flag written
     above): */
  tme_memory_barrier(ic, sizeof(*ic), TME_MEMORY_BARRIER_WRITE_BEFORE_WRITE);
  tme_memory_atomic_write_flag(&ic->tme_sparc_external_flag, TRUE);

  /* notify any thread waiting on the external condition: */
  tme_cond_notify(&ic->tme_sparc_external_cond, FALSE);

  /* unlock the external mutex: */
  tme_mutex_unlock(&ic->tme_sparc_external_mutex);
  return (TME_OK);
}
104
/* our interrupt handler: */
/* called by the bus side of a sparc bus connection to change the
   interrupt priority level presented to the CPU.  like the signal
   handler, this only records the new ipl and wakes the sparc thread.
   returns TME_OK: */
static int
_tme_sparc_bus_interrupt(struct tme_sparc_bus_connection *conn_sparc, unsigned int ipl)
{
  struct tme_sparc *ic;

  /* recover our IC: */
  ic = conn_sparc->tme_sparc_bus_connection.tme_bus_connection.tme_connection_element->tme_element_private;

  /* lock the external mutex: */
  tme_mutex_lock(&ic->tme_sparc_external_mutex);

  /* set the interrupt line: */
  tme_memory_atomic_write8(&ic->tme_sparc_external_ipl,
			   ipl,
			   &ic->tme_sparc_external_ipl_rwlock,
			   sizeof(tme_uint8_t));

  /* write the external flag before the earlier ipl write: */
  /* (the write-before-write barrier guarantees that a reader who sees
     the external flag set also sees the new ipl): */
  tme_memory_barrier(ic, sizeof(*ic), TME_MEMORY_BARRIER_WRITE_BEFORE_WRITE);
  tme_memory_atomic_write_flag(&ic->tme_sparc_external_flag, TRUE);

  /* notify any thread waiting on the external condition: */
  tme_cond_notify(&ic->tme_sparc_external_cond, FALSE);

  /* unlock the external mutex: */
  tme_mutex_unlock(&ic->tme_sparc_external_mutex);
  return (TME_OK);
}
134
135 /* the idle function, used when the processor is halted or stopped: */
136 static void
tme_sparc_idle(struct tme_sparc * ic)137 tme_sparc_idle(struct tme_sparc *ic)
138 {
139 /* lock the external mutex: */
140 tme_mutex_lock(&ic->tme_sparc_external_mutex);
141
142 /* loop forever: */
143 for (;;) {
144
145 /* check for any external signal: */
146 (*ic->_tme_sparc_external_check)(ic, TME_SPARC_EXTERNAL_CHECK_MUTEX_LOCKED);
147
148 /* await an external condition: */
149 tme_cond_wait_yield(&ic->tme_sparc_external_cond, &ic->tme_sparc_external_mutex);
150 }
151 }
152
153 /* this resets idle detection: */
154 static void
_tme_sparc_idle_reset(struct tme_sparc * ic)155 _tme_sparc_idle_reset(struct tme_sparc *ic)
156 {
157
158 /* reset the main idle PC to state one, and assume that the idle
159 type has an idle PC range and zero the idle PC range upper
160 bound: */
161 if (TME_SPARC_VERSION(ic) >= 9) {
162 #ifdef TME_HAVE_INT64_T
163 ic->tme_sparc_idle_pcs_64[0] = TME_SPARC_IDLE_TYPE_PC_STATE(1);
164 ic->tme_sparc_idle_pcs_64[1] = 0;
165 #endif /* TME_HAVE_INT64_T */
166 }
167 else {
168 ic->tme_sparc_idle_pcs_32[0] = TME_SPARC_IDLE_TYPE_PC_STATE(1);
169 ic->tme_sparc_idle_pcs_32[1] = 0;
170 }
171 }
172
/* the sparc thread: */
/* the main loop of the emulated CPU.  each mode handler below runs
   until the mode changes, at which point tme_sparc_redispatch()
   longjmps back to the setjmp here and we dispatch again on the new
   mode: */
static void
tme_sparc_thread(struct tme_sparc *ic)
{

  /* we use longjmp to redispatch: */
  /* (setjmp returns nonzero on each longjmp; the empty loop body just
     reenters the dispatch below): */
  do { } while (setjmp(ic->_tme_sparc_dispatcher));

  /* we must not have a busy instruction TLB entry: */
  /* (tme_sparc_redispatch unbusies it before the longjmp): */
  assert (ic->_tme_sparc_itlb_current_token == NULL);

  /* dispatch on the current mode: */
  switch (ic->_tme_sparc_mode) {

  case TME_SPARC_MODE_EXECUTION:

    /* if we may update the runlength with this instruction burst,
       note its start time: */
    if (ic->tme_sparc_runlength_update_next == 0
	&& (ic->_tme_sparc_instruction_burst_remaining
	    == ic->_tme_sparc_instruction_burst)) {
      ic->tme_sparc_runlength.tme_runlength_cycles_start = tme_misc_cycles();
    }

    /* run instruction bursts; this redispatches instead of returning: */
    (*ic->_tme_sparc_execute)(ic);
    /* NOTREACHED */

  case TME_SPARC_MODE_STOP:
  case TME_SPARC_MODE_HALT:
  case TME_SPARC_MODE_OFF:
    /* sleep until an external event restarts the CPU: */
    tme_sparc_idle(ic);
    /* NOTREACHED */

  case TME_SPARC_MODE_TIMING_LOOP:
    tme_sparc_timing_loop_finish(ic);
    /* NOTREACHED */

  default:
    abort();
  }
  /* NOTREACHED */
}
215
216 /* the TLB filler for when we are on a generic bus: */
217 static int
_tme_sparc_generic_tlb_fill(struct tme_sparc_bus_connection * conn_sparc,struct tme_sparc_tlb * tlb,tme_uint32_t asi_mask,tme_bus_addr_t external_address,unsigned int cycles)218 _tme_sparc_generic_tlb_fill(struct tme_sparc_bus_connection *conn_sparc,
219 struct tme_sparc_tlb *tlb,
220 tme_uint32_t asi_mask,
221 tme_bus_addr_t external_address,
222 unsigned int cycles)
223 {
224 struct tme_sparc *ic;
225
226 /* recover our IC: */
227 ic = conn_sparc->tme_sparc_bus_connection.tme_bus_connection.tme_connection_element->tme_element_private;
228
229 /* call the generic bus TLB filler: */
230 (ic->_tme_sparc_bus_generic->tme_bus_tlb_fill)
231 (ic->_tme_sparc_bus_generic,
232 &tlb->tme_sparc_tlb_bus_tlb,
233 external_address,
234 cycles);
235
236 return (TME_OK);
237 }
238
239 /* this sets the run length: */
240 static void
_tme_sparc_runlength(struct tme_sparc * ic,tme_uint32_t instruction_burst_msec)241 _tme_sparc_runlength(struct tme_sparc *ic,
242 tme_uint32_t instruction_burst_msec)
243 {
244 union tme_value64 runlength_target_cycles;
245 unsigned int runlength_update_hz;
246
247 /* set the run length target cycles: */
248 runlength_target_cycles.tme_value64_uint32_lo
249 = (tme_misc_cycles_per_ms()
250 * instruction_burst_msec);
251 runlength_target_cycles.tme_value64_uint32_hi = 0;
252 tme_runlength_target_cycles(&ic->tme_sparc_runlength, runlength_target_cycles);
253
254 /* set the run length update period: */
255 runlength_update_hz = 50;
256 ic->tme_sparc_runlength_update_period
257 = (((1000
258 + (instruction_burst_msec - 1)
259 / instruction_burst_msec)
260 + (runlength_update_hz - 1))
261 / runlength_update_hz);
262 }
263
/* the sparc command function: */
/* handles the element commands "idle-type", "run-length" and
   "prom-delay-factor".  returns TME_OK on success, or EINVAL with a
   usage message appended to _output for a bad or unknown command: */
static int
_tme_sparc_command(struct tme_element *element, const char * const * args, char **_output)
{
  struct tme_sparc *ic;
  unsigned int idle_type_saved;
  tme_uint32_t instruction_burst_msec;
  int usage;
  tme_uint32_t prom_delay_factor;

  /* recover our IC: */
  ic = (struct tme_sparc *) element->tme_element_private;

  /* the "idle-type" command: */
  if (TME_ARG_IS(args[1], "idle-type")) {

    /* save the current idle type and set it to none: */
    idle_type_saved = ic->tme_sparc_idle_type;
    ic->tme_sparc_idle_type = TME_SPARC_IDLE_TYPE_NULL;

    /* if we're not setting the idle type to none: */
    if (!TME_ARG_IS(args[2], "none")) {

      /* check for a supported idle type: */
      /* (the macro sets the idle type iff this CPU supports the given
	 type and args[2] names it; otherwise the idle type stays
	 TME_SPARC_IDLE_TYPE_NULL): */
#define _TME_SPARC_IDLE_TYPE(x, s)			\
  do {							\
    if (TME_SPARC_IDLE_TYPE_IS_SUPPORTED(ic, x)		\
	&& TME_ARG_IS(args[2], s)) {			\
      ic->tme_sparc_idle_type = (x);			\
    }							\
  } while (/* CONSTCOND */ 0)
      _TME_SPARC_IDLE_TYPE(TME_SPARC_IDLE_TYPE_NETBSD32_TYPE_0, "netbsd32-type-0");
      _TME_SPARC_IDLE_TYPE(TME_SPARC_IDLE_TYPE_SUNOS32_TYPE_0, "sunos32-type-0");
      _TME_SPARC_IDLE_TYPE(TME_SPARC_IDLE_TYPE_NETBSD32_TYPE_1, "netbsd32-type-1");
      _TME_SPARC_IDLE_TYPE(TME_SPARC_IDLE_TYPE_NETBSD64_TYPE_0, "netbsd64-type-0");
      _TME_SPARC_IDLE_TYPE(TME_SPARC_IDLE_TYPE_NETBSD64_TYPE_1, "netbsd64-type-1");
      _TME_SPARC_IDLE_TYPE(TME_SPARC_IDLE_TYPE_SUNOS64_TYPE_0, "sunos64-type-0");
#undef _TME_SPARC_IDLE_TYPE

      /* if the idle type isn't supported: */
      if (ic->tme_sparc_idle_type == TME_SPARC_IDLE_TYPE_NULL) {

	/* restore the idle type and return a usage: */
	ic->tme_sparc_idle_type = idle_type_saved;

	tme_output_append_error(_output,
				"%s %s idle-type { none",
				_("usage:"),
				args[0]);

	/* add in the supported idle types: */
#define _TME_SPARC_IDLE_TYPE(x, s)			\
  do {							\
    if (TME_SPARC_IDLE_TYPE_IS_SUPPORTED(ic, x)) {	\
      tme_output_append_error(_output, " | %s",		\
			      s);			\
    }							\
  } while (/* CONSTCOND */ 0)
	_TME_SPARC_IDLE_TYPE(TME_SPARC_IDLE_TYPE_NETBSD32_TYPE_0, "netbsd32-type-0");
	_TME_SPARC_IDLE_TYPE(TME_SPARC_IDLE_TYPE_SUNOS32_TYPE_0, "sunos32-type-0");
	_TME_SPARC_IDLE_TYPE(TME_SPARC_IDLE_TYPE_NETBSD32_TYPE_1, "netbsd32-type-1");
	_TME_SPARC_IDLE_TYPE(TME_SPARC_IDLE_TYPE_NETBSD64_TYPE_0, "netbsd64-type-0");
	_TME_SPARC_IDLE_TYPE(TME_SPARC_IDLE_TYPE_NETBSD64_TYPE_1, "netbsd64-type-1");
	_TME_SPARC_IDLE_TYPE(TME_SPARC_IDLE_TYPE_SUNOS64_TYPE_0, "sunos64-type-0");
#undef _TME_SPARC_IDLE_TYPE

	tme_output_append_error(_output, " }");
	return (EINVAL);
      }
    }

    /* poison all idle type state: */
    _tme_sparc_idle_reset(ic);
  }

  /* the run-length command: */
  else if (TME_ARG_IS(args[1], "run-length")) {

    /* get the run length, in milliseconds: */
    instruction_burst_msec = tme_misc_unumber_parse(args[2], 0);

    /* if this command is bad: */
    if (instruction_burst_msec == 0
	|| args[3] != NULL) {
      tme_output_append_error(_output,
			      "%s run-length %s",
			      _("usage:"),
			      _("MILLISECONDS"));
    }

    /* otherwise, set the run length: */
    else {
      _tme_sparc_runlength(ic,
			   instruction_burst_msec);
    }
  }

  /* the prom-delay-factor command: */
  else if (TME_ARG_IS(args[1], "prom-delay-factor")) {

    /* get the PROM delay factor: */
    /* (either one of the symbolic names, or a plain number): */
    usage = FALSE;
    if (TME_ARG_IS(args[2], "best")) {
      prom_delay_factor = TME_SPARC_PROM_DELAY_FACTOR_BEST;
    }
    else if (TME_ARG_IS(args[2], "uncorrected")) {
      prom_delay_factor = TME_SPARC_PROM_DELAY_FACTOR_UNCORRECTED;
    }
    else if (TME_ARG_IS(args[2], "min")) {
      prom_delay_factor = TME_SPARC_PROM_DELAY_FACTOR_MIN;
    }
    else {
      prom_delay_factor = tme_misc_unumber_parse_any(args[2], &usage);
    }
    if (usage) {
      tme_output_append_error(_output,
			      "%s prom-delay-factor { best | uncorrected | min | %s }",
			      _("usage:"),
			      _("FACTOR"));
    }
    else {
      ic->tme_sparc_prom_delay_factor = prom_delay_factor;
    }
  }

  /* any other command: */
  else {
    if (args[1] != NULL) {
      tme_output_append_error(_output,
			      "%s '%s', ",
			      _("unknown command"),
			      args[1]);
    }
    /* "idle-type" is only advertised when this CPU supports at least
       one idle type (the all-ones mask tests every type at once): */
    tme_output_append_error(_output,
			    _("available %s commands:%s run-length"),
			    args[0],
			    (TME_SPARC_IDLE_TYPE_IS_SUPPORTED(ic, (0 - (unsigned int) 1))
			     ? " idle-type"
			     : ""));
    return (EINVAL);
  }

  return (TME_OK);
}
408
409 /* the connection scorer: */
410 static int
_tme_sparc_connection_score(struct tme_connection * conn,unsigned int * _score)411 _tme_sparc_connection_score(struct tme_connection *conn, unsigned int *_score)
412 {
413 struct tme_sparc_bus_connection *conn_sparc;
414 struct tme_upa_bus_connection *conn_upa;
415 struct tme_bus_connection *conn_bus;
416 unsigned int score;
417
418 /* assume that this connection is useless: */
419 score = 0;
420
421 /* dispatch on the connection type: */
422 switch (conn->tme_connection_type) {
423
424 /* this must be a bus, and not another sparc chip: */
425 case TME_CONNECTION_BUS_SPARC:
426 conn_sparc = (struct tme_sparc_bus_connection *) conn->tme_connection_other;
427 conn_bus = &conn_sparc->tme_sparc_bus_connection;
428 if (conn_bus->tme_bus_tlb_set_add != NULL
429 && conn_sparc->tme_sparc_bus_tlb_fill != NULL
430 && conn_sparc->tme_sparc_bus_fpu_strict == NULL) {
431 score = 10;
432 }
433 break;
434
435 /* this must be a controller, and not another agent: */
436 case TME_CONNECTION_BUS_UPA:
437 conn_upa = (struct tme_upa_bus_connection *) conn->tme_connection_other;
438 conn_bus = &conn_upa->tme_upa_bus_connection;
439 if (conn_upa->tme_upa_bus_interrupt != NULL
440 && conn_bus->tme_bus_tlb_set_add != NULL
441 && conn_bus->tme_bus_tlb_fill != NULL) {
442 score = 10;
443 }
444 break;
445
446 /* this must be a bus, and not another chip: */
447 case TME_CONNECTION_BUS_GENERIC:
448 conn_bus = (struct tme_bus_connection *) conn->tme_connection_other;
449 if (conn_bus->tme_bus_tlb_set_add != NULL
450 && conn_bus->tme_bus_tlb_fill != NULL) {
451 score = 1;
452 }
453 break;
454
455 default: abort();
456 }
457
458 *_score = score;
459 return (TME_OK);
460 }
461
/* this makes a new connection: */
/* wires a fully-made bus connection into the IC: records the
   connection (synthesizing a sparc-bus adaptation layer for UPA and
   generic buses), builds the TLB token set, and registers the TLB set
   with the bus.  returns TME_OK: */
static int
_tme_sparc_connection_make(struct tme_connection *conn, unsigned int state)
{
  struct tme_sparc *ic;
  struct tme_upa_bus_connection *conn_upa;
  struct tme_sparc_bus_connection *conn_sparc;
  struct tme_bus_connection *conn_bus;
  struct tme_connection *conn_other;
  struct tme_bus_tlb_set_info tlb_set_info;
  struct tme_sparc_tlb *tlb;
  struct tme_token *token;
  int rc;

  /* since the CPU is halted, it won't be making any connection calls,
     so we only have to do work when the connection is fully made: */
  if (state == TME_CONNECTION_FULL) {

    /* recover our IC: */
    ic = conn->tme_connection_element->tme_element_private;

    /* dispatch on the connection type: */
    conn_other = conn->tme_connection_other;
    switch (conn->tme_connection_type) {

    /* a native sparc bus connection is used directly: */
    case TME_CONNECTION_BUS_SPARC:
      conn_sparc = (struct tme_sparc_bus_connection *) conn_other;
      ic->_tme_sparc_bus_connection = conn_sparc;
      conn_bus = &conn_sparc->tme_sparc_bus_connection;
      break;

    case TME_CONNECTION_BUS_UPA:
      conn_upa = (struct tme_upa_bus_connection *) conn_other;
      ic->_tme_upa_bus_connection = conn_upa;
      /* the generic bus connection must be the first member of the
	 UPA connection, so the generic-bus code below can reuse it: */
      assert (&conn_upa->tme_upa_bus_connection == (struct tme_bus_connection *) conn_other);
      /* FALLTHROUGH */

    /* we need an adaptation layer: */
    /* (for UPA and generic buses, synthesize a sparc bus connection
       whose TLB filler forwards to the generic bus): */
    case TME_CONNECTION_BUS_GENERIC:
      conn_bus = (struct tme_bus_connection *) conn_other;
      conn_sparc = tme_new0(struct tme_sparc_bus_connection, 1);
      conn_sparc->tme_sparc_bus_connection.tme_bus_connection.tme_connection_element = conn->tme_connection_element;
      conn_sparc->tme_sparc_bus_tlb_fill = _tme_sparc_generic_tlb_fill;
      ic->_tme_sparc_bus_connection = conn_sparc;
      ic->_tme_sparc_bus_generic = conn_bus;
      break;

    default: abort();
    }

    /* make the TLB set information: */
    memset(&tlb_set_info, 0, sizeof(tlb_set_info));
    tlb_set_info.tme_bus_tlb_set_info_token0 = &ic->tme_sparc_tlb_tokens[0];
    tlb_set_info.tme_bus_tlb_set_info_token_stride = sizeof(struct tme_token);
    tlb_set_info.tme_bus_tlb_set_info_token_count = TME_ARRAY_ELS(ic->tme_sparc_tlbs);
    tlb_set_info.tme_bus_tlb_set_info_bus_context = &ic->tme_sparc_memory_context_default;

#if TME_HAVE_RECODE

    /* when recode is available, the tokens live inside the recode TLB
       entries instead of the plain token array; point token0 and the
       stride at the appropriate recode TLB array: */

    /* if this is a v9 CPU, and we have 64-bit recode support: */
    if (TME_SPARC_VERSION(ic) >= 9) {
#if TME_RECODE_SIZE_GUEST_MAX > TME_RECODE_SIZE_32

      /* we will use the tokens in the 64-bit recode TLBs: */
      tlb_set_info.tme_bus_tlb_set_info_token0 = &ic->tme_sparc_recode_tlb64s[0].tme_recode_tlb_c16_a64_token;
      tlb_set_info.tme_bus_tlb_set_info_token_stride = sizeof(ic->tme_sparc_recode_tlb64s[0]);
#endif /* TME_RECODE_SIZE_GUEST_MAX > TME_RECODE_SIZE_32 */
    }

    /* otherwise, this is a v7 or v8 CPU: */
    else {

      /* we will use the tokens in the 32-bit recode TLBs: */
      tlb_set_info.tme_bus_tlb_set_info_token0 = &ic->tme_sparc_recode_tlb32s[0].tme_recode_tlb_c16_a32_token;
      tlb_set_info.tme_bus_tlb_set_info_token_stride = sizeof(ic->tme_sparc_recode_tlb32s[0]);
    }

#endif /* TME_HAVE_RECODE */

    /* initialize the TLBs in the set: */
    tlb = &ic->tme_sparc_tlbs[0];
    token = tlb_set_info.tme_bus_tlb_set_info_token0;
    do {

      /* initialize this token: */
      tme_token_init(token);

      /* connect this token with this TLB: */
      tlb->tme_sparc_tlb_bus_tlb.tme_bus_tlb_token = token;

      /* advance: */
      /* (tokens are token_stride bytes apart, so step by raw byte
	 arithmetic): */
      token = (struct tme_token *) (tlb_set_info.tme_bus_tlb_set_info_token_stride + (tme_uint8_t *) token);
    } while (++tlb <= &ic->tme_sparc_tlbs[TME_ARRAY_ELS(ic->tme_sparc_tlbs) - 1]);

    /* add the TLB set: */
    rc = ((*conn_bus->tme_bus_tlb_set_add)
	  (conn_bus,
	   &tlb_set_info));
    assert (rc == TME_OK);

    /* if this is a v7 cpu: */
    if (TME_SPARC_VERSION(ic) == 7) {

      /* get the maximum bus context from the bus: */
      ic->tme_sparc_memory_context_max = tlb_set_info.tme_bus_tlb_set_info_bus_context_max;
    }

#ifdef TME_HAVE_RECODE

    /* the maximum bus context must fit in 16 bits: */
    assert (ic->tme_sparc_memory_context_max <= 0xffff);

#endif /* TME_HAVE_RECODE */
  }

  /* NB: the machine needs to issue a reset to bring the CPU out of halt. */
  return (TME_OK);
}
580
/* this breaks a connection: */
static int
_tme_sparc_connection_break(struct tme_connection *conn, unsigned int state)
{
  /* breaking a CPU bus connection is not supported: */
  abort();
  return (0);
}
588
/* this makes new connection sides: */
/* offers the possible connection sides for this element: for a v9
   CPU, a single UPA bus connection; for a v7/v8 CPU, both a sparc bus
   connection (preferred, scores 10) and a generic bus connection
   (fallback, scores 1).  new sides are pushed onto the *_conns list.
   returns TME_OK: */
static int
_tme_sparc_connections_new(struct tme_element *element, const char * const *args, struct tme_connection **_conns, char **_output)
{
  struct tme_sparc *ic;
  struct tme_upa_bus_connection *conn_upa;
  struct tme_sparc_bus_connection *conn_sparc;
  struct tme_bus_connection *conn_bus;
  struct tme_connection *conn;

  /* recover our data structure: */
  ic = element->tme_element_private;

  /* if we already have a bus connection, we can take no more connections: */
  if (ic->_tme_sparc_bus_connection != NULL) {
    return (TME_OK);
  }

  /* if this is a v9 CPU: */
  if (TME_SPARC_VERSION(ic) >= 9) {

    /* create our side of a UPA bus connection: */
    conn_upa = tme_new0(struct tme_upa_bus_connection, 1);
    conn_upa->tme_upa_bus_connection.tme_bus_connection.tme_connection_type = TME_CONNECTION_BUS_UPA;
#ifdef TME_HAVE_INT64_T
    conn_upa->tme_upa_bus_interrupt = ic->_tme_sparc_upa_interrupt;
#endif /* TME_HAVE_INT64_T */
    conn_bus = &conn_upa->tme_upa_bus_connection;
    conn_bus->tme_bus_tlb_fill = ic->_tme_sparc_tlb_fill;
  }

  /* otherwise, this is a v7 or v8 CPU: */
  else {

    /* create our side of a generic bus connection: */
    /* (this is the lower-scoring fallback; it is finished and linked
       here, before the preferred sparc bus connection below): */
    conn_bus = tme_new0(struct tme_bus_connection, 1);
    conn_bus->tme_bus_connection.tme_connection_type = TME_CONNECTION_BUS_GENERIC;
    conn_bus->tme_bus_signal = _tme_sparc_bus_signal;
    conn_bus->tme_bus_tlb_set_add = NULL;
    conn_bus->tme_bus_tlb_fill = NULL;
    conn = &conn_bus->tme_bus_connection;
    conn->tme_connection_next = *_conns;
    conn->tme_connection_score = _tme_sparc_connection_score;
    conn->tme_connection_make = _tme_sparc_connection_make;
    conn->tme_connection_break = _tme_sparc_connection_break;

    /* add this connection to the set of possibilities: */
    *_conns = conn;

    /* create our side of a sparc bus connection: */
    conn_sparc = tme_new0(struct tme_sparc_bus_connection, 1);
    conn_sparc->tme_sparc_bus_connection.tme_bus_connection.tme_connection_type = TME_CONNECTION_BUS_SPARC;
    conn_sparc->tme_sparc_bus_interrupt = _tme_sparc_bus_interrupt;
    conn_sparc->tme_sparc_bus_tlb_fill = NULL;
    conn_sparc->tme_sparc_bus_fpu_strict = tme_sparc_fpu_strict;
    conn_bus = &conn_sparc->tme_sparc_bus_connection;
    conn_bus->tme_bus_tlb_fill = NULL;
  }

  /* finish the preferred bus connection: */
  /* (conn_bus is the UPA connection for v9, or the sparc bus
     connection for v7/v8): */
  conn_bus->tme_bus_signal = _tme_sparc_bus_signal;
  conn_bus->tme_bus_tlb_set_add = NULL;
  conn = &conn_bus->tme_bus_connection;
  conn->tme_connection_next = *_conns;
  conn->tme_connection_score = _tme_sparc_connection_score;
  conn->tme_connection_make = _tme_sparc_connection_make;
  conn->tme_connection_break = _tme_sparc_connection_break;

  /* add this connection to the set of possibilities: */
  *_conns = conn;

  /* done: */
  return (TME_OK);
}
663
664 /* the common sparc synchronization initialization: */
665 void
tme_sparc_sync_init(struct tme_sparc * ic)666 tme_sparc_sync_init(struct tme_sparc *ic)
667 {
668
669 /* initialize the external mutex: */
670 tme_mutex_init(&ic->tme_sparc_external_mutex);
671
672 /* initialize the external condition: */
673 tme_cond_init(&ic->tme_sparc_external_cond);
674 }
675
/* the common sparc new function: */
/* shared constructor for all sparc CPU variants.  parses the element
   arguments (an optional "tick-frequency" plus FPU options), fills in
   the element callbacks and the default runlength/external/ASI state,
   and starts the sparc thread.  returns TME_OK, or EINVAL (after
   freeing ic) with a usage message appended to _output: */
int
tme_sparc_new(struct tme_sparc *ic, const char * const *args, const void *extra, char **_output)
{
  struct tme_element *element;
  int arg_i;
  int usage;
  tme_uint32_t cycles_per_ms;
  tme_uint32_t cycles_scaled_per_ms;
  const char *cycles_scaled_per_ms_arg;
  unsigned int cwp;
  unsigned int cwp_offset;
  tme_uint32_t asi;

  /* assume that we have no FPU: */
  ic->tme_sparc_fpu_fsr = TME_SPARC_FSR_VER_missing;

  /* if we don't have a tlb page size: */
  if (ic->tme_sparc_tlb_page_size_log2 == 0) {

    /* assume that we are in a machine with a 4K page size: */
    /* XXX FIXME - we never attempt to discover the machine's actual
       page size.  however, using the wrong page size doesn't affect
       correctness, only performance.  using a smaller page size means
       that accesses to different parts of the same true page can be
       spread over multiple DTLB entries.  using a larger page size
       means that accesses to adjacent true pages can collide in one
       DTLB entry.  we assume that using a smaller page size hurts
       performance less than using a larger page size: */
    ic->tme_sparc_tlb_page_size_log2 = 12; /* log2(4096) */
  }

  /* check our arguments: */
  arg_i = 1;
  usage = FALSE;
  cycles_per_ms = tme_misc_cycles_per_ms();
  cycles_scaled_per_ms = cycles_per_ms;
  cycles_scaled_per_ms_arg = NULL;
  ic->tme_sparc_prom_delay_factor = TME_SPARC_PROM_DELAY_FACTOR_BEST;
  for (;;) {

    /* if this is a cycles scaling argument: */
    /* ("tick-frequency HZ"; converted to cycles per millisecond): */
    if (TME_ARG_IS(args[arg_i + 0], "tick-frequency")) {
      cycles_scaled_per_ms_arg = args[arg_i + 0];
      cycles_scaled_per_ms = tme_misc_unumber_parse_any(args[arg_i + 1], &usage) / 1000;
      if (usage) {
	break;
      }
      arg_i += 2;
    }

    /* if we've run out of arguments: */
    else if (args[arg_i + 0] == NULL) {
      break;
    }

    /* this is either a bad argument or an FPU argument: */
    else {

      /* if this is not an FPU argument: */
      if (!tme_sparc_fpu_new(ic, args, &arg_i, &usage, _output)) {
	tme_output_append_error(_output,
				"%s %s, ",
				args[arg_i],
				_("unexpected"));
	usage = TRUE;
      }

      if (usage) {
	break;
      }
    }
  }

  /* set the cycles scaling: */
  /* (a tick-frequency below 1000Hz scales to zero and is rejected): */
  if (cycles_scaled_per_ms == 0) {
    if (!usage) {
      tme_output_append_error(_output,
			      "tick-frequency %s %s, ",
			      cycles_scaled_per_ms_arg,
			      _("too small"));
      usage = TRUE;
    }
  }
  else {
    tme_misc_cycles_scaling(&ic->tme_sparc_cycles_scaling,
			    cycles_scaled_per_ms,
			    cycles_per_ms);
    tme_misc_cycles_scaling(&ic->tme_sparc_cycles_unscaling,
			    cycles_per_ms,
			    cycles_scaled_per_ms);
    ic->tme_sparc_cycles_scaled_per_usec = (cycles_scaled_per_ms + 999) / 1000;
  }

  if (usage) {
    tme_output_append_error(_output,
			    "%s %s [ tick-frequency %s ]",
			    _("usage:"),
			    args[0],
			    _("TICK-FREQUENCY"));
    tme_sparc_fpu_usage(ic, _output);
    /* NB: on a usage error this function owns and frees ic: */
    tme_free(ic);
    return (EINVAL);
  }

  /* we have no bus connection yet: */
  ic->_tme_sparc_bus_connection = NULL;

  /* fill the element: */
  element = ic->tme_sparc_element;
  element->tme_element_private = ic;
  element->tme_element_connections_new = _tme_sparc_connections_new;
  element->tme_element_command = _tme_sparc_command;

  /* initialize the instruction burst runlength state: */
  /* (default: 800-instruction bursts, 64-entry history, 2ms run
     length): */
  ic->_tme_sparc_instruction_burst = 800;
  ic->tme_sparc_runlength.tme_runlength_history_count = 64;
  tme_runlength_init(&ic->tme_sparc_runlength);
  _tme_sparc_runlength(ic, 2);
  ic->tme_sparc_runlength_update_next = ic->tme_sparc_runlength_update_period;

  /* set the idle instruction burst size: */
  ic->_tme_sparc_instruction_burst_idle = 10;

  /* start the first instruction burst: */
  ic->_tme_sparc_instruction_burst_remaining
    = ic->_tme_sparc_instruction_burst;
  ic->_tme_sparc_instruction_burst_other = TRUE;

  /* force the processor to be off: */
  ic->_tme_sparc_mode = TME_SPARC_MODE_OFF;

  /* initialize the external state: */
  /* (RESET starts asserted; the machine must negate it to start the
     CPU): */
  tme_memory_atomic_init_flag(&ic->tme_sparc_external_flag, FALSE);
  tme_memory_atomic_init_flag(&ic->tme_sparc_external_reset_asserted, TRUE);
  tme_memory_atomic_init_flag(&ic->tme_sparc_external_reset_negated, FALSE);
  tme_memory_atomic_init_flag(&ic->tme_sparc_external_halt_asserted, FALSE);
  tme_memory_atomic_init_flag(&ic->tme_sparc_external_halt_negated, FALSE);
  tme_memory_atomic_init_flag(&ic->tme_sparc_external_bg_asserted, FALSE);
  ic->tme_sparc_external_ipl = TME_SPARC_IPL_NONE;
  tme_rwlock_init(&ic->tme_sparc_external_ipl_rwlock);

  /* update the CWP offset: */
  if (TME_SPARC_VERSION(ic) >= 9) {
    cwp = ic->tme_sparc64_ireg_cwp;
    TME_SPARC64_CWP_UPDATE(ic, cwp, cwp_offset);
  }
  else {
    cwp = TME_FIELD_MASK_EXTRACTU(ic->tme_sparc32_ireg_psr, TME_SPARC32_PSR_CWP);
    TME_SPARC32_CWP_UPDATE(ic, cwp, cwp_offset);
  }

  /* if the specific CPU doesn't provide any ASI handlers: */
  if (ic->_tme_sparc_ls_asi_handlers == NULL) {

    if (TME_SPARC_VERSION(ic) >= 9) {

      /* this shouldn't happen: */
      abort();
    }

    else {

      /* by default, all sparc32 ASIs are special, except for the
	 required ASIs: */
      for (asi = 0; asi < TME_ARRAY_ELS(ic->tme_sparc_asis); asi++) {
	ic->tme_sparc_asis[asi].tme_sparc_asi_mask_flags = TME_SPARC32_ASI_MASK_FLAG_SPECIAL;
      }
      ic->tme_sparc_asis[TME_SPARC32_ASI_UI].tme_sparc_asi_mask_flags = !TME_SPARC32_ASI_MASK_FLAG_SPECIAL;
      ic->tme_sparc_asis[TME_SPARC32_ASI_SI].tme_sparc_asi_mask_flags = !TME_SPARC32_ASI_MASK_FLAG_SPECIAL;
      ic->tme_sparc_asis[TME_SPARC32_ASI_UD].tme_sparc_asi_mask_flags = !TME_SPARC32_ASI_MASK_FLAG_SPECIAL;
      ic->tme_sparc_asis[TME_SPARC32_ASI_SD].tme_sparc_asi_mask_flags = !TME_SPARC32_ASI_MASK_FLAG_SPECIAL;
    }
  }

  /* poison all idle type state: */
  _tme_sparc_idle_reset(ic);

  /* initialize recoding: */
  tme_sparc_recode_init(ic);

  /* start the sparc thread: */
  tme_thread_create((tme_thread_t) tme_sparc_thread, ic);

  return (TME_OK);
}
862
863 /* this redispatches: */
864 void
tme_sparc_redispatch(struct tme_sparc * ic)865 tme_sparc_redispatch(struct tme_sparc *ic)
866 {
867 struct tme_token *token;
868
869 /* end any recode verifying: */
870 tme_sparc_recode_verify_end(ic, TME_SPARC_TRAP_none);
871
872 /* if we have a busy instruction TLB entry: */
873 token = ic->_tme_sparc_itlb_current_token;
874 if (__tme_predict_true(token != NULL)) {
875
876 /* unbusy and forget the instruction TLB entry: */
877 tme_token_unbusy(token);
878 ic->_tme_sparc_itlb_current_token = NULL;
879 }
880
881 /* do the redispatch: */
882 #ifdef _TME_SPARC_STATS
883 ic->tme_sparc_stats.tme_sparc_stats_redispatches++;
884 #endif /* _TME_SPARC_STATS */
885 longjmp(ic->_tme_sparc_dispatcher, 1);
886 }
887
/* our global verify hook function: */
/* (the #undef exposes the real function behind any macro of the same
   name; the body is deliberately empty): */
#undef tme_sparc_verify_hook
void
tme_sparc_verify_hook(void)
{
}
894
/* the common sparc reset function.  this puts the processor into its
   post-reset state and then redispatches into execution mode; it does
   not return (tme_sparc_redispatch() ends in a longjmp()): */
void
tme_sparc_do_reset(struct tme_sparc *ic)
{

  /* if this is a v7 or v8 CPU: */
  if (ic->tme_sparc_version < 9) {

    /* set the initial PCs: the next instruction executed will be at
       address zero, followed by address four: */
    ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_PC_NEXT) = 0;
    ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_PC_NEXT_NEXT) = sizeof(tme_uint32_t);

    /* force supervisor mode, traps disabled (PSR.S <- 1, PSR.ET <- 0;
       all other PSR fields are preserved): */
    ic->tme_sparc32_ireg_psr
      = ((ic->tme_sparc32_ireg_psr
          & ~TME_SPARC32_PSR_ET)
         | TME_SPARC32_PSR_S);
  }

  /* otherwise, this is a v9 CPU: */
  else {

    /* XXX WRITEME - v9 reset is unimplemented; a v9 reset comes in
       through tme_sparc64_trap() instead: */
    abort();
  }

  /* reset the FPU: */
  tme_sparc_fpu_reset(ic);

  /* poison all idle type state, to force the idle type to retrain: */
  _tme_sparc_idle_reset(ic);

  /* start execution: */
  ic->_tme_sparc_mode = TME_SPARC_MODE_EXECUTION;
  tme_sparc_redispatch(ic);
}
931
932 /* the common sparc idle function: */
933 void
tme_sparc_do_idle(struct tme_sparc * ic)934 tme_sparc_do_idle(struct tme_sparc *ic)
935 {
936
937 /* NB: since the interrupt that causes us to leave stop mode will
938 call tme_sparc32_trap_preinstruction(), this function can only be
939 called on a preinstruction boundary (i.e., while PC still points
940 to the (completed!) instruction that triggered the idle
941 condition): */
942
943 /* this will not be a full instruction burst: */
944 ic->_tme_sparc_instruction_burst_other = TRUE;
945
946 /* redispatch into stop mode: */
947 ic->_tme_sparc_mode = TME_SPARC_MODE_STOP;
948 tme_sparc_redispatch(ic);
949 }
950
/* this checks for external signals: reset negation, reset assertion,
   and interrupts, in that (priority) order.  if any of them triggers,
   this function does not return - it ends in trap processing or a
   redispatch, unlocking the external mutex first when the caller
   passed TME_SPARC_EXTERNAL_CHECK_MUTEX_LOCKED in flags.  if nothing
   needs service it returns normally.  NB: this is the sparc32
   version; it reads the sparc32 PSR: */
void
tme_sparc32_external_check(struct tme_sparc *ic,
                           int flags)
{
  unsigned int ipl;

  /* if RESET has been negated since the last check: */
  if (__tme_predict_false(tme_memory_atomic_read_flag(&ic->tme_sparc_external_reset_negated))) {

    /* clear the RESET asserted flag, then clear the RESET negated
       flag (the write barrier keeps the two stores ordered for other
       threads): */
    tme_memory_atomic_write_flag(&ic->tme_sparc_external_reset_asserted, FALSE);
    tme_memory_barrier(ic, sizeof(*ic), TME_MEMORY_BARRIER_WRITE_BEFORE_WRITE);
    tme_memory_atomic_write_flag(&ic->tme_sparc_external_reset_negated, FALSE);

    /* start reset trap processing (which does not return, so drop the
       external mutex first if we hold it): */
    if (flags & TME_SPARC_EXTERNAL_CHECK_MUTEX_LOCKED) {
      tme_mutex_unlock(&ic->tme_sparc_external_mutex);
    }
    tme_sparc32_trap_preinstruction(ic, TME_SPARC32_TRAP_reset);
  }

  /* if RESET is asserted: */
  if (__tme_predict_false(tme_memory_atomic_read_flag(&ic->tme_sparc_external_reset_asserted))) {

    /* halt (the redispatch does not return, so drop the external
       mutex first if we hold it): */
    if (flags & TME_SPARC_EXTERNAL_CHECK_MUTEX_LOCKED) {
      tme_mutex_unlock(&ic->tme_sparc_external_mutex);
    }
    ic->_tme_sparc_mode = TME_SPARC_MODE_HALT;
    tme_sparc_redispatch(ic);
  }

  /* if an interrupt needs service: */
  ipl = tme_memory_atomic_read8(&ic->tme_sparc_external_ipl,
                                &ic->tme_sparc_external_ipl_rwlock,
                                sizeof(tme_uint8_t));
  assert (ipl <= TME_SPARC_IPL_MAX);
  if (ipl >= TME_SPARC_IPL_MIN) {

    /* if we can't service this interrupt now, we need to set the
       external flag again so we keep coming back to try again.

       even if we do service this interrupt now, we still need to set
       the external flag again - because we may not service all of the
       devices interrupting at this level, and the bus won't bother to
       make another interrupt level callout if the level isn't
       actually changing.  we need to set the external flag again so
       we keep coming back to try again: */
    tme_memory_atomic_write_flag(&ic->tme_sparc_external_flag, TRUE);

    /* if we are not halted and an interrupt can be serviced (traps
       enabled, and the level is NMI or above the PSR.PIL threshold),
       start interrupt trap processing: */
    if (ic->_tme_sparc_mode != TME_SPARC_MODE_HALT
        && (ic->tme_sparc32_ireg_psr & TME_SPARC32_PSR_ET)
        && (ipl == TME_SPARC_IPL_NMI
            || ipl > TME_FIELD_MASK_EXTRACTU(ic->tme_sparc32_ireg_psr, TME_SPARC32_PSR_PIL))) {

      /* trap processing does not return, so drop the external mutex
         first if we hold it: */
      if (flags & TME_SPARC_EXTERNAL_CHECK_MUTEX_LOCKED) {
        tme_mutex_unlock(&ic->tme_sparc_external_mutex);
      }

      /* dispatch the trap: */
      tme_sparc32_trap_preinstruction(ic, TME_SPARC32_TRAP_interrupt_level(ipl));
    }
  }

  /* there are no traps to process: */
}
1021
1022 /* this triggers sparc32 trap processing on a preinstruction boundary: */
1023 void
tme_sparc32_trap_preinstruction(struct tme_sparc * ic,tme_uint32_t trap)1024 tme_sparc32_trap_preinstruction(struct tme_sparc *ic, tme_uint32_t trap)
1025 {
1026
1027 /* end any recode verifying: */
1028 tme_sparc_recode_verify_end(ic, TME_SPARC_TRAP_none);
1029
1030 /* shift the next instruction's PC and next-next PC up: */
1031 ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_PC) = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_PC_NEXT);
1032 ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_PC_NEXT) = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_PC_NEXT_NEXT);
1033
1034 /* do the rest of the sparc32 trap processing: */
1035 tme_sparc32_trap(ic, trap);
1036 }
1037
1038 /* this triggers sparc32 trap processing by an instruction: */
1039 void
tme_sparc32_trap(struct tme_sparc * ic,tme_uint32_t trap)1040 tme_sparc32_trap(struct tme_sparc *ic, tme_uint32_t trap)
1041 {
1042 unsigned int cwp;
1043 unsigned int cwp_offset;
1044 unsigned int reg_17;
1045
1046 /* end any recode verifying: */
1047 tme_sparc_recode_verify_end(ic, trap);
1048
1049 /* stop idling: */
1050 TME_SPARC_IDLE_STOP(ic);
1051
1052 /* reset traps are handled specially: */
1053 if (__tme_predict_false(trap == TME_SPARC32_TRAP_reset)) {
1054 tme_sparc_do_reset(ic);
1055 /* NOTREACHED */
1056 }
1057
1058 /* "The processor enters error_mode state when a trap occurs while
1059 ET = 0. An implementation should preserve as much processor state
1060 as possible when this happens. Standard trap actions (such as
1061 decrementing CWP and saving state information in locals) should
1062 not occur when entering error_mode. In particular, the tt field
1063 of the TBR is only written during a transition into error_mode
1064 state in the singular case of a RETT instruction that traps while
1065 ET = 0. In this case, tt is written to indicate the type of
1066 exception that was induced by the RETT instruction.
1067
1068 What occurs after error_mode is entered is
1069 implementation-dependent; typically the processor triggers an
1070 external reset, causing a reset trap (see below). */
1071 if (__tme_predict_false((ic->tme_sparc32_ireg_psr & TME_SPARC32_PSR_ET) == 0)) {
1072
1073 /* if we were executing a RETT instruction: */
1074 assert (ic->_tme_sparc_mode == TME_SPARC_MODE_EXECUTION);
1075 if ((ic->_tme_sparc_insn
1076 & ((3 << 30) | (0x3f << 19)))
1077 == ((tme_uint32_t) (2 << 30) | (0x39 << 19))) {
1078
1079 /* update the TBR register: */
1080 TME_FIELD_MASK_DEPOSITU(ic->tme_sparc32_ireg_tbr, 0xff, trap);
1081 }
1082
1083 /* reset the processor: */
1084 tme_log(TME_SPARC_LOG_HANDLE(ic), 0, EPERM,
1085 (TME_SPARC_LOG_HANDLE(ic),
1086 _("took a trap while traps disabled, processor reset")));
1087 tme_sparc32_trap(ic, TME_SPARC32_TRAP_reset);
1088 }
1089
1090 /* "Traps are disabled: ET <- 0.
1091 The existing user/supervisor mode is preserved: PS <- S.
1092 The user/supervisor mode is changed to supervisor: S <- 1." */
1093 ic->tme_sparc32_ireg_psr
1094 = ((ic->tme_sparc32_ireg_psr
1095 & ~(TME_SPARC32_PSR_ET
1096 | TME_SPARC32_PSR_PS))
1097 | ((ic->tme_sparc32_ireg_psr
1098 & TME_SPARC32_PSR_S)
1099 / (TME_SPARC32_PSR_S
1100 / TME_SPARC32_PSR_PS))
1101 | TME_SPARC32_PSR_S);
1102
1103 /* "The register window is advanced to a new window:
1104 CWP <- ((CWP - 1) modulo NWINDOWS)
1105 [note: without test for window overflow]." */
1106 cwp = TME_FIELD_MASK_EXTRACTU(ic->tme_sparc32_ireg_psr, TME_SPARC32_PSR_CWP);
1107 cwp -= 1;
1108 cwp %= ic->tme_sparc_nwindows;
1109 TME_FIELD_MASK_DEPOSITU(ic->tme_sparc32_ireg_psr, TME_SPARC32_PSR_CWP, cwp);
1110 TME_SPARC32_CWP_UPDATE(ic, cwp, cwp_offset);
1111 reg_17 = 17;
1112 TME_SPARC_REG_INDEX(ic, reg_17);
1113
1114 /* "The trapped program counters are saved in local registers 1 and
1115 2 of the new window: r[17] <- PC, r[18] <- nPC." */
1116 ic->tme_sparc_ireg_uint32(reg_17 + 0) = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_PC);
1117 ic->tme_sparc_ireg_uint32(reg_17 + 1) = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_PC_NEXT);
1118
1119 /* "The tt field is written to the particular value that identifies
1120 the exception or interrupt request, except as defined for `Reset
1121 Trap' and `Error Mode' above." */
1122 TME_FIELD_MASK_DEPOSITU(ic->tme_sparc32_ireg_tbr, 0x00000ff0, trap);
1123
1124 /* "If the trap is not a reset trap, control is transferred into the
1125 trap table: PC <- TBR, nPC <- TBR + 4." */
1126 ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_PC_NEXT) = ic->tme_sparc32_ireg_tbr;
1127 ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_PC_NEXT_NEXT) = ic->tme_sparc32_ireg_tbr + sizeof(tme_uint32_t);
1128
1129 /* log the trap: */
1130 tme_sparc_log(ic, 250, TME_OK,
1131 (TME_SPARC_LOG_HANDLE(ic),
1132 _("trap tt 0x%03" TME_PRIx32 " handler-%%pc 0x%08" TME_PRIx32),
1133 TME_SPARC_TRAP_TT(trap),
1134 ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_PC_NEXT)));
1135
1136 /* redispatch: */
1137 ic->_tme_sparc_mode = TME_SPARC_MODE_EXECUTION;
1138 tme_sparc_redispatch(ic);
1139 }
1140
1141 /* the default sparc32 load/store bus cycle functions: */
1142 void
tme_sparc32_ls_bus_cycle(const struct tme_sparc * ic,struct tme_sparc_ls * ls)1143 tme_sparc32_ls_bus_cycle(const struct tme_sparc *ic,
1144 struct tme_sparc_ls *ls)
1145 {
1146 ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_port = TME_BUS_CYCLE_PORT(0, TME_BUS32_LOG2);
1147 ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_lane_routing
1148 = &(tme_sparc32_router
1149 [TME_SPARC_BUS_ROUTER_INDEX(TME_BUS32_LOG2,
1150 ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size,
1151 (tme_uint32_t) ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address)]);
1152 }
1153
1154 /* the default sparc32 load/store direct address map function: */
1155 void
tme_sparc32_ls_address_map(struct tme_sparc * ic,struct tme_sparc_ls * ls)1156 tme_sparc32_ls_address_map(struct tme_sparc *ic,
1157 struct tme_sparc_ls *ls)
1158 {
1159 ls->tme_sparc_ls_tlb_map.tme_bus_tlb_addr_first = 0;
1160 ls->tme_sparc_ls_tlb_map.tme_bus_tlb_addr_last = 0 - (tme_bus_addr_t) 1;
1161 ls->tme_sparc_ls_tlb_map.tme_bus_tlb_cycles_ok = TME_BUS_CYCLE_READ | TME_BUS_CYCLE_WRITE;
1162 ls->tme_sparc_ls_tlb_map.tme_bus_tlb_addr_offset = 0;
1163 }
1164
1165 /* the default sparc32 load/store trap function: */
1166 void
tme_sparc32_ls_trap(struct tme_sparc * ic,struct tme_sparc_ls * ls)1167 tme_sparc32_ls_trap(struct tme_sparc *ic,
1168 struct tme_sparc_ls *ls)
1169 {
1170 tme_uint32_t lsinfo;
1171 tme_uint32_t ls_faults;
1172 tme_uint32_t trap;
1173 tme_uint32_t fault_trap;
1174
1175 /* get the information about this load/store: */
1176 lsinfo = ls->tme_sparc_ls_lsinfo;
1177
1178 /* get the list of faults from this load/store: */
1179 ls_faults = ls->tme_sparc_ls_faults;
1180
1181 /* we only support the sparc32 load/store faults: */
1182 assert ((ls_faults
1183 & ~(TME_SPARC_LS_FAULT_ADDRESS_NOT_ALIGNED
1184 | TME_SPARC_LS_FAULT_LDD_STD_RD_ODD
1185 | TME_SPARC_LS_FAULT_BUS_FAULT
1186 | TME_SPARC_LS_FAULT_BUS_ERROR)) == 0);
1187
1188 /* start with no fault: */
1189 trap = TME_SPARC_TRAP_none;
1190
1191 /* convert the faults into the highest-priority trap: */
1192 if (ls_faults & TME_SPARC_LS_FAULT_ADDRESS_NOT_ALIGNED) {
1193 trap = TME_MIN(trap, TME_SPARC32_TRAP_mem_address_not_aligned);
1194 }
1195 if (ls_faults & TME_SPARC_LS_FAULT_LDD_STD_RD_ODD) {
1196 trap = TME_MIN(trap, TME_SPARC32_TRAP_illegal_instruction);
1197 }
1198 if (ls_faults
1199 & (TME_SPARC_LS_FAULT_BUS_FAULT
1200 | TME_SPARC_LS_FAULT_BUS_ERROR)) {
1201 fault_trap
1202 = ((lsinfo & TME_SPARC_LSINFO_OP_FETCH)
1203 ? TME_SPARC32_TRAP_instruction_access_exception
1204 : TME_SPARC32_TRAP_data_access_exception);
1205 trap = TME_MIN(trap, fault_trap);
1206 }
1207
1208 /* there must be some fault: */
1209 assert (trap != TME_SPARC_TRAP_none);
1210
1211 /* trap: */
1212 tme_sparc32_trap(ic, trap);
1213 }
1214
/* the default sparc nnPC trap function.  this builds a minimal
   load/store structure describing an instruction fetch of the
   next-next PC with the given faults already recorded, and hands it
   to the load/store trap function - which does not return: */
void
tme_sparc_nnpc_trap(struct tme_sparc *ic,
                    tme_uint32_t ls_faults)
{
  struct tme_sparc_ls ls;
  struct tme_sparc_tlb tlb_dummy;

  /* make a limited load/store structure.  NB: only the fields that
     the load/store trap function reads are filled in: */
  ls.tme_sparc_ls_faults = ls_faults;
  ls.tme_sparc_ls_lsinfo = TME_SPARC_LSINFO_OP_FETCH;
  ls.tme_sparc_ls_asi_mask = ic->tme_sparc_asi_mask_insn;
  ls.tme_sparc_ls_tlb = &tlb_dummy;
  /* the dummy TLB's ASI mask is the logical negation of the
     side-effects flag - i.e., zero, an ASI mask with no side
     effects: */
  tlb_dummy.tme_sparc_tlb_asi_mask
    = (!TME_SPARC64_ASI_MASK_FLAG_TLB_SIDE_EFFECTS
       );
  /* on a v9 CPU the faulting address is the 64-bit nnPC in the
     primary context; otherwise it is the 32-bit nnPC in the default
     context: */
  if (sizeof(tme_sparc_ireg_umax_t) > sizeof(tme_uint32_t)
      && TME_SPARC_VERSION(ic) >= 9) {
#ifdef TME_HAVE_INT64_T
    ls.tme_sparc_ls_context = ic->tme_sparc_memory_context_primary;
    ls.tme_sparc_ls_address64 = ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_PC_NEXT_NEXT);
#endif /* TME_HAVE_INT64_T */
  }
  else {
    ls.tme_sparc_ls_context = ic->tme_sparc_memory_context_default;
    ls.tme_sparc_ls_address32 = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_PC_NEXT_NEXT);
  }

  /* trap: */
  (*ic->_tme_sparc_ls_trap)(ic, &ls);
}
1246
1247 /* the default load/store bus fault function: */
1248 void
tme_sparc_ls_bus_fault(struct tme_sparc * ic,struct tme_sparc_ls * ls,int err)1249 tme_sparc_ls_bus_fault(struct tme_sparc *ic,
1250 struct tme_sparc_ls *ls,
1251 int err)
1252 {
1253 tme_uint32_t lsinfo;
1254 tme_uint32_t cycle_size;
1255 tme_uint32_t ls_fault;
1256
1257 /* get the information about this load/store: */
1258 lsinfo = ls->tme_sparc_ls_lsinfo;
1259
1260 /* if this load/store ignores all bus faults: */
1261 if (lsinfo & TME_SPARC_LSINFO_NO_FAULT) {
1262
1263 /* update the load/store to get past the fault: */
1264 cycle_size = ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size;
1265 if (TME_SPARC_VERSION(ic) >= 9) {
1266 #ifdef TME_HAVE_INT64_T
1267 ls->tme_sparc_ls_address64 += cycle_size;
1268 #endif /* TME_HAVE_INT64_T */
1269 }
1270 else {
1271 ls->tme_sparc_ls_address32 += cycle_size;
1272 }
1273 ls->tme_sparc_ls_buffer_offset += cycle_size;
1274 ls->tme_sparc_ls_size -= cycle_size;
1275 return;
1276 }
1277
1278 /* convert the bus error code into a fault: */
1279 switch (err) {
1280 case EFAULT:
1281 ls_fault = TME_SPARC_LS_FAULT_BUS_FAULT;
1282 break;
1283 case ENOENT:
1284 case EIO:
1285 ls_fault = TME_SPARC_LS_FAULT_BUS_ERROR;
1286 break;
1287 default: abort();
1288 }
1289
1290 /* add in this fault: */
1291 ls->tme_sparc_ls_faults |= ls_fault;
1292 }
1293
1294 #ifdef TME_HAVE_INT64_T
1295
1296 /* this triggers sparc64 trap processing on a preinstruction boundary: */
1297 void
tme_sparc64_trap_preinstruction(struct tme_sparc * ic,tme_uint32_t trap)1298 tme_sparc64_trap_preinstruction(struct tme_sparc *ic, tme_uint32_t trap)
1299 {
1300
1301 /* end any recode verifying: */
1302 tme_sparc_recode_verify_end(ic, TME_SPARC_TRAP_none);
1303
1304 /* shift the next instruction's PC and next-next PC up: */
1305 ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_PC) = ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_PC_NEXT);
1306 ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_PC_NEXT) = ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_PC_NEXT_NEXT);
1307
1308 /* do the rest of the sparc64 trap processing: */
1309 tme_sparc64_trap(ic, trap);
1310 }
1311
/* this triggers sparc64 trap processing by an instruction.  trap is
   the TME_SPARC64_TRAP_ code to take.  this function does not return:
   it ends in tme_sparc_redispatch(), or in
   tme_sparc64_trap_error_state(): */
void
tme_sparc64_trap(struct tme_sparc *ic, tme_uint32_t trap)
{
  tme_uint32_t tt;
  unsigned int tl;
  tme_uint32_t pstate;
  tme_uint32_t tstate_0_31;
  tme_int32_t cwp_addend;
  tme_uint32_t cwp;
  unsigned int cwp_offset;
  unsigned int wstate;
  tme_uint64_t pc;

  /* end any recode verifying: */
  tme_sparc_recode_verify_end(ic, trap);

  /* stop idling: */
  TME_SPARC_IDLE_STOP(ic);

  /* get this trap's tt value: */
  tt = TME_SPARC_TRAP_TT(trap);

  /* get the current TL: */
  tl = ic->tme_sparc64_ireg_tl;

  /* if this is some kind of reset (the range check below relies on
     POR, WDR, XIR, SIR having consecutive tt values, which the
     preprocessor tests enforce): */
#if (TME_SPARC_TRAP_TT(TME_SPARC64_TRAP_power_on_reset) + 1) != TME_SPARC_TRAP_TT(TME_SPARC64_TRAP_watchdog_reset)
#error "TME_SPARC64_TRAP_ values changed"
#endif
#if (TME_SPARC_TRAP_TT(TME_SPARC64_TRAP_power_on_reset) + 2) != TME_SPARC_TRAP_TT(TME_SPARC64_TRAP_externally_initiated_reset)
#error "TME_SPARC64_TRAP_ values changed"
#endif
#if (TME_SPARC_TRAP_TT(TME_SPARC64_TRAP_power_on_reset) + 3) != TME_SPARC_TRAP_TT(TME_SPARC64_TRAP_software_initiated_reset)
#error "TME_SPARC64_TRAP_ values changed"
#endif
  if (__tme_predict_false((tt >= TME_SPARC_TRAP_TT(TME_SPARC64_TRAP_power_on_reset)
                           && tt <= TME_SPARC_TRAP_TT(TME_SPARC64_TRAP_software_initiated_reset))
                          || (trap & TME_SPARC_TRAP_IMPDEP_RESET))) {

    /* if this is an SIR at TL == MAXTL: */
    if (tt == TME_SPARC_TRAP_TT(TME_SPARC64_TRAP_software_initiated_reset)
        && tl == ic->tme_sparc64_maxtl) {

      /* enter error_state: */
      tme_sparc64_trap_error_state(ic);
      /* NOTREACHED */
    }

    /* enter RED_state, if we're not there already, at min(TL + 1,
       MAXTL): */
    pstate = ic->tme_sparc64_ireg_pstate;
    pstate |= TME_SPARC64_PSTATE_RED;
    tl = tl + 1;
    tl = TME_MIN(tl, ic->tme_sparc64_maxtl);

    /* if this is a POR: */
    if (tt == TME_SPARC_TRAP_TT(TME_SPARC64_TRAP_power_on_reset)) {

      /* reset the FPU: */
      tme_sparc_fpu_reset(ic);

      /* poison all idle type state, to force the idle type to retrain: */
      _tme_sparc_idle_reset(ic);

      /* clear PSTATE.TLE, which will be copied into PSTATE.CLE: */
      pstate &= ~TME_SPARC64_PSTATE_TLE;

      /* set TICK.NPT: */
      ic->tme_sparc64_ireg_tick_npt = TRUE;

      /* zero TICK.counter (the offset cancels the current scaled
         cycle count): */
      ic->tme_sparc64_ireg_tick_offset
        = (0 - tme_misc_cycles_scaled(&ic->tme_sparc_cycles_scaling, 0).tme_value64_uint);

      /* enter RED_state at MAXTL: */
      tl = ic->tme_sparc64_maxtl;
    }

    /* if this is an XIR: */
    else if (tt == TME_SPARC_TRAP_TT(TME_SPARC64_TRAP_externally_initiated_reset)) {

      /* zero TICK.counter: */
      ic->tme_sparc64_ireg_tick_offset
        = (0 - tme_misc_cycles_scaled(&ic->tme_sparc_cycles_scaling, 0).tme_value64_uint);
    }
  }

  /* otherwise, this is a normal trap or interrupt: */
  else {

    /* increment TL: */
    tl = tl + 1;

    /* if we were already at MAXTL: */
    if (__tme_predict_false(tl > ic->tme_sparc64_maxtl)) {

      /* enter error_state: */
      tme_sparc64_trap_error_state(ic);
      /* NOTREACHED */
    }

    /* get PSTATE: */
    pstate = ic->tme_sparc64_ireg_pstate;

    /* if we are now at MAXTL: */
    if (tl == ic->tme_sparc64_maxtl) {

      /* enter RED_state, if we're not there already: */
      pstate |= TME_SPARC64_PSTATE_RED;
    }
  }

  /* assemble the low 32 bits of TSTATE[TL].  the fields are packed by
     multiplying by ratios of field-mask factors, which shifts each
     value into its field position.  save ASI: */
  tstate_0_31 = ic->tme_sparc64_ireg_asi;
  tstate_0_31
    *= (_TME_FIELD_MASK_FACTOR(TME_SPARC64_TSTATE_MASK_ASI)
        / _TME_FIELD_MASK_FACTOR(TME_SPARC64_TSTATE_MASK_PSTATE));

  /* save PSTATE (the old, pre-trap PSTATE; it must fit in its TSTATE
     field): */
  assert (ic->tme_sparc64_ireg_pstate
          <= (TME_SPARC64_TSTATE_MASK_PSTATE
              / _TME_FIELD_MASK_FACTOR(TME_SPARC64_TSTATE_MASK_PSTATE)));
  tstate_0_31 += ic->tme_sparc64_ireg_pstate;
  tstate_0_31
    *= (_TME_FIELD_MASK_FACTOR(TME_SPARC64_TSTATE_MASK_PSTATE)
        / _TME_FIELD_MASK_FACTOR(TME_SPARC64_TSTATE_MASK_CWP));

  /* save CWP: */
  tstate_0_31 += ic->tme_sparc64_ireg_cwp;

  /* store the least-significant 32 bits of TSTATE[TL]: */
  ic->tme_sparc64_ireg_tstate(tl) = tstate_0_31;

  /* save CCR directly into TSTATE[TL]: */
  ic->tme_sparc64_ireg_tstate_ccr(tl) = ic->tme_sparc64_ireg_ccr;

  /* save TPC[TL] and TNPC[TL]: */
  ic->tme_sparc64_ireg_tpc(tl) = ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_PC);
  ic->tme_sparc64_ireg_tnpc(tl) = ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_PC_NEXT);

  /* finish the normal PSTATE update: clear AM, IE, and CLE; in
     RED_state also clear MM; then set PEF, PRIV, AG, and copy TLE
     into CLE (the multiplication shifts the TLE bit into the CLE bit
     position): */
  pstate
    &= ~(TME_SPARC64_PSTATE_AM
         + TME_SPARC64_PSTATE_IE
         + TME_SPARC64_PSTATE_CLE);
  if (__tme_predict_false(pstate & TME_SPARC64_PSTATE_RED)) {
    pstate &= ~TME_SPARC64_PSTATE_MM;
  }
  pstate
    |= (TME_SPARC64_PSTATE_PEF
        + TME_SPARC64_PSTATE_PRIV
        + TME_SPARC64_PSTATE_AG
        + ((pstate
            & TME_SPARC64_PSTATE_TLE)
           * (TME_SPARC64_PSTATE_CLE
              / TME_SPARC64_PSTATE_TLE)));

  /* call the implementation-specific PSTATE update function to set
     the final value for PSTATE: */
  (*ic->_tme_sparc64_update_pstate)(ic, pstate, trap);

  /* if this is a clean_window trap (CWP advances by one): */
  if (tt == TME_SPARC_TRAP_TT(TME_SPARC64_TRAP_clean_window)) {
    cwp_addend = 1;
  }

  /* otherwise, if this is a window spill trap (CWP advances by
     CANSAVE + 2): */
  else if (tt == TME_SPARC_TRAP_TT(TME_SPARC64_TRAP_spill_normal(0))) {
    cwp_addend = ic->tme_sparc64_ireg_cansave + 2;
  }

  /* otherwise, if this is a window fill trap (CWP retreats by one): */
  else if (tt == TME_SPARC_TRAP_TT(TME_SPARC64_TRAP_fill_normal(0))) {
    cwp_addend = -1;
  }

  /* otherwise, this trap does not update CWP: */
  else {
    cwp_addend = 0;
  }

  /* if this trap updates CWP: */
  if (cwp_addend != 0) {

    /* update CWP: */
    cwp = ic->tme_sparc64_ireg_cwp;
    cwp += cwp_addend;
    assert (ic->tme_sparc64_ireg_winstates_mask != 0);
    cwp &= ic->tme_sparc64_ireg_winstates_mask;
    cwp = (tme_uint8_t) cwp;
    ic->tme_sparc64_ireg_cwp = cwp;
    TME_SPARC64_CWP_UPDATE(ic, cwp, cwp_offset);

    /* if this is a window spill or fill trap: */
    if (tt != TME_SPARC_TRAP_TT(TME_SPARC64_TRAP_clean_window)) {

      /* make the exact trap vector.  when OTHERWIN is nonzero the
         _other variant of the spill/fill trap is taken and the
         WSTATE.OTHER field is shifted down into the NORMAL field
         position; the final tt is then offset by the selected WSTATE
         vector number (each vector is four tt entries wide): */
      wstate = ic->tme_sparc64_ireg_wstate;
      if (ic->tme_sparc64_ireg_otherwin) {
#if (TME_SPARC_TRAP_TT(TME_SPARC64_TRAP_spill_other(0)) - TME_SPARC_TRAP_TT(TME_SPARC64_TRAP_spill_normal(0))) != (TME_SPARC_TRAP_TT(TME_SPARC64_TRAP_fill_other(0)) - TME_SPARC_TRAP_TT(TME_SPARC64_TRAP_fill_normal(0)))
#error "TME_SPARC64_TRAP_ values changed"
#endif
        tt += (TME_SPARC_TRAP_TT(TME_SPARC64_TRAP_spill_other(0))
               - TME_SPARC_TRAP_TT(TME_SPARC64_TRAP_spill_normal(0)));
        wstate /= (TME_SPARC64_WSTATE_OTHER / TME_SPARC64_WSTATE_NORMAL);
      }
      tt += (4 * (wstate & TME_SPARC64_WSTATE_NORMAL));
    }
  }

  /* if we are in RED_state: */
  if (__tme_predict_false(ic->tme_sparc64_ireg_pstate & TME_SPARC64_PSTATE_RED)) {

    /* transfer control into the RED_state_trap_vector table: */
    pc = ic->tme_sparc64_rstvaddr;
  }

  /* otherwise, we are not in RED_state: */
  else {

    /* transfer control into the normal trap vector table at TBA, with
       bit 14 set when the trap was taken from TL > 0 (NB: this tests
       the pre-trap TL still in the ic, not the incremented tl): */
    pc = (ic->tme_sparc64_ireg_tl == 0 ? 0 : TME_BIT(14));
    pc |= ic->tme_sparc64_ireg_tba;
  }

  /* save the trap type: */
  ic->tme_sparc64_ireg_tt(tl) = tt;

  /* update TL: */
  ic->tme_sparc64_ireg_tl = tl;

  /* transfer control to the trap vector table (each vector is 0x20
     bytes; pc is 0x20-aligned here, so the OR below is equivalent to
     adding four): */
  pc += (tt * 0x20);
  ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_PC_NEXT) = pc;
  ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_PC_NEXT_NEXT) = pc | sizeof(tme_uint32_t);

  /* log the trap: */
  tme_sparc_log(ic, 250, TME_OK,
                (TME_SPARC_LOG_HANDLE(ic),
                 _("trap tl %u tt 0x%03" TME_PRIx32 " handler-%%pc 0x%016" TME_PRIx64),
                 tl,
                 tt,
                 pc));

  /* redispatch: */
  ic->_tme_sparc_mode = TME_SPARC_MODE_EXECUTION;
  tme_sparc_redispatch(ic);
}
1561
/* this enters the sparc64 error_state.  error_state handling is not
   implemented yet, so this simply aborts the emulator.  callers treat
   this function as not returning: */
void
tme_sparc64_trap_error_state(struct tme_sparc *ic)
{
  abort();
}
1568
1569 #endif /* TME_HAVE_INT64_T */
1570
/* this returns the current instruction TLB entry.  the ic only
   remembers the entry's token pointer, so this recovers the entry's
   index from the token's address and returns the corresponding entry
   in ic->tme_sparc_tlbs: */
struct tme_sparc_tlb *
tme_sparc_itlb_current(struct tme_sparc *ic)
{
  struct tme_token *token;
  tme_uint32_t tlb_i;
  struct tme_sparc_tlb *itlb_current;

  /* there must be a current instruction TLB entry: */
  token = ic->_tme_sparc_itlb_current_token;
  assert (token != NULL);

  /* recover the index of the instruction TLB entry.  with recode
     support, the token is embedded in a recode TLB entry, so this is
     a container-of computation: subtract the (offsetof-style) byte
     offset of the token member from the token's address to get the
     containing recode TLB entry, then take its index in the recode
     TLB array.  without recode support, the token is an element of
     the plain tme_sparc_tlb_tokens array and pointer subtraction
     gives the index directly: */
  tlb_i
    = (
#ifdef TME_HAVE_INT64_T
       TME_SPARC_VERSION(ic) >= 9
       ?
#if TME_HAVE_RECODE && TME_RECODE_SIZE_GUEST_MAX > TME_RECODE_SIZE_32
       (((struct tme_recode_tlb_c16_a64 *)
         (((char *) token)
          - (((char *) &(((struct tme_recode_tlb_c16_a64 *) 0)->tme_recode_tlb_c16_a64_token))
             - (char *) 0)))
        - &ic->tme_sparc_recode_tlb64s[0])
#else  /* !TME_HAVE_RECODE || TME_RECODE_SIZE_GUEST_MAX <= TME_RECODE_SIZE_32 */
       (token - &ic->tme_sparc_tlb_tokens[0])
#endif /* !TME_HAVE_RECODE || TME_RECODE_SIZE_GUEST_MAX <= TME_RECODE_SIZE_32 */
       :
#endif /* TME_HAVE_INT64_T */
#if TME_HAVE_RECODE
       (((struct tme_recode_tlb_c16_a32 *)
         (((char *) token)
          - (((char *) &(((struct tme_recode_tlb_c16_a32 *) 0)->tme_recode_tlb_c16_a32_token))
             - (char *) 0)))
        - &ic->tme_sparc_recode_tlb32s[0])
#else  /* !TME_HAVE_RECODE */
       (token - &ic->tme_sparc_tlb_tokens[0])
#endif /* !TME_HAVE_RECODE */
       );

  /* check that the recovered index maps back to the same token: */
#if TME_HAVE_RECODE
  if (TME_SPARC_VERSION(ic) >= 9) {
#if TME_RECODE_SIZE_GUEST_MAX > TME_RECODE_SIZE_32
    assert (token == &ic->tme_sparc_recode_tlb64s[tlb_i].tme_recode_tlb_c16_a64_token);
#endif /* TME_RECODE_SIZE_GUEST_MAX > TME_RECODE_SIZE_32 */
  }
  else {
    assert (token == &ic->tme_sparc_recode_tlb32s[tlb_i].tme_recode_tlb_c16_a32_token);
  }
#endif /* TME_HAVE_RECODE */

  /* get the current instruction TLB entry: */
  itlb_current = &ic->tme_sparc_tlbs[tlb_i];
  assert (itlb_current->tme_sparc_tlb_bus_tlb.tme_bus_tlb_token == token);

  return (itlb_current);
}
1627
/* this peeks at an instruction.  pc_unmasked is the (possibly
   unmasked) 32-bit-aligned address of the instruction.  it returns
   the big-endian-decoded instruction word, or all-bits-one if there
   is no valid instruction TLB entry that allows fast reading and
   applies to the address: */
tme_uint32_t
tme_sparc_insn_peek(struct tme_sparc *ic,
                    tme_sparc_ireg_umax_t pc_unmasked)
{
  tme_sparc_ireg_umax_t pc;
  tme_uint32_t tlb_hash;
  const struct tme_sparc_tlb *itlb;
  tme_uint32_t insn;
  const struct tme_sparc_tlb *itlb_current;

  /* mask the address (only v9 has an address mask): */
  pc = pc_unmasked;
#ifdef TME_HAVE_INT64_T
  if (TME_SPARC_VERSION(ic) >= 9) {
    pc &= ic->tme_sparc_address_mask;
  }
#endif /* TME_HAVE_INT64_T */

  /* the address must be 32-bit aligned: */
  assert ((pc % sizeof(tme_uint32_t)) == 0);

  /* NB: we don't have to check if the PC is in any virtual address
     hole, because we never make valid instruction TLB entries for
     addresses in a hole: */

  /* hash the instruction TLB entry: */
  tlb_hash = TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, pc);
  itlb = &ic->tme_sparc_tlbs[TME_SPARC_ITLB_ENTRY(ic, tlb_hash)];

  /* if this instruction TLB entry is valid, covers this ASI and
     address, and allows fast (emulator-memory) reading: */
  if (tme_bus_tlb_is_valid(&itlb->tme_sparc_tlb_bus_tlb)
      && TME_SPARC_TLB_ASI_MASK_OK(itlb, ic->tme_sparc_asi_mask_insn)
      && itlb->tme_sparc_tlb_addr_first <= pc
      && pc <= itlb->tme_sparc_tlb_addr_last
      && itlb->tme_sparc_tlb_emulator_off_read != TME_EMULATOR_OFF_UNDEF) {

    /* fetch the instruction and convert it from guest big-endian to
       host byte order: */
    insn = tme_memory_bus_read32((const tme_shared tme_uint32_t *) (itlb->tme_sparc_tlb_emulator_off_read + pc),
                                 itlb->tme_sparc_tlb_bus_rwlock,
                                 sizeof(tme_uint32_t),
                                 (TME_SPARC_VERSION(ic) < 9
                                  ? sizeof(tme_uint32_t)
                                  : sizeof(tme_uint32_t) * 2));
    insn = tme_betoh_u32(insn);
    return (insn);
  }

  /* if there is recode support: */
  if (TME_SPARC_HAVE_RECODE(ic)) {

    /* return failure.  if the PC doesn't hash to the current
       instruction TLB entry, the current instruction TLB entry won't
       cover it (because tme_sparc_recode_chain_tlb_update() limits
       instruction TLB entries to covering just one page).  if the PC
       does hash to the current instruction TLB entry, we just checked
       that above: */
    return (0xffffffff);
  }

  /* assume that we can't fetch the nearby instruction: */
  insn = 0xffffffff;

  /* if the current instruction TLB entry is valid and covers the address: */
  itlb_current = tme_sparc_itlb_current(ic);
  if (tme_bus_tlb_is_valid(&itlb_current->tme_sparc_tlb_bus_tlb)
      && itlb_current->tme_sparc_tlb_addr_first <= pc
      && pc <= itlb_current->tme_sparc_tlb_addr_last) {

    /* the current instruction TLB entry must cover this ASI and allow
       fast reading: */
    assert (TME_SPARC_TLB_ASI_MASK_OK(itlb_current, ic->tme_sparc_asi_mask_insn));
    assert (itlb_current->tme_sparc_tlb_emulator_off_read != TME_EMULATOR_OFF_UNDEF);

    /* fetch the nearby instruction: */
    insn = tme_memory_bus_read32((const tme_shared tme_uint32_t *) (itlb_current->tme_sparc_tlb_emulator_off_read + pc),
                                 itlb_current->tme_sparc_tlb_bus_rwlock,
                                 sizeof(tme_uint32_t),
                                 (TME_SPARC_VERSION(ic) < 9
                                  ? sizeof(tme_uint32_t)
                                  : sizeof(tme_uint32_t) * 2));
    insn = tme_betoh_u32(insn);
  }

  return (insn);
}
1717
1718 /* this peeks at an instruction at some offset from the current PC.
1719 it returns all-bits-one if there is no valid instruction TLB entry
1720 that allows fast reading and applies to the address: */
1721 tme_uint32_t
tme_sparc_fetch_nearby(struct tme_sparc * ic,long offset_in_insns)1722 tme_sparc_fetch_nearby(struct tme_sparc *ic, long offset_in_insns)
1723 {
1724 tme_sparc_ireg_umax_t pc_unmasked;
1725
1726 /* get the PC: */
1727 pc_unmasked
1728 = (
1729 #ifdef TME_HAVE_INT64_T
1730 TME_SPARC_VERSION(ic) >= 9
1731 ? ((tme_uint64_t)
1732 (ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_PC)
1733 + (tme_int64_t) (offset_in_insns * (long) sizeof(tme_uint32_t))))
1734 :
1735 #endif /* TME_HAVE_INT64_T */
1736 ((tme_uint32_t)
1737 (ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_PC)
1738 + (tme_int32_t) (offset_in_insns * (long) sizeof(tme_uint32_t)))));
1739
1740 /* peek at the instruction: */
1741 return (tme_sparc_insn_peek(ic, pc_unmasked));
1742 }
1743
1744 /* this unlocks data structures before a callout: */
1745 void
tme_sparc_callout_unlock(struct tme_sparc * ic)1746 tme_sparc_callout_unlock(struct tme_sparc *ic)
1747 {
1748 struct tme_token *token;
1749
1750 assert ((ic->_tme_sparc_mode == TME_SPARC_MODE_EXECUTION)
1751 || (ic->_tme_sparc_itlb_current_token == NULL));
1752
1753 /* if we have a busy instruction TLB entry: */
1754 token = ic->_tme_sparc_itlb_current_token;
1755 if (__tme_predict_true(token != NULL)) {
1756
1757 /* unbusy the instruction TLB entry: */
1758 tme_token_unbusy(token);
1759 }
1760 }
1761
/* this relocks data structures after a callout.  it rebusies any
   remembered current instruction TLB entry, poisoning it if it went
   invalid or changed context during the callout, and shortens the
   instruction burst if an external check is now pending: */
void
tme_sparc_callout_relock(struct tme_sparc *ic)
{
  struct tme_token *token;
  struct tme_sparc_tlb *tlb;

  /* outside of execution mode there must be no current instruction
     TLB entry: */
  assert ((ic->_tme_sparc_mode == TME_SPARC_MODE_EXECUTION)
          || (ic->_tme_sparc_itlb_current_token == NULL));

  /* if we have a busy instruction TLB entry: */
  token = ic->_tme_sparc_itlb_current_token;
  if (__tme_predict_true(token != NULL)) {

    /* rebusy the instruction TLB entry: */
    tme_token_busy(token);
    tlb = tme_sparc_itlb_current(ic);

    /* if this instruction TLB entry is invalid, or isn't for the
       current context (a context value above the maximum appears to
       mean a context-independent entry - TODO confirm): */
    if (tme_bus_tlb_is_invalid(&tlb->tme_sparc_tlb_bus_tlb)
        || (tlb->tme_sparc_tlb_context <= ic->tme_sparc_memory_context_max
            && tlb->tme_sparc_tlb_context != ic->tme_sparc_memory_context_default)) {

      /* poison this instruction TLB entry, so we won't try to do any
         fast fetches with it (first > last means the entry covers no
         addresses at all): */
      tlb->tme_sparc_tlb_addr_first = 1;
      tlb->tme_sparc_tlb_addr_last = 0;
    }
  }

  /* if we need to do an external check: */
  if (tme_memory_atomic_read_flag(&ic->tme_sparc_external_flag)) {

    /* after the currently executing instruction finishes, check for
       external resets, halts, or interrupts (a zero burst remainder
       ends the burst; the flag itself is consumed elsewhere): */
    ic->_tme_sparc_instruction_burst_remaining = 0;
    ic->_tme_sparc_instruction_burst_other = TRUE;
  }
}
1802
1803 #if 0
1804 #include <stdio.h>
1805
1806 /* this dumps out the sparc state: */
1807 void
1808 tme_sparc32_dump(const struct tme_sparc *ic)
1809 {
1810 unsigned int cwp_first;
1811 unsigned int cwp;
1812 unsigned int reg_i;
1813 unsigned int reg_base;
1814 unsigned int ireg;
1815
1816 /* dump out the windowed integer registers, finishing with the
1817 current window: */
1818 cwp_first = TME_FIELD_MASK_EXTRACTU(ic->tme_sparc32_ireg_psr, TME_SPARC32_PSR_CWP);
1819 cwp_first += TME_SPARC_NWINDOWS(ic) - 1;
1820 cwp_first %= TME_SPARC_NWINDOWS(ic);
1821 cwp = cwp_first;
1822 do {
1823 for (reg_i = 0; reg_i < 8; reg_i++) {
1824 for (reg_base = 24; reg_base > 8; reg_base -= 8) {
1825
1826 ireg = reg_base + reg_i + (cwp * 16);
1827 if (ireg > ((TME_SPARC_NWINDOWS(ic) * 16) + 7)) {
1828 ireg -= (TME_SPARC_NWINDOWS(ic) * 16);
1829 }
1830
1831 fprintf(stderr,
1832 "w%u.%%%c%u[%p] = 0x%08x ",
1833 cwp,
1834 (reg_base == 24
1835 ? 'i'
1836 : 'l'),
1837 reg_i,
1838 &ic->tme_sparc_ireg_uint32(ireg),
1839 ic->tme_sparc_ireg_uint32(ireg));
1840 }
1841 fprintf(stderr, "\n");
1842 }
1843 cwp--;
1844 cwp %= TME_SPARC_NWINDOWS(ic);
1845 } while (cwp != cwp_first);
1846
1847 /* dump out the global registers and the current window's output
1848 registers: */
1849 cwp = TME_FIELD_MASK_EXTRACTU(ic->tme_sparc32_ireg_psr, TME_SPARC32_PSR_CWP);
1850 for (reg_i = 0; reg_i < 8; reg_i++) {
1851
1852 ireg = reg_i;
1853 fprintf(stderr,
1854 " %%g%u[%p] = 0x%08x ",
1855 ireg,
1856 &ic->tme_sparc_ireg_uint32(ireg),
1857 ic->tme_sparc_ireg_uint32(ireg));
1858
1859 ireg = 8 + reg_i + (cwp * 16);
1860 if (ireg > ((TME_SPARC_NWINDOWS(ic) * 16) + 7)) {
1861 ireg -= (TME_SPARC_NWINDOWS(ic) * 16);
1862 }
1863
1864 fprintf(stderr,
1865 "w%u.%%o%u[%p] = 0x%08x ",
1866 cwp,
1867 reg_i,
1868 &ic->tme_sparc_ireg_uint32(ireg),
1869 ic->tme_sparc_ireg_uint32(ireg));
1870 fprintf(stderr, "\n");
1871 }
1872
1873 /* dump out the PCs: */
1874 fprintf(stderr, "%%pc = 0x%08x %%pc_next = 0x%08x %%pc_next_next = 0x%08x\n",
1875 ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_PC),
1876 ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_PC_NEXT),
1877 ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_PC_NEXT_NEXT));
1878
1879 /* dump out the PSR: */
1880 fprintf(stderr, "%%psr = 0x%08x", ic->tme_sparc32_ireg_psr);
1881 fprintf(stderr, " cwp = %u",
1882 TME_FIELD_MASK_EXTRACTU(ic->tme_sparc32_ireg_psr, TME_SPARC32_PSR_CWP));
1883 fprintf(stderr, " pil = 0x%x",
1884 TME_FIELD_MASK_EXTRACTU(ic->tme_sparc32_ireg_psr, TME_SPARC32_PSR_PIL));
1885 if (ic->tme_sparc32_ireg_psr & TME_SPARC32_PSR_ET) {
1886 fprintf(stderr, " ET");
1887 }
1888 fprintf(stderr, " %c",
1889 (ic->tme_sparc32_ireg_psr & TME_SPARC32_PSR_S
1890 ? 'S'
1891 : 'U'));
1892 fprintf(stderr, " flags:");
1893 if (ic->tme_sparc32_ireg_psr & TME_SPARC32_PSR_ICC_N) {
1894 fprintf(stderr, " N");
1895 }
1896 if (ic->tme_sparc32_ireg_psr & TME_SPARC32_PSR_ICC_Z) {
1897 fprintf(stderr, " Z");
1898 }
1899 if (ic->tme_sparc32_ireg_psr & TME_SPARC32_PSR_ICC_V) {
1900 fprintf(stderr, " V");
1901 }
1902 if (ic->tme_sparc32_ireg_psr & TME_SPARC32_PSR_ICC_C) {
1903 fprintf(stderr, " C");
1904 }
1905 fprintf(stderr, "\n");
1906
1907 /* dump out the instruction and the WIM: */
1908 fprintf(stderr, "insn = 0x%08x %%wim = 0x%08x\n",
1909 ic->_tme_sparc_insn,
1910 ic->tme_sparc32_ireg_wim);
1911 }
1912
1913 void
1914 tme_sparc64_dump_memory(struct tme_sparc *ic, tme_uint64_t address, tme_uint32_t resid)
1915 {
1916 tme_uint64_t address_display;
1917 tme_uint32_t tlb_hash;
1918 struct tme_sparc_tlb *dtlb;
1919 tme_memory_atomic_flag_t tlb_busy_old;
1920 const tme_shared tme_uint8_t *memory;
1921 tme_uint32_t count;
1922 tme_uint32_t byte_i;
1923
1924 /* we always display aligned rows: */
1925 address_display = address & (((tme_uint32_t) 0) - (sizeof(tme_uint32_t) * 2));
1926 resid += (address - address_display);
1927
1928 /* while we have memory to dump: */
1929 for (; resid > 0; ) {
1930
1931 /* get the DTLB entry, and busy it if it isn't already: */
1932 tlb_hash = TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address_display);
1933 dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, tlb_hash)];
1934 tlb_busy_old = dtlb->tme_sparc_tlb_bus_tlb.tme_bus_tlb_token->tme_token_busy;
1935 dtlb->tme_sparc_tlb_bus_tlb.tme_bus_tlb_token->tme_token_busy = TRUE;
1936
1937 /* read more data: */
1938 count = TME_MIN(resid, sizeof(tme_uint32_t) * 2);
1939 memory
1940 = (TME_SPARC_VERSION(ic) < 9
1941 ? tme_sparc32_ls(ic,
1942 address_display,
1943 (tme_uint32_t *) NULL,
1944 (TME_SPARC_LSINFO_SIZE(sizeof(tme_uint32_t) * 2)
1945 + TME_SPARC_LSINFO_OP_LD
1946 + TME_SPARC_LSINFO_NO_FAULT))
1947 : tme_sparc64_ls(ic,
1948 address_display,
1949 (tme_uint64_t *) NULL,
1950 (TME_SPARC_LSINFO_SIZE(sizeof(tme_uint32_t) * 2)
1951 + TME_SPARC_LSINFO_OP_LD
1952 + TME_SPARC_LSINFO_NO_FAULT)));
1953
1954 /* restore the DTLB busy flag: */
1955 dtlb->tme_sparc_tlb_bus_tlb.tme_bus_tlb_token->tme_token_busy = tlb_busy_old;
1956
1957 /* display the row: */
1958 fprintf(stderr, "0x%0*" TME_PRIx64 " ", (8 << (TME_SPARC_VERSION(ic) >= 9)), address_display);
1959 for (byte_i = 0;
1960 byte_i < count;
1961 byte_i++, address_display++) {
1962 if (address_display < address) {
1963 fprintf(stderr, " ");
1964 }
1965 else {
1966 fprintf(stderr, " %02x",
1967 memory[address_display]);
1968 address++;
1969 }
1970 resid--;
1971 }
1972 fputc('\n', stderr);
1973 }
1974 }
1975
1976 #undef TME_SPARC_VERSION
1977 #define TME_SPARC_VERSION(ic) (9)
1978 #include "sparc-kgdb.c"
1979 #undef TME_SPARC_VERSION
1980
#endif /* 0 */
1982