/* $Id: sparc-execute.c,v 1.10 2010/02/20 21:58:15 fredette Exp $ */

/* ic/sparc/sparc-execute.c - executes SPARC instructions: */

/*
 * Copyright (c) 2005, 2009 Matt Fredette
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Matt Fredette.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

_TME_RCSID("$Id: sparc-execute.c,v 1.10 2010/02/20 21:58:15 fredette Exp $");

/* includes: */
#include "sparc-auto.h"

#if (TME_SPARC_VERSION(ic) < 9)
#define tme_sparc_ireg_t tme_uint32_t
#define tme_sparc_ireg(x)  tme_sparc_ireg_uint32(x)
#define tme_sparc_idle_pcs tme_sparc_idle_pcs_32
#define TME_PRIxSPARCREG "0x%08" TME_PRIx32
#else  /* TME_SPARC_VERSION(ic) >= 9 */
#define tme_sparc_ireg_t tme_uint64_t
#define tme_sparc_ireg(x)  tme_sparc_ireg_uint64(x)
#define tme_sparc_idle_pcs tme_sparc_idle_pcs_64
#define TME_PRIxSPARCREG "0x%016" TME_PRIx64
#endif /* TME_SPARC_VERSION(ic) >= 9 */
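
/* NB: these width-dependent macros let the executor body below be
   compiled once for each architecture version.  on a v9 CPU, for
   example, ic->tme_sparc_ireg(TME_SPARC_IREG_PC) expands to the
   64-bit ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_PC), and
   TME_PRIxSPARCREG formats a full sixteen-digit register value: */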

/* the sparc instruction executor: */
static void
_TME_SPARC_EXECUTE_NAME(struct tme_sparc *ic)
{
  tme_uint32_t asi_mask_insn;
  tme_uint32_t asi_mask_data;
  struct tme_sparc_tlb *itlb_current;
  struct tme_sparc_tlb itlb_invalid;
  struct tme_token token_invalid;
  tme_sparc_ireg_t pc_previous;
  tme_sparc_ireg_t pc;
  tme_uint32_t insn;
  tme_uint32_t tlb_hash;
  const tme_shared tme_uint8_t *emulator_off;
  unsigned int opcode;
  unsigned int reg_rs1;
  unsigned int reg_rs2;
  unsigned int reg_rd;
  int annulled;
  int branch_dot;
  tme_uint32_t branch_dot_burst;
#if TME_SPARC_VERSION(ic) >= 9
  unsigned int cc;
  tme_uint64_t value_rs1;
#endif /* TME_SPARC_VERSION(ic) >= 9 */
  tme_uint8_t conds_mask_icc;
  tme_uint8_t conds_mask_fcc;
  tme_uint16_t conds_mask;
  unsigned int cond;
  tme_int32_t disp;
  tme_sparc_ireg_t pc_next_next;
  unsigned int reg_o0;

  /* get the default address space identifiers and masks: */
  if (TME_SPARC_VERSION(ic) < 9) {
    if (TME_SPARC_PRIV(ic)) {
      asi_mask_insn = TME_SPARC32_ASI_MASK_SI;
      asi_mask_data = TME_SPARC32_ASI_MASK_SD;
    }
    else {
      asi_mask_insn = TME_SPARC32_ASI_MASK_UI;
      asi_mask_data = TME_SPARC32_ASI_MASK_UD;
    }
  }
  else {
    if (__tme_predict_false((TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS)
			    && ic->tme_sparc64_ireg_tl > 0)) {
      asi_mask_insn
	= TME_SPARC64_ASI_MASK_NUCLEUS(!TME_SPARC64_ASI_FLAG_LITTLE);
      ic->tme_sparc_memory_context_default = 0;
    }
    else {
      asi_mask_insn
	= TME_SPARC64_ASI_MASK_REQUIRED_UNRESTRICTED((TME_SPARC_PRIV(ic)
						      ? !TME_SPARC64_ASI_MASK_FLAG_INSN_AS_IF_USER
						      : TME_SPARC64_ASI_MASK_FLAG_INSN_AS_IF_USER)
						     + !TME_SPARC64_ASI_FLAG_SECONDARY
						     + !TME_SPARC64_ASI_FLAG_NO_FAULT
						     + !TME_SPARC64_ASI_FLAG_LITTLE);
      ic->tme_sparc_memory_context_default = ic->tme_sparc_memory_context_primary;
    }
    asi_mask_data = asi_mask_insn;
    if (__tme_predict_false(ic->tme_sparc64_ireg_pstate & TME_SPARC64_PSTATE_CLE)) {
      assert ((TME_SPARC64_ASI_MASK_NUCLEUS(!TME_SPARC64_ASI_FLAG_LITTLE)
	       ^ TME_SPARC64_ASI_MASK_NUCLEUS(TME_SPARC64_ASI_FLAG_LITTLE))
	      == (TME_SPARC64_ASI_MASK_REQUIRED_UNRESTRICTED(!TME_SPARC64_ASI_FLAG_LITTLE)
		  ^ TME_SPARC64_ASI_MASK_REQUIRED_UNRESTRICTED(TME_SPARC64_ASI_FLAG_LITTLE)));
      asi_mask_data
	^= (TME_SPARC64_ASI_MASK_NUCLEUS(!TME_SPARC64_ASI_FLAG_LITTLE)
	    ^ TME_SPARC64_ASI_MASK_NUCLEUS(TME_SPARC64_ASI_FLAG_LITTLE));
    }
  }
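
  /* NB: when PSTATE.CLE is set, the XOR above toggles just the
     little-endian selection in the default data ASI mask; the assert
     guarantees that the same toggle value works whether asi_mask_data
     was derived from the nucleus ASI or from the required
     unrestricted ASI: */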
  ic->tme_sparc_asi_mask_insn = asi_mask_insn;
  ic->tme_sparc_asi_mask_data = asi_mask_data;

#if TME_SPARC_HAVE_RECODE(ic)

  /* set the recode read/write TLB flags mask to AND with the flags
     from a read/write thunk before they are tested against the flags
     in a recode DTLB entry.  this TLB flags mask must clear any flags
     that do not apply in the current state: */
  ic->tme_sparc_recode_rw_tlb_flags
    = (TME_RECODE_TLB_FLAGS_MASK(ic->tme_sparc_recode_ic)
       - (

	  /* the load and store flags for the other privilege level do
	     not apply, because we're not at that privilege level: */
	  (TME_SPARC_PRIV(ic)
	   ? (TME_SPARC_RECODE_TLB_FLAG_LD_USER(ic)
	      + TME_SPARC_RECODE_TLB_FLAG_ST_USER(ic))
	   : (TME_SPARC_RECODE_TLB_FLAG_LD_PRIV(ic)
	      + TME_SPARC_RECODE_TLB_FLAG_ST_PRIV(ic)))

	  /* on a v9 CPU, if the ASI register has the default data
	     ASI, but with the no-fault bit set, the ASI register is
	     correct for no-fault loads, and the no-fault load bit
	     doesn't apply: */
	  + ((TME_SPARC_VERSION(ic) >= 9
	      && ((TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) == 0
		  || ic->tme_sparc64_ireg_tl == 0)
	      && (ic->tme_sparc64_ireg_asi
		  == (TME_SPARC_ASI_MASK_WHICH(asi_mask_data)
		      + TME_SPARC64_ASI_FLAG_NO_FAULT)))
	     ? TME_SPARC_RECODE_TLB_FLAG_LD_NF(ic)
	     : 0)));

  /* set the recode chain TLB flags mask to AND with the flags from
     the chain thunk before they are tested against the flags in a
     recode ITLB entry.  this TLB flags mask must clear any flags that
     do not apply in the current state: */
  ic->tme_sparc_recode_chain_tlb_flags
    = (TME_RECODE_TLB_FLAGS_MASK(ic->tme_sparc_recode_ic)
       - (

	  /* the fetch flags for the other privilege level do not
	     apply, because we're not at that privilege level: */
	  (TME_SPARC_PRIV(ic)
	   ? TME_SPARC_RECODE_TLB_FLAG_CHAIN_USER(ic)
	   : TME_SPARC_RECODE_TLB_FLAG_CHAIN_PRIV(ic))
	  ));

#endif /* TME_SPARC_HAVE_RECODE(ic) */

  /* create an invalid instruction TLB entry, and use it as the initial
     current instruction TLB entry: */
  tme_token_init(&token_invalid);
  itlb_invalid.tme_sparc_tlb_addr_first = 1;
  itlb_invalid.tme_sparc_tlb_addr_last = 0;
  itlb_invalid.tme_sparc_tlb_bus_tlb.tme_bus_tlb_token = &token_invalid;
  itlb_current = &itlb_invalid;

  /* busy the invalid instruction TLB entry: */
  assert (ic->_tme_sparc_itlb_current_token == NULL);
  tme_token_busy(&token_invalid);
  ic->_tme_sparc_itlb_current_token = &token_invalid;

  /* the first instruction will not be annulled: */
  annulled = FALSE;

  /* the last instruction was not a taken branch to .: */
  branch_dot = FALSE;
  branch_dot_burst = 0;

  for (;;) {

    /* if we have used up our instruction burst: */
    if (__tme_predict_false(ic->_tme_sparc_instruction_burst_remaining == 0)) {

      /* if the last instruction was a taken branch to .: */
      if (__tme_predict_false(branch_dot)) {

	/* clear the taken branch to . flag and restore the
	   instruction burst that had been remaining: */
	branch_dot = FALSE;
	ic->_tme_sparc_instruction_burst_remaining = branch_dot_burst;

	/* if the next instruction will be annulled: */
	if (__tme_predict_false(annulled)) {

	  /* a taken branch to . that annuls its branch delay slot
	     must be a "ba,a .", since taken conditional branches
	     never annul.  "ba,a ." makes an infinite loop.

	     we can just go idle here, but we must make sure that any
	     trap sees %pc on the branch to ., and not its branch
	     delay slot (since we don't track the annulled bit in the
	     processor structure), and we must make sure that %pc_next
	     is the branch to . delay slot (because otherwise it would
	     look like we didn't loop even once): */
	  ic->tme_sparc_ireg(TME_SPARC_IREG_PC_NEXT)
	    = ic->tme_sparc_ireg(TME_SPARC_IREG_PC);
	  ic->tme_sparc_ireg(TME_SPARC_IREG_PC_NEXT_NEXT)
	    = (ic->tme_sparc_ireg(TME_SPARC_IREG_PC)
	       + sizeof(tme_uint32_t));
	  if (TME_SPARC_VERSION(ic) >= 9) {
	    ic->tme_sparc_ireg(TME_SPARC_IREG_PC_NEXT_NEXT) &= ic->tme_sparc_address_mask;
	  }
	  tme_sparc_do_idle(ic);
	  /* NOTREACHED */
	}

	/* if the branch delay instruction immediately follows the
	   branch to .: */
	if (ic->tme_sparc_ireg(TME_SPARC_IREG_PC_NEXT)
	    == (ic->tme_sparc_ireg(TME_SPARC_IREG_PC)
		+ sizeof(tme_uint32_t))) {

	  /* if this branch to . is not a timing loop, this will
	     return.  if it's a timing loop that doesn't sleep, this
	     will return.  otherwise, this won't return: */
	  tme_sparc_timing_loop_start(ic);
	}

	/* continue now, to finish the instruction burst that
	   had been remaining: */
	continue;
      }

      /* if this was a full instruction burst: */
      if (!ic->_tme_sparc_instruction_burst_other) {

	/* if it's time to update the runlength: */
	if (ic->tme_sparc_runlength_update_next == 0) {

	  /* update the runlength: */
#ifndef _TME_SPARC_RECODE_VERIFY
	  tme_runlength_update(&ic->tme_sparc_runlength);
#endif /* !_TME_SPARC_RECODE_VERIFY */

	  /* start another runlength update period: */
	  ic->tme_sparc_runlength_update_next = ic->tme_sparc_runlength_update_period;
	}

	/* advance in the runlength update period: */
	ic->tme_sparc_runlength_update_next--;

	/* we are not in a full instruction burst: */
	ic->_tme_sparc_instruction_burst_other = TRUE;
      }

      /* if the next instruction will be annulled: */
      if (__tme_predict_false(annulled)) {

	/* NB that we have to handle the next instruction now, in the
	   immediate next iteration of the execution loop, since we
	   don't track the annulled bit in the processor structure,
	   and we want to do good emulation and actually fetch the
	   instruction (as opposed to just advancing the PCs now).
	   start an instruction burst of one instruction: */
	ic->_tme_sparc_instruction_burst_remaining = 1;
	continue;
      }

      /* if we need to do an external check: */
      if (tme_memory_atomic_read_flag(&ic->tme_sparc_external_flag)) {

	/* do an external check: */
	tme_memory_atomic_write_flag(&ic->tme_sparc_external_flag, FALSE);
	tme_memory_barrier(ic, sizeof(*ic), TME_MEMORY_BARRIER_READ_BEFORE_READ);
	(*ic->_tme_sparc_external_check)(ic, TME_SPARC_EXTERNAL_CHECK_NULL);
      }

      /* start a new instruction burst: */
      ic->_tme_sparc_instruction_burst_remaining
	= ic->_tme_sparc_instruction_burst;
      ic->_tme_sparc_instruction_burst_other = FALSE;

      /* if the next PC might be in an idle PC range: */
      pc = ic->tme_sparc_ireg(TME_SPARC_IREG_PC_NEXT);
      if (__tme_predict_false(pc < ic->tme_sparc_idle_pcs[1])) {

	/* if we haven't detected the idle PC yet: */
	if (__tme_predict_false(TME_SPARC_IDLE_TYPE_PC_STATE(ic->tme_sparc_idle_pcs[0]) != 0)) {
	  /* nothing to do */
	}

	/* if the next PC and the delay PC are both in the idle PC
	   range, and this idle type has an idle PC range: */
	else if (__tme_predict_false(pc >= ic->tme_sparc_idle_pcs[0])) {
	  if (TME_SPARC_IDLE_TYPE_IS(ic, TME_SPARC_IDLE_TYPES_PC_RANGE)
	      && ic->tme_sparc_ireg(TME_SPARC_IREG_PC_NEXT_NEXT) >= ic->tme_sparc_idle_pcs[0]
	      && ic->tme_sparc_ireg(TME_SPARC_IREG_PC_NEXT_NEXT) < ic->tme_sparc_idle_pcs[1]) {

	    /* if we haven't marked any idles yet, or if we have
	       marked one and the next PC is at or behind that PC: */
	    if (ic->tme_sparc_idle_marks == 0
		|| (ic->tme_sparc_idle_marks == 1
		    && pc <= ic->tme_sparc_idle_pcs[2])) {

	      /* mark the idle: */
	      ic->tme_sparc_idle_marks++;

	      /* we won't mark another idle until we detect a
		 backwards control transfer in the idle PC range,
		 indicating another iteration of the idle loop: */
	      ic->tme_sparc_idle_pcs[2] = pc;
	    }
	  }
	}
      }

      /* if we have marked any idles: */
      if (__tme_predict_false(ic->tme_sparc_idle_marks != 0)) {

	/* if we have marked one idle: */
	if (ic->tme_sparc_idle_marks == 1) {

	  /* start a new idle instruction burst: */
	  ic->_tme_sparc_instruction_burst_remaining
	    = ic->_tme_sparc_instruction_burst_idle;
	  ic->_tme_sparc_instruction_burst_other = TRUE;
	}

	/* otherwise, we have marked two consecutive idles without a
	   trap: */
	else {
	  assert (ic->tme_sparc_idle_marks == 2);

	  /* idle: */
	  tme_sparc_do_idle(ic);
	}
      }

      /* if this is a cooperative threading system: */
#if TME_THREADS_COOPERATIVE

      /* unbusy the current instruction TLB entry: */
      assert (ic->_tme_sparc_itlb_current_token
	      == itlb_current->tme_sparc_tlb_bus_tlb.tme_bus_tlb_token);
      tme_sparc_tlb_unbusy(itlb_current);
      ic->_tme_sparc_itlb_current_token = NULL;

      /* yield: */
      tme_thread_yield();
#endif /* TME_THREADS_COOPERATIVE */

      /* if we may update the runlength with this instruction burst,
	 note its start time: */
      if (ic->tme_sparc_runlength_update_next == 0) {
	ic->tme_sparc_runlength.tme_runlength_cycles_start = tme_misc_cycles();
      }
    }

    /* we can't already know that this instruction is a taken branch to .: */
    assert (!branch_dot);

    /* we are going to use one instruction in the burst: */
    ic->_tme_sparc_instruction_burst_remaining--;
#ifdef _TME_SPARC_STATS
    ic->tme_sparc_stats.tme_sparc_stats_insns_total++;
#endif /* _TME_SPARC_STATS */

    /* save the previous PC: */
    pc_previous = ic->tme_sparc_ireg(TME_SPARC_IREG_PC);

    /* if we're replaying recoded instructions: */
    if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {

      /* if the previous instruction was the last instruction to
	 verify, return now: */
      if (__tme_predict_false(tme_sparc_recode_verify_replay_last_pc(ic) == pc_previous)) {
	assert (ic->_tme_sparc_itlb_current_token != &token_invalid);
	return;
      }

      /* poison pc_previous to prevent all recoding: */
      pc_previous = ic->tme_sparc_ireg(TME_SPARC_IREG_PC_NEXT) - sizeof(tme_uint32_t);
    }

    /* update the PCs and get the PC of the instruction to execute: */
    pc = ic->tme_sparc_ireg(TME_SPARC_IREG_PC_NEXT);
    ic->tme_sparc_ireg(TME_SPARC_IREG_PC) = pc;
    pc_next_next = ic->tme_sparc_ireg(TME_SPARC_IREG_PC_NEXT_NEXT);
    ic->tme_sparc_ireg(TME_SPARC_IREG_PC_NEXT) = pc_next_next;
    pc_next_next += sizeof(tme_uint32_t);
    if (TME_SPARC_VERSION(ic) >= 9) {
      pc_next_next &= ic->tme_sparc_address_mask;
      assert ((pc | ic->tme_sparc_ireg(TME_SPARC_IREG_PC_NEXT))
	      < ic->tme_sparc_address_mask);
    }
    ic->tme_sparc_ireg(TME_SPARC_IREG_PC_NEXT_NEXT) = pc_next_next;
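
    /* NB: the three PCs now form the usual SPARC delayed control
       transfer pipeline.  for a purely sequential stream, with the
       instruction about to execute at address A, %pc == A,
       %pc_next == A + 4, and %pc_next_next == A + 8; a delayed
       control transfer instruction overwrites %pc_next_next with its
       target, so the target executes after the delay slot at
       %pc_next: */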

    /* NB that we only save instruction TLB entries that allow fast
       reading, and we also change tme_sparc_tlb_addr_last to be the
       last PC covered by the entry (it's normally the last address
       covered by the entry).  this allows us to do minimal checking
       of the current instruction TLB entry at itlb_current: */

    /* if the current instruction TLB entry covers this address: */
    if (__tme_predict_true(((tme_sparc_ireg_t) itlb_current->tme_sparc_tlb_addr_first) <= pc
			   && pc <= ((tme_sparc_ireg_t) itlb_current->tme_sparc_tlb_addr_last))) {

      /* the current instruction TLB entry must cover this
	 address and allow reading: */
      assert (TME_SPARC_TLB_ASI_MASK_OK(itlb_current, asi_mask_insn)
	      && (itlb_current->tme_sparc_tlb_context > ic->tme_sparc_memory_context_max
		  || itlb_current->tme_sparc_tlb_context == ic->tme_sparc_memory_context_default)
	      && itlb_current->tme_sparc_tlb_addr_first <= pc
	      && pc <= itlb_current->tme_sparc_tlb_addr_last);

      /* fetch the instruction: */
      insn = tme_memory_bus_read32((const tme_shared tme_uint32_t *) (itlb_current->tme_sparc_tlb_emulator_off_read + pc),
				   itlb_current->tme_sparc_tlb_bus_rwlock,
				   sizeof(tme_uint32_t),
				   sizeof(tme_sparc_ireg_t));
      insn = tme_betoh_u32(insn);
    }

    /* otherwise, our current TLB entry doesn't cover this address: */
    else {

      /* unbusy the current instruction TLB entry: */
      assert (ic->_tme_sparc_itlb_current_token
	      == itlb_current->tme_sparc_tlb_bus_tlb.tme_bus_tlb_token);
      tme_sparc_tlb_unbusy(itlb_current);

      /* rehash the current instruction TLB entry: */
      tlb_hash = TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, pc);
      itlb_current = &ic->tme_sparc_tlbs[TME_SPARC_ITLB_ENTRY(ic, tlb_hash)];

      /* busy the current instruction TLB entry: */
      tme_sparc_tlb_busy(itlb_current);
      ic->_tme_sparc_itlb_current_token = itlb_current->tme_sparc_tlb_bus_tlb.tme_bus_tlb_token;

      /* if the new current instruction TLB entry is valid and covers
         this address: */
      if (tme_bus_tlb_is_valid(&itlb_current->tme_sparc_tlb_bus_tlb)
	  && __tme_predict_true(TME_SPARC_TLB_ASI_MASK_OK(itlb_current, asi_mask_insn)
				&& (itlb_current->tme_sparc_tlb_context > ic->tme_sparc_memory_context_max
				    || itlb_current->tme_sparc_tlb_context == ic->tme_sparc_memory_context_default)
				&& pc >= (tme_sparc_ireg_t) itlb_current->tme_sparc_tlb_addr_first
				&& pc <= (tme_sparc_ireg_t) itlb_current->tme_sparc_tlb_addr_last)) {

	/* fetch the instruction: */
	insn = tme_memory_bus_read32((const tme_shared tme_uint32_t *) (itlb_current->tme_sparc_tlb_emulator_off_read + pc),
				     itlb_current->tme_sparc_tlb_bus_rwlock,
				     sizeof(tme_uint32_t),
				     sizeof(tme_sparc_ireg_t));
	insn = tme_betoh_u32(insn);
      }

      /* otherwise, the new current instruction TLB entry is not valid
         or does not cover this address: */
      else {

	/* the slow fetch will manage unbusying and busying the
	   current instruction TLB entry, so make sure that doesn't
	   happen at unlock and relock time: */
	ic->_tme_sparc_itlb_current_token = NULL;

	/* fetch the instruction: */
	emulator_off =
#if TME_SPARC_VERSION(ic) < 9
	  tme_sparc32_ls
#else  /* TME_SPARC_VERSION(ic) >= 9 */
	  tme_sparc64_ls
#endif /* TME_SPARC_VERSION(ic) >= 9 */
	  (ic,
	   pc,
	   (tme_sparc_ireg_t *) NULL,
	   (TME_SPARC_LSINFO_SIZE(sizeof(tme_uint32_t))
	    + TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_insn))
	    + TME_SPARC_LSINFO_A
	    + TME_SPARC_LSINFO_OP_FETCH
	    + (annulled
	       ? TME_SPARC_LSINFO_NO_FAULT
	       : 0)));
	assert (emulator_off != TME_EMULATOR_OFF_UNDEF);

	/* unbusy and busy the current instruction TLB entry at unlock
	   and relock time again: */
	ic->_tme_sparc_itlb_current_token = itlb_current->tme_sparc_tlb_bus_tlb.tme_bus_tlb_token;

	/* if this current instruction TLB entry covers the entire
	   instruction and allows fast reading: */
	if (__tme_predict_true(emulator_off == itlb_current->tme_sparc_tlb_emulator_off_read)) {

	  /* the current instruction TLB entry must now cover this
	     address and allow reading: */
	  /* NB that tme_sparc_tlb_addr_last has not been changed yet: */
	  assert (TME_SPARC_TLB_ASI_MASK_OK(itlb_current, asi_mask_insn)
		  && (itlb_current->tme_sparc_tlb_context > ic->tme_sparc_memory_context_max
		      || itlb_current->tme_sparc_tlb_context == ic->tme_sparc_memory_context_default)
		  && itlb_current->tme_sparc_tlb_addr_first <= pc
		  && (pc + sizeof(tme_uint32_t) - 1) <= itlb_current->tme_sparc_tlb_addr_last);

	  /* fetch the instruction: */
	  insn = tme_memory_bus_read32((const tme_shared tme_uint32_t *) (itlb_current->tme_sparc_tlb_emulator_off_read + pc),
				       itlb_current->tme_sparc_tlb_bus_rwlock,
				       sizeof(tme_uint32_t),
				       sizeof(tme_sparc_ireg_t));
	  insn = tme_betoh_u32(insn);
	  /* modify this TLB entry's tme_sparc_tlb_addr_last to be the
	     last valid PC covered by the entry: */
	  itlb_current->tme_sparc_tlb_addr_last
	    &= (((tme_bus_addr_t) 0) - sizeof(tme_uint32_t));
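
	  /* NB: the AND above rounds tme_sparc_tlb_addr_last down to
	     a 32-bit word boundary; an entry covering addresses up to
	     0x0fff, for example, now reports 0x0ffc, the last PC at
	     which a whole instruction can still be fetched. */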
	}

	/* otherwise, this instruction TLB entry does not cover the
	   entire instruction and/or it does not allow fast reading.
	   the instruction has already been loaded into the memory
	   buffer: */
	else {

	  /* unbusy the current instruction TLB entry and poison it,
	     so we won't try to do any fast fetches with it: */
	  assert (ic->_tme_sparc_itlb_current_token
		  == itlb_current->tme_sparc_tlb_bus_tlb.tme_bus_tlb_token);
	  tme_sparc_tlb_unbusy(itlb_current);
	  itlb_current->tme_sparc_tlb_addr_first = 1;
	  itlb_current->tme_sparc_tlb_addr_last = 0;
	  ic->_tme_sparc_itlb_current_token = NULL;

	  /* fetch the instruction from the memory buffer: */
	  assert ((emulator_off + pc) == ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer8s);
	  insn = ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer32s[0];
	  insn = tme_betoh_u32(insn);
#ifdef _TME_SPARC_STATS
	  ic->tme_sparc_stats.tme_sparc_stats_insns_slow++;
#endif /* _TME_SPARC_STATS */

	  /* busy the invalid instruction TLB entry: */
	  itlb_current = &itlb_invalid;
	  assert (ic->_tme_sparc_itlb_current_token == NULL);
	  tme_sparc_tlb_busy(itlb_current);
	  ic->_tme_sparc_itlb_current_token = itlb_current->tme_sparc_tlb_bus_tlb.tme_bus_tlb_token;
	}
      }

      /* if this instruction has been annulled: */
      if (__tme_predict_false(annulled)) {

	/* make this instruction a nop (0x01000000 is "sethi 0, %g0",
	   the canonical SPARC nop): */
	insn = 0x01000000;

	/* when an annulled instruction also happens to be a branch
	   target, we can't run or make an instructions thunk
	   associated with its PC, since instructions thunks don't
	   take the annulled bit as any kind of parameter.  we poison
	   pc_previous to prevent this from happening.  annulled
	   instructions that are also branch targets should be pretty
	   rare anyways: */
	pc_previous = pc - sizeof(tme_uint32_t);
      }

      /* the next instruction will not be annulled: */
      annulled = FALSE;
    }

    /* start this instruction: */
    ic->_tme_sparc_insn = insn;

    /* set %g0 to zero: */
    ic->tme_sparc_ireg(TME_SPARC_G0_OFFSET(ic) + TME_SPARC_IREG_G0) = 0;

#if TME_SPARC_HAVE_RECODE(ic)

    /* if this is the idle PC, and the idle type marks the idle when
       control reaches the idle PC: */
    if (__tme_predict_false(pc == ic->tme_sparc_idle_pcs[0])) {
      if (TME_SPARC_IDLE_TYPE_IS(ic,
				 (TME_SPARC_IDLE_TYPES_TARGET_CALL
				  | TME_SPARC_IDLE_TYPES_TARGET_BRANCH
				  ))) {

	/* mark the idle: */
	TME_SPARC_IDLE_MARK(ic);

	/* poison the previous PC to prevent all recoding, to
	   guarantee that we always see the idle PC (if we allowed the
	   idle PC to be recoded, it might get chained to): */
	pc_previous = pc - sizeof(tme_uint32_t);
      }
    }

    /* if this PC does not follow the previous PC, but the next PC
       follows this PC, this PC is a simple control transfer target: */
    if (__tme_predict_false(((tme_sparc_ireg_t) (pc - sizeof(tme_uint32_t)))
			    != pc_previous)) {
      if (__tme_predict_true(((tme_sparc_ireg_t) (pc + sizeof(tme_uint32_t)))
			     == ic->tme_sparc_ireg(TME_SPARC_IREG_PC_NEXT))) {
	tme_recode_thunk_off_t insns_thunk = 0; /* XXX gcc -Wuninitialized */

	/* if the current instruction TLB entry is not the invalid TLB
	   entry, and there is an instructions thunk for this PC: */
	if (__tme_predict_true(itlb_current != &itlb_invalid
			       && (insns_thunk
				   = tme_sparc_recode(ic,
						      itlb_current,
						      ((const tme_shared tme_uint32_t *)
						       (itlb_current->tme_sparc_tlb_emulator_off_read
							+ pc)))) != 0)) {

	  /* begin verifying this instructions thunk: */
	  tme_sparc_recode_verify_begin(ic);

	  /* like this execution loop, the recode instructions thunks
	     expect PC_next to be the next instruction to execute.
	     we've already updated the PCs above, so we have to undo
	     the update of PC_next.  NB that we don't have to undo PC
	     or PC_next_next, since the instructions thunks don't read
	     them: */
	  pc = ic->tme_sparc_ireg(TME_SPARC_IREG_PC);
	  ic->tme_sparc_ireg(TME_SPARC_IREG_PC_NEXT) = pc;

	  /* run the recode instructions thunk: */
	  TME_SPARC_STAT_N(ic, tme_sparc_stats_insns_total, -1);
	  tme_recode_insns_thunk_run(&ic->tme_sparc_ic,
				     ic->tme_sparc_recode_insns_group.tme_recode_insns_group_chain_thunk,
				     insns_thunk);

	  /* set PC_next_next from PC_next, since the recode
	     instructions thunks usually don't.  (this won't destroy
	     any specially set PC_next_next, because any instruction
	     that sets one is supposed to redispatch.) */
	  pc = ic->tme_sparc_ireg(TME_SPARC_IREG_PC_NEXT);
	  ic->tme_sparc_ireg(TME_SPARC_IREG_PC_NEXT_NEXT) = pc + sizeof(tme_uint32_t);

	  /* end verifying this instructions thunk: */
	  tme_sparc_recode_verify_end(ic, TME_SPARC_TRAP_none);

	  /* we force a PC to make it look like a control transfer has
	     happened (one probably has), to encourage creation of
	     another instructions thunk.  this is something like the
	     opposite of poisoning: */
	  ic->tme_sparc_ireg(TME_SPARC_IREG_PC) = pc;

	  /* instead of figuring out what the currently busy
	     instruction TLB entry is, we simply unbusy the currently
	     busy instruction TLB token and make the current
	     instruction TLB entry invalid: */
	  assert (ic->_tme_sparc_itlb_current_token != NULL);
	  tme_token_unbusy(ic->_tme_sparc_itlb_current_token);
	  itlb_current = &itlb_invalid;
	  tme_token_busy(&token_invalid);
	  ic->_tme_sparc_itlb_current_token = &token_invalid;

	  /* restart the loop: */
	  continue;
	}
      }
    }

#endif /* TME_SPARC_HAVE_RECODE(ic) */

    /* if this is a format three instruction (op is two or three): */
    if (__tme_predict_true(insn >= 0x80000000)) {

      /* if the i bit is zero: */
      if (__tme_predict_true((insn & TME_BIT(13)) == 0)) {

	/* decode rs2: */
	reg_rs2 = TME_FIELD_MASK_EXTRACTU(insn, TME_SPARC_FORMAT3_MASK_RS2);
	TME_SPARC_REG_INDEX(ic, reg_rs2);
      }

      /* otherwise, the i bit is one: */
      else {

	/* decode simm13: */
	ic->tme_sparc_ireg(TME_SPARC_IREG_TMP(0)) = TME_FIELD_MASK_EXTRACTS(insn, (tme_sparc_ireg_t) 0x1fff);
	reg_rs2 = TME_SPARC_IREG_TMP(0);
      }

      /* decode rs1: */
      reg_rs1 = TME_FIELD_MASK_EXTRACTU(insn, TME_SPARC_FORMAT3_MASK_RS1);
      TME_SPARC_REG_INDEX(ic, reg_rs1);

      /* decode rd: */
      reg_rd = TME_FIELD_MASK_EXTRACTU(insn, TME_SPARC_FORMAT3_MASK_RD);
      TME_SPARC_REG_INDEX(ic, reg_rd);

      /* form the opcode index: */
      opcode = TME_FIELD_MASK_EXTRACTU(insn, (0x3f << 19));
      opcode += ((insn >> (30 - 6)) & 0x40);
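
      /* NB: the index packs op3 (bits 24..19) into bits 5..0 and bit
	 30 of the op field into bit 6, giving a single 0..127 opcode
	 index: "add" (op 2, op3 0x00), for example, dispatches at
	 index 0x00, while "ld" (op 3, op3 0x00) dispatches at index
	 0x40: */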

      /* run the instruction: */
      (*_TME_SPARC_EXECUTE_OPMAP[opcode])
	(ic,
	 &ic->tme_sparc_ireg(reg_rs1),
	 &ic->tme_sparc_ireg(reg_rs2),
	 &ic->tme_sparc_ireg(reg_rd));
    }

    /* otherwise, if this is a format two instruction: */
    else if (__tme_predict_true(insn < 0x40000000)) {

      /* dispatch on op2: */
      switch (TME_FIELD_MASK_EXTRACTU(insn, (0x7 << 22))) {

#if TME_SPARC_VERSION(ic) >= 9
      case 1: /* BPcc */

	/* if cc0 is set, this is an illegal instruction: */
	if (__tme_predict_false(insn & TME_BIT(20))) {
	  TME_SPARC_INSN_TRAP(TME_SPARC_TRAP(ic,illegal_instruction));
	}

	/* get %icc or %xcc: */
	cc = ic->tme_sparc64_ireg_ccr;
	if (insn & TME_BIT(21)) {
	  cc /= (TME_SPARC64_CCR_XCC / TME_SPARC64_CCR_ICC);
	}
	cc = TME_FIELD_MASK_EXTRACTU(cc, TME_SPARC64_CCR_ICC);

	/* get the conditions mask: */
	conds_mask = _tme_sparc_conds_icc[cc];

	/* add the not-conditions to the conditions mask: */
	conds_mask += ((conds_mask ^ 0xff) << 8);

	/* clear cc1, cc0, and p: */
	insn &= ~(TME_BIT(21) + TME_BIT(20) + TME_BIT(19));

	/* flip the most significant bit of the disp19: */
	insn ^= TME_BIT(18);

	/* sign-extend the disp19 to a disp22: */
	/* NB: this potentially destroys op2: */
	insn += TME_BIT(22) - TME_BIT(18);
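
	/* NB: the flip-and-add is the standard bias trick for sign
	   extension: with the 19-bit displacement d in bits 18..0,

	     (d ^ (1 << 18)) + ((1 << 22) - (1 << 18))

	   leaves bits 21..0 holding d sign-extended to 22 bits, so
	   the common branch code below can extract one signed 22-bit
	   displacement regardless of the branch format.  (for a
	   nonnegative d the add carries out into bit 22, which is how
	   op2 can be destroyed.) */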
	break;

      case 3: /* BPr */

	/* if bit 28 is set, or if the least significant two bits of
	   cond are clear, this is an illegal instruction: */
	if (__tme_predict_false((insn & TME_BIT(28))
				|| (insn & (0x3 << 25)) == TME_SPARC_COND_N)) {
	  TME_SPARC_INSN_TRAP(TME_SPARC_TRAP(ic,illegal_instruction));
	}

	/* decode rs1: */
	reg_rs1 = TME_FIELD_MASK_EXTRACTU(insn, TME_SPARC_FORMAT3_MASK_RS1);
	TME_SPARC_REG_INDEX(ic, reg_rs1);

	/* make a conditions mask, with the E and LE conditions if the
	   register is zero, and with the L and LE conditions if the
	   register is less than zero: */
	value_rs1 = ic->tme_sparc_ireg(reg_rs1);
	conds_mask
	  = (((value_rs1 == 0)
	      * (TME_BIT(TME_SPARC_COND_E)
		 + TME_BIT(TME_SPARC_COND_LE)))
	     | ((((tme_int64_t) value_rs1) < 0)
		* (TME_BIT(TME_SPARC_COND_L)
		   + TME_BIT(TME_SPARC_COND_LE))));

	/* add the not-conditions to the conditions mask: */
	conds_mask += ((conds_mask ^ 0xf) << 4);

	/* clear rs1 and p, move d16hi down, and clear d16hi: */
	insn
	  = ((insn & ~((2 << 21) - (1 << 14)))
	     + ((insn & (3 << 20)) >> (20 - 14)));
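
	/* NB: in a BPr the branch displacement is split: d16hi lives
	   in bits 21..20 and d16lo in bits 13..0, with rs1 and the p
	   bit in between.  clearing bits 21..14 and adding d16hi
	   shifted down to bits 15..14 leaves a contiguous disp16 in
	   bits 15..0, ready for the same flip-and-add sign extension
	   used for the disp19 formats: */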

	/* flip the most significant bit of the disp16: */
	insn ^= TME_BIT(15);

	/* sign-extend the disp16 to a disp22: */
	/* NB: this potentially destroys op2: */
	insn += TME_BIT(22) - TME_BIT(15);
	break;

      case 5: /* FBPfcc: */
	TME_SPARC_INSN_FPU;

	/* get the right %fcc: */
	cc = TME_FIELD_MASK_EXTRACTU(insn, (0x3 << 20));
	if (cc == 0) {
	  cc = ic->tme_sparc_fpu_fsr / _TME_FIELD_MASK_FACTOR(TME_SPARC_FSR_FCC);
	}
	else {
	  cc = ic->tme_sparc_fpu_xfsr >> (2 * (cc - 1));
	}
	cc &= (TME_SPARC_FSR_FCC / _TME_FIELD_MASK_FACTOR(TME_SPARC_FSR_FCC));
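
	/* NB: %fcc0 lives in the FSR proper, while %fcc1..%fcc3 are
	   packed as consecutive two-bit fields in the extended FSR;
	   %fcc2, for example, is bits 3..2 of tme_sparc_fpu_xfsr: */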

	/* get the conditions mask: */
	conds_mask = _tme_sparc_conds_fcc[cc];

	/* add the not-conditions to the conditions mask: */
	conds_mask += ((conds_mask ^ 0xff) << 8);

	/* clear cc1, cc0, and p: */
	insn &= ~(TME_BIT(21) + TME_BIT(20) + TME_BIT(19));

	/* flip the most significant bit of the disp19: */
	insn ^= TME_BIT(18);

	/* sign-extend the disp19 to a disp22: */
	/* NB: this potentially destroys op2: */
	insn += TME_BIT(22) - TME_BIT(18);
	break;

#endif /* TME_SPARC_VERSION(ic) >= 9 */

      default:

      case 0: /* UNIMP: */
	TME_SPARC_INSN_TRAP(TME_SPARC_TRAP(ic,illegal_instruction));
	continue;

      case 2: /* Bicc: */
	conds_mask_icc = _tme_sparc_conds_icc[
#if TME_SPARC_VERSION(ic) < 9
	  TME_FIELD_MASK_EXTRACTU(ic->tme_sparc32_ireg_psr, TME_SPARC32_PSR_ICC)
#else  /* TME_SPARC_VERSION(ic) >= 9 */
	  TME_FIELD_MASK_EXTRACTU(ic->tme_sparc64_ireg_ccr, TME_SPARC64_CCR_ICC)
#endif /* TME_SPARC_VERSION(ic) >= 9 */
	];

	/* add the not-conditions to the conditions mask: */
	conds_mask = conds_mask_icc ^ 0xff;
	conds_mask = (conds_mask << 8) | conds_mask_icc;
	break;

      case 4: /* SETHI: */

	/* decode rd: */
	reg_rd = TME_FIELD_MASK_EXTRACTU(insn, TME_SPARC_FORMAT3_MASK_RD);
	TME_SPARC_REG_INDEX(ic, reg_rd);
	ic->tme_sparc_ireg(reg_rd) = (insn << 10);
	continue;

      case 6: /* FBfcc: */
	TME_SPARC_INSN_FPU;
	conds_mask_fcc = _tme_sparc_conds_fcc[TME_FIELD_MASK_EXTRACTU(ic->tme_sparc_fpu_fsr, TME_SPARC_FSR_FCC)];

	/* add the not-conditions to the conditions mask: */
	conds_mask = conds_mask_fcc ^ 0xff;
	conds_mask = (conds_mask << 8) | conds_mask_fcc;
	break;
      }
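
      /* NB: each branch case above built conds_mask with the true
	 conditions in its low half and their complements in its high
	 half (a byte each for the icc/fcc branches, a nibble each for
	 BPr).  since the cond codes with the high bit set are
	 architecturally the negations of those without, one bit test
	 decides any branch: "bne" (cond 9), for example, tests bit 9,
	 the complement of "be" (cond 1): */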

      /* get the condition field: */
      cond = TME_FIELD_MASK_EXTRACTU(insn, (0xf << 25));

      /* if this conditional branch is taken: */
      if (conds_mask & TME_BIT(cond)) {

	/* get the raw displacement: */
	disp = TME_FIELD_MASK_EXTRACTS(insn, 0x003fffff);

	/* if there is no recode support, and the raw displacement is zero: */
	if (__tme_predict_false(!TME_SPARC_HAVE_RECODE(ic)
				&& disp == 0)) {

	  /* a taken branch to . is probably a timing loop.  instead
	     of handling that here, which would involve function calls
	     that would probably hurt register allocation, we just set
	     a flag and pretend that this is the last instruction in
	     the burst.  when we start a new burst above, we will find
	     the flag set and do the handling then: */
	  branch_dot = TRUE;
	  branch_dot_burst = ic->_tme_sparc_instruction_burst_remaining;
	  ic->_tme_sparc_instruction_burst_remaining = 0;

	  /* the raw displacement is zero: */
	  /* NB: this is not necessary for correctness, but is an
	     attempt to encourage better register allocation: */
	  disp = 0;
	}

	/* do the delayed control transfer: */
	pc_next_next
	  = (ic->tme_sparc_ireg(TME_SPARC_IREG_PC)
	     + (disp << 2));
	if (TME_SPARC_VERSION(ic) >= 9) {
	  pc_next_next &= ic->tme_sparc_address_mask;
	}
	ic->tme_sparc_ireg(TME_SPARC_IREG_PC_NEXT_NEXT) = pc_next_next;

	/* if there is no recode support, and the delayed control
	   transfer target is the idle PC, and this idle type marks
	   the idle on a branch to the idle PC: */
	if (__tme_predict_false(!TME_SPARC_HAVE_RECODE(ic)
				&& pc_next_next == ic->tme_sparc_idle_pcs[0])) {
	  if (TME_SPARC_IDLE_TYPE_IS(ic, TME_SPARC_IDLE_TYPES_TARGET_BRANCH)) {

	    /* mark the idle: */
	    TME_SPARC_IDLE_MARK(ic);
	  }
	}

	/* if this was a conditional branch, clear the annul bit in
	   the instruction image: */
	if (cond & 7) {
	  insn &= ~TME_BIT(29);
	}
      }

      /* if the annul bit is set: */
      if (insn & TME_BIT(29)) {

	/* the next instruction will be annulled.  to get the
	   execution loop to pay attention to the annulled bit,
	   make the current instruction TLB entry invalid: */
	annulled = TRUE;
	assert (ic->_tme_sparc_itlb_current_token
		== itlb_current->tme_sparc_tlb_bus_tlb.tme_bus_tlb_token);
	tme_sparc_tlb_unbusy(itlb_current);
	itlb_current = &itlb_invalid;
	tme_token_busy(&token_invalid);
	ic->_tme_sparc_itlb_current_token = &token_invalid;
      }
    }

    /* otherwise, this is a format one instruction: */
    else {

      /* get the current PC: */
      pc = ic->tme_sparc_ireg(TME_SPARC_IREG_PC);

      /* write the PC of the CALL into r[15]: */
      ic->tme_sparc_ireg(((ic)->tme_sparc_reg8_offset[15 / 8] * 8) + 15) = pc;

      /* get the delayed control transfer target: */
      pc_next_next = pc + (tme_int32_t) (insn << 2);
      if (TME_SPARC_VERSION(ic) >= 9) {
	pc_next_next &= ic->tme_sparc_address_mask;
      }
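
      /* NB: a CALL is all displacement: op is 01 in bits 31..30 and
	 disp30 fills bits 29..0, so shifting the whole instruction
	 image left by two both discards op and yields the signed
	 32-bit byte displacement: */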

      /* if there is no recode support, and the delayed control
	 transfer target is the idle PC, and this idle type marks
	 the idle on a call to the idle PC: */
      if (__tme_predict_false(!TME_SPARC_HAVE_RECODE(ic)
			      && pc_next_next == ic->tme_sparc_idle_pcs[0])) {
	if (TME_SPARC_IDLE_TYPE_IS(ic, TME_SPARC_IDLE_TYPES_TARGET_CALL)) {

	  /* mark the idle: */
	  TME_SPARC_IDLE_MARK(ic);
	}
      }

      /* log the call: */
      reg_o0 = 8;
      TME_SPARC_REG_INDEX(ic, reg_o0);
      tme_sparc_log(ic, 250, TME_OK,
		    (TME_SPARC_LOG_HANDLE(ic),
		     _("call " TME_PRIxSPARCREG " %%o0 " TME_PRIxSPARCREG " %%o1 " TME_PRIxSPARCREG " %%o2 " TME_PRIxSPARCREG " %%o3 " TME_PRIxSPARCREG " %%o4 " TME_PRIxSPARCREG " %%o5 " TME_PRIxSPARCREG),
		     pc_next_next,
		     ic->tme_sparc_ireg(reg_o0 + 0),
		     ic->tme_sparc_ireg(reg_o0 + 1),
		     ic->tme_sparc_ireg(reg_o0 + 2),
		     ic->tme_sparc_ireg(reg_o0 + 3),
		     ic->tme_sparc_ireg(reg_o0 + 4),
		     ic->tme_sparc_ireg(reg_o0 + 5)));

      /* do the delayed control transfer: */
      ic->tme_sparc_ireg(TME_SPARC_IREG_PC_NEXT_NEXT) = pc_next_next;
    }
  }

  /* NOTREACHED */
}