1 /* automatically generated by sparc-insns-auto.sh, do not edit! */
2 _TME_RCSID("$Id: sparc-insns-auto.sh,v 1.10 2010/06/05 16:13:41 fredette Exp $");
3
4 #include "sparc-impl.h"
5
6 /* an all-bits-zero float for use with _tme_sparc*_fpu_mem_fpreg(): */
7 #if TME_FLOAT_FORMAT_NULL != 0
8 #error "TME_FLOAT_FORMAT_NULL changed"
9 #endif
10 static struct tme_float _tme_sparc_float_null;
11
12 #undef TME_SPARC_VERSION
13 #define TME_SPARC_VERSION(ic) (8)
14
/* decodes the ASI field of the current alternate-space load or store
   instruction and returns the ASI mask to use for the access.
   NOTE(review): TME_SPARC_INSN_PRIV and TME_SPARC_INSN_ILL presumably
   trap and do not return when the check fails — TODO confirm: */
static tme_uint32_t
_tme_sparc32_alternate_asi_mask(struct tme_sparc *ic)
{
  unsigned int asi_data;
  unsigned int asi_mask_flags;
  tme_uint32_t asi_mask_data;

  /* get the ASI, assuming that the i bit is zero (the ASI field is
     bits 5..12 of the instruction): */
  asi_data = TME_FIELD_MASK_EXTRACTU(TME_SPARC_INSN, (0xff << 5));

  /* this is a privileged instruction: */
  TME_SPARC_INSN_PRIV;

  /* if the i bit is one, this is an illegal instruction (an
     alternate-space access cannot use an immediate): */
  if (__tme_predict_false(TME_SPARC_INSN & TME_BIT(13))) {
    TME_SPARC_INSN_ILL(ic);
  }

  /* get the flags for this ASI: */
  asi_mask_flags = ic->tme_sparc_asis[asi_data].tme_sparc_asi_mask_flags;

  /* make the ASI mask: */
  if (asi_mask_flags & TME_SPARC32_ASI_MASK_FLAG_SPECIAL) {
    asi_mask_data
      = TME_SPARC_ASI_MASK_SPECIAL(asi_data, TRUE);
  }
  else {
    asi_mask_data = TME_SPARC32_ASI_MASK(asi_data, asi_data);
  }

  /* if this ASI has a special handler: */
  if (__tme_predict_false(ic->tme_sparc_asis[TME_SPARC_ASI_MASK_WHICH(asi_mask_data)].tme_sparc_asi_handler != 0)) {

    /* force a slow load or store, which will call the special handler: */
    asi_mask_data |= TME_SPARC_ASI_MASK_FLAG_UNDEF;
  }

  return (asi_mask_data);
}
54
/* common preamble for floating-point loads and stores: checks the
   FPU-related traps in priority order and returns the floating-point
   register to transfer.  returns the caller's float_buffer instead
   when the access is misaligned (the eventual load or store will then
   take the mem_address_not_aligned trap) or when no fp register is
   involved (float_buffer has the null format): */
static struct tme_float *
_tme_sparc32_fpu_mem_fpreg(struct tme_sparc *ic,
                           tme_uint32_t misaligned,
                           struct tme_float *float_buffer)
{
  unsigned int float_format;
  unsigned int fpreg_format;
  tme_uint32_t fp_store;
  unsigned int fpu_mode;
  unsigned int fpreg_number;

  /* NB: this checks for various traps by their priority order: */

  TME_SPARC_INSN_FPU_ENABLED;

  /* get the floating-point format: */
  float_format = float_buffer->tme_float_format;

  /* convert the floating-point format into the ieee754
     floating-point register file format.  the compile-time checks
     below prove the conversion is a simple division: */
#if (TME_FLOAT_FORMAT_NULL | TME_IEEE754_FPREG_FORMAT_NULL) != 0
#error "TME_FLOAT_FORMAT_ or TME_IEEE754_FPREG_FORMAT_ values changed"
#endif
#if TME_FLOAT_FORMAT_IEEE754_SINGLE < TME_IEEE754_FPREG_FORMAT_SINGLE
#error "TME_FLOAT_FORMAT_ or TME_IEEE754_FPREG_FORMAT_ values changed"
#endif
#if (TME_FLOAT_FORMAT_IEEE754_SINGLE / TME_IEEE754_FPREG_FORMAT_SINGLE) != (TME_FLOAT_FORMAT_IEEE754_DOUBLE / TME_IEEE754_FPREG_FORMAT_DOUBLE)
#error "TME_FLOAT_FORMAT_ or TME_IEEE754_FPREG_FORMAT_ values changed"
#endif
  assert (float_format == TME_FLOAT_FORMAT_NULL
          || float_format == TME_FLOAT_FORMAT_IEEE754_SINGLE
          || float_format == TME_FLOAT_FORMAT_IEEE754_DOUBLE);
  fpreg_format = float_format / (TME_FLOAT_FORMAT_IEEE754_SINGLE / TME_IEEE754_FPREG_FORMAT_SINGLE);

  /* if the memory address is misaligned, return the
     float buffer now. the eventual load or store will
     cause the mem_address_not_aligned trap: */

  /* if the memory address is misaligned.  fpreg_format doubles as
     the operand size in 32-bit words, so the alignment mask is
     (4 * fpreg_format) - 1: */
#if TME_IEEE754_FPREG_FORMAT_NULL != 0 || TME_IEEE754_FPREG_FORMAT_SINGLE != 1 || TME_IEEE754_FPREG_FORMAT_DOUBLE != 2 || TME_IEEE754_FPREG_FORMAT_QUAD != 4
#error "TME_IEEE754_FPREG_FORMAT_ values changed"
#endif
  assert (fpreg_format == TME_IEEE754_FPREG_FORMAT_NULL
          || fpreg_format == TME_IEEE754_FPREG_FORMAT_SINGLE
          || fpreg_format == TME_IEEE754_FPREG_FORMAT_DOUBLE
          || fpreg_format == TME_IEEE754_FPREG_FORMAT_QUAD);
  misaligned &= ((sizeof(tme_uint32_t) * fpreg_format) - 1);
  if (__tme_predict_false(misaligned)) {

    return (float_buffer);
  }

  /* see if this is a floating-point load or store: */
  /* NB: all of the floating-point instructions that use
     this preamble have bit two of op3 clear for a load,
     and set for a store: */
  fp_store = (TME_SPARC_INSN & TME_BIT(19 + 2));

  /* if the FPU isn't in execute mode: */
  fpu_mode = ic->tme_sparc_fpu_mode;
  if (__tme_predict_false(fpu_mode != TME_SPARC_FPU_MODE_EXECUTE)) {

    /* if this is a floating-point load, or if this is a
       floating-point store and a floating-point exception
       is pending: */
    if (!fp_store
        || fpu_mode == TME_SPARC_FPU_MODE_EXCEPTION_PENDING) {

      /* do an FPU exception check: */
      tme_sparc_fpu_exception_check(ic);
    }
  }

  /* if this is not a load or store of a floating-point register: */
  if (fpreg_format == TME_IEEE754_FPREG_FORMAT_NULL) {
    return (float_buffer);
  }

  /* decode rd: */
  fpreg_number
    = tme_sparc_fpu_fpreg_decode(ic,
                                 TME_FIELD_MASK_EXTRACTU(TME_SPARC_INSN,
                                                         TME_SPARC_FORMAT3_MASK_RD),
                                 fpreg_format);

  /* make sure this floating-point register has the right precision: */
  tme_sparc_fpu_fpreg_format(ic, fpreg_number, fpreg_format | TME_IEEE754_FPREG_FORMAT_BUILTIN);

  /* if this is a floating-point load: */
  if (!fp_store) {

    /* mark rd as dirty: */
    TME_SPARC_FPU_DIRTY(ic, fpreg_number);
  }

  /* return the floating-point register: */
  return (&ic->tme_sparc_fpu_fpregs[fpreg_number]);
}
/* runs only the trap checks of the preamble, with no register
   transfer (the null float has format TME_FLOAT_FORMAT_NULL): */
#define _tme_sparc32_fpu_mem(ic) \
  do { _tme_sparc32_fpu_mem_fpreg(ic, 0, &_tme_sparc_float_null); } while (/* CONSTCOND */ 0)
155
156 /* this does a sparc32 "add SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_add,tme_uint32_t)157 TME_SPARC_FORMAT3(tme_sparc32_add, tme_uint32_t)
158 {
159 tme_uint32_t src1;
160 tme_uint32_t src2;
161 tme_uint32_t dst;
162
163 /* get the operands: */
164 src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
165 src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
166
167 /* perform the operation: */
168 dst = src1 + src2;
169
170 /* store the destination: */
171 TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
172
173 TME_SPARC_INSN_OK;
174 }
175
/* this does a sparc32 "addcc SRC1, SRC2, DST": */
/* NB: the condition-code terms below are branch-free: each boolean
   test yields 0 or 1 and is multiplied by the flag bit it selects;
   the selected bits are distinct, so summing them cannot carry: */
TME_SPARC_FORMAT3(tme_sparc32_addcc, tme_uint32_t)
{
  tme_uint32_t src1;
  tme_uint32_t src2;
  tme_uint32_t dst;
  tme_uint32_t cc;

  /* get the operands: */
  src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
  src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;

  /* perform the operation: */
  dst = src1 + src2;

  /* store the destination: */
  TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;

  /* set Z if the destination is zero: */
  cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);

  /* set N if the destination is negative: */
  cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);

  /* if the operands are the same sign, and the destination has
     a different sign, set V.  (src2 ^ (((tme_uint32_t) 0) - 1)) is
     ~src2, so (src1 ^ ~src2) has its sign bit set exactly when
     src1 and src2 have the same sign: */
  cc += ((((tme_int32_t) ((src2 ^ dst) & (src1 ^ (src2 ^ (((tme_uint32_t) 0) - 1))))) < 0) * TME_SPARC32_PSR_ICC_V);

  /* if src1 and src2 both have the high bit set, or if dst does
     not have the high bit set and either src1 or src2 does, set C
     ((dst ^ (((tme_uint32_t) 0) - 1)) is ~dst): */
  cc += (((tme_int32_t) (((tme_uint32_t) (src1 & src2)) | ((((tme_uint32_t) dst) ^ (((tme_uint32_t) 0) - 1)) & ((tme_uint32_t) (src1 | src2))))) < 0) * TME_SPARC32_PSR_ICC_C;

  /* set the condition codes: */
  ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;

  TME_SPARC_INSN_OK;
}
213
214 /* this does a sparc32 "sub SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_sub,tme_uint32_t)215 TME_SPARC_FORMAT3(tme_sparc32_sub, tme_uint32_t)
216 {
217 tme_uint32_t src1;
218 tme_uint32_t src2;
219 tme_uint32_t dst;
220
221 /* get the operands: */
222 src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
223 src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
224
225 /* perform the operation: */
226 dst = src1 - src2;
227
228 /* store the destination: */
229 TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
230
231 TME_SPARC_INSN_OK;
232 }
233
/* this does a sparc32 "subcc SRC1, SRC2, DST": */
/* NB: the condition-code terms below are branch-free: each boolean
   test yields 0 or 1 and is multiplied by the flag bit it selects: */
TME_SPARC_FORMAT3(tme_sparc32_subcc, tme_uint32_t)
{
  tme_uint32_t src1;
  tme_uint32_t src2;
  tme_uint32_t dst;
  tme_uint32_t cc;

  /* get the operands: */
  src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
  src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;

  /* perform the operation: */
  dst = src1 - src2;

  /* store the destination: */
  TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;

  /* set Z if the destination is zero: */
  cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);

  /* set N if the destination is negative: */
  cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);

  /* if the operands are different signs, and the destination has
     a different sign from the first operand, set V: */
  cc += ((((tme_int32_t) ((src1 ^ src2) & (src1 ^ dst))) < 0) * TME_SPARC32_PSR_ICC_V);

  /* if src2 is greater than src1, set C (C is the unsigned borrow
     out of the subtraction): */
  cc += ((((tme_uint32_t) src2) > ((tme_uint32_t) src1))) * TME_SPARC32_PSR_ICC_C;

  /* set the condition codes: */
  ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;

  TME_SPARC_INSN_OK;
}
270
271 /* this does a sparc32 "or SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_or,tme_uint32_t)272 TME_SPARC_FORMAT3(tme_sparc32_or, tme_uint32_t)
273 {
274 tme_uint32_t src1;
275 tme_uint32_t src2;
276 tme_uint32_t dst;
277
278 /* get the operands: */
279 src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
280 src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
281
282 /* perform the operation: */
283 dst = src1 | src2;
284
285 /* store the destination: */
286 TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
287
288 TME_SPARC_INSN_OK;
289 }
290
291 /* this does a sparc32 "orcc SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_orcc,tme_uint32_t)292 TME_SPARC_FORMAT3(tme_sparc32_orcc, tme_uint32_t)
293 {
294 tme_uint32_t src1;
295 tme_uint32_t src2;
296 tme_uint32_t dst;
297 tme_uint32_t cc;
298
299 /* get the operands: */
300 src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
301 src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
302
303 /* perform the operation: */
304 dst = src1 | src2;
305
306 /* store the destination: */
307 TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
308
309 /* set Z if the destination is zero: */
310 cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);
311
312 /* set N if the destination is negative: */
313 cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);
314
315 /* set the condition codes: */
316 ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;
317
318 TME_SPARC_INSN_OK;
319 }
320
321 /* this does a sparc32 "orn SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_orn,tme_uint32_t)322 TME_SPARC_FORMAT3(tme_sparc32_orn, tme_uint32_t)
323 {
324 tme_uint32_t src1;
325 tme_uint32_t src2;
326 tme_uint32_t dst;
327
328 /* get the operands: */
329 src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
330 src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
331
332 /* perform the operation: */
333 dst = src1 | ~src2;
334
335 /* store the destination: */
336 TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
337
338 TME_SPARC_INSN_OK;
339 }
340
341 /* this does a sparc32 "orncc SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_orncc,tme_uint32_t)342 TME_SPARC_FORMAT3(tme_sparc32_orncc, tme_uint32_t)
343 {
344 tme_uint32_t src1;
345 tme_uint32_t src2;
346 tme_uint32_t dst;
347 tme_uint32_t cc;
348
349 /* get the operands: */
350 src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
351 src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
352
353 /* perform the operation: */
354 dst = src1 | ~src2;
355
356 /* store the destination: */
357 TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
358
359 /* set Z if the destination is zero: */
360 cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);
361
362 /* set N if the destination is negative: */
363 cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);
364
365 /* set the condition codes: */
366 ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;
367
368 TME_SPARC_INSN_OK;
369 }
370
371 /* this does a sparc32 "and SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_and,tme_uint32_t)372 TME_SPARC_FORMAT3(tme_sparc32_and, tme_uint32_t)
373 {
374 tme_uint32_t src1;
375 tme_uint32_t src2;
376 tme_uint32_t dst;
377
378 /* get the operands: */
379 src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
380 src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
381
382 /* perform the operation: */
383 dst = src1 & src2;
384
385 /* store the destination: */
386 TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
387
388 TME_SPARC_INSN_OK;
389 }
390
391 /* this does a sparc32 "andcc SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_andcc,tme_uint32_t)392 TME_SPARC_FORMAT3(tme_sparc32_andcc, tme_uint32_t)
393 {
394 tme_uint32_t src1;
395 tme_uint32_t src2;
396 tme_uint32_t dst;
397 tme_uint32_t cc;
398
399 /* get the operands: */
400 src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
401 src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
402
403 /* perform the operation: */
404 dst = src1 & src2;
405
406 /* store the destination: */
407 TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
408
409 /* set Z if the destination is zero: */
410 cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);
411
412 /* set N if the destination is negative: */
413 cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);
414
415 /* set the condition codes: */
416 ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;
417
418 TME_SPARC_INSN_OK;
419 }
420
421 /* this does a sparc32 "andn SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_andn,tme_uint32_t)422 TME_SPARC_FORMAT3(tme_sparc32_andn, tme_uint32_t)
423 {
424 tme_uint32_t src1;
425 tme_uint32_t src2;
426 tme_uint32_t dst;
427
428 /* get the operands: */
429 src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
430 src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
431
432 /* perform the operation: */
433 dst = src1 & ~src2;
434
435 /* store the destination: */
436 TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
437
438 TME_SPARC_INSN_OK;
439 }
440
441 /* this does a sparc32 "andncc SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_andncc,tme_uint32_t)442 TME_SPARC_FORMAT3(tme_sparc32_andncc, tme_uint32_t)
443 {
444 tme_uint32_t src1;
445 tme_uint32_t src2;
446 tme_uint32_t dst;
447 tme_uint32_t cc;
448
449 /* get the operands: */
450 src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
451 src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
452
453 /* perform the operation: */
454 dst = src1 & ~src2;
455
456 /* store the destination: */
457 TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
458
459 /* set Z if the destination is zero: */
460 cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);
461
462 /* set N if the destination is negative: */
463 cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);
464
465 /* set the condition codes: */
466 ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;
467
468 TME_SPARC_INSN_OK;
469 }
470
471 /* this does a sparc32 "xor SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_xor,tme_uint32_t)472 TME_SPARC_FORMAT3(tme_sparc32_xor, tme_uint32_t)
473 {
474 tme_uint32_t src1;
475 tme_uint32_t src2;
476 tme_uint32_t dst;
477
478 /* get the operands: */
479 src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
480 src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
481
482 /* perform the operation: */
483 dst = src1 ^ src2;
484
485 /* store the destination: */
486 TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
487
488 TME_SPARC_INSN_OK;
489 }
490
491 /* this does a sparc32 "xorcc SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_xorcc,tme_uint32_t)492 TME_SPARC_FORMAT3(tme_sparc32_xorcc, tme_uint32_t)
493 {
494 tme_uint32_t src1;
495 tme_uint32_t src2;
496 tme_uint32_t dst;
497 tme_uint32_t cc;
498
499 /* get the operands: */
500 src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
501 src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
502
503 /* perform the operation: */
504 dst = src1 ^ src2;
505
506 /* store the destination: */
507 TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
508
509 /* set Z if the destination is zero: */
510 cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);
511
512 /* set N if the destination is negative: */
513 cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);
514
515 /* set the condition codes: */
516 ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;
517
518 TME_SPARC_INSN_OK;
519 }
520
521 /* this does a sparc32 "xnor SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_xnor,tme_uint32_t)522 TME_SPARC_FORMAT3(tme_sparc32_xnor, tme_uint32_t)
523 {
524 tme_uint32_t src1;
525 tme_uint32_t src2;
526 tme_uint32_t dst;
527
528 /* get the operands: */
529 src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
530 src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
531
532 /* perform the operation: */
533 dst = src1 ^ ~src2;
534
535 /* store the destination: */
536 TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
537
538 TME_SPARC_INSN_OK;
539 }
540
541 /* this does a sparc32 "xnorcc SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_xnorcc,tme_uint32_t)542 TME_SPARC_FORMAT3(tme_sparc32_xnorcc, tme_uint32_t)
543 {
544 tme_uint32_t src1;
545 tme_uint32_t src2;
546 tme_uint32_t dst;
547 tme_uint32_t cc;
548
549 /* get the operands: */
550 src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
551 src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
552
553 /* perform the operation: */
554 dst = src1 ^ ~src2;
555
556 /* store the destination: */
557 TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
558
559 /* set Z if the destination is zero: */
560 cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);
561
562 /* set N if the destination is negative: */
563 cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);
564
565 /* set the condition codes: */
566 ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;
567
568 TME_SPARC_INSN_OK;
569 }
570
571 /* this does a sparc32 "addx SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_addx,tme_uint32_t)572 TME_SPARC_FORMAT3(tme_sparc32_addx, tme_uint32_t)
573 {
574 tme_uint32_t src1;
575 tme_uint32_t src2;
576 tme_uint32_t dst;
577
578 /* get the operands: */
579 src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
580 src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
581
582 /* perform the operation: */
583 dst = src1 + src2;
584 dst += ((ic->tme_sparc32_ireg_psr & TME_SPARC32_PSR_ICC_C) != 0);
585
586 /* store the destination: */
587 TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
588
589 TME_SPARC_INSN_OK;
590 }
591
/* this does a sparc32 "addxcc SRC1, SRC2, DST": */
/* NB: the condition-code terms below are branch-free: each boolean
   test yields 0 or 1 and is multiplied by the flag bit it selects.
   the V and C formulas hold for the full sum including carry-in
   because dst already incorporates it: */
TME_SPARC_FORMAT3(tme_sparc32_addxcc, tme_uint32_t)
{
  tme_uint32_t src1;
  tme_uint32_t src2;
  tme_uint32_t dst;
  tme_uint32_t cc;

  /* get the operands: */
  src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
  src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;

  /* perform the operation, adding in the PSR icc C bit: */
  dst = src1 + src2;
  dst += ((ic->tme_sparc32_ireg_psr & TME_SPARC32_PSR_ICC_C) != 0);

  /* store the destination: */
  TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;

  /* set Z if the destination is zero: */
  cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);

  /* set N if the destination is negative: */
  cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);

  /* if the operands are the same sign, and the destination has
     a different sign, set V ((src2 ^ (0 - 1)) is ~src2): */
  cc += ((((tme_int32_t) ((src2 ^ dst) & (src1 ^ (src2 ^ (((tme_uint32_t) 0) - 1))))) < 0) * TME_SPARC32_PSR_ICC_V);

  /* if src1 and src2 both have the high bit set, or if dst does
     not have the high bit set and either src1 or src2 does, set C: */
  cc += (((tme_int32_t) (((tme_uint32_t) (src1 & src2)) | ((((tme_uint32_t) dst) ^ (((tme_uint32_t) 0) - 1)) & ((tme_uint32_t) (src1 | src2))))) < 0) * TME_SPARC32_PSR_ICC_C;

  /* set the condition codes: */
  ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;

  TME_SPARC_INSN_OK;
}
630
631 /* this does a sparc32 "subx SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_subx,tme_uint32_t)632 TME_SPARC_FORMAT3(tme_sparc32_subx, tme_uint32_t)
633 {
634 tme_uint32_t src1;
635 tme_uint32_t src2;
636 tme_uint32_t dst;
637
638 /* get the operands: */
639 src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
640 src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
641
642 /* perform the operation: */
643 dst = src1 - src2;
644 dst -= ((ic->tme_sparc32_ireg_psr & TME_SPARC32_PSR_ICC_C) != 0);
645
646 /* store the destination: */
647 TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
648
649 TME_SPARC_INSN_OK;
650 }
651
/* this does a sparc32 "subxcc SRC1, SRC2, DST": */
/* NB: the condition-code terms below are branch-free: each boolean
   test yields 0 or 1 and is multiplied by the flag bit it selects: */
TME_SPARC_FORMAT3(tme_sparc32_subxcc, tme_uint32_t)
{
  tme_uint32_t src1;
  tme_uint32_t src2;
  tme_uint32_t dst;
  tme_uint32_t cc;

  /* get the operands: */
  src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
  src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;

  /* perform the operation, subtracting the PSR icc C bit (borrow-in): */
  dst = src1 - src2;
  dst -= ((ic->tme_sparc32_ireg_psr & TME_SPARC32_PSR_ICC_C) != 0);

  /* store the destination: */
  TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;

  /* set Z if the destination is zero: */
  cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);

  /* set N if the destination is negative: */
  cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);

  /* if the operands are different signs, and the destination has
     a different sign from the first operand, set V: */
  cc += ((((tme_int32_t) ((src1 ^ src2) & (src1 ^ dst))) < 0) * TME_SPARC32_PSR_ICC_V);

  /* if src2 is greater than src1, set C.  the equal case also
     borrows when the incoming C (borrow-in) bit is set: */
  cc += ((((tme_uint32_t) src2) > ((tme_uint32_t) src1)) || (((tme_uint32_t) src2) == ((tme_uint32_t) src1) && (ic->tme_sparc32_ireg_psr & TME_SPARC32_PSR_ICC_C))) * TME_SPARC32_PSR_ICC_C;

  /* set the condition codes: */
  ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;

  TME_SPARC_INSN_OK;
}
689
/* this does a sparc32 "taddcc SRC1, SRC2, DST": */
/* NB: the condition-code terms below are branch-free: each boolean
   test yields 0 or 1 and is multiplied by the flag bit it selects: */
TME_SPARC_FORMAT3(tme_sparc32_taddcc, tme_uint32_t)
{
  tme_uint32_t src1;
  tme_uint32_t src2;
  tme_uint32_t dst;
  tme_uint32_t cc;

  /* get the operands: */
  src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
  src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;

  /* perform the operation: */
  dst = src1 + src2;

  /* store the destination: */
  TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;

  /* set Z if the destination is zero: */
  cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);

  /* set N if the destination is negative: */
  cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);

  /* if the operands are the same sign, and the destination has
     a different sign, set V ((src2 ^ (0 - 1)) is ~src2): */
  cc += ((((tme_int32_t) ((src2 ^ dst) & (src1 ^ (src2 ^ (((tme_uint32_t) 0) - 1))))) < 0) * TME_SPARC32_PSR_ICC_V);

  /* if src1 and src2 both have the high bit set, or if dst does
     not have the high bit set and either src1 or src2 does, set C: */
  cc += (((tme_int32_t) (((tme_uint32_t) (src1 & src2)) | ((((tme_uint32_t) dst) ^ (((tme_uint32_t) 0) - 1)) & ((tme_uint32_t) (src1 | src2))))) < 0) * TME_SPARC32_PSR_ICC_C;

  /* set V if bits zero or one of src1 or src2 are set (a tagged
     operand must have its two low bits clear).  NB: |= because the
     arithmetic-overflow term above may already have set V: */
  cc |= ((((src1 | src2) & 3) != 0) * TME_SPARC32_PSR_ICC_V);

  /* set the condition codes: */
  ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;

  TME_SPARC_INSN_OK;
}
730
/* this does a sparc32 "taddcctv SRC1, SRC2, DST": */
/* NB: unlike taddcc, the destination register and condition codes
   are written only after the tag-overflow check, so a trapping
   instruction leaves them unchanged: */
TME_SPARC_FORMAT3(tme_sparc32_taddcctv, tme_uint32_t)
{
  tme_uint32_t src1;
  tme_uint32_t src2;
  tme_uint32_t dst;
  tme_uint32_t cc;

  /* get the operands: */
  src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
  src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;

  /* perform the operation: */
  dst = src1 + src2;

  /* set Z if the destination is zero: */
  cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);

  /* set N if the destination is negative: */
  cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);

  /* if the operands are the same sign, and the destination has
     a different sign, set V ((src2 ^ (0 - 1)) is ~src2): */
  cc += ((((tme_int32_t) ((src2 ^ dst) & (src1 ^ (src2 ^ (((tme_uint32_t) 0) - 1))))) < 0) * TME_SPARC32_PSR_ICC_V);

  /* if src1 and src2 both have the high bit set, or if dst does
     not have the high bit set and either src1 or src2 does, set C: */
  cc += (((tme_int32_t) (((tme_uint32_t) (src1 & src2)) | ((((tme_uint32_t) dst) ^ (((tme_uint32_t) 0) - 1)) & ((tme_uint32_t) (src1 | src2))))) < 0) * TME_SPARC32_PSR_ICC_C;

  /* set V if bits zero or one of src1 or src2 are set (a tagged
     operand must have its two low bits clear): */
  cc |= ((((src1 | src2) & 3) != 0) * TME_SPARC32_PSR_ICC_V);

  /* trap on a tagged overflow.  NOTE(review): tme_sparc32_trap()
     presumably does not return — TODO confirm: */
  if (cc & TME_SPARC32_PSR_ICC_V) {
    tme_sparc32_trap(ic, TME_SPARC32_TRAP_tag_overflow);
  }
  /* store the destination: */
  TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;

  /* set the condition codes: */
  ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;

  TME_SPARC_INSN_OK;
}
775
/* this does a sparc32 "tsubcc SRC1, SRC2, DST": */
/* NB: the condition-code terms below are branch-free: each boolean
   test yields 0 or 1 and is multiplied by the flag bit it selects: */
TME_SPARC_FORMAT3(tme_sparc32_tsubcc, tme_uint32_t)
{
  tme_uint32_t src1;
  tme_uint32_t src2;
  tme_uint32_t dst;
  tme_uint32_t cc;

  /* get the operands: */
  src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
  src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;

  /* perform the operation: */
  dst = src1 - src2;

  /* store the destination: */
  TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;

  /* set Z if the destination is zero: */
  cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);

  /* set N if the destination is negative: */
  cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);

  /* if the operands are different signs, and the destination has
     a different sign from the first operand, set V: */
  cc += ((((tme_int32_t) ((src1 ^ src2) & (src1 ^ dst))) < 0) * TME_SPARC32_PSR_ICC_V);

  /* if src2 is greater than src1, set C (unsigned borrow): */
  cc += ((((tme_uint32_t) src2) > ((tme_uint32_t) src1))) * TME_SPARC32_PSR_ICC_C;

  /* set V if bits zero or one of src1 or src2 are set (a tagged
     operand must have its two low bits clear).  NB: |= because the
     arithmetic-overflow term above may already have set V: */
  cc |= ((((src1 | src2) & 3) != 0) * TME_SPARC32_PSR_ICC_V);

  /* set the condition codes: */
  ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;

  TME_SPARC_INSN_OK;
}
815
/* this does a sparc32 "tsubcctv SRC1, SRC2, DST": */
/* NB: unlike tsubcc, the destination register and condition codes
   are written only after the tag-overflow check, so a trapping
   instruction leaves them unchanged: */
TME_SPARC_FORMAT3(tme_sparc32_tsubcctv, tme_uint32_t)
{
  tme_uint32_t src1;
  tme_uint32_t src2;
  tme_uint32_t dst;
  tme_uint32_t cc;

  /* get the operands: */
  src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
  src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;

  /* perform the operation: */
  dst = src1 - src2;

  /* set Z if the destination is zero: */
  cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);

  /* set N if the destination is negative: */
  cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);

  /* if the operands are different signs, and the destination has
     a different sign from the first operand, set V: */
  cc += ((((tme_int32_t) ((src1 ^ src2) & (src1 ^ dst))) < 0) * TME_SPARC32_PSR_ICC_V);

  /* if src2 is greater than src1, set C (unsigned borrow): */
  cc += ((((tme_uint32_t) src2) > ((tme_uint32_t) src1))) * TME_SPARC32_PSR_ICC_C;

  /* set V if bits zero or one of src1 or src2 are set (a tagged
     operand must have its two low bits clear): */
  cc |= ((((src1 | src2) & 3) != 0) * TME_SPARC32_PSR_ICC_V);

  /* trap on a tagged overflow.  NOTE(review): tme_sparc32_trap()
     presumably does not return — TODO confirm: */
  if (cc & TME_SPARC32_PSR_ICC_V) {
    tme_sparc32_trap(ic, TME_SPARC32_TRAP_tag_overflow);
  }
  /* store the destination: */
  TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;

  /* set the condition codes: */
  ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;

  TME_SPARC_INSN_OK;
}
859
860 /* this does a sparc32 "umul SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_umul,tme_uint32_t)861 TME_SPARC_FORMAT3(tme_sparc32_umul, tme_uint32_t)
862 {
863 tme_uint32_t src1;
864 tme_uint32_t src2;
865 tme_uint32_t dst;
866 tme_uint64_t val64;
867
868 /* get the operands: */
869 src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
870 src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
871
872 /* perform the operation: */
873 val64 = (((tme_uint64_t) src1) * src2);
874 ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y) = (((tme_uint64_t) val64) >> 32);
875 dst = ((tme_uint64_t) val64);
876
877 /* store the destination: */
878 TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
879
880 TME_SPARC_INSN_OK;
881 }
882
883 /* this does a sparc32 "umulcc SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_umulcc,tme_uint32_t)884 TME_SPARC_FORMAT3(tme_sparc32_umulcc, tme_uint32_t)
885 {
886 tme_uint32_t src1;
887 tme_uint32_t src2;
888 tme_uint32_t dst;
889 tme_uint64_t val64;
890 tme_uint32_t cc;
891
892 /* get the operands: */
893 src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
894 src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
895
896 /* perform the operation: */
897 val64 = (((tme_uint64_t) src1) * src2);
898 ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y) = (((tme_uint64_t) val64) >> 32);
899 dst = ((tme_uint64_t) val64);
900
901 /* store the destination: */
902 TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
903
904 /* set Z if the destination is zero: */
905 cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);
906
907 /* set N if the destination is negative: */
908 cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);
909
910 /* set the condition codes: */
911 ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;
912
913 TME_SPARC_INSN_OK;
914 }
915
916 /* this does a sparc32 "smul SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_smul,tme_uint32_t)917 TME_SPARC_FORMAT3(tme_sparc32_smul, tme_uint32_t)
918 {
919 tme_int32_t src1;
920 tme_int32_t src2;
921 tme_int32_t dst;
922 tme_int64_t val64;
923
924 /* get the operands: */
925 src1 = (tme_int32_t) TME_SPARC_FORMAT3_RS1;
926 src2 = (tme_int32_t) TME_SPARC_FORMAT3_RS2;
927
928 /* perform the operation: */
929 val64 = (((tme_int64_t) src1) * src2);
930 ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y) = (((tme_uint64_t) val64) >> 32);
931 dst = ((tme_int64_t) val64);
932
933 /* store the destination: */
934 TME_SPARC_FORMAT3_RD = (tme_int32_t) dst;
935
936 TME_SPARC_INSN_OK;
937 }
938
939 /* this does a sparc32 "smulcc SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_smulcc,tme_uint32_t)940 TME_SPARC_FORMAT3(tme_sparc32_smulcc, tme_uint32_t)
941 {
942 tme_int32_t src1;
943 tme_int32_t src2;
944 tme_int32_t dst;
945 tme_int64_t val64;
946 tme_uint32_t cc;
947
948 /* get the operands: */
949 src1 = (tme_int32_t) TME_SPARC_FORMAT3_RS1;
950 src2 = (tme_int32_t) TME_SPARC_FORMAT3_RS2;
951
952 /* perform the operation: */
953 val64 = (((tme_int64_t) src1) * src2);
954 ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y) = (((tme_uint64_t) val64) >> 32);
955 dst = ((tme_int64_t) val64);
956
957 /* store the destination: */
958 TME_SPARC_FORMAT3_RD = (tme_int32_t) dst;
959
960 /* set Z if the destination is zero: */
961 cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);
962
963 /* set N if the destination is negative: */
964 cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);
965
966 /* set the condition codes: */
967 ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;
968
969 TME_SPARC_INSN_OK;
970 }
971
972 /* this does a sparc32 "udiv SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_udiv,tme_uint32_t)973 TME_SPARC_FORMAT3(tme_sparc32_udiv, tme_uint32_t)
974 {
975 tme_uint32_t src1;
976 tme_uint32_t src2;
977 tme_uint32_t dst;
978 tme_uint64_t val64;
979
980 /* get the operands: */
981 src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
982 src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
983
984 /* perform the operation: */
985 val64 = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y);
986 val64 = (val64 << 32) + (tme_uint32_t) src1;
987 if (__tme_predict_false(src2 == 0)) {
988 tme_sparc32_trap(ic, TME_SPARC32_TRAP_division_by_zero);
989 }
990 val64 /= src2;
991 dst = (tme_uint32_t) val64;
992
993 /* if the division overflowed: */
994 if (dst != val64) {
995
996 /* return the largest appropriate value: */
997 dst = 0xffffffff;
998 }
999
1000 /* store the destination: */
1001 TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
1002
1003 TME_SPARC_INSN_OK;
1004 }
1005
1006 /* this does a sparc32 "udivcc SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_udivcc,tme_uint32_t)1007 TME_SPARC_FORMAT3(tme_sparc32_udivcc, tme_uint32_t)
1008 {
1009 tme_uint32_t src1;
1010 tme_uint32_t src2;
1011 tme_uint32_t dst;
1012 tme_uint64_t val64;
1013 tme_uint32_t cc;
1014
1015 /* get the operands: */
1016 src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
1017 src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
1018
1019 /* perform the operation: */
1020 val64 = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y);
1021 val64 = (val64 << 32) + (tme_uint32_t) src1;
1022 if (__tme_predict_false(src2 == 0)) {
1023 tme_sparc32_trap(ic, TME_SPARC32_TRAP_division_by_zero);
1024 }
1025 val64 /= src2;
1026 dst = (tme_uint32_t) val64;
1027
1028 /* if the division overflowed: */
1029 if (dst != val64) {
1030
1031 /* return the largest appropriate value: */
1032 dst = 0xffffffff;
1033
1034 /* set V: */
1035 cc = TME_SPARC32_PSR_ICC_V;
1036 }
1037
1038 /* otherwise, the division didn't overflow: */
1039 else {
1040
1041 /* clear V: */
1042 cc = !TME_SPARC32_PSR_ICC_V;
1043 }
1044
1045 /* store the destination: */
1046 TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
1047
1048 /* set Z if the destination is zero: */
1049 cc += ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);
1050
1051 /* set N if the destination is negative: */
1052 cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);
1053
1054 /* set the condition codes: */
1055 ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;
1056
1057 TME_SPARC_INSN_OK;
1058 }
1059
1060 /* this does a sparc32 "sdiv SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_sdiv,tme_uint32_t)1061 TME_SPARC_FORMAT3(tme_sparc32_sdiv, tme_uint32_t)
1062 {
1063 tme_int32_t src1;
1064 tme_int32_t src2;
1065 tme_int32_t dst;
1066 tme_int64_t val64;
1067
1068 /* get the operands: */
1069 src1 = (tme_int32_t) TME_SPARC_FORMAT3_RS1;
1070 src2 = (tme_int32_t) TME_SPARC_FORMAT3_RS2;
1071
1072 /* perform the operation: */
1073 val64 = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y);
1074 val64 = (val64 << 32) + (tme_uint32_t) src1;
1075 if (__tme_predict_false(src2 == 0)) {
1076 tme_sparc32_trap(ic, TME_SPARC32_TRAP_division_by_zero);
1077 }
1078 val64 /= src2;
1079 dst = (tme_int32_t) val64;
1080
1081 /* if the division overflowed: */
1082 if (dst != val64) {
1083
1084 /* return the largest appropriate value: */
1085 dst = (tme_int32_t) ((val64 < 0) + (tme_uint32_t) 0x7fffffff);
1086 }
1087
1088 /* store the destination: */
1089 TME_SPARC_FORMAT3_RD = (tme_int32_t) dst;
1090
1091 TME_SPARC_INSN_OK;
1092 }
1093
1094 /* this does a sparc32 "sdivcc SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_sdivcc,tme_uint32_t)1095 TME_SPARC_FORMAT3(tme_sparc32_sdivcc, tme_uint32_t)
1096 {
1097 tme_int32_t src1;
1098 tme_int32_t src2;
1099 tme_int32_t dst;
1100 tme_int64_t val64;
1101 tme_uint32_t cc;
1102
1103 /* get the operands: */
1104 src1 = (tme_int32_t) TME_SPARC_FORMAT3_RS1;
1105 src2 = (tme_int32_t) TME_SPARC_FORMAT3_RS2;
1106
1107 /* perform the operation: */
1108 val64 = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y);
1109 val64 = (val64 << 32) + (tme_uint32_t) src1;
1110 if (__tme_predict_false(src2 == 0)) {
1111 tme_sparc32_trap(ic, TME_SPARC32_TRAP_division_by_zero);
1112 }
1113 val64 /= src2;
1114 dst = (tme_int32_t) val64;
1115
1116 /* if the division overflowed: */
1117 if (dst != val64) {
1118
1119 /* return the largest appropriate value: */
1120 dst = (tme_int32_t) ((val64 < 0) + (tme_uint32_t) 0x7fffffff);
1121
1122 /* set V: */
1123 cc = TME_SPARC32_PSR_ICC_V;
1124 }
1125
1126 /* otherwise, the division didn't overflow: */
1127 else {
1128
1129 /* clear V: */
1130 cc = !TME_SPARC32_PSR_ICC_V;
1131 }
1132
1133 /* store the destination: */
1134 TME_SPARC_FORMAT3_RD = (tme_int32_t) dst;
1135
1136 /* set Z if the destination is zero: */
1137 cc += ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);
1138
1139 /* set N if the destination is negative: */
1140 cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);
1141
1142 /* set the condition codes: */
1143 ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;
1144
1145 TME_SPARC_INSN_OK;
1146 }
1147
1148 /* the sparc32 sll function: */
TME_SPARC_FORMAT3(tme_sparc32_sll,tme_uint32_t)1149 TME_SPARC_FORMAT3(tme_sparc32_sll, tme_uint32_t)
1150 {
1151 tme_uint32_t dst;
1152 unsigned int count;
1153
1154 /* get the value and the shift count: */
1155 dst = TME_SPARC_FORMAT3_RS1;
1156 count = TME_SPARC_FORMAT3_RS2;
1157
1158 /* limit the count: */
1159 count %= 32;
1160
1161 /* do the shift: */
1162 #if defined(SHIFTMAX_INT32_T) && (SHIFTMAX_INT32_T < (32 - 1))
1163 #error "cannot do full shifts of a tme_int32_t"
1164 #endif /* (SHIFTMAX_INT32_T < (32 - 1)) */
1165 dst <<= count;
1166
1167 /* store the destination: */
1168 TME_SPARC_FORMAT3_RD = dst;
1169
1170 TME_SPARC_INSN_OK;
1171 }
1172
1173 /* the sparc32 srl function: */
TME_SPARC_FORMAT3(tme_sparc32_srl,tme_uint32_t)1174 TME_SPARC_FORMAT3(tme_sparc32_srl, tme_uint32_t)
1175 {
1176 tme_uint32_t dst;
1177 unsigned int count;
1178
1179 /* get the value and the shift count: */
1180 dst = TME_SPARC_FORMAT3_RS1;
1181 count = TME_SPARC_FORMAT3_RS2;
1182
1183 /* limit the count: */
1184 count %= 32;
1185
1186 /* do the shift: */
1187 #if defined(SHIFTMAX_INT32_T) && (SHIFTMAX_INT32_T < (32 - 1))
1188 #error "cannot do full shifts of a tme_int32_t"
1189 #endif /* (SHIFTMAX_INT32_T < (32 - 1)) */
1190 dst >>= count;
1191
1192 /* store the destination: */
1193 TME_SPARC_FORMAT3_RD = dst;
1194
1195 TME_SPARC_INSN_OK;
1196 }
1197
/* the sparc32 sra function: arithmetic (sign-propagating) right
   shift of rs1 by the low five bits of rs2, into rd: */
TME_SPARC_FORMAT3(tme_sparc32_sra, tme_uint32_t)
{
  tme_int32_t dst;
  unsigned int count;

  /* get the value and the shift count: */
  /* the value is held in a signed variable so that the ">>" below
     (when the host supports it) propagates the sign bit: */
  dst = TME_SPARC_FORMAT3_RS1;
  count = TME_SPARC_FORMAT3_RS2;

  /* limit the count: only the low five bits are significant: */
  count %= 32;

  /* do the shift: */
#ifdef SHIFTSIGNED_INT32_T
#if defined(SHIFTMAX_INT32_T) && (SHIFTMAX_INT32_T < (32 - 1))
#error "cannot do full shifts of a tme_int32_t"
#endif /* (SHIFTMAX_INT32_T < (32 - 1)) */
  dst >>= count;
#else /* !SHIFTSIGNED_INT32_T */
  /* on hosts where ">>" of a signed value is not an arithmetic
     shift, shift one bit at a time.  clearing the low bit before
     dividing by two makes the (truncating) division round toward
     minus infinity, which matches a one-bit arithmetic shift even
     for negative values (e.g. -3 -> -4 / 2 -> -2, and -1 stays -1): */
  for (; count-- > 0; ) {
    dst = (dst & ~((tme_int32_t) 1)) / 2;
  }
#endif /* !SHIFTSIGNED_INT32_T */

  /* store the destination: */
  TME_SPARC_FORMAT3_RD = dst;

  TME_SPARC_INSN_OK;
}
1228
/* this does a sparc32 ldb: load one byte from memory into rd.
   bit 22 of the instruction selects sign extension (the signed
   ldsb form, as opposed to the unsigned ldub form): */
TME_SPARC_FORMAT3(tme_sparc32_ldb, tme_uint32_t)
{
  tme_uint32_t address;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  const tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint8_t value8;
  tme_uint32_t value32;

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer: */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
                              ic->tme_sparc_asi_mask_data, address,
                              (TME_RECODE_SIZE_8
                               | TME_SPARC_RECODE_VERIFY_MEM_LOAD));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* assume that no DTLB ASI mask flags will require a slow load: */
  /* (this generated variant keeps the flags zero, so the flags test
     in the condition below can never force the slow path:) */
  asi_mask_flags_slow = 0;

  /* get and busy the DTLB entry: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_read;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = ic->tme_sparc_memory_context_default;
  }

  /* we must call the slow load function if: */
  if (__tme_predict_false(

    /* the DTLB entry is invalid: */
    tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

    /* the DTLB entry does not match the context: */
    || dtlb_context != ic->tme_sparc_memory_context_default

    /* the DTLB entry does not cover the needed addresses: */
    || (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
    || ((address + ((8 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)

    /* the DTLB entry does not cover the needed address space: */
    || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))

    /* the DTLB entry can't be used for a fast ldb: */
    || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

    /* the DTLB entry does not allow fast transfers: */
    || (memory == TME_EMULATOR_OFF_UNDEF)

    )) {

    /* call the slow load function: */
    /* NOTE(review): the fast-transfer code below still runs after
       the slow path; presumably tme_sparc32_ls returns a pointer
       positioned so that "memory + address" addresses the value it
       already fetched -- confirm against tme_sparc32_ls: */
    memory = tme_sparc32_ls(ic,
                            address,
                            &TME_SPARC_FORMAT3_RD,
                            (TME_SPARC_LSINFO_OP_LD
                             | (8 / 8)));
  }

  /* get the byte order of this transfer: */
  /* (a single byte needs no swap, so this generated variable is
     unused in the byte case:) */
  endian_little = FALSE;

  /* do the fast transfer: */
  memory += address;
  value8 = tme_memory_bus_read8((const tme_shared tme_uint8_t *) memory, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t), sizeof(tme_uint32_t));

  /* possibly sign-extend the loaded value: */
  value32 = value8;
  if (TME_SPARC_INSN & TME_BIT(22)) {
    value32 = (tme_uint32_t) (tme_int32_t) (tme_int8_t) value32;
  }
  /* (the repeated tme_int32_t cast is a generator artifact and is
     harmless:) */
  TME_SPARC_FORMAT3_RD = (tme_uint32_t) (tme_int32_t) (tme_int32_t) value32;

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  /* log the value loaded: */
  tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
  tme_sparc_log(ic, 1000, TME_OK,
                (TME_SPARC_LOG_HANDLE(ic),
                 _("ldb 0x%02x:0x%08" TME_PRIx32 ": 0x%02" TME_PRIx32),
                 TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                 address,
                 TME_SPARC_FORMAT3_RD));

  TME_SPARC_INSN_OK;
}
1334
/* this does a sparc32 stb: store the low byte of rd to memory: */
TME_SPARC_FORMAT3(tme_sparc32_stb, tme_uint32_t)
{
  tme_uint32_t address;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint8_t value8;

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer: */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
                              ic->tme_sparc_asi_mask_data, address,
                              (TME_RECODE_SIZE_8
                               | TME_SPARC_RECODE_VERIFY_MEM_STORE));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* log the value stored: */
  tme_sparc_log(ic, 1000, TME_OK,
                (TME_SPARC_LOG_HANDLE(ic),
                 _("stb 0x%02x:0x%08" TME_PRIx32 ": 0x%02" TME_PRIx8),
                 TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                 address,
                 (tme_uint8_t) TME_SPARC_FORMAT3_RD));

  /* assume that no DTLB ASI mask flags will require a slow store: */
  /* (this generated variant keeps the flags zero, so the flags test
     in the condition below can never force the slow path:) */
  asi_mask_flags_slow = 0;

  /* get and busy the DTLB entry: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_write;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = ic->tme_sparc_memory_context_default;
  }

  /* we must call the slow store function if: */
  if (__tme_predict_false(

    /* the DTLB entry is invalid: */
    tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

    /* the DTLB entry does not match the context: */
    || dtlb_context != ic->tme_sparc_memory_context_default

    /* the DTLB entry does not cover the needed addresses: */
    || (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
    || ((address + ((8 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)

    /* the DTLB entry does not cover the needed address space: */
    || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))

    /* the DTLB entry can't be used for a fast stb: */
    || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

    /* the DTLB entry does not allow fast transfers: */
    || (memory == TME_EMULATOR_OFF_UNDEF)

    )) {

    /* call the slow store function: */
    memory = tme_sparc32_ls(ic,
                            address,
                            &TME_SPARC_FORMAT3_RD,
                            (TME_SPARC_LSINFO_OP_ST
                             | (8 / 8)));

    /* if the slow store function did the transfer: */
    /* (a TME_EMULATOR_OFF_UNDEF return means the store was already
       performed, so the fast-transfer code below must be skipped:) */
    if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {

      /* unbusy the TLB entry; */
      tme_sparc_tlb_unbusy(dtlb);

      TME_SPARC_INSN_OK;
    }
  }

  /* get the byte order of this transfer: */
  /* (a single byte needs no swap, so this generated variable is
     unused in the byte case:) */
  endian_little = FALSE;

  /* do the fast transfer: */
  memory += address;
  value8 = TME_SPARC_FORMAT3_RD;
  tme_memory_bus_write8((tme_shared tme_uint8_t *) memory, value8, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t), sizeof(tme_uint32_t));

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  TME_SPARC_INSN_OK;
}
1441
/* this does a sparc32 ldh: load a big-endian halfword from memory
   into rd.  bit 22 of the instruction selects sign extension (the
   signed ldsh form, as opposed to the unsigned lduh form): */
TME_SPARC_FORMAT3(tme_sparc32_ldh, tme_uint32_t)
{
  tme_uint32_t address;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  const tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint16_t value16;
  tme_uint32_t value32;

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer: */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
                              ic->tme_sparc_asi_mask_data, address,
                              (TME_RECODE_SIZE_16
                               | TME_SPARC_RECODE_VERIFY_MEM_LOAD));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* assume that no DTLB ASI mask flags will require a slow load: */
  /* (this generated variant keeps the flags zero, so the flags test
     in the condition below can never force the slow path:) */
  asi_mask_flags_slow = 0;

  /* get and busy the DTLB entry: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_read;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = ic->tme_sparc_memory_context_default;
  }

  /* we must call the slow load function if: */
  if (__tme_predict_false(

    /* the DTLB entry is invalid: */
    tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

    /* the DTLB entry does not match the context: */
    || dtlb_context != ic->tme_sparc_memory_context_default

    /* the DTLB entry does not cover the needed addresses: */
    || (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
    || ((address + ((16 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)

    /* the DTLB entry does not cover the needed address space: */
    || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))

    /* the DTLB entry can't be used for a fast ldh: */
    || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

    /* the DTLB entry does not allow fast transfers: */
    || (memory == TME_EMULATOR_OFF_UNDEF)

    /* the address is misaligned: */
    || ((address % (16 / 8)) != 0)

    )) {

    /* call the slow load function: */
    /* NOTE(review): the fast-transfer code below still runs after
       the slow path; presumably tme_sparc32_ls returns a pointer
       positioned so that "memory + address" addresses the value it
       already fetched -- confirm against tme_sparc32_ls: */
    memory = tme_sparc32_ls(ic,
                            address,
                            &TME_SPARC_FORMAT3_RD,
                            (TME_SPARC_LSINFO_OP_LD
                             | (16 / 8)));
  }

  /* get the byte order of this transfer: */
  /* (fixed to big-endian in this generated variant:) */
  endian_little = FALSE;

  /* do the fast transfer: */
  memory += address;
  value16 = tme_memory_bus_read16((const tme_shared tme_uint16_t *) memory, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint16_t), sizeof(tme_uint32_t));
  value16 = (endian_little ? tme_letoh_u16(value16) : tme_betoh_u16(value16));

  /* possibly sign-extend the loaded value: */
  value32 = value16;
  if (TME_SPARC_INSN & TME_BIT(22)) {
    value32 = (tme_uint32_t) (tme_int32_t) (tme_int16_t) value32;
  }
  /* (the repeated tme_int32_t cast is a generator artifact and is
     harmless:) */
  TME_SPARC_FORMAT3_RD = (tme_uint32_t) (tme_int32_t) (tme_int32_t) value32;

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  /* log the value loaded: */
  tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
  tme_sparc_log(ic, 1000, TME_OK,
                (TME_SPARC_LOG_HANDLE(ic),
                 _("ldh 0x%02x:0x%08" TME_PRIx32 ": 0x%04" TME_PRIx32),
                 TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                 address,
                 TME_SPARC_FORMAT3_RD));

  TME_SPARC_INSN_OK;
}
1551
/* this does a sparc32 sth: store the low 16 bits of rd to memory as
   a big-endian halfword: */
TME_SPARC_FORMAT3(tme_sparc32_sth, tme_uint32_t)
{
  tme_uint32_t address;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint16_t value16;

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer: */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
                              ic->tme_sparc_asi_mask_data, address,
                              (TME_RECODE_SIZE_16
                               | TME_SPARC_RECODE_VERIFY_MEM_STORE));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* log the value stored: */
  tme_sparc_log(ic, 1000, TME_OK,
                (TME_SPARC_LOG_HANDLE(ic),
                 _("sth 0x%02x:0x%08" TME_PRIx32 ": 0x%04" TME_PRIx16),
                 TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                 address,
                 (tme_uint16_t) TME_SPARC_FORMAT3_RD));

  /* assume that no DTLB ASI mask flags will require a slow store: */
  /* (this generated variant keeps the flags zero, so the flags test
     in the condition below can never force the slow path:) */
  asi_mask_flags_slow = 0;

  /* get and busy the DTLB entry: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_write;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = ic->tme_sparc_memory_context_default;
  }

  /* we must call the slow store function if: */
  if (__tme_predict_false(

    /* the DTLB entry is invalid: */
    tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

    /* the DTLB entry does not match the context: */
    || dtlb_context != ic->tme_sparc_memory_context_default

    /* the DTLB entry does not cover the needed addresses: */
    || (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
    || ((address + ((16 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)

    /* the DTLB entry does not cover the needed address space: */
    || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))

    /* the DTLB entry can't be used for a fast sth: */
    || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

    /* the DTLB entry does not allow fast transfers: */
    || (memory == TME_EMULATOR_OFF_UNDEF)

    /* the address is misaligned: */
    || ((address % (16 / 8)) != 0)

    )) {

    /* call the slow store function: */
    memory = tme_sparc32_ls(ic,
                            address,
                            &TME_SPARC_FORMAT3_RD,
                            (TME_SPARC_LSINFO_OP_ST
                             | (16 / 8)));

    /* if the slow store function did the transfer: */
    /* (a TME_EMULATOR_OFF_UNDEF return means the store was already
       performed, so the fast-transfer code below must be skipped:) */
    if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {

      /* unbusy the TLB entry; */
      tme_sparc_tlb_unbusy(dtlb);

      TME_SPARC_INSN_OK;
    }
  }

  /* get the byte order of this transfer: */
  /* (fixed to big-endian in this generated variant:) */
  endian_little = FALSE;

  /* do the fast transfer: */
  memory += address;
  value16 = TME_SPARC_FORMAT3_RD;
  value16 = (endian_little ? tme_htole_u16(value16) : tme_htobe_u16(value16));
  tme_memory_bus_write16((tme_shared tme_uint16_t *) memory, value16, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint16_t), sizeof(tme_uint32_t));

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  TME_SPARC_INSN_OK;
}
1662
/* this does a sparc32 ld: load a big-endian 32-bit word from memory
   into rd: */
TME_SPARC_FORMAT3(tme_sparc32_ld, tme_uint32_t)
{
  tme_uint32_t address;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  const tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint32_t value32;

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer: */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
                              ic->tme_sparc_asi_mask_data, address,
                              (TME_RECODE_SIZE_32
                               | TME_SPARC_RECODE_VERIFY_MEM_LOAD));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* assume that no DTLB ASI mask flags will require a slow load: */
  /* (this generated variant keeps the flags zero, so the flags test
     in the condition below can never force the slow path:) */
  asi_mask_flags_slow = 0;

  /* get and busy the DTLB entry: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_read;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = ic->tme_sparc_memory_context_default;
  }

  /* we must call the slow load function if: */
  if (__tme_predict_false(

    /* the DTLB entry is invalid: */
    tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

    /* the DTLB entry does not match the context: */
    || dtlb_context != ic->tme_sparc_memory_context_default

    /* the DTLB entry does not cover the needed addresses: */
    || (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
    || ((address + ((32 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)

    /* the DTLB entry does not cover the needed address space: */
    || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))

    /* the DTLB entry can't be used for a fast ld: */
    || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

    /* the DTLB entry does not allow fast transfers: */
    || (memory == TME_EMULATOR_OFF_UNDEF)

    /* the address is misaligned: */
    || ((address % (32 / 8)) != 0)

    )) {

    /* call the slow load function: */
    /* NOTE(review): the fast-transfer code below still runs after
       the slow path; presumably tme_sparc32_ls returns a pointer
       positioned so that "memory + address" addresses the value it
       already fetched -- confirm against tme_sparc32_ls: */
    memory = tme_sparc32_ls(ic,
                            address,
                            &TME_SPARC_FORMAT3_RD,
                            (TME_SPARC_LSINFO_OP_LD
                             | (32 / 8)));
  }

  /* get the byte order of this transfer: */
  /* (fixed to big-endian in this generated variant:) */
  endian_little = FALSE;

  /* do the fast transfer: */
  memory += address;
  value32 = tme_memory_bus_read32((const tme_shared tme_uint32_t *) memory, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t), sizeof(tme_uint32_t));
  value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
  /* (the repeated tme_int32_t cast is a generator artifact and is
     harmless:) */
  TME_SPARC_FORMAT3_RD = (tme_uint32_t) (tme_int32_t) (tme_int32_t) value32;

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  /* log the value loaded: */
  tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
  tme_sparc_log(ic, 1000, TME_OK,
                (TME_SPARC_LOG_HANDLE(ic),
                 _("ld 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32),
                 TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                 address,
                 TME_SPARC_FORMAT3_RD));

  TME_SPARC_INSN_OK;
}
1765
/* this does a sparc32 st: store the 32-bit rd to memory as a
   big-endian word: */
TME_SPARC_FORMAT3(tme_sparc32_st, tme_uint32_t)
{
  tme_uint32_t address;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint32_t value32;

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer: */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
                              ic->tme_sparc_asi_mask_data, address,
                              (TME_RECODE_SIZE_32
                               | TME_SPARC_RECODE_VERIFY_MEM_STORE));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* log the value stored: */
  tme_sparc_log(ic, 1000, TME_OK,
                (TME_SPARC_LOG_HANDLE(ic),
                 _("st 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32),
                 TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                 address,
                 (tme_uint32_t) TME_SPARC_FORMAT3_RD));

  /* assume that no DTLB ASI mask flags will require a slow store: */
  /* (this generated variant keeps the flags zero, so the flags test
     in the condition below can never force the slow path:) */
  asi_mask_flags_slow = 0;

  /* get and busy the DTLB entry: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_write;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = ic->tme_sparc_memory_context_default;
  }

  /* we must call the slow store function if: */
  if (__tme_predict_false(

    /* the DTLB entry is invalid: */
    tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

    /* the DTLB entry does not match the context: */
    || dtlb_context != ic->tme_sparc_memory_context_default

    /* the DTLB entry does not cover the needed addresses: */
    || (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
    || ((address + ((32 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)

    /* the DTLB entry does not cover the needed address space: */
    || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))

    /* the DTLB entry can't be used for a fast st: */
    || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

    /* the DTLB entry does not allow fast transfers: */
    || (memory == TME_EMULATOR_OFF_UNDEF)

    /* the address is misaligned: */
    || ((address % (32 / 8)) != 0)

    )) {

    /* call the slow store function: */
    memory = tme_sparc32_ls(ic,
                            address,
                            &TME_SPARC_FORMAT3_RD,
                            (TME_SPARC_LSINFO_OP_ST
                             | (32 / 8)));

    /* if the slow store function did the transfer: */
    /* (a TME_EMULATOR_OFF_UNDEF return means the store was already
       performed, so the fast-transfer code below must be skipped:) */
    if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {

      /* unbusy the TLB entry; */
      tme_sparc_tlb_unbusy(dtlb);

      TME_SPARC_INSN_OK;
    }
  }

  /* get the byte order of this transfer: */
  /* (fixed to big-endian in this generated variant:) */
  endian_little = FALSE;

  /* do the fast transfer: */
  memory += address;
  value32 = TME_SPARC_FORMAT3_RD;
  value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
  tme_memory_bus_write32((tme_shared tme_uint32_t *) memory, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t), sizeof(tme_uint32_t));

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  TME_SPARC_INSN_OK;
}
1876
1877 /* this does a sparc32 ldd: */
/* Loads a 64-bit doubleword into the even/odd destination register pair.
   Fast path: read two 32-bit words directly through the busied DTLB
   entry's emulator-offset mapping.  Slow path: tme_sparc32_ls().
   NOTE: this file is generated by sparc-insns-auto.sh; fixes belong in
   the generator, not here. */
TME_SPARC_FORMAT3(tme_sparc32_ldd,tme_uint32_t)1878 TME_SPARC_FORMAT3(tme_sparc32_ldd, tme_uint32_t)
1879 {
1880 tme_uint32_t address;
1881 tme_uint32_t asi_mask_flags_slow;
1882 struct tme_sparc_tlb *dtlb;
1883 const tme_shared tme_uint8_t *memory;
1884 tme_bus_context_t dtlb_context;
1885 tme_uint32_t endian_little;
1886 tme_uint32_t value32;
1887
1888 /* get the address: */
1889 address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
1890
1891 #ifdef _TME_SPARC_STATS
1892 /* track statistics: */
1893 ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
1894 #endif /* _TME_SPARC_STATS */
1895
1896 /* verify and maybe replay this transfer: */
/* both halves of the doubleword are verified separately, at address and
   address + 4: */
1897 tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
1898 			      ic->tme_sparc_asi_mask_data, address,
1899 			      (TME_RECODE_SIZE_32
1900 			       | TME_SPARC_RECODE_VERIFY_MEM_LOAD));
1901 tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32),
1902 			      ic->tme_sparc_asi_mask_data, address + sizeof(tme_uint32_t),
1903 			      (TME_RECODE_SIZE_32
1904 			       | TME_SPARC_RECODE_VERIFY_MEM_LOAD));
1905 if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
1906 TME_SPARC_INSN_OK;
1907 }
1908
1909 /* assume that no DTLB ASI mask flags will require a slow load: */
1910 asi_mask_flags_slow = 0;
1911
1912 /* get and busy the DTLB entry: */
1913 dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
1914 tme_sparc_tlb_busy(dtlb);
1915
1916 /* assume that this DTLB applies and allows fast transfers: */
1917 memory = dtlb->tme_sparc_tlb_emulator_off_read;
1918
1919 /* if this DTLB matches any context, it matches this context: */
1920 dtlb_context = dtlb->tme_sparc_tlb_context;
1921 if (dtlb_context > ic->tme_sparc_memory_context_max) {
1922 dtlb_context = ic->tme_sparc_memory_context_default;
1923 }
1924
1925 /* we must call the slow load function if: */
1926 if (__tme_predict_false(
1927
1928 /* the DTLB entry is invalid: */
1929 tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
1930
1931 /* the DTLB entry does not match the context: */
1932 || dtlb_context != ic->tme_sparc_memory_context_default
1933
1934 /* the DTLB entry does not cover the needed addresses: */
1935 || (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
1936 || ((address + ((64 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
1937
1938 /* the DTLB entry does not cover the needed address space: */
1939 || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))
1940
1941 /* the DTLB entry can't be used for a fast ldd: */
1942 || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
1943
1944 /* the DTLB entry does not allow fast transfers: */
1945 || (memory == TME_EMULATOR_OFF_UNDEF)
1946
1947 /* the address is misaligned: */
/* SPARC V8 requires doubleword (8-byte) alignment for ldd: */
1948 || ((address % (64 / 8)) != 0)
1949
1950 /* the destination register number is odd: */
/* SPARC V8 requires an even rd for ldd; the slow path handles the
   odd-rd case (presumably by trapping — TODO confirm against
   tme_sparc32_ls): */
1951 || ((TME_SPARC_INSN & TME_BIT(25)) != 0)
1952
1953 )) {
1954
1955 /* call the slow load function: */
1956 memory = tme_sparc32_ls(ic,
1957 			    address,
1958 			    &TME_SPARC_FORMAT3_RD,
1959 			    (TME_SPARC_LSINFO_OP_LD
1960 			     | TME_SPARC_LSINFO_LDD_STD
1961 			     | (64 / 8)));
1962 }
1963
/* NOTE(review): unlike the std handler below, there is no
   TME_EMULATOR_OFF_UNDEF check after the slow call — the slow ldd path
   presumably always returns a readable buffer for the fast-transfer code
   below; confirm against tme_sparc32_ls. */
1964 /* get the byte order of this transfer: */
/* sparc32 data accesses are big-endian; endian_little is a fixed FALSE
   in this generated handler: */
1965 endian_little = FALSE;
1966
1967 /* do the fast transfer: */
1968 memory += address;
1969 value32 = tme_memory_bus_read32(((const tme_shared tme_uint32_t *) memory) + 0, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 2, sizeof(tme_uint32_t));
1970 value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
1971 TME_SPARC_FORMAT3_RD = value32;
1972 value32 = tme_memory_bus_read32(((const tme_shared tme_uint32_t *) memory) + 1, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 1, sizeof(tme_uint32_t));
1973 value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
1974 TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32) = value32;
1975
1976 /* unbusy the DTLB entry: */
1977 tme_sparc_tlb_unbusy(dtlb);
1978
1979 /* log the value loaded: */
1980 tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
1981 tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32));
1982 tme_sparc_log(ic, 1000, TME_OK,
1983 		(TME_SPARC_LOG_HANDLE(ic),
1984 		 _("ldd 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32 " 0x%08" TME_PRIx32 ""),
1985 		 TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
1986 		 address,
1987 		 TME_SPARC_FORMAT3_RD,
1988 		 TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32)));
1989
1990 TME_SPARC_INSN_OK;
1991 }
1992
1993 /* this does a sparc32 std: */
/* Stores the even/odd source register pair as a 64-bit doubleword.
   Fast path: two 32-bit bus writes through the busied DTLB entry's
   emulator-offset mapping.  Slow path: tme_sparc32_ls(), which may do
   the whole transfer itself (signalled by TME_EMULATOR_OFF_UNDEF).
   Generated by sparc-insns-auto.sh; fixes belong in the generator. */
TME_SPARC_FORMAT3(tme_sparc32_std,tme_uint32_t)1994 TME_SPARC_FORMAT3(tme_sparc32_std, tme_uint32_t)
1995 {
1996 tme_uint32_t address;
1997 tme_uint32_t asi_mask_flags_slow;
1998 struct tme_sparc_tlb *dtlb;
1999 tme_shared tme_uint8_t *memory;
2000 tme_bus_context_t dtlb_context;
2001 tme_uint32_t endian_little;
2002 tme_uint32_t value32;
2003
2004 /* get the address: */
2005 address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
2006
2007 #ifdef _TME_SPARC_STATS
2008 /* track statistics: */
2009 ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
2010 #endif /* _TME_SPARC_STATS */
2011
2012 /* verify and maybe replay this transfer: */
/* both halves of the doubleword are verified separately, at address and
   address + 4: */
2013 tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
2014 			      ic->tme_sparc_asi_mask_data, address,
2015 			      (TME_RECODE_SIZE_32
2016 			       | TME_SPARC_RECODE_VERIFY_MEM_STORE));
2017 tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32),
2018 			      ic->tme_sparc_asi_mask_data, address + sizeof(tme_uint32_t),
2019 			      (TME_RECODE_SIZE_32
2020 			       | TME_SPARC_RECODE_VERIFY_MEM_STORE));
2021 if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
2022 TME_SPARC_INSN_OK;
2023 }
2024
2025 /* log the values stored: */
2026 tme_sparc_log(ic, 1000, TME_OK,
2027 		(TME_SPARC_LOG_HANDLE(ic),
2028 		 _("std 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32 " 0x%08" TME_PRIx32),
2029 		 TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
2030 		 address,
2031 		 (tme_uint32_t) TME_SPARC_FORMAT3_RD,
2032 		 (tme_uint32_t) TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32)));
2033
2034 /* assume that no DTLB ASI mask flags will require a slow store: */
2035 asi_mask_flags_slow = 0;
2036
2037 /* get and busy the DTLB entry: */
2038 dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
2039 tme_sparc_tlb_busy(dtlb);
2040
2041 /* assume that this DTLB applies and allows fast transfers: */
2042 memory = dtlb->tme_sparc_tlb_emulator_off_write;
2043
2044 /* if this DTLB matches any context, it matches this context: */
2045 dtlb_context = dtlb->tme_sparc_tlb_context;
2046 if (dtlb_context > ic->tme_sparc_memory_context_max) {
2047 dtlb_context = ic->tme_sparc_memory_context_default;
2048 }
2049
2050 /* we must call the slow store function if: */
2051 if (__tme_predict_false(
2052
2053 /* the DTLB entry is invalid: */
2054 tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
2055
2056 /* the DTLB entry does not match the context: */
2057 || dtlb_context != ic->tme_sparc_memory_context_default
2058
2059 /* the DTLB entry does not cover the needed addresses: */
2060 || (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
2061 || ((address + ((64 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
2062
2063 /* the DTLB entry does not cover the needed address space: */
2064 || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))
2065
2066 /* the DTLB entry can't be used for a fast std: */
2067 || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
2068
2069 /* the DTLB entry does not allow fast transfers: */
2070 || (memory == TME_EMULATOR_OFF_UNDEF)
2071
2072 /* the address is misaligned: */
/* SPARC V8 requires doubleword (8-byte) alignment for std: */
2073 || ((address % (64 / 8)) != 0)
2074
2075 /* the destination register number is odd: */
/* SPARC V8 requires an even rd for std; the slow path handles the
   odd-rd case (presumably by trapping — TODO confirm against
   tme_sparc32_ls): */
2076 || ((TME_SPARC_INSN & TME_BIT(25)) != 0)
2077
2078 )) {
2079
2080 /* call the slow store function: */
2081 memory = tme_sparc32_ls(ic,
2082 			    address,
2083 			    &TME_SPARC_FORMAT3_RD,
2084 			    (TME_SPARC_LSINFO_OP_ST
2085 			     | TME_SPARC_LSINFO_LDD_STD
2086 			     | (64 / 8)));
2087
2088 /* if the slow store function did the transfer: */
2089 if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
2090
2091 /* unbusy the TLB entry; */
2092 tme_sparc_tlb_unbusy(dtlb);
2093
2094 TME_SPARC_INSN_OK;
2095 }
2096 }
2097
2098 /* get the byte order of this transfer: */
/* sparc32 data accesses are big-endian; endian_little is a fixed FALSE
   in this generated handler: */
2099 endian_little = FALSE;
2100
2101 /* do the fast transfer: */
2102 memory += address;
2103 value32 = TME_SPARC_FORMAT3_RD;
2104 value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
2105 tme_memory_bus_write32(((tme_shared tme_uint32_t *) memory) + 0, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 2, sizeof(tme_uint32_t));
2106 value32 = TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32);
2107 value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
2108 tme_memory_bus_write32(((tme_shared tme_uint32_t *) memory) + 1, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 1, sizeof(tme_uint32_t));
2109
2110 /* unbusy the DTLB entry: */
2111 tme_sparc_tlb_unbusy(dtlb);
2112
2113 TME_SPARC_INSN_OK;
2114 }
2115
2116 /* this does a sparc32 ldstub: */
/* Atomic load-store unsigned byte: loads the byte at the effective
   address into rd and atomically replaces it in memory with 0xff.
   Fast path: tme_memory_atomic_xchg8() through the busied DTLB entry;
   the fast path requires the mapping to be both readable and writable
   (the off_write == off_read check below).  Generated by
   sparc-insns-auto.sh; fixes belong in the generator. */
TME_SPARC_FORMAT3(tme_sparc32_ldstub,tme_uint32_t)2117 TME_SPARC_FORMAT3(tme_sparc32_ldstub, tme_uint32_t)
2118 {
2119 tme_uint32_t address;
2120 tme_uint32_t asi_mask_flags_slow;
2121 struct tme_sparc_tlb *dtlb;
2122 tme_shared tme_uint8_t *memory;
2123 tme_bus_context_t dtlb_context;
2124 tme_uint32_t endian_little;
2125
2126 /* get the address: */
2127 address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
2128
2129 #ifdef _TME_SPARC_STATS
2130 /* track statistics: */
2131 ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
2132 #endif /* _TME_SPARC_STATS */
2133
2134 /* verify and maybe replay this transfer: */
2135 tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
2136 			      ic->tme_sparc_asi_mask_data, address,
2137 			      (TME_RECODE_SIZE_8
2138 			       | TME_SPARC_RECODE_VERIFY_MEM_LOAD));
2139 if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
2140 TME_SPARC_INSN_OK;
2141 }
2142
2143 /* assume that no DTLB ASI mask flags will require a slow store: */
2144 asi_mask_flags_slow = 0;
2145
2146 /* get and busy the DTLB entry: */
2147 dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
2148 tme_sparc_tlb_busy(dtlb);
2149
2150 /* assume that this DTLB applies and allows fast transfers: */
2151 memory = dtlb->tme_sparc_tlb_emulator_off_write;
2152
2153 /* if this DTLB matches any context, it matches this context: */
2154 dtlb_context = dtlb->tme_sparc_tlb_context;
2155 if (dtlb_context > ic->tme_sparc_memory_context_max) {
2156 dtlb_context = ic->tme_sparc_memory_context_default;
2157 }
2158
2159 /* we must call the slow store function if: */
2160 if (__tme_predict_false(
2161
2162 /* the DTLB entry is invalid: */
2163 tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
2164
2165 /* the DTLB entry does not match the context: */
2166 || dtlb_context != ic->tme_sparc_memory_context_default
2167
2168 /* the DTLB entry does not cover the needed addresses: */
2169 || (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
2170 || ((address + ((8 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
2171
2172 /* the DTLB entry does not cover the needed address space: */
2173 || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))
2174
2175 /* the DTLB entry can't be used for a fast ldstub: */
2176 || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
2177
2178 /* the DTLB entry does not allow fast transfers: */
/* an atomic needs the same mapping for reads and writes: */
2179 || (memory != dtlb->tme_sparc_tlb_emulator_off_read)
2180 || (memory == TME_EMULATOR_OFF_UNDEF)
2181
2182 )) {
2183
2184 /* call the slow store function: */
2185 memory = tme_sparc32_ls(ic,
2186 			    address,
2187 			    &TME_SPARC_FORMAT3_RD,
2188 			    (TME_SPARC_LSINFO_OP_ATOMIC
2189 			     | (8 / 8)));
2190
2191 /* if the slow store function did the transfer: */
2192 if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
2193
2194 /* unbusy the TLB entry; */
2195 tme_sparc_tlb_unbusy(dtlb);
2196
2197 /* log the value loaded: */
2198 tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
2199 tme_sparc_log(ic, 1000, TME_OK,
2200 		    (TME_SPARC_LOG_HANDLE(ic),
2201 		     _("ldstub 0x%02x:0x%08" TME_PRIx32 ": 0x%02" TME_PRIx32),
2202 		     TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
2203 		     address,
2204 		     TME_SPARC_FORMAT3_RD));
2205
2206 TME_SPARC_INSN_OK;
2207 }
2208 }
2209
2210 /* get the byte order of this transfer: */
/* set for symmetry with the other generated handlers, but unused here:
   a single byte has no byte order: */
2211 endian_little = FALSE;
2212
2213 /* do the fast transfer: */
2214 memory += address;
2215 TME_SPARC_FORMAT3_RD = tme_memory_atomic_xchg8(memory, 0xff, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t));
2216
2217 /* unbusy the DTLB entry: */
2218 tme_sparc_tlb_unbusy(dtlb);
2219
2220 /* log the value loaded: */
2221 tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
2222 tme_sparc_log(ic, 1000, TME_OK,
2223 		(TME_SPARC_LOG_HANDLE(ic),
2224 		 _("ldstub 0x%02x:0x%08" TME_PRIx32 ": 0x%02" TME_PRIx32),
2225 		 TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
2226 		 address,
2227 		 TME_SPARC_FORMAT3_RD));
2228
2229 TME_SPARC_INSN_OK;
2230 }
2231
2232 /* this does a sparc32 ldstuba: */
/* Atomic load-store unsigned byte to an alternate address space:
   identical to ldstub above except that the ASI comes from the
   instruction via _tme_sparc32_alternate_asi_mask() (privileged; traps
   on the i-bit or a bad ASI) and the slow path carries the explicit
   ASI.  Generated by sparc-insns-auto.sh; fixes belong in the
   generator. */
TME_SPARC_FORMAT3(tme_sparc32_ldstuba,tme_uint32_t)2233 TME_SPARC_FORMAT3(tme_sparc32_ldstuba, tme_uint32_t)
2234 {
2235 tme_uint32_t asi_mask_data;
2236 tme_uint32_t address;
2237 tme_uint32_t asi_mask_flags_slow;
2238 struct tme_sparc_tlb *dtlb;
2239 tme_shared tme_uint8_t *memory;
2240 tme_bus_context_t dtlb_context;
2241 tme_uint32_t endian_little;
2242
2243 /* get the alternate ASI mask: */
2244 asi_mask_data = _tme_sparc32_alternate_asi_mask(ic);
2245
2246 /* get the address: */
2247 address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
2248
2249 #ifdef _TME_SPARC_STATS
2250 /* track statistics: */
2251 ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
2252 #endif /* _TME_SPARC_STATS */
2253
2254 /* verify and maybe replay this transfer: */
2255 tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
2256 			      asi_mask_data, address,
2257 			      (TME_RECODE_SIZE_8
2258 			       | TME_SPARC_RECODE_VERIFY_MEM_LOAD));
2259 if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
2260 TME_SPARC_INSN_OK;
2261 }
2262
2263 /* assume that no DTLB ASI mask flags will require a slow store: */
2264 asi_mask_flags_slow = 0;
2265
2266 /* get and busy the DTLB entry: */
2267 dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
2268 tme_sparc_tlb_busy(dtlb);
2269
2270 /* assume that this DTLB applies and allows fast transfers: */
2271 memory = dtlb->tme_sparc_tlb_emulator_off_write;
2272
2273 /* if this DTLB matches any context, it matches this context: */
2274 dtlb_context = dtlb->tme_sparc_tlb_context;
2275 if (dtlb_context > ic->tme_sparc_memory_context_max) {
2276 dtlb_context = ic->tme_sparc_memory_context_default;
2277 }
2278
2279 /* we must call the slow store function if: */
2280 if (__tme_predict_false(
2281
2282 /* the DTLB entry is invalid: */
2283 tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
2284
2285 /* the DTLB entry does not match the context: */
2286 || dtlb_context != ic->tme_sparc_memory_context_default
2287
2288 /* the DTLB entry does not cover the needed addresses: */
2289 || (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
2290 || ((address + ((8 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
2291
2292 /* the DTLB entry does not cover the needed address space: */
/* note: checked against the instruction's alternate ASI mask, not the
   default ASI mask: */
2293 || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))
2294
2295 /* the DTLB entry can't be used for a fast ldstuba: */
2296 || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
2297
2298 /* the DTLB entry does not allow fast transfers: */
/* an atomic needs the same mapping for reads and writes: */
2299 || (memory != dtlb->tme_sparc_tlb_emulator_off_read)
2300 || (memory == TME_EMULATOR_OFF_UNDEF)
2301
2302 )) {
2303
2304 /* call the slow store function: */
2305 memory = tme_sparc32_ls(ic,
2306 			    address,
2307 			    &TME_SPARC_FORMAT3_RD,
2308 			    (TME_SPARC_LSINFO_OP_ATOMIC
2309 			     | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
2310 			     | TME_SPARC_LSINFO_A
2311 			     | (8 / 8)));
2312
2313 /* if the slow store function did the transfer: */
2314 if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
2315
2316 /* unbusy the TLB entry; */
2317 tme_sparc_tlb_unbusy(dtlb);
2318
2319 /* log the value loaded: */
2320 tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
2321 tme_sparc_log(ic, 1000, TME_OK,
2322 		    (TME_SPARC_LOG_HANDLE(ic),
2323 		     _("ldstuba 0x%02x:0x%08" TME_PRIx32 ": 0x%02" TME_PRIx32),
2324 		     TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
2325 		     address,
2326 		     TME_SPARC_FORMAT3_RD));
2327
2328 TME_SPARC_INSN_OK;
2329 }
2330 }
2331
2332 /* get the byte order of this transfer: */
/* set for symmetry with the other generated handlers, but unused here:
   a single byte has no byte order: */
2333 endian_little = FALSE;
2334
2335 /* do the fast transfer: */
2336 memory += address;
2337 TME_SPARC_FORMAT3_RD = tme_memory_atomic_xchg8(memory, 0xff, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t));
2338
2339 /* unbusy the DTLB entry: */
2340 tme_sparc_tlb_unbusy(dtlb);
2341
2342 /* log the value loaded: */
2343 tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
2344 tme_sparc_log(ic, 1000, TME_OK,
2345 		(TME_SPARC_LOG_HANDLE(ic),
2346 		 _("ldstuba 0x%02x:0x%08" TME_PRIx32 ": 0x%02" TME_PRIx32),
2347 		 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
2348 		 address,
2349 		 TME_SPARC_FORMAT3_RD));
2350
2351 TME_SPARC_INSN_OK;
2352 }
2353
2354 /* this does a sparc32 swap: */
/* Atomic swap: exchanges rd with the 32-bit word at the effective
   address.  Fast path: tme_memory_atomic_xchg32() through the busied
   DTLB entry, which must be both readable and writable.  Generated by
   sparc-insns-auto.sh; fixes belong in the generator. */
TME_SPARC_FORMAT3(tme_sparc32_swap,tme_uint32_t)2355 TME_SPARC_FORMAT3(tme_sparc32_swap, tme_uint32_t)
2356 {
2357 tme_uint32_t address;
2358 tme_uint32_t asi_mask_flags_slow;
2359 struct tme_sparc_tlb *dtlb;
2360 tme_shared tme_uint8_t *memory;
2361 tme_bus_context_t dtlb_context;
2362 tme_uint32_t endian_little;
2363 tme_uint32_t value32;
2364
2365 /* get the address: */
2366 address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
2367
2368 #ifdef _TME_SPARC_STATS
2369 /* track statistics: */
2370 ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
2371 #endif /* _TME_SPARC_STATS */
2372
2373 /* verify and maybe replay this transfer: */
/* a swap is both a load and a store, so both verify flags are set: */
2374 tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
2375 			      ic->tme_sparc_asi_mask_data, address,
2376 			      (TME_RECODE_SIZE_32
2377 			       | TME_SPARC_RECODE_VERIFY_MEM_LOAD | TME_SPARC_RECODE_VERIFY_MEM_STORE));
2378 if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
2379 TME_SPARC_INSN_OK;
2380 }
2381
2382 /* log the value stored: */
2383 tme_sparc_log(ic, 1000, TME_OK,
2384 		(TME_SPARC_LOG_HANDLE(ic),
2385 		 _("swap 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32),
2386 		 TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
2387 		 address,
2388 		 (tme_uint32_t) TME_SPARC_FORMAT3_RD));
2389
2390 /* assume that no DTLB ASI mask flags will require a slow store: */
2391 asi_mask_flags_slow = 0;
2392
2393 /* get and busy the DTLB entry: */
2394 dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
2395 tme_sparc_tlb_busy(dtlb);
2396
2397 /* assume that this DTLB applies and allows fast transfers: */
2398 memory = dtlb->tme_sparc_tlb_emulator_off_write;
2399
2400 /* if this DTLB matches any context, it matches this context: */
2401 dtlb_context = dtlb->tme_sparc_tlb_context;
2402 if (dtlb_context > ic->tme_sparc_memory_context_max) {
2403 dtlb_context = ic->tme_sparc_memory_context_default;
2404 }
2405
2406 /* we must call the slow store function if: */
2407 if (__tme_predict_false(
2408
2409 /* the DTLB entry is invalid: */
2410 tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
2411
2412 /* the DTLB entry does not match the context: */
2413 || dtlb_context != ic->tme_sparc_memory_context_default
2414
2415 /* the DTLB entry does not cover the needed addresses: */
2416 || (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
2417 || ((address + ((32 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
2418
2419 /* the DTLB entry does not cover the needed address space: */
2420 || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))
2421
2422 /* the DTLB entry can't be used for a fast swap: */
2423 || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
2424
2425 /* the DTLB entry does not allow fast transfers: */
/* an atomic needs the same mapping for reads and writes: */
2426 || (memory != dtlb->tme_sparc_tlb_emulator_off_read)
2427 || (memory == TME_EMULATOR_OFF_UNDEF)
2428
2429 /* the address is misaligned: */
2430 || ((address % (32 / 8)) != 0)
2431
2432 )) {
2433
2434 /* call the slow store function: */
2435 memory = tme_sparc32_ls(ic,
2436 			    address,
2437 			    &TME_SPARC_FORMAT3_RD,
2438 			    (TME_SPARC_LSINFO_OP_ATOMIC
2439 			     | (32 / 8)));
2440
2441 /* if the slow store function did the transfer: */
2442 if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
2443
2444 /* unbusy the TLB entry; */
2445 tme_sparc_tlb_unbusy(dtlb);
2446
2447 /* log the value loaded: */
2448 tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
2449 tme_sparc_log(ic, 1000, TME_OK,
2450 		    (TME_SPARC_LOG_HANDLE(ic),
2451 		     _("swap 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32),
2452 		     TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
2453 		     address,
2454 		     TME_SPARC_FORMAT3_RD));
2455
2456 TME_SPARC_INSN_OK;
2457 }
2458 }
2459
2460 /* get the byte order of this transfer: */
/* sparc32 data accesses are big-endian; endian_little is a fixed FALSE
   in this generated handler: */
2461 endian_little = FALSE;
2462
2463 /* do the fast transfer: */
2464 memory += address;
2465 value32 = TME_SPARC_FORMAT3_RD;
2466 value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
/* NOTE(review): the alignment argument to the 32-bit atomic xchg is
   sizeof(tme_uint8_t), unlike the sizeof(tme_uint32_t) passed to the
   32-bit bus reads/writes elsewhere in this file — confirm against the
   generator and the tme_memory_atomic_xchg32() contract: */
2467 value32 = tme_memory_atomic_xchg32((tme_shared tme_uint32_t *) memory, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t));
2468 value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
2469 TME_SPARC_FORMAT3_RD = value32;
2470
2471 /* unbusy the DTLB entry: */
2472 tme_sparc_tlb_unbusy(dtlb);
2473
2474 /* log the value loaded: */
2475 tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
2476 tme_sparc_log(ic, 1000, TME_OK,
2477 		(TME_SPARC_LOG_HANDLE(ic),
2478 		 _("swap 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32),
2479 		 TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
2480 		 address,
2481 		 TME_SPARC_FORMAT3_RD));
2482
2483 TME_SPARC_INSN_OK;
2484 }
2485
2486 /* this does a sparc32 swapa: */
/* Atomic swap to an alternate address space: identical to swap above
   except that the ASI comes from the instruction via
   _tme_sparc32_alternate_asi_mask() (privileged; traps on the i-bit or
   a bad ASI) and the slow path carries the explicit ASI.  Generated by
   sparc-insns-auto.sh; fixes belong in the generator. */
TME_SPARC_FORMAT3(tme_sparc32_swapa,tme_uint32_t)2487 TME_SPARC_FORMAT3(tme_sparc32_swapa, tme_uint32_t)
2488 {
2489 tme_uint32_t asi_mask_data;
2490 tme_uint32_t address;
2491 tme_uint32_t asi_mask_flags_slow;
2492 struct tme_sparc_tlb *dtlb;
2493 tme_shared tme_uint8_t *memory;
2494 tme_bus_context_t dtlb_context;
2495 tme_uint32_t endian_little;
2496 tme_uint32_t value32;
2497
2498 /* get the alternate ASI mask: */
2499 asi_mask_data = _tme_sparc32_alternate_asi_mask(ic);
2500
2501 /* get the address: */
2502 address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
2503
2504 #ifdef _TME_SPARC_STATS
2505 /* track statistics: */
2506 ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
2507 #endif /* _TME_SPARC_STATS */
2508
2509 /* verify and maybe replay this transfer: */
/* a swap is both a load and a store, so both verify flags are set: */
2510 tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
2511 			      asi_mask_data, address,
2512 			      (TME_RECODE_SIZE_32
2513 			       | TME_SPARC_RECODE_VERIFY_MEM_LOAD | TME_SPARC_RECODE_VERIFY_MEM_STORE));
2514 if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
2515 TME_SPARC_INSN_OK;
2516 }
2517
2518 /* log the value stored: */
2519 tme_sparc_log(ic, 1000, TME_OK,
2520 		(TME_SPARC_LOG_HANDLE(ic),
2521 		 _("swapa 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32),
2522 		 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
2523 		 address,
2524 		 (tme_uint32_t) TME_SPARC_FORMAT3_RD));
2525
2526 /* assume that no DTLB ASI mask flags will require a slow store: */
2527 asi_mask_flags_slow = 0;
2528
2529 /* get and busy the DTLB entry: */
2530 dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
2531 tme_sparc_tlb_busy(dtlb);
2532
2533 /* assume that this DTLB applies and allows fast transfers: */
2534 memory = dtlb->tme_sparc_tlb_emulator_off_write;
2535
2536 /* if this DTLB matches any context, it matches this context: */
2537 dtlb_context = dtlb->tme_sparc_tlb_context;
2538 if (dtlb_context > ic->tme_sparc_memory_context_max) {
2539 dtlb_context = ic->tme_sparc_memory_context_default;
2540 }
2541
2542 /* we must call the slow store function if: */
2543 if (__tme_predict_false(
2544
2545 /* the DTLB entry is invalid: */
2546 tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
2547
2548 /* the DTLB entry does not match the context: */
2549 || dtlb_context != ic->tme_sparc_memory_context_default
2550
2551 /* the DTLB entry does not cover the needed addresses: */
2552 || (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
2553 || ((address + ((32 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
2554
2555 /* the DTLB entry does not cover the needed address space: */
/* note: checked against the instruction's alternate ASI mask, not the
   default ASI mask: */
2556 || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))
2557
2558 /* the DTLB entry can't be used for a fast swapa: */
2559 || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
2560
2561 /* the DTLB entry does not allow fast transfers: */
/* an atomic needs the same mapping for reads and writes: */
2562 || (memory != dtlb->tme_sparc_tlb_emulator_off_read)
2563 || (memory == TME_EMULATOR_OFF_UNDEF)
2564
2565 /* the address is misaligned: */
2566 || ((address % (32 / 8)) != 0)
2567
2568 )) {
2569
2570 /* call the slow store function: */
2571 memory = tme_sparc32_ls(ic,
2572 			    address,
2573 			    &TME_SPARC_FORMAT3_RD,
2574 			    (TME_SPARC_LSINFO_OP_ATOMIC
2575 			     | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
2576 			     | TME_SPARC_LSINFO_A
2577 			     | (32 / 8)));
2578
2579 /* if the slow store function did the transfer: */
2580 if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
2581
2582 /* unbusy the TLB entry; */
2583 tme_sparc_tlb_unbusy(dtlb);
2584
2585 /* log the value loaded: */
2586 tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
2587 tme_sparc_log(ic, 1000, TME_OK,
2588 		    (TME_SPARC_LOG_HANDLE(ic),
2589 		     _("swapa 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32),
2590 		     TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
2591 		     address,
2592 		     TME_SPARC_FORMAT3_RD));
2593
2594 TME_SPARC_INSN_OK;
2595 }
2596 }
2597
2598 /* get the byte order of this transfer: */
/* sparc32 data accesses are big-endian; endian_little is a fixed FALSE
   in this generated handler: */
2599 endian_little = FALSE;
2600
2601 /* do the fast transfer: */
2602 memory += address;
2603 value32 = TME_SPARC_FORMAT3_RD;
2604 value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
/* NOTE(review): the alignment argument to the 32-bit atomic xchg is
   sizeof(tme_uint8_t), unlike the sizeof(tme_uint32_t) passed to the
   32-bit bus reads/writes elsewhere in this file — confirm against the
   generator and the tme_memory_atomic_xchg32() contract: */
2605 value32 = tme_memory_atomic_xchg32((tme_shared tme_uint32_t *) memory, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t));
2606 value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
2607 TME_SPARC_FORMAT3_RD = value32;
2608
2609 /* unbusy the DTLB entry: */
2610 tme_sparc_tlb_unbusy(dtlb);
2611
2612 /* log the value loaded: */
2613 tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
2614 tme_sparc_log(ic, 1000, TME_OK,
2615 		(TME_SPARC_LOG_HANDLE(ic),
2616 		 _("swapa 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32),
2617 		 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
2618 		 address,
2619 		 TME_SPARC_FORMAT3_RD));
2620
2621 TME_SPARC_INSN_OK;
2622 }
2623
2624 /* this does a sparc32 ldba: */
/* Loads a byte from an alternate address space into rd, with optional
   sign extension (this one handler serves both the unsigned and signed
   byte-load-alternate forms; see the bit-22 test below).  The ASI comes
   from the instruction via _tme_sparc32_alternate_asi_mask()
   (privileged; traps on the i-bit or a bad ASI).  Generated by
   sparc-insns-auto.sh; fixes belong in the generator. */
TME_SPARC_FORMAT3(tme_sparc32_ldba,tme_uint32_t)2625 TME_SPARC_FORMAT3(tme_sparc32_ldba, tme_uint32_t)
2626 {
2627 tme_uint32_t asi_mask_data;
2628 tme_uint32_t address;
2629 tme_uint32_t asi_mask_flags_slow;
2630 struct tme_sparc_tlb *dtlb;
2631 const tme_shared tme_uint8_t *memory;
2632 tme_bus_context_t dtlb_context;
2633 tme_uint32_t endian_little;
2634 tme_uint8_t value8;
2635 tme_uint32_t value32;
2636
2637 /* get the alternate ASI mask: */
2638 asi_mask_data = _tme_sparc32_alternate_asi_mask(ic);
2639
2640 /* get the address: */
2641 address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
2642
2643 #ifdef _TME_SPARC_STATS
2644 /* track statistics: */
2645 ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
2646 #endif /* _TME_SPARC_STATS */
2647
2648 /* verify and maybe replay this transfer: */
2649 tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
2650 			      asi_mask_data, address,
2651 			      (TME_RECODE_SIZE_8
2652 			       | TME_SPARC_RECODE_VERIFY_MEM_LOAD));
2653 if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
2654 TME_SPARC_INSN_OK;
2655 }
2656
2657 /* assume that no DTLB ASI mask flags will require a slow load: */
2658 asi_mask_flags_slow = 0;
2659
2660 /* get and busy the DTLB entry: */
2661 dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
2662 tme_sparc_tlb_busy(dtlb);
2663
2664 /* assume that this DTLB applies and allows fast transfers: */
2665 memory = dtlb->tme_sparc_tlb_emulator_off_read;
2666
2667 /* if this DTLB matches any context, it matches this context: */
2668 dtlb_context = dtlb->tme_sparc_tlb_context;
2669 if (dtlb_context > ic->tme_sparc_memory_context_max) {
2670 dtlb_context = ic->tme_sparc_memory_context_default;
2671 }
2672
2673 /* we must call the slow load function if: */
2674 if (__tme_predict_false(
2675
2676 /* the DTLB entry is invalid: */
2677 tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
2678
2679 /* the DTLB entry does not match the context: */
2680 || dtlb_context != ic->tme_sparc_memory_context_default
2681
2682 /* the DTLB entry does not cover the needed addresses: */
2683 || (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
2684 || ((address + ((8 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
2685
2686 /* the DTLB entry does not cover the needed address space: */
/* note: checked against the instruction's alternate ASI mask, not the
   default ASI mask: */
2687 || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))
2688
2689 /* the DTLB entry can't be used for a fast ldba: */
2690 || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
2691
2692 /* the DTLB entry does not allow fast transfers: */
2693 || (memory == TME_EMULATOR_OFF_UNDEF)
2694
2695 )) {
2696
2697 /* call the slow load function: */
2698 memory = tme_sparc32_ls(ic,
2699 			    address,
2700 			    &TME_SPARC_FORMAT3_RD,
2701 			    (TME_SPARC_LSINFO_OP_LD
2702 			     | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
2703 			     | TME_SPARC_LSINFO_A
2704 			     | (8 / 8)));
2705
2706 /* if the slow load function did the transfer: */
2707 if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
2708
2709 /* unbusy the TLB entry; */
2710 tme_sparc_tlb_unbusy(dtlb);
2711
2712 /* log the value loaded: */
2713 tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
2714 tme_sparc_log(ic, 1000, TME_OK,
2715 		    (TME_SPARC_LOG_HANDLE(ic),
2716 		     _("ldba 0x%02x:0x%08" TME_PRIx32 ": 0x%02" TME_PRIx32),
2717 		     TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
2718 		     address,
2719 		     TME_SPARC_FORMAT3_RD));
2720
2721 TME_SPARC_INSN_OK;
2722 }
2723 }
2724
2725 /* get the byte order of this transfer: */
/* set for symmetry with the other generated handlers, but unused here:
   a single byte has no byte order: */
2726 endian_little = FALSE;
2727
2728 /* do the fast transfer: */
2729 memory += address;
2730 value8 = tme_memory_bus_read8((const tme_shared tme_uint8_t *) memory, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t), sizeof(tme_uint32_t));
2731
2732 /* possibly sign-extend the loaded value: */
/* instruction bit 22 selects the signed variant (ldsba) over the
   unsigned one (lduba) — presumably op3 bit 3; TODO confirm against
   the SPARC V8 opcode tables: */
2733 value32 = value8;
2734 if (TME_SPARC_INSN & TME_BIT(22)) {
2735 value32 = (tme_uint32_t) (tme_int32_t) (tme_int8_t) value32;
2736 }
/* the repeated (tme_int32_t) cast below is a generator artifact with
   no effect on the stored value: */
2737 TME_SPARC_FORMAT3_RD = (tme_uint32_t) (tme_int32_t) (tme_int32_t) value32;
2738
2739 /* unbusy the DTLB entry: */
2740 tme_sparc_tlb_unbusy(dtlb);
2741
2742 /* log the value loaded: */
2743 tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
2744 tme_sparc_log(ic, 1000, TME_OK,
2745 		(TME_SPARC_LOG_HANDLE(ic),
2746 		 _("ldba 0x%02x:0x%08" TME_PRIx32 ": 0x%02" TME_PRIx32),
2747 		 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
2748 		 address,
2749 		 TME_SPARC_FORMAT3_RD));
2750
2751 TME_SPARC_INSN_OK;
2752 }
2753
2754 /* this does a sparc32 stba: */
TME_SPARC_FORMAT3(tme_sparc32_stba,tme_uint32_t)2755 TME_SPARC_FORMAT3(tme_sparc32_stba, tme_uint32_t)
2756 {
2757 tme_uint32_t asi_mask_data;
2758 tme_uint32_t address;
2759 tme_uint32_t asi_mask_flags_slow;
2760 struct tme_sparc_tlb *dtlb;
2761 tme_shared tme_uint8_t *memory;
2762 tme_bus_context_t dtlb_context;
2763 tme_uint32_t endian_little;
2764 tme_uint8_t value8;
2765
2766 /* get the alternate ASI mask: */
2767 asi_mask_data = _tme_sparc32_alternate_asi_mask(ic);
2768
2769 /* get the address: */
2770 address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
2771
2772 #ifdef _TME_SPARC_STATS
2773 /* track statistics: */
2774 ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
2775 #endif /* _TME_SPARC_STATS */
2776
2777 /* verify and maybe replay this transfer: */
2778 tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
2779 asi_mask_data, address,
2780 (TME_RECODE_SIZE_8
2781 | TME_SPARC_RECODE_VERIFY_MEM_STORE));
2782 if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
2783 TME_SPARC_INSN_OK;
2784 }
2785
2786 /* log the value stored: */
2787 tme_sparc_log(ic, 1000, TME_OK,
2788 (TME_SPARC_LOG_HANDLE(ic),
2789 _("stba 0x%02x:0x%08" TME_PRIx32 ": 0x%02" TME_PRIx8),
2790 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
2791 address,
2792 (tme_uint8_t) TME_SPARC_FORMAT3_RD));
2793
2794 /* assume that no DTLB ASI mask flags will require a slow store: */
2795 asi_mask_flags_slow = 0;
2796
2797 /* get and busy the DTLB entry: */
2798 dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
2799 tme_sparc_tlb_busy(dtlb);
2800
2801 /* assume that this DTLB applies and allows fast transfers: */
2802 memory = dtlb->tme_sparc_tlb_emulator_off_write;
2803
2804 /* if this DTLB matches any context, it matches this context: */
2805 dtlb_context = dtlb->tme_sparc_tlb_context;
2806 if (dtlb_context > ic->tme_sparc_memory_context_max) {
2807 dtlb_context = ic->tme_sparc_memory_context_default;
2808 }
2809
2810 /* we must call the slow store function if: */
2811 if (__tme_predict_false(
2812
2813 /* the DTLB entry is invalid: */
2814 tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
2815
2816 /* the DTLB entry does not match the context: */
2817 || dtlb_context != ic->tme_sparc_memory_context_default
2818
2819 /* the DTLB entry does not cover the needed addresses: */
2820 || (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
2821 || ((address + ((8 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
2822
2823 /* the DTLB entry does not cover the needed address space: */
2824 || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))
2825
2826 /* the DTLB entry can't be used for a fast stba: */
2827 || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
2828
2829 /* the DTLB entry does not allow fast transfers: */
2830 || (memory == TME_EMULATOR_OFF_UNDEF)
2831
2832 )) {
2833
2834 /* call the slow store function: */
2835 memory = tme_sparc32_ls(ic,
2836 address,
2837 &TME_SPARC_FORMAT3_RD,
2838 (TME_SPARC_LSINFO_OP_ST
2839 | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
2840 | TME_SPARC_LSINFO_A
2841 | (8 / 8)));
2842
2843 /* if the slow store function did the transfer: */
2844 if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
2845
2846 /* unbusy the TLB entry; */
2847 tme_sparc_tlb_unbusy(dtlb);
2848
2849 TME_SPARC_INSN_OK;
2850 }
2851 }
2852
2853 /* get the byte order of this transfer: */
2854 endian_little = FALSE;
2855
2856 /* do the fast transfer: */
2857 memory += address;
2858 value8 = TME_SPARC_FORMAT3_RD;
2859 tme_memory_bus_write8((tme_shared tme_uint8_t *) memory, value8, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t), sizeof(tme_uint32_t));
2860
2861 /* unbusy the DTLB entry: */
2862 tme_sparc_tlb_unbusy(dtlb);
2863
2864 TME_SPARC_INSN_OK;
2865 }
2866
2867 /* this does a sparc32 ldha: */
TME_SPARC_FORMAT3(tme_sparc32_ldha,tme_uint32_t)2868 TME_SPARC_FORMAT3(tme_sparc32_ldha, tme_uint32_t)
2869 {
2870 tme_uint32_t asi_mask_data;
2871 tme_uint32_t address;
2872 tme_uint32_t asi_mask_flags_slow;
2873 struct tme_sparc_tlb *dtlb;
2874 const tme_shared tme_uint8_t *memory;
2875 tme_bus_context_t dtlb_context;
2876 tme_uint32_t endian_little;
2877 tme_uint16_t value16;
2878 tme_uint32_t value32;
2879
2880 /* get the alternate ASI mask: */
2881 asi_mask_data = _tme_sparc32_alternate_asi_mask(ic);
2882
2883 /* get the address: */
2884 address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
2885
2886 #ifdef _TME_SPARC_STATS
2887 /* track statistics: */
2888 ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
2889 #endif /* _TME_SPARC_STATS */
2890
2891 /* verify and maybe replay this transfer: */
2892 tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
2893 asi_mask_data, address,
2894 (TME_RECODE_SIZE_16
2895 | TME_SPARC_RECODE_VERIFY_MEM_LOAD));
2896 if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
2897 TME_SPARC_INSN_OK;
2898 }
2899
2900 /* assume that no DTLB ASI mask flags will require a slow load: */
2901 asi_mask_flags_slow = 0;
2902
2903 /* get and busy the DTLB entry: */
2904 dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
2905 tme_sparc_tlb_busy(dtlb);
2906
2907 /* assume that this DTLB applies and allows fast transfers: */
2908 memory = dtlb->tme_sparc_tlb_emulator_off_read;
2909
2910 /* if this DTLB matches any context, it matches this context: */
2911 dtlb_context = dtlb->tme_sparc_tlb_context;
2912 if (dtlb_context > ic->tme_sparc_memory_context_max) {
2913 dtlb_context = ic->tme_sparc_memory_context_default;
2914 }
2915
2916 /* we must call the slow load function if: */
2917 if (__tme_predict_false(
2918
2919 /* the DTLB entry is invalid: */
2920 tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
2921
2922 /* the DTLB entry does not match the context: */
2923 || dtlb_context != ic->tme_sparc_memory_context_default
2924
2925 /* the DTLB entry does not cover the needed addresses: */
2926 || (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
2927 || ((address + ((16 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
2928
2929 /* the DTLB entry does not cover the needed address space: */
2930 || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))
2931
2932 /* the DTLB entry can't be used for a fast ldha: */
2933 || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
2934
2935 /* the DTLB entry does not allow fast transfers: */
2936 || (memory == TME_EMULATOR_OFF_UNDEF)
2937
2938 /* the address is misaligned: */
2939 || ((address % (16 / 8)) != 0)
2940
2941 )) {
2942
2943 /* call the slow load function: */
2944 memory = tme_sparc32_ls(ic,
2945 address,
2946 &TME_SPARC_FORMAT3_RD,
2947 (TME_SPARC_LSINFO_OP_LD
2948 | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
2949 | TME_SPARC_LSINFO_A
2950 | (16 / 8)));
2951
2952 /* if the slow load function did the transfer: */
2953 if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
2954
2955 /* unbusy the TLB entry; */
2956 tme_sparc_tlb_unbusy(dtlb);
2957
2958 /* log the value loaded: */
2959 tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
2960 tme_sparc_log(ic, 1000, TME_OK,
2961 (TME_SPARC_LOG_HANDLE(ic),
2962 _("ldha 0x%02x:0x%08" TME_PRIx32 ": 0x%04" TME_PRIx32),
2963 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
2964 address,
2965 TME_SPARC_FORMAT3_RD));
2966
2967 TME_SPARC_INSN_OK;
2968 }
2969 }
2970
2971 /* get the byte order of this transfer: */
2972 endian_little = FALSE;
2973
2974 /* do the fast transfer: */
2975 memory += address;
2976 value16 = tme_memory_bus_read16((const tme_shared tme_uint16_t *) memory, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint16_t), sizeof(tme_uint32_t));
2977 value16 = (endian_little ? tme_letoh_u16(value16) : tme_betoh_u16(value16));
2978
2979 /* possibly sign-extend the loaded value: */
2980 value32 = value16;
2981 if (TME_SPARC_INSN & TME_BIT(22)) {
2982 value32 = (tme_uint32_t) (tme_int32_t) (tme_int16_t) value32;
2983 }
2984 TME_SPARC_FORMAT3_RD = (tme_uint32_t) (tme_int32_t) (tme_int32_t) value32;
2985
2986 /* unbusy the DTLB entry: */
2987 tme_sparc_tlb_unbusy(dtlb);
2988
2989 /* log the value loaded: */
2990 tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
2991 tme_sparc_log(ic, 1000, TME_OK,
2992 (TME_SPARC_LOG_HANDLE(ic),
2993 _("ldha 0x%02x:0x%08" TME_PRIx32 ": 0x%04" TME_PRIx32),
2994 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
2995 address,
2996 TME_SPARC_FORMAT3_RD));
2997
2998 TME_SPARC_INSN_OK;
2999 }
3000
3001 /* this does a sparc32 stha: */
TME_SPARC_FORMAT3(tme_sparc32_stha,tme_uint32_t)3002 TME_SPARC_FORMAT3(tme_sparc32_stha, tme_uint32_t)
3003 {
3004 tme_uint32_t asi_mask_data;
3005 tme_uint32_t address;
3006 tme_uint32_t asi_mask_flags_slow;
3007 struct tme_sparc_tlb *dtlb;
3008 tme_shared tme_uint8_t *memory;
3009 tme_bus_context_t dtlb_context;
3010 tme_uint32_t endian_little;
3011 tme_uint16_t value16;
3012
3013 /* get the alternate ASI mask: */
3014 asi_mask_data = _tme_sparc32_alternate_asi_mask(ic);
3015
3016 /* get the address: */
3017 address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
3018
3019 #ifdef _TME_SPARC_STATS
3020 /* track statistics: */
3021 ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
3022 #endif /* _TME_SPARC_STATS */
3023
3024 /* verify and maybe replay this transfer: */
3025 tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
3026 asi_mask_data, address,
3027 (TME_RECODE_SIZE_16
3028 | TME_SPARC_RECODE_VERIFY_MEM_STORE));
3029 if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
3030 TME_SPARC_INSN_OK;
3031 }
3032
3033 /* log the value stored: */
3034 tme_sparc_log(ic, 1000, TME_OK,
3035 (TME_SPARC_LOG_HANDLE(ic),
3036 _("stha 0x%02x:0x%08" TME_PRIx32 ": 0x%04" TME_PRIx16),
3037 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
3038 address,
3039 (tme_uint16_t) TME_SPARC_FORMAT3_RD));
3040
3041 /* assume that no DTLB ASI mask flags will require a slow store: */
3042 asi_mask_flags_slow = 0;
3043
3044 /* get and busy the DTLB entry: */
3045 dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
3046 tme_sparc_tlb_busy(dtlb);
3047
3048 /* assume that this DTLB applies and allows fast transfers: */
3049 memory = dtlb->tme_sparc_tlb_emulator_off_write;
3050
3051 /* if this DTLB matches any context, it matches this context: */
3052 dtlb_context = dtlb->tme_sparc_tlb_context;
3053 if (dtlb_context > ic->tme_sparc_memory_context_max) {
3054 dtlb_context = ic->tme_sparc_memory_context_default;
3055 }
3056
3057 /* we must call the slow store function if: */
3058 if (__tme_predict_false(
3059
3060 /* the DTLB entry is invalid: */
3061 tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
3062
3063 /* the DTLB entry does not match the context: */
3064 || dtlb_context != ic->tme_sparc_memory_context_default
3065
3066 /* the DTLB entry does not cover the needed addresses: */
3067 || (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
3068 || ((address + ((16 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
3069
3070 /* the DTLB entry does not cover the needed address space: */
3071 || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))
3072
3073 /* the DTLB entry can't be used for a fast stha: */
3074 || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
3075
3076 /* the DTLB entry does not allow fast transfers: */
3077 || (memory == TME_EMULATOR_OFF_UNDEF)
3078
3079 /* the address is misaligned: */
3080 || ((address % (16 / 8)) != 0)
3081
3082 )) {
3083
3084 /* call the slow store function: */
3085 memory = tme_sparc32_ls(ic,
3086 address,
3087 &TME_SPARC_FORMAT3_RD,
3088 (TME_SPARC_LSINFO_OP_ST
3089 | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
3090 | TME_SPARC_LSINFO_A
3091 | (16 / 8)));
3092
3093 /* if the slow store function did the transfer: */
3094 if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
3095
3096 /* unbusy the TLB entry; */
3097 tme_sparc_tlb_unbusy(dtlb);
3098
3099 TME_SPARC_INSN_OK;
3100 }
3101 }
3102
3103 /* get the byte order of this transfer: */
3104 endian_little = FALSE;
3105
3106 /* do the fast transfer: */
3107 memory += address;
3108 value16 = TME_SPARC_FORMAT3_RD;
3109 value16 = (endian_little ? tme_htole_u16(value16) : tme_htobe_u16(value16));
3110 tme_memory_bus_write16((tme_shared tme_uint16_t *) memory, value16, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint16_t), sizeof(tme_uint32_t));
3111
3112 /* unbusy the DTLB entry: */
3113 tme_sparc_tlb_unbusy(dtlb);
3114
3115 TME_SPARC_INSN_OK;
3116 }
3117
3118 /* this does a sparc32 lda: */
TME_SPARC_FORMAT3(tme_sparc32_lda,tme_uint32_t)3119 TME_SPARC_FORMAT3(tme_sparc32_lda, tme_uint32_t)
3120 {
3121 tme_uint32_t asi_mask_data;
3122 tme_uint32_t address;
3123 tme_uint32_t asi_mask_flags_slow;
3124 struct tme_sparc_tlb *dtlb;
3125 const tme_shared tme_uint8_t *memory;
3126 tme_bus_context_t dtlb_context;
3127 tme_uint32_t endian_little;
3128 tme_uint32_t value32;
3129
3130 /* get the alternate ASI mask: */
3131 asi_mask_data = _tme_sparc32_alternate_asi_mask(ic);
3132
3133 /* get the address: */
3134 address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
3135
3136 #ifdef _TME_SPARC_STATS
3137 /* track statistics: */
3138 ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
3139 #endif /* _TME_SPARC_STATS */
3140
3141 /* verify and maybe replay this transfer: */
3142 tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
3143 asi_mask_data, address,
3144 (TME_RECODE_SIZE_32
3145 | TME_SPARC_RECODE_VERIFY_MEM_LOAD));
3146 if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
3147 TME_SPARC_INSN_OK;
3148 }
3149
3150 /* assume that no DTLB ASI mask flags will require a slow load: */
3151 asi_mask_flags_slow = 0;
3152
3153 /* get and busy the DTLB entry: */
3154 dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
3155 tme_sparc_tlb_busy(dtlb);
3156
3157 /* assume that this DTLB applies and allows fast transfers: */
3158 memory = dtlb->tme_sparc_tlb_emulator_off_read;
3159
3160 /* if this DTLB matches any context, it matches this context: */
3161 dtlb_context = dtlb->tme_sparc_tlb_context;
3162 if (dtlb_context > ic->tme_sparc_memory_context_max) {
3163 dtlb_context = ic->tme_sparc_memory_context_default;
3164 }
3165
3166 /* we must call the slow load function if: */
3167 if (__tme_predict_false(
3168
3169 /* the DTLB entry is invalid: */
3170 tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
3171
3172 /* the DTLB entry does not match the context: */
3173 || dtlb_context != ic->tme_sparc_memory_context_default
3174
3175 /* the DTLB entry does not cover the needed addresses: */
3176 || (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
3177 || ((address + ((32 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
3178
3179 /* the DTLB entry does not cover the needed address space: */
3180 || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))
3181
3182 /* the DTLB entry can't be used for a fast lda: */
3183 || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
3184
3185 /* the DTLB entry does not allow fast transfers: */
3186 || (memory == TME_EMULATOR_OFF_UNDEF)
3187
3188 /* the address is misaligned: */
3189 || ((address % (32 / 8)) != 0)
3190
3191 )) {
3192
3193 /* call the slow load function: */
3194 memory = tme_sparc32_ls(ic,
3195 address,
3196 &TME_SPARC_FORMAT3_RD,
3197 (TME_SPARC_LSINFO_OP_LD
3198 | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
3199 | TME_SPARC_LSINFO_A
3200 | (32 / 8)));
3201
3202 /* if the slow load function did the transfer: */
3203 if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
3204
3205 /* unbusy the TLB entry; */
3206 tme_sparc_tlb_unbusy(dtlb);
3207
3208 /* log the value loaded: */
3209 tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
3210 tme_sparc_log(ic, 1000, TME_OK,
3211 (TME_SPARC_LOG_HANDLE(ic),
3212 _("lda 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32),
3213 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
3214 address,
3215 TME_SPARC_FORMAT3_RD));
3216
3217 TME_SPARC_INSN_OK;
3218 }
3219 }
3220
3221 /* get the byte order of this transfer: */
3222 endian_little = FALSE;
3223
3224 /* do the fast transfer: */
3225 memory += address;
3226 value32 = tme_memory_bus_read32((const tme_shared tme_uint32_t *) memory, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t), sizeof(tme_uint32_t));
3227 value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
3228 TME_SPARC_FORMAT3_RD = (tme_uint32_t) (tme_int32_t) (tme_int32_t) value32;
3229
3230 /* unbusy the DTLB entry: */
3231 tme_sparc_tlb_unbusy(dtlb);
3232
3233 /* log the value loaded: */
3234 tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
3235 tme_sparc_log(ic, 1000, TME_OK,
3236 (TME_SPARC_LOG_HANDLE(ic),
3237 _("lda 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32),
3238 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
3239 address,
3240 TME_SPARC_FORMAT3_RD));
3241
3242 TME_SPARC_INSN_OK;
3243 }
3244
3245 /* this does a sparc32 sta: */
TME_SPARC_FORMAT3(tme_sparc32_sta,tme_uint32_t)3246 TME_SPARC_FORMAT3(tme_sparc32_sta, tme_uint32_t)
3247 {
3248 tme_uint32_t asi_mask_data;
3249 tme_uint32_t address;
3250 tme_uint32_t asi_mask_flags_slow;
3251 struct tme_sparc_tlb *dtlb;
3252 tme_shared tme_uint8_t *memory;
3253 tme_bus_context_t dtlb_context;
3254 tme_uint32_t endian_little;
3255 tme_uint32_t value32;
3256
3257 /* get the alternate ASI mask: */
3258 asi_mask_data = _tme_sparc32_alternate_asi_mask(ic);
3259
3260 /* get the address: */
3261 address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
3262
3263 #ifdef _TME_SPARC_STATS
3264 /* track statistics: */
3265 ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
3266 #endif /* _TME_SPARC_STATS */
3267
3268 /* verify and maybe replay this transfer: */
3269 tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
3270 asi_mask_data, address,
3271 (TME_RECODE_SIZE_32
3272 | TME_SPARC_RECODE_VERIFY_MEM_STORE));
3273 if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
3274 TME_SPARC_INSN_OK;
3275 }
3276
3277 /* log the value stored: */
3278 tme_sparc_log(ic, 1000, TME_OK,
3279 (TME_SPARC_LOG_HANDLE(ic),
3280 _("sta 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32),
3281 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
3282 address,
3283 (tme_uint32_t) TME_SPARC_FORMAT3_RD));
3284
3285 /* assume that no DTLB ASI mask flags will require a slow store: */
3286 asi_mask_flags_slow = 0;
3287
3288 /* get and busy the DTLB entry: */
3289 dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
3290 tme_sparc_tlb_busy(dtlb);
3291
3292 /* assume that this DTLB applies and allows fast transfers: */
3293 memory = dtlb->tme_sparc_tlb_emulator_off_write;
3294
3295 /* if this DTLB matches any context, it matches this context: */
3296 dtlb_context = dtlb->tme_sparc_tlb_context;
3297 if (dtlb_context > ic->tme_sparc_memory_context_max) {
3298 dtlb_context = ic->tme_sparc_memory_context_default;
3299 }
3300
3301 /* we must call the slow store function if: */
3302 if (__tme_predict_false(
3303
3304 /* the DTLB entry is invalid: */
3305 tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
3306
3307 /* the DTLB entry does not match the context: */
3308 || dtlb_context != ic->tme_sparc_memory_context_default
3309
3310 /* the DTLB entry does not cover the needed addresses: */
3311 || (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
3312 || ((address + ((32 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
3313
3314 /* the DTLB entry does not cover the needed address space: */
3315 || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))
3316
3317 /* the DTLB entry can't be used for a fast sta: */
3318 || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
3319
3320 /* the DTLB entry does not allow fast transfers: */
3321 || (memory == TME_EMULATOR_OFF_UNDEF)
3322
3323 /* the address is misaligned: */
3324 || ((address % (32 / 8)) != 0)
3325
3326 )) {
3327
3328 /* call the slow store function: */
3329 memory = tme_sparc32_ls(ic,
3330 address,
3331 &TME_SPARC_FORMAT3_RD,
3332 (TME_SPARC_LSINFO_OP_ST
3333 | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
3334 | TME_SPARC_LSINFO_A
3335 | (32 / 8)));
3336
3337 /* if the slow store function did the transfer: */
3338 if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
3339
3340 /* unbusy the TLB entry; */
3341 tme_sparc_tlb_unbusy(dtlb);
3342
3343 TME_SPARC_INSN_OK;
3344 }
3345 }
3346
3347 /* get the byte order of this transfer: */
3348 endian_little = FALSE;
3349
3350 /* do the fast transfer: */
3351 memory += address;
3352 value32 = TME_SPARC_FORMAT3_RD;
3353 value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
3354 tme_memory_bus_write32((tme_shared tme_uint32_t *) memory, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t), sizeof(tme_uint32_t));
3355
3356 /* unbusy the DTLB entry: */
3357 tme_sparc_tlb_unbusy(dtlb);
3358
3359 TME_SPARC_INSN_OK;
3360 }
3361
3362 /* this does a sparc32 ldda: */
TME_SPARC_FORMAT3(tme_sparc32_ldda,tme_uint32_t)3363 TME_SPARC_FORMAT3(tme_sparc32_ldda, tme_uint32_t)
3364 {
3365 tme_uint32_t asi_mask_data;
3366 tme_uint32_t address;
3367 tme_uint32_t asi_mask_flags_slow;
3368 struct tme_sparc_tlb *dtlb;
3369 const tme_shared tme_uint8_t *memory;
3370 tme_bus_context_t dtlb_context;
3371 tme_uint32_t endian_little;
3372 tme_uint32_t value32;
3373
3374 /* get the alternate ASI mask: */
3375 asi_mask_data = _tme_sparc32_alternate_asi_mask(ic);
3376
3377 /* get the address: */
3378 address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
3379
3380 #ifdef _TME_SPARC_STATS
3381 /* track statistics: */
3382 ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
3383 #endif /* _TME_SPARC_STATS */
3384
3385 /* verify and maybe replay this transfer: */
3386 tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
3387 asi_mask_data, address,
3388 (TME_RECODE_SIZE_32
3389 | TME_SPARC_RECODE_VERIFY_MEM_LOAD));
3390 tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32),
3391 asi_mask_data, address + sizeof(tme_uint32_t),
3392 (TME_RECODE_SIZE_32
3393 | TME_SPARC_RECODE_VERIFY_MEM_LOAD));
3394 if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
3395 TME_SPARC_INSN_OK;
3396 }
3397
3398 /* assume that no DTLB ASI mask flags will require a slow load: */
3399 asi_mask_flags_slow = 0;
3400
3401 /* get and busy the DTLB entry: */
3402 dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
3403 tme_sparc_tlb_busy(dtlb);
3404
3405 /* assume that this DTLB applies and allows fast transfers: */
3406 memory = dtlb->tme_sparc_tlb_emulator_off_read;
3407
3408 /* if this DTLB matches any context, it matches this context: */
3409 dtlb_context = dtlb->tme_sparc_tlb_context;
3410 if (dtlb_context > ic->tme_sparc_memory_context_max) {
3411 dtlb_context = ic->tme_sparc_memory_context_default;
3412 }
3413
3414 /* we must call the slow load function if: */
3415 if (__tme_predict_false(
3416
3417 /* the DTLB entry is invalid: */
3418 tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
3419
3420 /* the DTLB entry does not match the context: */
3421 || dtlb_context != ic->tme_sparc_memory_context_default
3422
3423 /* the DTLB entry does not cover the needed addresses: */
3424 || (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
3425 || ((address + ((64 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
3426
3427 /* the DTLB entry does not cover the needed address space: */
3428 || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))
3429
3430 /* the DTLB entry can't be used for a fast ldda: */
3431 || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
3432
3433 /* the DTLB entry does not allow fast transfers: */
3434 || (memory == TME_EMULATOR_OFF_UNDEF)
3435
3436 /* the address is misaligned: */
3437 || ((address % (64 / 8)) != 0)
3438
3439 /* the destination register number is odd: */
3440 || ((TME_SPARC_INSN & TME_BIT(25)) != 0)
3441
3442 )) {
3443
3444 /* call the slow load function: */
3445 memory = tme_sparc32_ls(ic,
3446 address,
3447 &TME_SPARC_FORMAT3_RD,
3448 (TME_SPARC_LSINFO_OP_LD
3449 | TME_SPARC_LSINFO_LDD_STD
3450 | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
3451 | TME_SPARC_LSINFO_A
3452 | (64 / 8)));
3453
3454 /* if the slow load function did the transfer: */
3455 if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
3456
3457 /* unbusy the TLB entry; */
3458 tme_sparc_tlb_unbusy(dtlb);
3459
3460 /* log the value loaded: */
3461 tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
3462 tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32));
3463 tme_sparc_log(ic, 1000, TME_OK,
3464 (TME_SPARC_LOG_HANDLE(ic),
3465 _("ldda 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32 " 0x%08" TME_PRIx32 ""),
3466 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
3467 address,
3468 TME_SPARC_FORMAT3_RD,
3469 TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32)));
3470
3471 TME_SPARC_INSN_OK;
3472 }
3473 }
3474
3475 /* get the byte order of this transfer: */
3476 endian_little = FALSE;
3477
3478 /* do the fast transfer: */
3479 memory += address;
3480 value32 = tme_memory_bus_read32(((const tme_shared tme_uint32_t *) memory) + 0, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 2, sizeof(tme_uint32_t));
3481 value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
3482 TME_SPARC_FORMAT3_RD = value32;
3483 value32 = tme_memory_bus_read32(((const tme_shared tme_uint32_t *) memory) + 1, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 1, sizeof(tme_uint32_t));
3484 value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
3485 TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32) = value32;
3486
3487 /* unbusy the DTLB entry: */
3488 tme_sparc_tlb_unbusy(dtlb);
3489
3490 /* log the value loaded: */
3491 tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
3492 tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32));
3493 tme_sparc_log(ic, 1000, TME_OK,
3494 (TME_SPARC_LOG_HANDLE(ic),
3495 _("ldda 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32 " 0x%08" TME_PRIx32 ""),
3496 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
3497 address,
3498 TME_SPARC_FORMAT3_RD,
3499 TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32)));
3500
3501 TME_SPARC_INSN_OK;
3502 }
3503
3504 /* this does a sparc32 stda: */
TME_SPARC_FORMAT3(tme_sparc32_stda,tme_uint32_t)3505 TME_SPARC_FORMAT3(tme_sparc32_stda, tme_uint32_t)
3506 {
3507 tme_uint32_t asi_mask_data;
3508 tme_uint32_t address;
3509 tme_uint32_t asi_mask_flags_slow;
3510 struct tme_sparc_tlb *dtlb;
3511 tme_shared tme_uint8_t *memory;
3512 tme_bus_context_t dtlb_context;
3513 tme_uint32_t endian_little;
3514 tme_uint32_t value32;
3515
3516 /* get the alternate ASI mask: */
3517 asi_mask_data = _tme_sparc32_alternate_asi_mask(ic);
3518
3519 /* get the address: */
3520 address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
3521
3522 #ifdef _TME_SPARC_STATS
3523 /* track statistics: */
3524 ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
3525 #endif /* _TME_SPARC_STATS */
3526
3527 /* verify and maybe replay this transfer: */
3528 tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
3529 asi_mask_data, address,
3530 (TME_RECODE_SIZE_32
3531 | TME_SPARC_RECODE_VERIFY_MEM_STORE));
3532 tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32),
3533 asi_mask_data, address + sizeof(tme_uint32_t),
3534 (TME_RECODE_SIZE_32
3535 | TME_SPARC_RECODE_VERIFY_MEM_STORE));
3536 if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
3537 TME_SPARC_INSN_OK;
3538 }
3539
3540 /* log the values stored: */
3541 tme_sparc_log(ic, 1000, TME_OK,
3542 (TME_SPARC_LOG_HANDLE(ic),
3543 _("stda 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32 " 0x%08" TME_PRIx32),
3544 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
3545 address,
3546 (tme_uint32_t) TME_SPARC_FORMAT3_RD,
3547 (tme_uint32_t) TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32)));
3548
3549 /* assume that no DTLB ASI mask flags will require a slow store: */
3550 asi_mask_flags_slow = 0;
3551
3552 /* get and busy the DTLB entry: */
3553 dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
3554 tme_sparc_tlb_busy(dtlb);
3555
3556 /* assume that this DTLB applies and allows fast transfers: */
3557 memory = dtlb->tme_sparc_tlb_emulator_off_write;
3558
3559 /* if this DTLB matches any context, it matches this context: */
3560 dtlb_context = dtlb->tme_sparc_tlb_context;
3561 if (dtlb_context > ic->tme_sparc_memory_context_max) {
3562 dtlb_context = ic->tme_sparc_memory_context_default;
3563 }
3564
3565 /* we must call the slow store function if: */
3566 if (__tme_predict_false(
3567
3568 /* the DTLB entry is invalid: */
3569 tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
3570
3571 /* the DTLB entry does not match the context: */
3572 || dtlb_context != ic->tme_sparc_memory_context_default
3573
3574 /* the DTLB entry does not cover the needed addresses: */
3575 || (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
3576 || ((address + ((64 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
3577
3578 /* the DTLB entry does not cover the needed address space: */
3579 || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))
3580
3581 /* the DTLB entry can't be used for a fast stda: */
3582 || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
3583
3584 /* the DTLB entry does not allow fast transfers: */
3585 || (memory == TME_EMULATOR_OFF_UNDEF)
3586
3587 /* the address is misaligned: */
3588 || ((address % (64 / 8)) != 0)
3589
3590 /* the destination register number is odd: */
3591 || ((TME_SPARC_INSN & TME_BIT(25)) != 0)
3592
3593 )) {
3594
3595 /* call the slow store function: */
3596 memory = tme_sparc32_ls(ic,
3597 address,
3598 &TME_SPARC_FORMAT3_RD,
3599 (TME_SPARC_LSINFO_OP_ST
3600 | TME_SPARC_LSINFO_LDD_STD
3601 | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
3602 | TME_SPARC_LSINFO_A
3603 | (64 / 8)));
3604
3605 /* if the slow store function did the transfer: */
3606 if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
3607
3608 /* unbusy the TLB entry; */
3609 tme_sparc_tlb_unbusy(dtlb);
3610
3611 TME_SPARC_INSN_OK;
3612 }
3613 }
3614
3615 /* get the byte order of this transfer: */
3616 endian_little = FALSE;
3617
3618 /* do the fast transfer: */
3619 memory += address;
3620 value32 = TME_SPARC_FORMAT3_RD;
3621 value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
3622 tme_memory_bus_write32(((tme_shared tme_uint32_t *) memory) + 0, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 2, sizeof(tme_uint32_t));
3623 value32 = TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32);
3624 value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
3625 tme_memory_bus_write32(((tme_shared tme_uint32_t *) memory) + 1, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 1, sizeof(tme_uint32_t));
3626
3627 /* unbusy the DTLB entry: */
3628 tme_sparc_tlb_unbusy(dtlb);
3629
3630 TME_SPARC_INSN_OK;
3631 }
3632
3633 /* this does a sparc32 jmpl: */
TME_SPARC_FORMAT3(tme_sparc32_jmpl,tme_uint32_t)3634 TME_SPARC_FORMAT3(tme_sparc32_jmpl, tme_uint32_t)
3635 {
3636 tme_uint32_t pc_next_next;
3637 tme_uint32_t ls_faults;
3638
3639 /* "The JMPL instruction causes a register-indirect delayed control
3640 transfer to the address given by r[rs1] + r[rs2] if the i field is
3641 zero, or r[rs1] + sign_ext(simm13) if the i field is one. The JMPL
3642 instruction copies the PC, which contains the address of the JMPL
3643 instruction, into register r[rd]. If either of the low-order two
3644 bits of the jump address is nonzero, a mem_address_not_aligned
3645 trap occurs." */
3646
3647 /* get the target address: */
3648 pc_next_next = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
3649
3650 /* set the delayed control transfer: */
3651 ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_PC_NEXT_NEXT) = pc_next_next;
3652
3653 /* check the target address: */
3654 ls_faults = TME_SPARC_LS_FAULT_NONE;
3655 if (__tme_predict_false((pc_next_next % sizeof(tme_uint32_t)) != 0)) {
3656 ls_faults += TME_SPARC_LS_FAULT_ADDRESS_NOT_ALIGNED;
3657 }
3658 if (__tme_predict_false(ls_faults != TME_SPARC_LS_FAULT_NONE)) {
3659 tme_sparc_nnpc_trap(ic, ls_faults);
3660 }
3661
3662 /* write the PC of the jmpl into r[rd]: */
3663 TME_SPARC_FORMAT3_RD = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_PC);
3664
3665 /* log an indirect call instruction, which has 15 (%o7) for rd: */
3666 if (TME_FIELD_MASK_EXTRACTU(TME_SPARC_INSN, TME_SPARC_FORMAT3_MASK_RD) == 15) {
3667 tme_sparc_log(ic, 250, TME_OK,
3668 (TME_SPARC_LOG_HANDLE(ic),
3669 _("call 0x%08" TME_PRIx32),
3670 pc_next_next));
3671 }
3672
3673 /* log a ret or retl instruction, which has 0 (%g0) for rd,
3674 either 31 (%i7) or 15 (%o7) for rs1, and 8 for simm13: */
3675 else if ((TME_SPARC_INSN | (16 << 14))
3676 == ((tme_uint32_t) (0x2 << 30) | (0 << 25) | (0x38 << 19) | (31 << 14) | (0x1 << 13) | 8)) {
3677 tme_sparc_log(ic, 250, TME_OK,
3678 (TME_SPARC_LOG_HANDLE(ic),
3679 _("retl 0x%08" TME_PRIx32),
3680 pc_next_next));
3681 }
3682
3683 TME_SPARC_INSN_OK;
3684 }
3685
3686 /* this does a sparc32 ldf: */
TME_SPARC_FORMAT3(tme_sparc32_ldf,tme_uint32_t)3687 TME_SPARC_FORMAT3(tme_sparc32_ldf, tme_uint32_t)
3688 {
3689 tme_uint32_t misaligned;
3690 struct tme_float float_buffer;
3691 struct tme_float *fpreg;
3692
3693 /* get the least significant 32 bits of the address: */
3694 misaligned = TME_SPARC_FORMAT3_RS1;
3695 misaligned += (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
3696
3697 /* decode rd: */
3698 float_buffer.tme_float_format = TME_FLOAT_FORMAT_IEEE754_SINGLE;
3699 fpreg
3700 = _tme_sparc32_fpu_mem_fpreg(ic,
3701 misaligned,
3702 &float_buffer);
3703
3704 /* do the load: */
3705 tme_sparc32_ld(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX));
3706
3707 /* set the floating-point register value: */
3708 assert (fpreg != &float_buffer);
3709 fpreg->tme_float_format = TME_FLOAT_FORMAT_IEEE754_SINGLE;
3710 fpreg->tme_float_value_ieee754_single
3711 = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX);
3712
3713 TME_SPARC_INSN_OK;
3714 }
3715
3716 /* this does a sparc32 lddf: */
TME_SPARC_FORMAT3(tme_sparc32_lddf,tme_uint32_t)3717 TME_SPARC_FORMAT3(tme_sparc32_lddf, tme_uint32_t)
3718 {
3719 tme_uint32_t address;
3720 tme_uint32_t misaligned;
3721 struct tme_float float_buffer;
3722 struct tme_float *fpreg;
3723
3724 /* get the address: */
3725 address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
3726
3727 /* get the least significant 32 bits of the address: */
3728 misaligned = address;
3729
3730 /* decode rd: */
3731 float_buffer.tme_float_format = TME_FLOAT_FORMAT_IEEE754_DOUBLE;
3732 fpreg
3733 = _tme_sparc32_fpu_mem_fpreg(ic,
3734 misaligned,
3735 &float_buffer);
3736
3737 /* do the load: */
3738 tme_sparc32_ldd(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX));
3739
3740 /* set the double floating-point register value: */
3741 assert (fpreg != &float_buffer);
3742 fpreg->tme_float_format = TME_FLOAT_FORMAT_IEEE754_DOUBLE;
3743 fpreg->tme_float_value_ieee754_double.tme_value64_uint32_hi
3744 = ic->tme_sparc_ireg_uint32((TME_SPARC_IREG_FPX) + 0);
3745 fpreg->tme_float_value_ieee754_double.tme_value64_uint32_lo
3746 = ic->tme_sparc_ireg_uint32((TME_SPARC_IREG_FPX) + 1);
3747
3748 TME_SPARC_INSN_OK;
3749 }
3750
3751 /* this does a sparc32 ldfsr: */
TME_SPARC_FORMAT3(tme_sparc32_ldfsr,tme_uint32_t)3752 TME_SPARC_FORMAT3(tme_sparc32_ldfsr, tme_uint32_t)
3753 {
3754 tme_uint32_t fsr;
3755
3756 _tme_sparc32_fpu_mem(ic);
3757
3758 /* do the load: */
3759 tme_sparc32_ld(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX));
3760
3761 /* update the FSR: */
3762 fsr = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX);
3763 /* "An LDFSR instruction does not affect ftt." */
3764 /* "The LDFSR instruction does not affect qne." */
3765 fsr &= ~(TME_SPARC_FSR_VER | TME_SPARC_FSR_FTT | TME_SPARC_FSR_QNE);
3766 ic->tme_sparc_fpu_fsr = (ic->tme_sparc_fpu_fsr & (TME_SPARC_FSR_VER | TME_SPARC_FSR_FTT | TME_SPARC_FSR_QNE)) | fsr;
3767
3768 TME_SPARC_INSN_OK;
3769 }
3770
3771 /* this does a sparc32 stf: */
TME_SPARC_FORMAT3(tme_sparc32_stf,tme_uint32_t)3772 TME_SPARC_FORMAT3(tme_sparc32_stf, tme_uint32_t)
3773 {
3774 tme_uint32_t misaligned;
3775 struct tme_float float_buffer;
3776 const struct tme_float *fpreg;
3777 const tme_uint32_t *value_single;
3778
3779 /* get the least significant 32 bits of the address: */
3780 misaligned = TME_SPARC_FORMAT3_RS1;
3781 misaligned += (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
3782
3783 /* decode rd: */
3784 float_buffer.tme_float_format = TME_FLOAT_FORMAT_IEEE754_SINGLE;
3785 fpreg
3786 = _tme_sparc32_fpu_mem_fpreg(ic,
3787 misaligned,
3788 &float_buffer);
3789
3790 /* get this single floating-point register in IEEE754 single-precision format: */
3791 value_single = tme_ieee754_single_value_get(fpreg, &float_buffer.tme_float_value_ieee754_single);
3792
3793 /* set the floating-point register value: */
3794 ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX) = *value_single;
3795
3796 /* do the store: */
3797 tme_sparc32_st(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX));
3798
3799 assert (fpreg != &float_buffer);
3800 TME_SPARC_INSN_OK;
3801 }
3802
3803 /* this does a sparc32 stdf: */
TME_SPARC_FORMAT3(tme_sparc32_stdf,tme_uint32_t)3804 TME_SPARC_FORMAT3(tme_sparc32_stdf, tme_uint32_t)
3805 {
3806 tme_uint32_t address;
3807 tme_uint32_t misaligned;
3808 struct tme_float float_buffer;
3809 struct tme_float *fpreg;
3810 const union tme_value64 *value_double;
3811
3812 /* get the address: */
3813 address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
3814
3815 /* get the least significant 32 bits of the address: */
3816 misaligned = address;
3817
3818 /* decode rd: */
3819 float_buffer.tme_float_format = TME_FLOAT_FORMAT_IEEE754_DOUBLE;
3820 fpreg
3821 = _tme_sparc32_fpu_mem_fpreg(ic,
3822 misaligned,
3823 &float_buffer);
3824
3825 /* get this double floating-point register in IEEE754 double-precision format: */
3826 value_double = tme_ieee754_double_value_get(fpreg, &float_buffer.tme_float_value_ieee754_double);
3827
3828 /* set the floating-point register value: */
3829 ic->tme_sparc_ireg_uint32((TME_SPARC_IREG_FPX) + 0)
3830 = value_double->tme_value64_uint32_hi;
3831 ic->tme_sparc_ireg_uint32((TME_SPARC_IREG_FPX) + 1)
3832 = value_double->tme_value64_uint32_lo;
3833
3834 /* do the store: */
3835 tme_sparc32_std(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX));
3836
3837 assert (fpreg != &float_buffer);
3838 TME_SPARC_INSN_OK;
3839 }
3840
3841 /* this does a sparc32 stfsr: */
TME_SPARC_FORMAT3(tme_sparc32_stfsr,tme_uint32_t)3842 TME_SPARC_FORMAT3(tme_sparc32_stfsr, tme_uint32_t)
3843 {
3844
3845 _tme_sparc32_fpu_mem(ic);
3846
3847 /* set the FSR value to store: */
3848 ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX) = ic->tme_sparc_fpu_fsr;
3849
3850 /* do the store: */
3851 tme_sparc32_st(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX));
3852
3853 TME_SPARC_INSN_OK;
3854 }
3855
/* this does a sparc32 fpop1: dispatch an FPop1-format instruction
   to the FPU emulation: */
TME_SPARC_FORMAT3(tme_sparc32_fpop1, tme_uint32_t)
{
  /* NOTE(review): TME_SPARC_INSN_FPU presumably traps when FPU access
     is not allowed - confirm against sparc-impl.h: */
  TME_SPARC_INSN_FPU;
  tme_sparc_fpu_fpop1(ic);
  TME_SPARC_INSN_OK;
}
3863
/* this does a sparc32 fpop2: dispatch an FPop2-format instruction
   to the FPU emulation: */
TME_SPARC_FORMAT3(tme_sparc32_fpop2, tme_uint32_t)
{
  /* NOTE(review): TME_SPARC_INSN_FPU presumably traps when FPU access
     is not allowed - confirm against sparc-impl.h: */
  TME_SPARC_INSN_FPU;
  tme_sparc_fpu_fpop2(ic);
  TME_SPARC_INSN_OK;
}
3871
/* this does a sparc32 "mulscc SRC1, SRC2, DST": one step of a
   multiply.  each step shifts Y right by one, conditionally adds the
   multiplier, and updates the integer condition codes.  the quoted
   steps below are from the instruction's architectural definition: */
TME_SPARC_FORMAT3(tme_sparc32_mulscc, tme_uint32_t)
{
  tme_uint32_t src1;
  tme_uint32_t src2;
  tme_uint32_t dst;
  tme_uint32_t y;
  tme_uint32_t cc;

  /* get the operands: */
  src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
  src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;

  /* perform the operation: */

  /* "(1) The multiplier is established as r[rs2] if the i field is zero, or
     sign_ext(simm13) if the i field is one."

     "(3) If the least significant bit of the Y register = 1, the shifted
     value from step (2) is added to the multiplier. If the LSB of the
     Y register = 0, then 0 is added to the shifted value from step (2)." */
  y = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y);
  if ((y & 1) == 0) {
    src2 = 0;
  }

  /* "(6) The Y register is shifted right by one bit, with the LSB of the
     unshifted r[rs1] replacing the MSB of Y." */
  /* NB: this is done before src1 is shifted in step (2) below, so the
     LSB tested here is from the unshifted r[rs1]: */
  y >>= 1;
  if (src1 & 1) {
    y += 0x80000000;
  }
  ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y) = y;

  /* "(2) A 32-bit value is computed by shifting r[rs1] right by one
     bit with (N xor V) from the PSR replacing the high-order bit.
     (This is the proper sign for the previous partial product.)" */
  /* NB: multiplying the PSR by (ICC_N / ICC_V) shifts the V bit up
     into the N bit position, so the XOR leaves (N xor V) in the N
     bit position: */
  src1 >>= 1;
  if (((ic->tme_sparc32_ireg_psr ^ (ic->tme_sparc32_ireg_psr * (TME_SPARC32_PSR_ICC_N / TME_SPARC32_PSR_ICC_V))) & TME_SPARC32_PSR_ICC_N) != 0) {
    src1 += 0x80000000;
  }

  /* "(4) The sum from step (3) is written into r[rd]." */
  dst = src1 + src2;

  /* "(5) The integer condition codes, icc, are updated according to the
     addition performed in step (3)." */

  /* store the destination: */
  TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;

  /* the condition codes are computed branchlessly: each comparison
     yields zero or one, which is multiplied by the flag's bit mask: */

  /* set Z if the destination is zero: */
  cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);

  /* set N if the destination is negative: */
  cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);

  /* if the operands are the same sign, and the destination has
     a different sign, set V: */
  /* NB: (src2 ^ ~0) is src2's complement, so (src1 ^ ~src2) has its
     sign bit set exactly when src1 and src2 have the same sign: */
  cc += ((((tme_int32_t) ((src2 ^ dst) & (src1 ^ (src2 ^ (((tme_uint32_t) 0) - 1))))) < 0) * TME_SPARC32_PSR_ICC_V);

  /* if src1 and src2 both have the high bit set, or if dst does
     not have the high bit set and either src1 or src2 does, set C: */
  cc += (((tme_int32_t) (((tme_uint32_t) (src1 & src2)) | ((((tme_uint32_t) dst) ^ (((tme_uint32_t) 0) - 1)) & ((tme_uint32_t) (src1 | src2))))) < 0) * TME_SPARC32_PSR_ICC_C;

  /* set the condition codes: */
  ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;

  TME_SPARC_INSN_OK;
}
3942
3943 /* this does a slow atomic operation: */
3944 void
tme_sparc32_atomic(struct tme_sparc * ic,struct tme_sparc_ls * ls)3945 tme_sparc32_atomic(struct tme_sparc *ic, struct tme_sparc_ls *ls)
3946 {
3947 tme_uint32_t endian_little;
3948 tme_uint32_t insn;
3949 tme_uint32_t value32;
3950 tme_uint32_t value_swap32;
3951 tme_uint32_t size;
3952
3953 /* if this is the beginning of the operation: */
3954 if (ls->tme_sparc_ls_state == 0) {
3955
3956 /* start the load part of the operation: */
3957 ls->tme_sparc_ls_state = ls->tme_sparc_ls_size;
3958 assert (ls->tme_sparc_ls_state != 0
3959 && (ls->tme_sparc_ls_state & TME_BIT(7)) == 0);
3960
3961 /* the load must start at the beginning of the buffer: */
3962 assert (ls->tme_sparc_ls_buffer_offset == 0);
3963 }
3964
3965 /* if this is the load part of the operation: */
3966 if ((ls->tme_sparc_ls_state & TME_BIT(7)) == 0) {
3967
3968 /* do one slow load cycle: */
3969 tme_sparc32_load(ic, ls);
3970
3971 /* if the slow load cycle did not load all of the data: */
3972 if (__tme_predict_false(ls->tme_sparc_ls_size != 0)) {
3973 return;
3974 }
3975
3976 /* get the byte order of this transfer: */
3977 endian_little = FALSE;
3978
3979 /* dispatch on the op3 of the instruction: */
3980 insn = TME_SPARC_INSN;
3981 switch ((insn >> 19) & 0x3f) {
3982
3983 case 0x0d: /* ldstub */
3984 case 0x1d: /* ldstuba */
3985
3986 /* finish the load part of the ldstub: */
3987 assert (ls->tme_sparc_ls_state == sizeof(tme_uint8_t));
3988 *ls->tme_sparc_ls_rd32 = ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer8s[0];
3989
3990 /* start the store part of the ldstub: */
3991 ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer8s[0] = 0xff;
3992 break;
3993
3994 /* otherwise, this must be swap: */
3995 default:
3996 assert (((insn >> 19) & 0x2f) == 0x0f /* swap, swapa */);
3997
3998 /* finish the load part of the swap: */
3999 assert (ls->tme_sparc_ls_state == sizeof(tme_uint32_t));
4000 value32 = ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer32s[0];
4001 value_swap32 = *ls->tme_sparc_ls_rd32;
4002 if (endian_little) {
4003 value32 = tme_letoh_u32(value32);
4004 value_swap32 = tme_htole_u32(value32);
4005 }
4006 else {
4007 value32 = tme_betoh_u32(value32);
4008 value_swap32 = tme_htobe_u32(value32);
4009 }
4010 *ls->tme_sparc_ls_rd32 = value32;
4011
4012 /* start the store part of the swap: */
4013 ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer32s[0] = value_swap32;
4014 break;
4015 }
4016
4017 /* start the store part of the operation: */
4018 size = ls->tme_sparc_ls_state;
4019 ls->tme_sparc_ls_address32 -= size;
4020 ls->tme_sparc_ls_size = size;
4021 ls->tme_sparc_ls_buffer_offset = 0;
4022 ls->tme_sparc_ls_state = size | TME_BIT(7);
4023 }
4024
4025 /* this is the store part of the operation: */
4026
4027 /* do one slow store cycle: */
4028 tme_sparc32_store(ic, ls);
4029
4030 /* if the slow store cycle did not store all of the data: */
4031 if (__tme_predict_false(ls->tme_sparc_ls_size != 0)) {
4032 return;
4033 }
4034 }
4035
/* this does one slow load cycle: transfers as much of the remaining
   load data as this TLB entry allows into the memory buffer, either
   directly from emulator memory (fast path) or by calling out a bus
   read cycle.  on success, the address, buffer offset, and remaining
   size in *ls are advanced by the amount transferred: */
void
tme_sparc32_load(struct tme_sparc *ic,
		 struct tme_sparc_ls *ls)
{
  struct tme_sparc_tlb *tlb;
  tme_uint32_t address;
  unsigned int cycle_size;
  tme_bus_addr_t physical_address;
  int shift;
  int err;

  /* get the TLB entry: */
  tlb = ls->tme_sparc_ls_tlb;

  /* the TLB entry must be busy and valid: */
  assert (tme_bus_tlb_is_valid(&tlb->tme_sparc_tlb_bus_tlb));

  /* start the bus cycle structure: */
  ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_type = TME_BUS_CYCLE_READ;

  /* get the buffer: */
  ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_buffer = &ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer8s[ls->tme_sparc_ls_buffer_offset];
  ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_buffer_increment = 1;

  /* get the current address: */
  address = ls->tme_sparc_ls_address32;
  ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address = address;

  /* start the cycle size: clamp the remaining transfer size to the
     end of this TLB entry's address range.  the decrement/increment
     dance keeps the min computed on (size - 1) and (last - address),
     avoiding overflow when the range runs to the end of the 32-bit
     address space: */
  cycle_size = ls->tme_sparc_ls_size;
  assert (cycle_size > 0);
  cycle_size--;
  cycle_size = TME_MIN(cycle_size, (((tme_bus_addr32_t) tlb->tme_sparc_tlb_addr_last) - address)) + 1;

  /* if this TLB entry allows fast reads: */
  if (__tme_predict_true(tlb->tme_sparc_tlb_emulator_off_read != TME_EMULATOR_OFF_UNDEF)) {

    /* do a read directly from emulator memory: */
    ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size = cycle_size;
    tme_memory_bus_read_buffer((tlb->tme_sparc_tlb_emulator_off_read + (tme_uint32_t) ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address),
			       ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_buffer,
			       ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size,
			       tlb->tme_sparc_tlb_bus_rwlock,
			       sizeof(tme_uint8_t),
			       sizeof(tme_uint32_t));
  }

  /* otherwise, this TLB entry does not allow fast reads: */
  else {

    /* finish the cycle size: also keep a slow cycle from crossing a
       32-bit boundary: */
    cycle_size = TME_MIN(cycle_size, 1 + ((~ (unsigned int) address) % sizeof(tme_uint32_t)));
    ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size = cycle_size;

    /* form the physical address for the bus cycle handler.  a
       negative shift count means shift left: */
    physical_address = ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address;
    physical_address += tlb->tme_sparc_tlb_addr_offset;
    shift = tlb->tme_sparc_tlb_addr_shift;
    if (shift < 0) {
      physical_address <<= (0 - shift);
    }
    else if (shift > 0) {
      physical_address >>= shift;
    }
    ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address = physical_address;

    /* finish the bus cycle structure: */
    (*ic->_tme_sparc_ls_bus_cycle)(ic, ls);
    tme_sparc_log(ic, 10000, TME_OK,
		  (TME_SPARC_LOG_HANDLE(ic),
		   _("cycle-load%u 0x%08" TME_PRIx32),
		   (unsigned int) (ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size * 8),
		   (tme_bus_addr32_t) ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address));

    /* callout the bus cycle.  the TLB is unbusied and the callout
       lock dropped around the callout: */
    tme_sparc_tlb_unbusy(tlb);
    tme_sparc_callout_unlock(ic);
    err = (*tlb->tme_sparc_tlb_bus_tlb.tme_bus_tlb_cycle)
      (tlb->tme_sparc_tlb_bus_tlb.tme_bus_tlb_cycle_private,
       &ls->tme_sparc_ls_bus_cycle);
    tme_sparc_callout_relock(ic);
    tme_sparc_tlb_busy(tlb);

    /* the TLB entry can't have been invalidated before the load: */
    assert (err != EBADF);

    /* if the bus cycle didn't complete normally: */
    if (err != TME_OK) {

      /* if a real bus fault may have happened, instead of
	 some synchronous event: */
      if (err != TME_BUS_CYCLE_SYNCHRONOUS_EVENT) {

	/* call the bus fault handlers: */
	err = tme_bus_tlb_fault(&tlb->tme_sparc_tlb_bus_tlb, &ls->tme_sparc_ls_bus_cycle, err);
      }

      /* if some synchronous event has happened: */
      if (err == TME_BUS_CYCLE_SYNCHRONOUS_EVENT) {

	/* after the currently executing instruction finishes, check
	   for external resets, halts, or interrupts: */
	ic->_tme_sparc_instruction_burst_remaining = 0;
	ic->_tme_sparc_instruction_burst_other = TRUE;
      }

      /* otherwise, if no real bus fault happened: */
      else if (err == TME_OK) {

	/* nothing to do */
      }

      /* otherwise, a real bus fault happened: */
      else {
	(*ic->_tme_sparc_ls_bus_fault)(ic, ls, err);
	return;
      }
    }
  }

  /* some data must have been transferred: */
  assert (ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size > 0);

  /* update: advance by however much the cycle actually transferred
     (the handler may have transferred less than requested): */
  cycle_size = ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size;
  ls->tme_sparc_ls_address32 += cycle_size;
  ls->tme_sparc_ls_buffer_offset += cycle_size;
  ls->tme_sparc_ls_size -= cycle_size;
}
4166
/* this does one slow store cycle: transfers as much of the remaining
   store data as this TLB entry allows out of the memory buffer,
   either directly to emulator memory (fast path) or by calling out a
   bus write cycle.  on success, the address, buffer offset, and
   remaining size in *ls are advanced by the amount transferred: */
void
tme_sparc32_store(struct tme_sparc *ic,
		  struct tme_sparc_ls *ls)
{
  struct tme_sparc_tlb *tlb;
  tme_uint32_t address;
  unsigned int cycle_size;
  tme_bus_addr_t physical_address;
  int shift;
  int err;

  /* get the TLB entry: */
  tlb = ls->tme_sparc_ls_tlb;

  /* the TLB entry must be busy and valid: */
  assert (tme_bus_tlb_is_valid(&tlb->tme_sparc_tlb_bus_tlb));

  /* start the bus cycle structure: */
  ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_type = TME_BUS_CYCLE_WRITE;

  /* get the buffer: */
  ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_buffer = &ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer8s[ls->tme_sparc_ls_buffer_offset];
  ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_buffer_increment = 1;

  /* get the current address: */
  address = ls->tme_sparc_ls_address32;
  ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address = address;

  /* start the cycle size: clamp the remaining transfer size to the
     end of this TLB entry's address range.  the decrement/increment
     dance keeps the min computed on (size - 1) and (last - address),
     avoiding overflow when the range runs to the end of the 32-bit
     address space: */
  cycle_size = ls->tme_sparc_ls_size;
  assert (cycle_size > 0);
  cycle_size--;
  cycle_size = TME_MIN(cycle_size, (((tme_bus_addr32_t) tlb->tme_sparc_tlb_addr_last) - address)) + 1;

  /* if this TLB entry allows fast writes: */
  if (__tme_predict_true(tlb->tme_sparc_tlb_emulator_off_write != TME_EMULATOR_OFF_UNDEF)) {

    /* do a write directly to emulator memory: */
    ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size = cycle_size;
    tme_memory_bus_write_buffer((tlb->tme_sparc_tlb_emulator_off_write + (tme_uint32_t) ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address),
				ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_buffer,
				ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size,
				tlb->tme_sparc_tlb_bus_rwlock,
				sizeof(tme_uint8_t),
				sizeof(tme_uint32_t));
  }

  /* otherwise, this TLB entry does not allow fast writes: */
  else {

    /* finish the cycle size: also keep a slow cycle from crossing a
       32-bit boundary: */
    cycle_size = TME_MIN(cycle_size, 1 + ((~ (unsigned int) address) % sizeof(tme_uint32_t)));
    ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size = cycle_size;

    /* form the physical address for the bus cycle handler.  a
       negative shift count means shift left: */
    physical_address = ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address;
    physical_address += tlb->tme_sparc_tlb_addr_offset;
    shift = tlb->tme_sparc_tlb_addr_shift;
    if (shift < 0) {
      physical_address <<= (0 - shift);
    }
    else if (shift > 0) {
      physical_address >>= shift;
    }
    ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address = physical_address;

    /* finish the bus cycle structure: */
    (*ic->_tme_sparc_ls_bus_cycle)(ic, ls);
    tme_sparc_log(ic, 10000, TME_OK,
		  (TME_SPARC_LOG_HANDLE(ic),
		   _("cycle-store%u 0x%08" TME_PRIx32),
		   (unsigned int) (ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size * 8),
		   (tme_bus_addr32_t) ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address));

    /* callout the bus cycle.  the TLB is unbusied and the callout
       lock dropped around the callout: */
    tme_sparc_tlb_unbusy(tlb);
    tme_sparc_callout_unlock(ic);
    err = (*tlb->tme_sparc_tlb_bus_tlb.tme_bus_tlb_cycle)
      (tlb->tme_sparc_tlb_bus_tlb.tme_bus_tlb_cycle_private,
       &ls->tme_sparc_ls_bus_cycle);
    tme_sparc_callout_relock(ic);
    tme_sparc_tlb_busy(tlb);

    /* the TLB entry can't have been invalidated before the store: */
    assert (err != EBADF);

    /* if the bus cycle didn't complete normally: */
    if (err != TME_OK) {

      /* if a real bus fault may have happened, instead of
	 some synchronous event: */
      if (err != TME_BUS_CYCLE_SYNCHRONOUS_EVENT) {

	/* call the bus fault handlers: */
	err = tme_bus_tlb_fault(&tlb->tme_sparc_tlb_bus_tlb, &ls->tme_sparc_ls_bus_cycle, err);
      }

      /* if some synchronous event has happened: */
      if (err == TME_BUS_CYCLE_SYNCHRONOUS_EVENT) {

	/* after the currently executing instruction finishes, check
	   for external resets, halts, or interrupts: */
	ic->_tme_sparc_instruction_burst_remaining = 0;
	ic->_tme_sparc_instruction_burst_other = TRUE;
      }

      /* otherwise, if no real bus fault happened: */
      else if (err == TME_OK) {

	/* nothing to do */
      }

      /* otherwise, a real bus fault happened: */
      else {
	(*ic->_tme_sparc_ls_bus_fault)(ic, ls, err);
	return;
      }
    }
  }

  /* some data must have been transferred: */
  assert (ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size > 0);

  /* if this was an atomic operation: */
  if (__tme_predict_false(ls->tme_sparc_ls_lsinfo & TME_SPARC_LSINFO_OP_ATOMIC)) {

    /* we do not support atomic operations in TLB entries that
       do not support both fast reads and fast writes. assuming
       that all atomic operations are to regular memory, we
       should always get fast read and fast write TLBs. when
       we do not, it should only be because the memory has been
       made read-only in the MMU. the write above was supposed
       to cause a fault (with the instruction rerun later with
       a fast read and fast write TLB entry), but instead it
       succeeded and transferred some data. we have modified
       memory and cannot recover: */
    abort();
  }

  /* update: advance by however much the cycle actually transferred
     (the handler may have transferred less than requested): */
  cycle_size = ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size;
  ls->tme_sparc_ls_address32 += cycle_size;
  ls->tme_sparc_ls_buffer_offset += cycle_size;
  ls->tme_sparc_ls_size -= cycle_size;
}
4313
4314 /* this does a slow load or store: */
4315 tme_shared tme_uint8_t *
tme_sparc32_ls(struct tme_sparc * ic,tme_uint32_t const address_first,tme_uint32_t * _rd,tme_uint32_t lsinfo)4316 tme_sparc32_ls(struct tme_sparc *ic,
4317 tme_uint32_t const address_first,
4318 tme_uint32_t *_rd,
4319 tme_uint32_t lsinfo)
4320 {
4321 struct tme_sparc_ls ls;
4322 tme_uint32_t size;
4323 tme_uint32_t asi;
4324 tme_uint32_t asi_mask_flags;
4325 tme_uint32_t asi_mask;
4326 tme_bus_context_t context;
4327 tme_uint32_t tlb_hash;
4328 unsigned long tlb_i;
4329 unsigned long handler_i;
4330 struct tme_sparc_tlb *tlb;
4331 unsigned int cycle_type;
4332 tme_uint32_t address;
4333 void (*address_map) _TME_P((struct tme_sparc *, struct tme_sparc_ls *));
4334 tme_bus_addr_t address_bus;
4335 int rc;
4336 const tme_shared tme_uint8_t *emulator_off;
4337 unsigned int buffer_offset;
4338 tme_uint32_t value;
4339 tme_uint32_t value32;
4340
4341 /* we must not be replaying instructions: */
4342 assert (tme_sparc_recode_verify_replay_last_pc(ic) == 0);
4343
4344 /* initialize the pointer to the rd register: */
4345 ls.tme_sparc_ls_rd32 = _rd;
4346
4347 #ifndef NDEBUG
4348
4349 /* initialize the cycle function: */
4350 ls.tme_sparc_ls_cycle = NULL;
4351
4352 /* initialize the TLB entry pointer: */
4353 ls.tme_sparc_ls_tlb = NULL;
4354
4355 #endif /* NDEBUG */
4356
4357 /* initialize the faults: */
4358 ls.tme_sparc_ls_faults = TME_SPARC_LS_FAULT_NONE;
4359
4360 /* initialize the address: */
4361 ls.tme_sparc_ls_address32 = address_first;
4362
4363 /* initialize the size: */
4364 size = TME_SPARC_LSINFO_WHICH_SIZE(lsinfo);
4365 ls.tme_sparc_ls_size = size;
4366
4367 /* initialize the info: */
4368 ls.tme_sparc_ls_lsinfo = lsinfo;
4369
4370 /* if the address is not aligned: */
4371 if (__tme_predict_false(((size - 1) & (tme_uint32_t) address_first) != 0)) {
4372 ls.tme_sparc_ls_faults |= TME_SPARC_LS_FAULT_ADDRESS_NOT_ALIGNED;
4373 }
4374
4375 /* otherwise, the address is aligned: */
4376 else {
4377
4378 /* the transfer must not cross a 32-bit boundary: */
4379 assert ((size - 1) <= (tme_uint32_t) ~address_first);
4380 }
4381
4382 /* initialize the address map: */
4383 ls.tme_sparc_ls_address_map = ic->_tme_sparc_ls_address_map;
4384
4385 /* if this is a ldd, ldda, std, or stda, or an instruction
4386 that loads or stores in the same way: */
4387 if (lsinfo & TME_SPARC_LSINFO_LDD_STD) {
4388
4389 /* if the rd register is odd: */
4390 /* NB: we don't check the rd field in the instruction,
4391 because the register number there might be encoded
4392 in some way, or the architecture might ignore bit
4393 zero in the rd field (for example, the sparc32 lddf).
4394 instead, we test the rd register pointer: */
4395 if (__tme_predict_false((ls.tme_sparc_ls_rd32
4396 - ic->tme_sparc_ic.tme_ic_iregs.tme_ic_iregs_uint32s)
4397 % 2)) {
4398 ls.tme_sparc_ls_faults |= TME_SPARC_LS_FAULT_LDD_STD_RD_ODD;
4399 }
4400 }
4401
4402 /* if the ASI has been specified: */
4403 if (lsinfo & TME_SPARC_LSINFO_A) {
4404
4405 /* get the ASI: */
4406 asi = TME_SPARC_LSINFO_WHICH_ASI(lsinfo);
4407
4408 /* get the flags for this ASI: */
4409 asi_mask_flags = ic->tme_sparc_asis[asi].tme_sparc_asi_mask_flags;
4410
4411 /* make the ASI mask: */
4412 if (asi_mask_flags & TME_SPARC32_ASI_MASK_FLAG_SPECIAL) {
4413 asi_mask
4414 = TME_SPARC_ASI_MASK_SPECIAL(asi, TRUE);
4415 }
4416 else {
4417 asi_mask = TME_SPARC32_ASI_MASK(asi, asi);
4418 }
4419 ls.tme_sparc_ls_asi_mask = asi_mask;
4420
4421 /* get the context for the alternate address space: */
4422 context = ic->tme_sparc_memory_context_default;
4423 ls.tme_sparc_ls_context = context;
4424
4425 /* get the default TLB entry index: */
4426 tlb_hash = TME_SPARC_TLB_HASH(ic, context, address_first);
4427 if (lsinfo & TME_SPARC_LSINFO_OP_FETCH) {
4428 tlb_i = TME_SPARC_ITLB_ENTRY(ic, tlb_hash);
4429 }
4430 else {
4431 tlb_i = TME_SPARC_DTLB_ENTRY(ic, tlb_hash);
4432 }
4433 ls.tme_sparc_ls_tlb_i = tlb_i;
4434
4435 /* call any special handler for this ASI: */
4436 handler_i = ic->tme_sparc_asis[TME_SPARC_ASI_MASK_WHICH(asi_mask)].tme_sparc_asi_handler;
4437 if (__tme_predict_false(handler_i != 0)) {
4438 (*ic->_tme_sparc_ls_asi_handlers[handler_i])(ic, &ls);
4439 }
4440
4441 /* get the final TLB entry index: */
4442 tlb_i = ls.tme_sparc_ls_tlb_i;
4443 }
4444
4445 /* otherwise, the ASI has not been specified: */
4446 else {
4447
4448 /* get the ASI mask: */
4449 asi_mask = ic->tme_sparc_asi_mask_data;
4450
4451 /* add in any ASI mask flags from the instruction: */
4452 assert (TME_SPARC_LSINFO_WHICH_ASI_FLAGS(lsinfo) == 0
4453 );
4454 asi_mask |= TME_SPARC_LSINFO_WHICH_ASI_FLAGS(lsinfo);
4455
4456 /* set the ASI mask: */
4457 ls.tme_sparc_ls_asi_mask = asi_mask;
4458
4459 /* get the context: */
4460 context = ic->tme_sparc_memory_context_default;
4461 ls.tme_sparc_ls_context = context;
4462
4463 /* this must not be a fetch: */
4464 assert ((lsinfo & TME_SPARC_LSINFO_OP_FETCH) == 0);
4465
4466 /* get the TLB entry index: */
4467 tlb_hash = TME_SPARC_TLB_HASH(ic, context, address_first);
4468 tlb_i = TME_SPARC_DTLB_ENTRY(ic, tlb_hash);
4469 ls.tme_sparc_ls_tlb_i = tlb_i;
4470 }
4471
4472 /* get the TLB entry pointer: */
4473 tlb = &ic->tme_sparc_tlbs[tlb_i];
4474 ls.tme_sparc_ls_tlb = tlb;
4475
4476 /* get the cycle type: */
4477 /* NB: we deliberately set this once, now, since the lsinfo
4478 may change once we start transferring: */
4479 cycle_type
4480 = ((lsinfo
4481 & (TME_SPARC_LSINFO_OP_ST
4482 | TME_SPARC_LSINFO_OP_ATOMIC))
4483 ? TME_BUS_CYCLE_WRITE
4484 : TME_BUS_CYCLE_READ);
4485
4486 /* loop until the transfer is complete: */
4487 for (;;) {
4488
4489 /* if we have faulted: */
4490 if (__tme_predict_false(ls.tme_sparc_ls_faults != TME_SPARC_LS_FAULT_NONE)) {
4491
4492 /* unbusy this TLB, since the trap function may not return: */
4493 tme_bus_tlb_unbusy(&tlb->tme_sparc_tlb_bus_tlb);
4494
4495 /* call the trap function, which will not return if it traps: */
4496 (*ic->_tme_sparc_ls_trap)(ic, &ls);
4497
4498 /* rebusy this TLB: */
4499 tme_bus_tlb_busy(&tlb->tme_sparc_tlb_bus_tlb);
4500
4501 /* since the trap function returned, it must have cleared the fault: */
4502 assert (ls.tme_sparc_ls_faults == TME_SPARC_LS_FAULT_NONE);
4503 }
4504
4505 /* if the transfer is complete, stop now: */
4506 if (__tme_predict_false(ls.tme_sparc_ls_size == 0)) {
4507 break;
4508 }
4509
4510 /* get the current address: */
4511 address = ls.tme_sparc_ls_address32;
4512
4513 /* if this TLB entry does not apply or is invalid: */
4514 if ((tlb->tme_sparc_tlb_context != ls.tme_sparc_ls_context
4515 && tlb->tme_sparc_tlb_context <= ic->tme_sparc_memory_context_max)
4516 || address < (tme_bus_addr32_t) tlb->tme_sparc_tlb_addr_first
4517 || address > (tme_bus_addr32_t) tlb->tme_sparc_tlb_addr_last
4518 || !TME_SPARC_TLB_ASI_MASK_OK(tlb, ls.tme_sparc_ls_asi_mask)
4519 || ((tlb->tme_sparc_tlb_cycles_ok & cycle_type) == 0
4520 && (cycle_type == TME_BUS_CYCLE_READ
4521 ? tlb->tme_sparc_tlb_emulator_off_read
4522 : tlb->tme_sparc_tlb_emulator_off_write) == TME_EMULATOR_OFF_UNDEF)
4523 || tme_bus_tlb_is_invalid(&tlb->tme_sparc_tlb_bus_tlb)) {
4524
4525 /* unbusy this TLB entry for filling: */
4526 tme_bus_tlb_unbusy_fill(&tlb->tme_sparc_tlb_bus_tlb);
4527
4528 /* if we haven't mapped this address yet: */
4529 address_map = ls.tme_sparc_ls_address_map;
4530 if (address_map != NULL) {
4531 ls.tme_sparc_ls_address_map = NULL;
4532
4533 /* count this mapping: */
4534 if (ls.tme_sparc_ls_lsinfo & TME_SPARC_LSINFO_OP_FETCH) {
4535 TME_SPARC_STAT(ic, tme_sparc_stats_itlb_map);
4536 }
4537 else {
4538 TME_SPARC_STAT(ic, tme_sparc_stats_dtlb_map);
4539 }
4540
4541 /* initialize the ASI mask and context on this TLB entry: */
4542 /* NB that the ASI mask will likely be updated by either the
4543 address mapping or the TLB fill: */
4544 tlb->tme_sparc_tlb_asi_mask
4545 = (ls.tme_sparc_ls_asi_mask
4546 & ~TME_SPARC_ASI_MASK_FLAGS_AVAIL);
4547 tlb->tme_sparc_tlb_context = ls.tme_sparc_ls_context;
4548
4549 /* NB: if the address mapping traps, we won't get a chance
4550 to finish updating this TLB entry, which is currently in
4551 an inconsistent state - but not necessarily an unusable
4552 state. poison it to be unusable, including any recode
4553 TLB entry: */
4554 tlb->tme_sparc_tlb_addr_first = 1;
4555 tlb->tme_sparc_tlb_addr_last = 0;
4556 #if TME_SPARC_HAVE_RECODE(ic)
4557 if (ls.tme_sparc_ls_lsinfo & TME_SPARC_LSINFO_OP_FETCH) {
4558 tme_sparc32_recode_chain_tlb_update(ic, &ls);
4559 }
4560 else {
4561 tme_sparc32_recode_ls_tlb_update(ic, &ls);
4562 }
4563 #endif /* TME_SPARC_HAVE_RECODE(ic) */
4564
4565 #ifndef NDEBUG
4566
4567 /* initialize the mapping TLB entry: */
4568 ls.tme_sparc_ls_tlb_map.tme_bus_tlb_addr_first = 0 - (tme_bus_addr_t) 1;
4569 ls.tme_sparc_ls_tlb_map.tme_bus_tlb_addr_last = 0 - (tme_bus_addr_t) 2;
4570 ls.tme_sparc_ls_tlb_map.tme_bus_tlb_cycles_ok = 0;
4571 ls.tme_sparc_ls_tlb_map.tme_bus_tlb_addr_offset = 0 - (tme_bus_addr_t) 1;
4572
4573 #endif /* !NDEBUG */
4574
4575 /* map the address: */
4576 (*address_map)(ic, &ls);
4577
4578 /* the address mapping must do any trapping itself: */
4579 assert (ls.tme_sparc_ls_faults == TME_SPARC_LS_FAULT_NONE);
4580
4581 /* if the address mapping completed the transfer: */
4582 if (__tme_predict_false(ls.tme_sparc_ls_size == 0)) {
4583
4584 /* rebusy the TLB entry: */
4585 tme_sparc_tlb_busy(tlb);
4586
4587 /* stop now: */
4588 break;
4589 }
4590
4591 /* the mapping must have actually made a mapping: */
4592 assert (ls.tme_sparc_ls_tlb_map.tme_bus_tlb_addr_first != 0 - (tme_bus_addr_t) 1);
4593 assert (ls.tme_sparc_ls_tlb_map.tme_bus_tlb_addr_last != 0 - (tme_bus_addr_t) 2);
4594 assert (ls.tme_sparc_ls_tlb_map.tme_bus_tlb_cycles_ok != 0);
4595 assert (ls.tme_sparc_ls_tlb_map.tme_bus_tlb_addr_offset != 0 - (tme_bus_addr_t) 1);
4596 }
4597
4598 /* count this fill: */
4599 if (ls.tme_sparc_ls_lsinfo & TME_SPARC_LSINFO_OP_FETCH) {
4600 TME_SPARC_STAT(ic, tme_sparc_stats_itlb_fill);
4601 }
4602 else {
4603 TME_SPARC_STAT(ic, tme_sparc_stats_dtlb_fill);
4604 }
4605
4606 /* get the bus address: */
4607 address_bus = ls.tme_sparc_ls_address32 + ls.tme_sparc_ls_tlb_map.tme_bus_tlb_addr_offset;
4608
4609 /* fill the TLB entry: */
4610 tme_sparc_callout_unlock(ic);
4611 rc = (*ic->_tme_sparc_bus_connection->tme_sparc_bus_tlb_fill)
4612 (ic->_tme_sparc_bus_connection,
4613 tlb,
4614 ls.tme_sparc_ls_asi_mask,
4615 address_bus,
4616 cycle_type);
4617 assert (rc == TME_OK);
4618 tme_sparc_callout_relock(ic);
4619
4620 /* map the TLB entry: */
4621 tme_bus_tlb_map(&tlb->tme_sparc_tlb_bus_tlb, address_bus,
4622 &ls.tme_sparc_ls_tlb_map, ls.tme_sparc_ls_address32);
4623
4624 /* update any recode TLB entry: */
4625 #if TME_SPARC_HAVE_RECODE(ic)
4626 if (ls.tme_sparc_ls_lsinfo & TME_SPARC_LSINFO_OP_FETCH) {
4627 tme_sparc32_recode_chain_tlb_update(ic, &ls);
4628 }
4629 else {
4630 tme_sparc32_recode_ls_tlb_update(ic, &ls);
4631 }
4632 #endif /* TME_SPARC_HAVE_RECODE(ic) */
4633
4634 /* rebusy the TLB entry: */
4635 tme_sparc_tlb_busy(tlb);
4636
4637 /* if this TLB entry is already invalid: */
4638 if (tme_bus_tlb_is_invalid(&tlb->tme_sparc_tlb_bus_tlb)) {
4639 continue;
4640 }
4641 }
4642
4643 /* this TLB entry must apply: */
4644 assert ((tlb->tme_sparc_tlb_context == ls.tme_sparc_ls_context
4645 || tlb->tme_sparc_tlb_context > ic->tme_sparc_memory_context_max)
4646 && ls.tme_sparc_ls_address32 >= (tme_bus_addr32_t) tlb->tme_sparc_tlb_addr_first
4647 && ls.tme_sparc_ls_address32 <= (tme_bus_addr32_t) tlb->tme_sparc_tlb_addr_last
4648 && ((tlb->tme_sparc_tlb_cycles_ok & cycle_type)
4649 || (cycle_type == TME_BUS_CYCLE_READ
4650 ? tlb->tme_sparc_tlb_emulator_off_read
4651 : tlb->tme_sparc_tlb_emulator_off_write) != TME_EMULATOR_OFF_UNDEF)
4652 && TME_SPARC_TLB_ASI_MASK_OK(tlb, ls.tme_sparc_ls_asi_mask));
4653
4654 /* get the current lsinfo: */
4655 lsinfo = ls.tme_sparc_ls_lsinfo;
4656
4657 /* if we have to check the TLB: */
4658 if (__tme_predict_true((lsinfo & TME_SPARC_LSINFO_NO_CHECK_TLB) == 0)) {
4659
4660 /* get the ASI mask for this TLB entry: */
4661 asi_mask = tlb->tme_sparc_tlb_asi_mask;
4662 }
4663
4664 /* if we might not have to call a slow cycle function: */
4665 if (__tme_predict_true((lsinfo & TME_SPARC_LSINFO_SLOW_CYCLES) == 0)) {
4666
4667 /* if this TLB entry allows fast transfer of all of the addresses: */
4668 if (__tme_predict_true(((tme_bus_addr32_t) tlb->tme_sparc_tlb_addr_last) >= (address_first + (ls.tme_sparc_ls_size - 1)))) {
4669 emulator_off = tlb->tme_sparc_tlb_emulator_off_read;
4670 if (lsinfo & TME_SPARC_LSINFO_OP_ST) {
4671 emulator_off = tlb->tme_sparc_tlb_emulator_off_write;
4672 }
4673 if (__tme_predict_true(emulator_off != TME_EMULATOR_OFF_UNDEF
4674 && (((lsinfo & TME_SPARC_LSINFO_OP_ATOMIC) == 0)
4675 || emulator_off == tlb->tme_sparc_tlb_emulator_off_write))) {
4676
4677 /* return and let our caller do the transfer: */
4678 /* NB: we break const here: */
4679 return ((tme_shared tme_uint8_t *) emulator_off);
4680 }
4681 }
4682
4683 /* we have to call a slow cycle function: */
4684 lsinfo |= TME_SPARC_LSINFO_SLOW_CYCLES;
4685 assert (ls.tme_sparc_ls_cycle == NULL);
4686
4687 /* assume that this operation will transfer the start of the buffer: */
4688 buffer_offset = 0;
4689
4690 /* assume that this is a load or a fetch: */
4691 ls.tme_sparc_ls_cycle = tme_sparc32_load;
4692
4693 /* if this is a store: */
4694 if (lsinfo & TME_SPARC_LSINFO_OP_ST) {
4695
4696 /* put the (first) register to store in the memory buffer: */
4697 value = TME_SPARC_FORMAT3_RD;
4698 value = (FALSE ? tme_htole_u32(value) : tme_htobe_u32(value));
4699 ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer32s[0] = value;
4700
4701 /* find the offset in the memory buffer corresponding to the
4702 first address: */
4703 buffer_offset = sizeof(tme_uint32_t) - ls.tme_sparc_ls_size;
4704 if (FALSE) {
4705 buffer_offset = 0;
4706 }
4707
4708 /* if this is a std or stda: */
4709 if (lsinfo & TME_SPARC_LSINFO_LDD_STD) {
4710
4711 /* put the odd 32-bit register to store in the memory buffer
4712 after the even 32-bit register. exactly where this is depends
4713 on the architecture and on the byte order of the store: */
4714 value32 = TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32);
4715 if (FALSE) {
4716 value32 = tme_htole_u32(value32);
4717 ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer32s[1] = value32;
4718 buffer_offset = 0;
4719 }
4720 else {
4721 value32 = tme_htobe_u32(value32);
4722 ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer32s[(32 / 32)] = value32;
4723 buffer_offset = sizeof(tme_uint32_t) - sizeof(tme_uint32_t);
4724 }
4725 }
4726
4727 /* set the cycle function: */
4728 ls.tme_sparc_ls_cycle = tme_sparc32_store;
4729 }
4730
4731 /* otherwise, if this is an atomic: */
4732 else if (lsinfo & TME_SPARC_LSINFO_OP_ATOMIC) {
4733
4734 /* set the cycle function: */
4735 ls.tme_sparc_ls_cycle = tme_sparc32_atomic;
4736 }
4737
4738 /* set the buffer offset for the (first) slow cycle: */
4739 ls.tme_sparc_ls_buffer_offset = buffer_offset;
4740
4741 /* clear the state for this operation: */
4742 ls.tme_sparc_ls_state = 0;
4743 }
4744
4745 /* assume that we won't have to check the TLB again: */
4746 ls.tme_sparc_ls_lsinfo = lsinfo | TME_SPARC_LSINFO_NO_CHECK_TLB;
4747 /* call the slow cycle function: */
4748 (*ls.tme_sparc_ls_cycle)(ic, &ls);
4749 }
4750
4751 /* if this was a load that has already completed, a store,
4752 or an atomic, make sure our caller doesn't try to complete
4753 a fast transfer: */
4754 if (ls.tme_sparc_ls_lsinfo
4755 & (TME_SPARC_LSINFO_LD_COMPLETED
4756 | TME_SPARC_LSINFO_OP_ST
4757 | TME_SPARC_LSINFO_OP_ATOMIC)) {
4758 return (TME_EMULATOR_OFF_UNDEF);
4759 }
4760
4761 /* otherwise, this was a load that did slow cycles into the
4762 memory buffer and hasn't updated rd yet. return a pointer
4763 to the memory buffer so our caller can complete the load: */
4764 return (ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer8s
4765 - address_first);
4766 }
4767
/* restore the default (runtime-selected) SPARC version for any
   following common code: */
#undef TME_SPARC_VERSION
#define TME_SPARC_VERSION(ic) _TME_SPARC_VERSION(ic)

#ifdef TME_HAVE_INT64_T

/* the functions below are the sparc64 variants, so pin the
   version macro at nine for them: */
#undef TME_SPARC_VERSION
#define TME_SPARC_VERSION(ic) (9)
4775
/* this makes the ASI mask for a sparc64 alternate-space load or
   store.  NB: unlike the sparc32 version above, the i bit selects
   the address space in the %asi register instead of being an
   illegal instruction, and a nonprivileged access does not trap
   here - a restricted ASI is instead forced down the slow path,
   which will generate the privileged_action trap: */
static tme_uint32_t
_tme_sparc64_alternate_asi_mask(struct tme_sparc *ic)
{
  unsigned int asi_data;
  unsigned int asi_mask_flags;
  tme_uint32_t asi_mask_data;

  /* get the ASI, assuming that the i bit is zero: */
  asi_data = TME_FIELD_MASK_EXTRACTU(TME_SPARC_INSN, (0xff << 5));

  /* if the i bit is one, use the address space in the ASI register: */
  if (TME_SPARC_INSN & TME_BIT(13)) {
    asi_data = ic->tme_sparc64_ireg_asi;
  }

  /* get the flags for this ASI: */
  asi_mask_flags = ic->tme_sparc_asis[asi_data].tme_sparc_asi_mask_flags;

  /* if this is a nonprivileged access: */
  if (!TME_SPARC_PRIV(ic)) {

    /* if this is a restricted ASI: */
    if (__tme_predict_false((asi_data & TME_SPARC64_ASI_FLAG_UNRESTRICTED) == 0)) {

      /* force a slow load or store, which will generate the
         privileged_action trap: */
      asi_mask_flags |= TME_SPARC_ASI_MASK_FLAG_UNDEF;
    }

    /* force a nonprivileged access with the ASI: */
    asi_mask_flags |= TME_SPARC64_ASI_MASK_FLAG_INSN_AS_IF_USER;
  }

  /* make the ASI mask: */
  if (asi_mask_flags & TME_SPARC64_ASI_MASK_FLAG_SPECIAL) {
    /* NB(review): the flags are combined with the special mask
       value using addition, not a bitwise or - presumably the two
       encodings never overlap; confirm against the definition of
       TME_SPARC_ASI_MASK_SPECIAL before changing this: */
    asi_mask_data
      = (asi_mask_flags
         + TME_SPARC_ASI_MASK_SPECIAL(asi_data,
                                      ((asi_mask_flags & TME_SPARC64_ASI_MASK_FLAG_INSN_AS_IF_USER) == 0)));
  }
  else {
    asi_mask_data = TME_SPARC64_ASI_MASK(asi_data, asi_mask_flags);
  }

  /* if this ASI has a special handler: */
  if (__tme_predict_false(ic->tme_sparc_asis[TME_SPARC_ASI_MASK_WHICH(asi_mask_data)].tme_sparc_asi_handler != 0)) {

    /* force a slow load or store, which will call the special handler: */
    asi_mask_data |= TME_SPARC_ASI_MASK_FLAG_UNDEF;
  }

  return (asi_mask_data);
}
4829
/* this does the common memory preamble for a sparc64 floating-point
   load or store, and returns the floating-point register to
   transfer.  when there is no register to transfer directly (the
   address is misaligned, or float_buffer has the null format),
   float_buffer itself is returned instead: */
static struct tme_float *
_tme_sparc64_fpu_mem_fpreg(struct tme_sparc *ic,
                           tme_uint32_t misaligned,
                           struct tme_float *float_buffer)
{
  unsigned int float_format;
  unsigned int fpreg_format;
  tme_uint32_t fp_store;
  unsigned int fpu_mode;
  unsigned int fpreg_number;

  /* NB: this checks for various traps by their priority order: */

  /* trap now if the FPU is disabled: */
  TME_SPARC_INSN_FPU_ENABLED;

  /* get the floating-point format: */
  float_format = float_buffer->tme_float_format;

  /* convert the floating-point format into the ieee754
     floating-point register file format.  the compile-time checks
     below guarantee that the conversion is a simple division: */
#if (TME_FLOAT_FORMAT_NULL | TME_IEEE754_FPREG_FORMAT_NULL) != 0
#error "TME_FLOAT_FORMAT_ or TME_IEEE754_FPREG_FORMAT_ values changed"
#endif
#if TME_FLOAT_FORMAT_IEEE754_SINGLE < TME_IEEE754_FPREG_FORMAT_SINGLE
#error "TME_FLOAT_FORMAT_ or TME_IEEE754_FPREG_FORMAT_ values changed"
#endif
#if (TME_FLOAT_FORMAT_IEEE754_SINGLE / TME_IEEE754_FPREG_FORMAT_SINGLE) != (TME_FLOAT_FORMAT_IEEE754_DOUBLE / TME_IEEE754_FPREG_FORMAT_DOUBLE)
#error "TME_FLOAT_FORMAT_ or TME_IEEE754_FPREG_FORMAT_ values changed"
#endif
  assert (float_format == TME_FLOAT_FORMAT_NULL
          || float_format == TME_FLOAT_FORMAT_IEEE754_SINGLE
          || float_format == TME_FLOAT_FORMAT_IEEE754_DOUBLE);
  fpreg_format = float_format / (TME_FLOAT_FORMAT_IEEE754_SINGLE / TME_IEEE754_FPREG_FORMAT_SINGLE);

  /* if the memory address is misaligned, return the
     float buffer now.  the eventual load or store will
     cause the mem_address_not_aligned trap: */

  /* if the memory address is misaligned: */
  /* NB: the compile-time check below guarantees that the fpreg
     format value is also the operand size in 32-bit words, so
     this masks the address with the full operand size: */
#if TME_IEEE754_FPREG_FORMAT_NULL != 0 || TME_IEEE754_FPREG_FORMAT_SINGLE != 1 || TME_IEEE754_FPREG_FORMAT_DOUBLE != 2 || TME_IEEE754_FPREG_FORMAT_QUAD != 4
#error "TME_IEEE754_FPREG_FORMAT_ values changed"
#endif
  assert (fpreg_format == TME_IEEE754_FPREG_FORMAT_NULL
          || fpreg_format == TME_IEEE754_FPREG_FORMAT_SINGLE
          || fpreg_format == TME_IEEE754_FPREG_FORMAT_DOUBLE
          || fpreg_format == TME_IEEE754_FPREG_FORMAT_QUAD);
  misaligned &= ((sizeof(tme_uint32_t) * fpreg_format) - 1);
  if (__tme_predict_false(misaligned)) {

    /* if the memory address is not even 32-bit aligned, or
       if this SPARC doesn't support loads and stores of this
       size at 32-bit alignment: */
    if (misaligned != sizeof(tme_uint32_t)
#if TME_IEEE754_FPREG_FORMAT_SINGLE != 1 || (TME_SPARC_MEMORY_FLAG_HAS_LDDF_STDF_32 * TME_IEEE754_FPREG_FORMAT_DOUBLE) != TME_SPARC_MEMORY_FLAG_HAS_LDQF_STQF_32
#error "TME_IEEE754_FPREG_FORMAT_ or TME_SPARC_MEMORY_FLAG_ values changed"
#endif
        || (TME_SPARC_MEMORY_FLAGS(ic)
            & (TME_SPARC_MEMORY_FLAG_HAS_LDDF_STDF_32 * fpreg_format)) == 0) {

      return (float_buffer);
    }
  }

  /* see if this is a floating-point load or store: */
  /* NB: all of the floating-point instructions that use
     this preamble have bit two of op3 clear for a load,
     and set for a store: */
  fp_store = (TME_SPARC_INSN & TME_BIT(19 + 2));

  /* if the FPU isn't in execute mode: */
  fpu_mode = ic->tme_sparc_fpu_mode;
  if (__tme_predict_false(fpu_mode != TME_SPARC_FPU_MODE_EXECUTE)) {

    /* if this is a floating-point load, or if this is a
       floating-point store and a floating-point exception
       is pending: */
    if (!fp_store
        || fpu_mode == TME_SPARC_FPU_MODE_EXCEPTION_PENDING) {

      /* do an FPU exception check: */
      tme_sparc_fpu_exception_check(ic);
    }
  }

  /* if this is not a load or store of a floating-point register: */
  if (fpreg_format == TME_IEEE754_FPREG_FORMAT_NULL) {
    return (float_buffer);
  }

  /* decode rd: */
  fpreg_number
    = tme_sparc_fpu_fpreg_decode(ic,
                                 TME_FIELD_MASK_EXTRACTU(TME_SPARC_INSN,
                                                         TME_SPARC_FORMAT3_MASK_RD),
                                 fpreg_format);

  /* make sure this floating-point register has the right precision: */
  tme_sparc_fpu_fpreg_format(ic, fpreg_number, fpreg_format | TME_IEEE754_FPREG_FORMAT_BUILTIN);

  /* if this is a floating-point load: */
  if (!fp_store) {

    /* mark rd as dirty: */
    TME_SPARC_FPU_DIRTY(ic, fpreg_number);
  }

  /* return the floating-point register: */
  return (&ic->tme_sparc_fpu_fpregs[fpreg_number]);
}
/* this runs the sparc64 floating-point memory preamble for its
   trap checks only: the all-bits-zero null float selects no
   floating-point register, and the returned pointer is discarded: */
#define _tme_sparc64_fpu_mem(ic) \
do { _tme_sparc64_fpu_mem_fpreg(ic, 0, &_tme_sparc_float_null); } while (/* CONSTCOND */ 0)
4941
4942 /* this does a sparc64 "add SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_add,tme_uint64_t)4943 TME_SPARC_FORMAT3(tme_sparc64_add, tme_uint64_t)
4944 {
4945 tme_uint64_t src1;
4946 tme_uint64_t src2;
4947 tme_uint64_t dst;
4948
4949 /* get the operands: */
4950 src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
4951 src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
4952
4953 /* perform the operation: */
4954 dst = src1 + src2;
4955
4956 /* store the destination: */
4957 TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
4958
4959 TME_SPARC_INSN_OK;
4960 }
4961
/* this does a sparc64 "addcc SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_addcc, tme_uint64_t)
{
  tme_uint64_t src1;
  tme_uint64_t src2;
  tme_uint64_t dst;
  tme_uint32_t cc;

  /* get the operands: */
  src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
  src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;

  /* perform the operation: */
  dst = src1 + src2;

  /* store the destination: */
  TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;

  /* NB: each condition code below is computed branch-free, as a
     boolean (zero or one) multiplied by the CCR bit to set.  the
     icc codes look only at the low 32 bits of the operands and
     result, and the xcc codes look at all 64 bits: */

  /* set Z if the destination is zero: */
  cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);

  /* set N if the destination is negative: */
  cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);

  /* if the operands are the same sign, and the destination has
     a different sign, set V: */
  /* NB: (src1 ^ (src2 ^ ~0)) is ~(src1 ^ src2), whose sign bit is
     set exactly when src1 and src2 have the same sign: */
  cc += ((((tme_int32_t) ((src2 ^ dst) & (src1 ^ (src2 ^ (((tme_uint32_t) 0) - 1))))) < 0) * TME_SPARC64_CCR_ICC_V);

  /* if src1 and src2 both have the high bit set, or if dst does
     not have the high bit set and either src1 or src2 does, set C: */
  cc += (((tme_int32_t) (((tme_uint32_t) (src1 & src2)) | ((((tme_uint32_t) dst) ^ (((tme_uint32_t) 0) - 1)) & ((tme_uint32_t) (src1 | src2))))) < 0) * TME_SPARC64_CCR_ICC_C;

  /* set Z if the destination is zero: */
  cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);

  /* set N if the destination is negative: */
  cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);

  /* if the operands are the same sign, and the destination has
     a different sign, set V: */
  cc += ((((tme_int64_t) ((src2 ^ dst) & (src1 ^ (src2 ^ (((tme_uint64_t) 0) - 1))))) < 0) * TME_SPARC64_CCR_XCC_V);

  /* if src1 and src2 both have the high bit set, or if dst does
     not have the high bit set and either src1 or src2 does, set C: */
  cc += (((tme_int64_t) (((tme_uint64_t) (src1 & src2)) | ((((tme_uint64_t) dst) ^ (((tme_uint64_t) 0) - 1)) & ((tme_uint64_t) (src1 | src2))))) < 0) * TME_SPARC64_CCR_XCC_C;

  /* set the condition codes: */
  ic->tme_sparc64_ireg_ccr = cc;

  TME_SPARC_INSN_OK;
}
5013
5014 /* this does a sparc64 "sub SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_sub,tme_uint64_t)5015 TME_SPARC_FORMAT3(tme_sparc64_sub, tme_uint64_t)
5016 {
5017 tme_uint64_t src1;
5018 tme_uint64_t src2;
5019 tme_uint64_t dst;
5020
5021 /* get the operands: */
5022 src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
5023 src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
5024
5025 /* perform the operation: */
5026 dst = src1 - src2;
5027
5028 /* store the destination: */
5029 TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
5030
5031 TME_SPARC_INSN_OK;
5032 }
5033
/* this does a sparc64 "subcc SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_subcc, tme_uint64_t)
{
  tme_uint64_t src1;
  tme_uint64_t src2;
  tme_uint64_t dst;
  tme_uint32_t cc;

  /* get the operands: */
  src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
  src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;

  /* perform the operation: */
  dst = src1 - src2;

  /* store the destination: */
  TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;

  /* NB: each condition code below is computed branch-free, as a
     boolean (zero or one) multiplied by the CCR bit to set.  the
     icc codes look only at the low 32 bits of the operands and
     result, and the xcc codes look at all 64 bits: */

  /* set Z if the destination is zero: */
  cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);

  /* set N if the destination is negative: */
  cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);

  /* if the operands are different signs, and the destination has
     a different sign from the first operand, set V: */
  cc += ((((tme_int32_t) ((src1 ^ src2) & (src1 ^ dst))) < 0) * TME_SPARC64_CCR_ICC_V);

  /* if src2 is greater than src1, set C (a borrow occurred): */
  cc += ((((tme_uint32_t) src2) > ((tme_uint32_t) src1))) * TME_SPARC64_CCR_ICC_C;

  /* set Z if the destination is zero: */
  cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);

  /* set N if the destination is negative: */
  cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);

  /* if the operands are different signs, and the destination has
     a different sign from the first operand, set V: */
  cc += ((((tme_int64_t) ((src1 ^ src2) & (src1 ^ dst))) < 0) * TME_SPARC64_CCR_XCC_V);

  /* if src2 is greater than src1, set C (a borrow occurred): */
  cc += ((((tme_uint64_t) src2) > ((tme_uint64_t) src1))) * TME_SPARC64_CCR_XCC_C;

  /* set the condition codes: */
  ic->tme_sparc64_ireg_ccr = cc;

  TME_SPARC_INSN_OK;
}
5083
5084 /* this does a sparc64 "or SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_or,tme_uint64_t)5085 TME_SPARC_FORMAT3(tme_sparc64_or, tme_uint64_t)
5086 {
5087 tme_uint64_t src1;
5088 tme_uint64_t src2;
5089 tme_uint64_t dst;
5090
5091 /* get the operands: */
5092 src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
5093 src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
5094
5095 /* perform the operation: */
5096 dst = src1 | src2;
5097
5098 /* store the destination: */
5099 TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
5100
5101 TME_SPARC_INSN_OK;
5102 }
5103
5104 /* this does a sparc64 "orcc SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_orcc,tme_uint64_t)5105 TME_SPARC_FORMAT3(tme_sparc64_orcc, tme_uint64_t)
5106 {
5107 tme_uint64_t src1;
5108 tme_uint64_t src2;
5109 tme_uint64_t dst;
5110 tme_uint32_t cc;
5111
5112 /* get the operands: */
5113 src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
5114 src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
5115
5116 /* perform the operation: */
5117 dst = src1 | src2;
5118
5119 /* store the destination: */
5120 TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
5121
5122 /* set Z if the destination is zero: */
5123 cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);
5124
5125 /* set N if the destination is negative: */
5126 cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);
5127
5128 /* set Z if the destination is zero: */
5129 cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);
5130
5131 /* set N if the destination is negative: */
5132 cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);
5133
5134 /* set the condition codes: */
5135 ic->tme_sparc64_ireg_ccr = cc;
5136
5137 TME_SPARC_INSN_OK;
5138 }
5139
5140 /* this does a sparc64 "orn SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_orn,tme_uint64_t)5141 TME_SPARC_FORMAT3(tme_sparc64_orn, tme_uint64_t)
5142 {
5143 tme_uint64_t src1;
5144 tme_uint64_t src2;
5145 tme_uint64_t dst;
5146
5147 /* get the operands: */
5148 src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
5149 src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
5150
5151 /* perform the operation: */
5152 dst = src1 | ~src2;
5153
5154 /* store the destination: */
5155 TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
5156
5157 TME_SPARC_INSN_OK;
5158 }
5159
5160 /* this does a sparc64 "orncc SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_orncc,tme_uint64_t)5161 TME_SPARC_FORMAT3(tme_sparc64_orncc, tme_uint64_t)
5162 {
5163 tme_uint64_t src1;
5164 tme_uint64_t src2;
5165 tme_uint64_t dst;
5166 tme_uint32_t cc;
5167
5168 /* get the operands: */
5169 src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
5170 src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
5171
5172 /* perform the operation: */
5173 dst = src1 | ~src2;
5174
5175 /* store the destination: */
5176 TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
5177
5178 /* set Z if the destination is zero: */
5179 cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);
5180
5181 /* set N if the destination is negative: */
5182 cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);
5183
5184 /* set Z if the destination is zero: */
5185 cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);
5186
5187 /* set N if the destination is negative: */
5188 cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);
5189
5190 /* set the condition codes: */
5191 ic->tme_sparc64_ireg_ccr = cc;
5192
5193 TME_SPARC_INSN_OK;
5194 }
5195
5196 /* this does a sparc64 "and SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_and,tme_uint64_t)5197 TME_SPARC_FORMAT3(tme_sparc64_and, tme_uint64_t)
5198 {
5199 tme_uint64_t src1;
5200 tme_uint64_t src2;
5201 tme_uint64_t dst;
5202
5203 /* get the operands: */
5204 src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
5205 src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
5206
5207 /* perform the operation: */
5208 dst = src1 & src2;
5209
5210 /* store the destination: */
5211 TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
5212
5213 TME_SPARC_INSN_OK;
5214 }
5215
5216 /* this does a sparc64 "andcc SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_andcc,tme_uint64_t)5217 TME_SPARC_FORMAT3(tme_sparc64_andcc, tme_uint64_t)
5218 {
5219 tme_uint64_t src1;
5220 tme_uint64_t src2;
5221 tme_uint64_t dst;
5222 tme_uint32_t cc;
5223
5224 /* get the operands: */
5225 src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
5226 src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
5227
5228 /* perform the operation: */
5229 dst = src1 & src2;
5230
5231 /* store the destination: */
5232 TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
5233
5234 /* set Z if the destination is zero: */
5235 cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);
5236
5237 /* set N if the destination is negative: */
5238 cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);
5239
5240 /* set Z if the destination is zero: */
5241 cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);
5242
5243 /* set N if the destination is negative: */
5244 cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);
5245
5246 /* set the condition codes: */
5247 ic->tme_sparc64_ireg_ccr = cc;
5248
5249 TME_SPARC_INSN_OK;
5250 }
5251
5252 /* this does a sparc64 "andn SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_andn,tme_uint64_t)5253 TME_SPARC_FORMAT3(tme_sparc64_andn, tme_uint64_t)
5254 {
5255 tme_uint64_t src1;
5256 tme_uint64_t src2;
5257 tme_uint64_t dst;
5258
5259 /* get the operands: */
5260 src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
5261 src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
5262
5263 /* perform the operation: */
5264 dst = src1 & ~src2;
5265
5266 /* store the destination: */
5267 TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
5268
5269 TME_SPARC_INSN_OK;
5270 }
5271
5272 /* this does a sparc64 "andncc SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_andncc,tme_uint64_t)5273 TME_SPARC_FORMAT3(tme_sparc64_andncc, tme_uint64_t)
5274 {
5275 tme_uint64_t src1;
5276 tme_uint64_t src2;
5277 tme_uint64_t dst;
5278 tme_uint32_t cc;
5279
5280 /* get the operands: */
5281 src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
5282 src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
5283
5284 /* perform the operation: */
5285 dst = src1 & ~src2;
5286
5287 /* store the destination: */
5288 TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
5289
5290 /* set Z if the destination is zero: */
5291 cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);
5292
5293 /* set N if the destination is negative: */
5294 cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);
5295
5296 /* set Z if the destination is zero: */
5297 cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);
5298
5299 /* set N if the destination is negative: */
5300 cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);
5301
5302 /* set the condition codes: */
5303 ic->tme_sparc64_ireg_ccr = cc;
5304
5305 TME_SPARC_INSN_OK;
5306 }
5307
5308 /* this does a sparc64 "xor SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_xor,tme_uint64_t)5309 TME_SPARC_FORMAT3(tme_sparc64_xor, tme_uint64_t)
5310 {
5311 tme_uint64_t src1;
5312 tme_uint64_t src2;
5313 tme_uint64_t dst;
5314
5315 /* get the operands: */
5316 src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
5317 src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
5318
5319 /* perform the operation: */
5320 dst = src1 ^ src2;
5321
5322 /* store the destination: */
5323 TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
5324
5325 TME_SPARC_INSN_OK;
5326 }
5327
5328 /* this does a sparc64 "xorcc SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_xorcc,tme_uint64_t)5329 TME_SPARC_FORMAT3(tme_sparc64_xorcc, tme_uint64_t)
5330 {
5331 tme_uint64_t src1;
5332 tme_uint64_t src2;
5333 tme_uint64_t dst;
5334 tme_uint32_t cc;
5335
5336 /* get the operands: */
5337 src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
5338 src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
5339
5340 /* perform the operation: */
5341 dst = src1 ^ src2;
5342
5343 /* store the destination: */
5344 TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
5345
5346 /* set Z if the destination is zero: */
5347 cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);
5348
5349 /* set N if the destination is negative: */
5350 cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);
5351
5352 /* set Z if the destination is zero: */
5353 cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);
5354
5355 /* set N if the destination is negative: */
5356 cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);
5357
5358 /* set the condition codes: */
5359 ic->tme_sparc64_ireg_ccr = cc;
5360
5361 TME_SPARC_INSN_OK;
5362 }
5363
5364 /* this does a sparc64 "xnor SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_xnor,tme_uint64_t)5365 TME_SPARC_FORMAT3(tme_sparc64_xnor, tme_uint64_t)
5366 {
5367 tme_uint64_t src1;
5368 tme_uint64_t src2;
5369 tme_uint64_t dst;
5370
5371 /* get the operands: */
5372 src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
5373 src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
5374
5375 /* perform the operation: */
5376 dst = src1 ^ ~src2;
5377
5378 /* store the destination: */
5379 TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
5380
5381 TME_SPARC_INSN_OK;
5382 }
5383
5384 /* this does a sparc64 "xnorcc SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_xnorcc,tme_uint64_t)5385 TME_SPARC_FORMAT3(tme_sparc64_xnorcc, tme_uint64_t)
5386 {
5387 tme_uint64_t src1;
5388 tme_uint64_t src2;
5389 tme_uint64_t dst;
5390 tme_uint32_t cc;
5391
5392 /* get the operands: */
5393 src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
5394 src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
5395
5396 /* perform the operation: */
5397 dst = src1 ^ ~src2;
5398
5399 /* store the destination: */
5400 TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
5401
5402 /* set Z if the destination is zero: */
5403 cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);
5404
5405 /* set N if the destination is negative: */
5406 cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);
5407
5408 /* set Z if the destination is zero: */
5409 cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);
5410
5411 /* set N if the destination is negative: */
5412 cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);
5413
5414 /* set the condition codes: */
5415 ic->tme_sparc64_ireg_ccr = cc;
5416
5417 TME_SPARC_INSN_OK;
5418 }
5419
5420 /* this does a sparc64 "addx SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_addx,tme_uint64_t)5421 TME_SPARC_FORMAT3(tme_sparc64_addx, tme_uint64_t)
5422 {
5423 tme_uint64_t src1;
5424 tme_uint64_t src2;
5425 tme_uint64_t dst;
5426
5427 /* get the operands: */
5428 src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
5429 src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
5430
5431 /* perform the operation: */
5432 dst = src1 + src2;
5433 dst += ((ic->tme_sparc64_ireg_ccr & TME_SPARC64_CCR_ICC_C) != 0);
5434
5435 /* store the destination: */
5436 TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
5437
5438 TME_SPARC_INSN_OK;
5439 }
5440
/* this does a sparc64 "addxcc SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_addxcc, tme_uint64_t)
{
  tme_uint64_t src1;
  tme_uint64_t src2;
  tme_uint64_t dst;
  tme_uint32_t cc;

  /* get the operands: */
  src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
  src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;

  /* perform the operation, adding in the icc carry bit: */
  dst = src1 + src2;
  dst += ((ic->tme_sparc64_ireg_ccr & TME_SPARC64_CCR_ICC_C) != 0);

  /* store the destination: */
  TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;

  /* NB: each condition code below is computed branch-free, as a
     boolean (zero or one) multiplied by the CCR bit to set.  the
     icc codes look only at the low 32 bits of the operands and
     result, and the xcc codes look at all 64 bits.  the V and C
     formulas are the same as for addcc; since dst already
     includes the carry-in, they still give the correct overflow
     and carry-out for the three-way sum: */

  /* set Z if the destination is zero: */
  cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);

  /* set N if the destination is negative: */
  cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);

  /* if the operands are the same sign, and the destination has
     a different sign, set V: */
  cc += ((((tme_int32_t) ((src2 ^ dst) & (src1 ^ (src2 ^ (((tme_uint32_t) 0) - 1))))) < 0) * TME_SPARC64_CCR_ICC_V);

  /* if src1 and src2 both have the high bit set, or if dst does
     not have the high bit set and either src1 or src2 does, set C: */
  cc += (((tme_int32_t) (((tme_uint32_t) (src1 & src2)) | ((((tme_uint32_t) dst) ^ (((tme_uint32_t) 0) - 1)) & ((tme_uint32_t) (src1 | src2))))) < 0) * TME_SPARC64_CCR_ICC_C;

  /* set Z if the destination is zero: */
  cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);

  /* set N if the destination is negative: */
  cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);

  /* if the operands are the same sign, and the destination has
     a different sign, set V: */
  cc += ((((tme_int64_t) ((src2 ^ dst) & (src1 ^ (src2 ^ (((tme_uint64_t) 0) - 1))))) < 0) * TME_SPARC64_CCR_XCC_V);

  /* if src1 and src2 both have the high bit set, or if dst does
     not have the high bit set and either src1 or src2 does, set C: */
  cc += (((tme_int64_t) (((tme_uint64_t) (src1 & src2)) | ((((tme_uint64_t) dst) ^ (((tme_uint64_t) 0) - 1)) & ((tme_uint64_t) (src1 | src2))))) < 0) * TME_SPARC64_CCR_XCC_C;

  /* set the condition codes: */
  ic->tme_sparc64_ireg_ccr = cc;

  TME_SPARC_INSN_OK;
}
5493
5494 /* this does a sparc64 "subx SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_subx,tme_uint64_t)5495 TME_SPARC_FORMAT3(tme_sparc64_subx, tme_uint64_t)
5496 {
5497 tme_uint64_t src1;
5498 tme_uint64_t src2;
5499 tme_uint64_t dst;
5500
5501 /* get the operands: */
5502 src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
5503 src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
5504
5505 /* perform the operation: */
5506 dst = src1 - src2;
5507 dst -= ((ic->tme_sparc64_ireg_ccr & TME_SPARC64_CCR_ICC_C) != 0);
5508
5509 /* store the destination: */
5510 TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
5511
5512 TME_SPARC_INSN_OK;
5513 }
5514
/* this does a sparc64 "subxcc SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_subxcc, tme_uint64_t)
{
  tme_uint64_t src1;
  tme_uint64_t src2;
  tme_uint64_t dst;
  tme_uint32_t cc;

  /* get the operands: */
  src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
  src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;

  /* perform the operation: subtract src2 and the incoming borrow
     (the current icc carry bit) from src1: */
  dst = src1 - src2;
  dst -= ((ic->tme_sparc64_ireg_ccr & TME_SPARC64_CCR_ICC_C) != 0);

  /* store the destination: */
  TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;

  /* the icc flags below are computed from the low 32 bits of the
     operands and result; the xcc flags from all 64 bits.  note that
     ic->tme_sparc64_ireg_ccr still holds the OLD condition codes
     until the final store at the bottom: */

  /* set Z if the destination is zero: */
  cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);

  /* set N if the destination is negative: */
  cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);

  /* if the operands are different signs, and the destination has
     a different sign from the first operand, set V (the sign bit of
     (src1 ^ src2) & (src1 ^ dst) encodes exactly that condition): */
  cc += ((((tme_int32_t) ((src1 ^ src2) & (src1 ^ dst))) < 0) * TME_SPARC64_CCR_ICC_V);

  /* if src2 is greater than src1, set C; when they are equal the
     consumed borrow (the old icc carry bit) decides: */
  cc += ((((tme_uint32_t) src2) > ((tme_uint32_t) src1)) || (((tme_uint32_t) src2) == ((tme_uint32_t) src1) && (ic->tme_sparc64_ireg_ccr & TME_SPARC64_CCR_ICC_C))) * TME_SPARC64_CCR_ICC_C;

  /* set Z if the destination is zero: */
  cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);

  /* set N if the destination is negative: */
  cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);

  /* if the operands are different signs, and the destination has
     a different sign from the first operand, set V: */
  cc += ((((tme_int64_t) ((src1 ^ src2) & (src1 ^ dst))) < 0) * TME_SPARC64_CCR_XCC_V);

  /* if src2 is greater than src1, set C; again, on equality the old
     icc carry bit decides: */
  cc += ((((tme_uint64_t) src2) > ((tme_uint64_t) src1)) || (((tme_uint64_t) src2) == ((tme_uint64_t) src1) && (ic->tme_sparc64_ireg_ccr & TME_SPARC64_CCR_ICC_C))) * TME_SPARC64_CCR_XCC_C;

  /* set the condition codes: */
  ic->tme_sparc64_ireg_ccr = cc;

  TME_SPARC_INSN_OK;
}
5565
/* this does a sparc64 "taddcc SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_taddcc, tme_uint64_t)
{
  tme_uint64_t src1;
  tme_uint64_t src2;
  tme_uint64_t dst;
  tme_uint32_t cc;

  /* get the operands: */
  src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
  src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;

  /* perform the operation: */
  dst = src1 + src2;

  /* store the destination: */
  TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;

  /* the icc flags below are computed from the low 32 bits, the xcc
     flags from all 64 bits: */

  /* set Z if the destination is zero: */
  cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);

  /* set N if the destination is negative: */
  cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);

  /* if the operands are the same sign, and the destination has
     a different sign, set V.  (src2 ^ (~0)) is ~src2, so the mask
     src1 ^ ~src2 has its sign bit set exactly when src1 and src2
     share a sign: */
  cc += ((((tme_int32_t) ((src2 ^ dst) & (src1 ^ (src2 ^ (((tme_uint32_t) 0) - 1))))) < 0) * TME_SPARC64_CCR_ICC_V);

  /* if src1 and src2 both have the high bit set, or if dst does
     not have the high bit set and either src1 or src2 does, set C
     (the classic carry-out formula (src1 & src2) | (~dst & (src1 | src2))): */
  cc += (((tme_int32_t) (((tme_uint32_t) (src1 & src2)) | ((((tme_uint32_t) dst) ^ (((tme_uint32_t) 0) - 1)) & ((tme_uint32_t) (src1 | src2))))) < 0) * TME_SPARC64_CCR_ICC_C;

  /* set Z if the destination is zero: */
  cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);

  /* set N if the destination is negative: */
  cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);

  /* if the operands are the same sign, and the destination has
     a different sign, set V: */
  cc += ((((tme_int64_t) ((src2 ^ dst) & (src1 ^ (src2 ^ (((tme_uint64_t) 0) - 1))))) < 0) * TME_SPARC64_CCR_XCC_V);

  /* if src1 and src2 both have the high bit set, or if dst does
     not have the high bit set and either src1 or src2 does, set C: */
  cc += (((tme_int64_t) (((tme_uint64_t) (src1 & src2)) | ((((tme_uint64_t) dst) ^ (((tme_uint64_t) 0) - 1)) & ((tme_uint64_t) (src1 | src2))))) < 0) * TME_SPARC64_CCR_XCC_C;

  /* set V if bits zero or one of src1 or src2 are set (the tag
     check); this is ORed in because the arithmetic-overflow test
     above may already have set icc V: */
  cc |= ((((src1 | src2) & 3) != 0) * TME_SPARC64_CCR_ICC_V);

  /* set the condition codes: */
  ic->tme_sparc64_ireg_ccr = cc;

  TME_SPARC_INSN_OK;
}
5620
/* this does a sparc64 "taddcctv SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_taddcctv, tme_uint64_t)
{
  tme_uint64_t src1;
  tme_uint64_t src2;
  tme_uint64_t dst;
  tme_uint32_t cc;

  /* get the operands: */
  src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
  src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;

  /* perform the operation.  unlike taddcc, the destination is NOT
     stored yet: the tag-overflow check below must happen first: */
  dst = src1 + src2;

  /* set Z if the destination is zero: */
  cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);

  /* set N if the destination is negative: */
  cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);

  /* if the operands are the same sign, and the destination has
     a different sign, set V ((src2 ^ ~0) is ~src2, so the mask is
     the signed-add overflow test): */
  cc += ((((tme_int32_t) ((src2 ^ dst) & (src1 ^ (src2 ^ (((tme_uint32_t) 0) - 1))))) < 0) * TME_SPARC64_CCR_ICC_V);

  /* if src1 and src2 both have the high bit set, or if dst does
     not have the high bit set and either src1 or src2 does, set C: */
  cc += (((tme_int32_t) (((tme_uint32_t) (src1 & src2)) | ((((tme_uint32_t) dst) ^ (((tme_uint32_t) 0) - 1)) & ((tme_uint32_t) (src1 | src2))))) < 0) * TME_SPARC64_CCR_ICC_C;

  /* set Z if the destination is zero: */
  cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);

  /* set N if the destination is negative: */
  cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);

  /* if the operands are the same sign, and the destination has
     a different sign, set V: */
  cc += ((((tme_int64_t) ((src2 ^ dst) & (src1 ^ (src2 ^ (((tme_uint64_t) 0) - 1))))) < 0) * TME_SPARC64_CCR_XCC_V);

  /* if src1 and src2 both have the high bit set, or if dst does
     not have the high bit set and either src1 or src2 does, set C: */
  cc += (((tme_int64_t) (((tme_uint64_t) (src1 & src2)) | ((((tme_uint64_t) dst) ^ (((tme_uint64_t) 0) - 1)) & ((tme_uint64_t) (src1 | src2))))) < 0) * TME_SPARC64_CCR_XCC_C;

  /* set V if bits zero or one of src1 or src2 are set (the tag
     check); ORed in since icc V may already be set: */
  cc |= ((((src1 | src2) & 3) != 0) * TME_SPARC64_CCR_ICC_V);

  /* trap on a tagged overflow.  the destination register and the
     condition codes are only updated below, i.e. not on the trap
     path (this relies on tme_sparc64_trap not returning here --
     NOTE(review): confirm against its definition): */
  if (cc & TME_SPARC64_CCR_ICC_V) {
    tme_sparc64_trap(ic, TME_SPARC64_TRAP_tag_overflow);
  }
  /* store the destination: */
  TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;

  /* set the condition codes: */
  ic->tme_sparc64_ireg_ccr = cc;

  TME_SPARC_INSN_OK;
}
5679
/* this does a sparc64 "tsubcc SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_tsubcc, tme_uint64_t)
{
  tme_uint64_t src1;
  tme_uint64_t src2;
  tme_uint64_t dst;
  tme_uint32_t cc;

  /* get the operands: */
  src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
  src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;

  /* perform the operation: */
  dst = src1 - src2;

  /* store the destination: */
  TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;

  /* the icc flags below are computed from the low 32 bits, the xcc
     flags from all 64 bits: */

  /* set Z if the destination is zero: */
  cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);

  /* set N if the destination is negative: */
  cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);

  /* if the operands are different signs, and the destination has
     a different sign from the first operand, set V (the sign bit of
     (src1 ^ src2) & (src1 ^ dst) is the signed-subtract overflow test): */
  cc += ((((tme_int32_t) ((src1 ^ src2) & (src1 ^ dst))) < 0) * TME_SPARC64_CCR_ICC_V);

  /* if src2 is greater than src1, set C (an unsigned borrow out): */
  cc += ((((tme_uint32_t) src2) > ((tme_uint32_t) src1))) * TME_SPARC64_CCR_ICC_C;

  /* set Z if the destination is zero: */
  cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);

  /* set N if the destination is negative: */
  cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);

  /* if the operands are different signs, and the destination has
     a different sign from the first operand, set V: */
  cc += ((((tme_int64_t) ((src1 ^ src2) & (src1 ^ dst))) < 0) * TME_SPARC64_CCR_XCC_V);

  /* if src2 is greater than src1, set C: */
  cc += ((((tme_uint64_t) src2) > ((tme_uint64_t) src1))) * TME_SPARC64_CCR_XCC_C;

  /* set V if bits zero or one of src1 or src2 are set (the tag
     check); ORed in because the overflow test above may already
     have set icc V: */
  cc |= ((((src1 | src2) & 3) != 0) * TME_SPARC64_CCR_ICC_V);

  /* set the condition codes: */
  ic->tme_sparc64_ireg_ccr = cc;

  TME_SPARC_INSN_OK;
}
5732
/* this does a sparc64 "tsubcctv SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_tsubcctv, tme_uint64_t)
{
  tme_uint64_t src1;
  tme_uint64_t src2;
  tme_uint64_t dst;
  tme_uint32_t cc;

  /* get the operands: */
  src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
  src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;

  /* perform the operation.  unlike tsubcc, the destination is NOT
     stored yet: the tag-overflow check below must happen first: */
  dst = src1 - src2;

  /* set Z if the destination is zero: */
  cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);

  /* set N if the destination is negative: */
  cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);

  /* if the operands are different signs, and the destination has
     a different sign from the first operand, set V: */
  cc += ((((tme_int32_t) ((src1 ^ src2) & (src1 ^ dst))) < 0) * TME_SPARC64_CCR_ICC_V);

  /* if src2 is greater than src1, set C (an unsigned borrow out): */
  cc += ((((tme_uint32_t) src2) > ((tme_uint32_t) src1))) * TME_SPARC64_CCR_ICC_C;

  /* set Z if the destination is zero: */
  cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);

  /* set N if the destination is negative: */
  cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);

  /* if the operands are different signs, and the destination has
     a different sign from the first operand, set V: */
  cc += ((((tme_int64_t) ((src1 ^ src2) & (src1 ^ dst))) < 0) * TME_SPARC64_CCR_XCC_V);

  /* if src2 is greater than src1, set C: */
  cc += ((((tme_uint64_t) src2) > ((tme_uint64_t) src1))) * TME_SPARC64_CCR_XCC_C;

  /* set V if bits zero or one of src1 or src2 are set (the tag
     check); ORed in since icc V may already be set: */
  cc |= ((((src1 | src2) & 3) != 0) * TME_SPARC64_CCR_ICC_V);

  /* trap on a tagged overflow.  the destination register and the
     condition codes are only updated below, i.e. not on the trap
     path (this relies on tme_sparc64_trap not returning here --
     NOTE(review): confirm against its definition): */
  if (cc & TME_SPARC64_CCR_ICC_V) {
    tme_sparc64_trap(ic, TME_SPARC64_TRAP_tag_overflow);
  }
  /* store the destination: */
  TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;

  /* set the condition codes: */
  ic->tme_sparc64_ireg_ccr = cc;

  TME_SPARC_INSN_OK;
}
5789
5790 /* this does a sparc64 "umul SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_umul,tme_uint64_t)5791 TME_SPARC_FORMAT3(tme_sparc64_umul, tme_uint64_t)
5792 {
5793 tme_uint32_t src1;
5794 tme_uint32_t src2;
5795 tme_uint64_t dst;
5796 tme_uint64_t val64;
5797
5798 /* get the operands: */
5799 src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
5800 src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
5801
5802 /* perform the operation: */
5803 val64 = (((tme_uint64_t) src1) * src2);
5804 ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y << 1) = (((tme_uint64_t) val64) >> 32);
5805 dst = ((tme_uint64_t) val64);
5806
5807 /* store the destination: */
5808 TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
5809
5810 TME_SPARC_INSN_OK;
5811 }
5812
5813 /* this does a sparc64 "umulcc SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_umulcc,tme_uint64_t)5814 TME_SPARC_FORMAT3(tme_sparc64_umulcc, tme_uint64_t)
5815 {
5816 tme_uint32_t src1;
5817 tme_uint32_t src2;
5818 tme_uint64_t dst;
5819 tme_uint64_t val64;
5820 tme_uint32_t cc;
5821
5822 /* get the operands: */
5823 src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
5824 src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
5825
5826 /* perform the operation: */
5827 val64 = (((tme_uint64_t) src1) * src2);
5828 ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y << 1) = (((tme_uint64_t) val64) >> 32);
5829 dst = ((tme_uint64_t) val64);
5830
5831 /* store the destination: */
5832 TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
5833
5834 /* set Z if the destination is zero: */
5835 cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);
5836
5837 /* set N if the destination is negative: */
5838 cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);
5839
5840 /* set Z if the destination is zero: */
5841 cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);
5842
5843 /* set N if the destination is negative: */
5844 cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);
5845
5846 /* set the condition codes: */
5847 ic->tme_sparc64_ireg_ccr = cc;
5848
5849 TME_SPARC_INSN_OK;
5850 }
5851
5852 /* this does a sparc64 "smul SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_smul,tme_uint64_t)5853 TME_SPARC_FORMAT3(tme_sparc64_smul, tme_uint64_t)
5854 {
5855 tme_int32_t src1;
5856 tme_int32_t src2;
5857 tme_int64_t dst;
5858 tme_int64_t val64;
5859
5860 /* get the operands: */
5861 src1 = (tme_int64_t) TME_SPARC_FORMAT3_RS1;
5862 src2 = (tme_int64_t) TME_SPARC_FORMAT3_RS2;
5863
5864 /* perform the operation: */
5865 val64 = (((tme_int64_t) src1) * src2);
5866 ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y << 1) = (((tme_uint64_t) val64) >> 32);
5867 dst = ((tme_int64_t) val64);
5868
5869 /* store the destination: */
5870 TME_SPARC_FORMAT3_RD = (tme_int64_t) dst;
5871
5872 TME_SPARC_INSN_OK;
5873 }
5874
5875 /* this does a sparc64 "smulcc SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_smulcc,tme_uint64_t)5876 TME_SPARC_FORMAT3(tme_sparc64_smulcc, tme_uint64_t)
5877 {
5878 tme_int32_t src1;
5879 tme_int32_t src2;
5880 tme_int64_t dst;
5881 tme_int64_t val64;
5882 tme_uint32_t cc;
5883
5884 /* get the operands: */
5885 src1 = (tme_int64_t) TME_SPARC_FORMAT3_RS1;
5886 src2 = (tme_int64_t) TME_SPARC_FORMAT3_RS2;
5887
5888 /* perform the operation: */
5889 val64 = (((tme_int64_t) src1) * src2);
5890 ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y << 1) = (((tme_uint64_t) val64) >> 32);
5891 dst = ((tme_int64_t) val64);
5892
5893 /* store the destination: */
5894 TME_SPARC_FORMAT3_RD = (tme_int64_t) dst;
5895
5896 /* set Z if the destination is zero: */
5897 cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);
5898
5899 /* set N if the destination is negative: */
5900 cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);
5901
5902 /* set Z if the destination is zero: */
5903 cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);
5904
5905 /* set N if the destination is negative: */
5906 cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);
5907
5908 /* set the condition codes: */
5909 ic->tme_sparc64_ireg_ccr = cc;
5910
5911 TME_SPARC_INSN_OK;
5912 }
5913
5914 /* this does a sparc64 "udiv SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_udiv,tme_uint64_t)5915 TME_SPARC_FORMAT3(tme_sparc64_udiv, tme_uint64_t)
5916 {
5917 tme_uint32_t src1;
5918 tme_uint32_t src2;
5919 tme_uint32_t dst;
5920 tme_uint64_t val64;
5921
5922 /* get the operands: */
5923 src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
5924 src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
5925
5926 /* perform the operation: */
5927 val64 = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y << 1);
5928 val64 = (val64 << 32) + (tme_uint32_t) src1;
5929 if (__tme_predict_false(src2 == 0)) {
5930 tme_sparc64_trap(ic, TME_SPARC64_TRAP_division_by_zero);
5931 }
5932 val64 /= src2;
5933 dst = (tme_uint32_t) val64;
5934
5935 /* if the division overflowed: */
5936 if (dst != val64) {
5937
5938 /* return the largest appropriate value: */
5939 dst = 0xffffffff;
5940 }
5941
5942 /* store the destination: */
5943 TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
5944
5945 TME_SPARC_INSN_OK;
5946 }
5947
5948 /* this does a sparc64 "udivcc SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_udivcc,tme_uint64_t)5949 TME_SPARC_FORMAT3(tme_sparc64_udivcc, tme_uint64_t)
5950 {
5951 tme_uint32_t src1;
5952 tme_uint32_t src2;
5953 tme_uint32_t dst;
5954 tme_uint64_t val64;
5955 tme_uint32_t cc;
5956
5957 /* get the operands: */
5958 src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
5959 src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
5960
5961 /* perform the operation: */
5962 val64 = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y << 1);
5963 val64 = (val64 << 32) + (tme_uint32_t) src1;
5964 if (__tme_predict_false(src2 == 0)) {
5965 tme_sparc64_trap(ic, TME_SPARC64_TRAP_division_by_zero);
5966 }
5967 val64 /= src2;
5968 dst = (tme_uint32_t) val64;
5969
5970 /* if the division overflowed: */
5971 if (dst != val64) {
5972
5973 /* return the largest appropriate value: */
5974 dst = 0xffffffff;
5975
5976 /* set V: */
5977 cc = TME_SPARC64_CCR_ICC_V;
5978 }
5979
5980 /* otherwise, the division didn't overflow: */
5981 else {
5982
5983 /* clear V: */
5984 cc = !TME_SPARC64_CCR_ICC_V;
5985 }
5986
5987 /* store the destination: */
5988 TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
5989
5990 /* set Z if the destination is zero: */
5991 cc += ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);
5992
5993 /* set N if the destination is negative: */
5994 cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);
5995
5996 /* set Z if the destination is zero: */
5997 cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);
5998
5999 /* set the condition codes: */
6000 ic->tme_sparc64_ireg_ccr = cc;
6001
6002 TME_SPARC_INSN_OK;
6003 }
6004
6005 /* this does a sparc64 "sdiv SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_sdiv,tme_uint64_t)6006 TME_SPARC_FORMAT3(tme_sparc64_sdiv, tme_uint64_t)
6007 {
6008 tme_int32_t src1;
6009 tme_int32_t src2;
6010 tme_int64_t dst;
6011 tme_int64_t val64;
6012
6013 /* get the operands: */
6014 src1 = (tme_int64_t) TME_SPARC_FORMAT3_RS1;
6015 src2 = (tme_int64_t) TME_SPARC_FORMAT3_RS2;
6016
6017 /* perform the operation: */
6018 val64 = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y << 1);
6019 val64 = (val64 << 32) + (tme_uint32_t) src1;
6020 if (__tme_predict_false(src2 == 0)) {
6021 tme_sparc64_trap(ic, TME_SPARC64_TRAP_division_by_zero);
6022 }
6023 val64 /= src2;
6024 dst = (tme_int32_t) val64;
6025
6026 /* if the division overflowed: */
6027 if (dst != val64) {
6028
6029 /* return the largest appropriate value: */
6030 dst = (tme_int32_t) ((val64 < 0) + (tme_uint32_t) 0x7fffffff);
6031 }
6032
6033 /* store the destination: */
6034 TME_SPARC_FORMAT3_RD = (tme_int64_t) dst;
6035
6036 TME_SPARC_INSN_OK;
6037 }
6038
6039 /* this does a sparc64 "sdivcc SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_sdivcc,tme_uint64_t)6040 TME_SPARC_FORMAT3(tme_sparc64_sdivcc, tme_uint64_t)
6041 {
6042 tme_int32_t src1;
6043 tme_int32_t src2;
6044 tme_int64_t dst;
6045 tme_int64_t val64;
6046 tme_uint32_t cc;
6047
6048 /* get the operands: */
6049 src1 = (tme_int64_t) TME_SPARC_FORMAT3_RS1;
6050 src2 = (tme_int64_t) TME_SPARC_FORMAT3_RS2;
6051
6052 /* perform the operation: */
6053 val64 = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y << 1);
6054 val64 = (val64 << 32) + (tme_uint32_t) src1;
6055 if (__tme_predict_false(src2 == 0)) {
6056 tme_sparc64_trap(ic, TME_SPARC64_TRAP_division_by_zero);
6057 }
6058 val64 /= src2;
6059 dst = (tme_int32_t) val64;
6060
6061 /* if the division overflowed: */
6062 if (dst != val64) {
6063
6064 /* return the largest appropriate value: */
6065 dst = (tme_int32_t) ((val64 < 0) + (tme_uint32_t) 0x7fffffff);
6066
6067 /* set V: */
6068 cc = TME_SPARC64_CCR_ICC_V;
6069 }
6070
6071 /* otherwise, the division didn't overflow: */
6072 else {
6073
6074 /* clear V: */
6075 cc = !TME_SPARC64_CCR_ICC_V;
6076 }
6077
6078 /* store the destination: */
6079 TME_SPARC_FORMAT3_RD = (tme_int64_t) dst;
6080
6081 /* set Z if the destination is zero: */
6082 cc += ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);
6083
6084 /* set N if the destination is negative: */
6085 cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);
6086
6087 /* set Z if the destination is zero: */
6088 cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);
6089
6090 /* set N if the destination is negative: */
6091 cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);
6092
6093 /* set the condition codes: */
6094 ic->tme_sparc64_ireg_ccr = cc;
6095
6096 TME_SPARC_INSN_OK;
6097 }
6098
6099 /* the sparc64 sll function: */
TME_SPARC_FORMAT3(tme_sparc64_sll,tme_uint64_t)6100 TME_SPARC_FORMAT3(tme_sparc64_sll, tme_uint64_t)
6101 {
6102 tme_uint64_t dst;
6103 unsigned int count;
6104
6105 /* get the value and the shift count: */
6106 dst = TME_SPARC_FORMAT3_RS1;
6107 count = TME_SPARC_FORMAT3_RS2;
6108
6109 /* if the X bit is clear: */
6110 if ((TME_SPARC_INSN & TME_BIT(12)) == 0) {
6111
6112 /* limit the count: */
6113 count %= 32;
6114 }
6115
6116 /* limit the count: */
6117 count %= 64;
6118
6119 /* do the shift: */
6120 #if defined(SHIFTMAX_INT64_T) && (SHIFTMAX_INT64_T < (64 - 1))
6121 #error "cannot do full shifts of a tme_int64_t"
6122 #endif /* (SHIFTMAX_INT64_T < (64 - 1)) */
6123 dst <<= count;
6124
6125 /* store the destination: */
6126 TME_SPARC_FORMAT3_RD = dst;
6127
6128 TME_SPARC_INSN_OK;
6129 }
6130
6131 /* the sparc64 srl function: */
TME_SPARC_FORMAT3(tme_sparc64_srl,tme_uint64_t)6132 TME_SPARC_FORMAT3(tme_sparc64_srl, tme_uint64_t)
6133 {
6134 tme_uint64_t dst;
6135 unsigned int count;
6136
6137 /* get the value and the shift count: */
6138 dst = TME_SPARC_FORMAT3_RS1;
6139 count = TME_SPARC_FORMAT3_RS2;
6140
6141 /* if the X bit is clear: */
6142 if ((TME_SPARC_INSN & TME_BIT(12)) == 0) {
6143
6144 /* limit the count: */
6145 count %= 32;
6146
6147 /* clip the value to 32 bits: */
6148 dst = (tme_uint32_t) dst;
6149 }
6150
6151 /* limit the count: */
6152 count %= 64;
6153
6154 /* do the shift: */
6155 #if defined(SHIFTMAX_INT64_T) && (SHIFTMAX_INT64_T < (64 - 1))
6156 #error "cannot do full shifts of a tme_int64_t"
6157 #endif /* (SHIFTMAX_INT64_T < (64 - 1)) */
6158 dst >>= count;
6159
6160 /* store the destination: */
6161 TME_SPARC_FORMAT3_RD = dst;
6162
6163 TME_SPARC_INSN_OK;
6164 }
6165
/* the sparc64 sra function: */
TME_SPARC_FORMAT3(tme_sparc64_sra, tme_uint64_t)
{
  /* the value is kept signed so the shift (or the division fallback
     below) replicates the sign bit: */
  tme_int64_t dst;
  unsigned int count;

  /* get the value and the shift count: */
  dst = TME_SPARC_FORMAT3_RS1;
  count = TME_SPARC_FORMAT3_RS2;

  /* if the X bit is clear, this is the 32-bit form: only five bits
     of the count are used, and the value is first sign-extended
     from its low 32 bits: */
  if ((TME_SPARC_INSN & TME_BIT(12)) == 0) {

    /* limit the count: */
    count %= 32;

    /* clip the value to 32 bits: */
    dst = (tme_int32_t) dst;
  }

  /* limit the count: */
  count %= 64;

  /* do the shift: */
#ifdef SHIFTSIGNED_INT64_T
#if defined(SHIFTMAX_INT64_T) && (SHIFTMAX_INT64_T < (64 - 1))
#error "cannot do full shifts of a tme_int64_t"
#endif /* (SHIFTMAX_INT64_T < (64 - 1)) */
  dst >>= count;
#else /* !SHIFTSIGNED_INT64_T */
  /* on hosts where >> of a negative signed value is not an
     arithmetic shift, emulate one bit of shifting per iteration:
     clearing the low bit first makes the operand even, so the
     truncating division by two is exact and matches an arithmetic
     shift for both positive and negative values: */
  for (; count-- > 0; ) {
    dst = (dst & ~((tme_int64_t) 1)) / 2;
  }
#endif /* !SHIFTSIGNED_INT64_T */

  /* store the destination: */
  TME_SPARC_FORMAT3_RD = dst;

  TME_SPARC_INSN_OK;
}
6206
/* this does a sparc64 ldb: loads one byte at [rs1 + rs2] into rd,
   zero- or sign-extending it to 64 bits.  it first tries a fast
   in-memory transfer through a busied DTLB entry, falling back to
   the generic slow load path when the entry does not apply: */
TME_SPARC_FORMAT3(tme_sparc64_ldb, tme_uint64_t)
{
  tme_uint64_t address;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  const tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint8_t value8;
  tme_uint32_t value32;

  /* get the address, masked to the current addressing width: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
  address &= ic->tme_sparc_address_mask;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer (recode verification
     support); if this was a replay, the instruction is done: */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
			      ic->tme_sparc_asi_mask_data, address,
			      (TME_RECODE_SIZE_8
			       | TME_SPARC_RECODE_VERIFY_MEM_LOAD));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* assume that no DTLB ASI mask flags will require a slow load: */
  asi_mask_flags_slow = 0;

  /* a ldb without a no-fault ASI traps on no-fault addresses: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;

  /* get and busy the DTLB entry hashed from the default memory
     context and the address: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_read;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = ic->tme_sparc_memory_context_default;
  }

  /* we must call the slow load function if: */
  if (__tme_predict_false(

			  /* the DTLB entry is invalid: */
			  tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

			  /* the DTLB entry does not match the context: */
			  || dtlb_context != ic->tme_sparc_memory_context_default

			  /* the DTLB entry does not cover the needed addresses
			     ((8 / 8) - 1 is the transfer size in bytes, minus one): */
			  || (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
			  || ((address + ((8 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)

			  /* the DTLB entry does not cover the needed address space: */
			  || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))

			  /* the DTLB entry can't be used for a fast ldb: */
			  || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

			  /* the DTLB entry does not allow fast transfers: */
			  || (memory == TME_EMULATOR_OFF_UNDEF)

			  )) {

    /* call the slow load function (it may return a new memory base
       for the fast read below): */
    memory = tme_sparc64_ls(ic,
			    address,
			    &TME_SPARC_FORMAT3_RD,
			    (TME_SPARC_LSINFO_OP_LD
			     | (8 / 8)));
  }

  /* get the byte order of this transfer.  note that endian_little is
     not read again below: a single-byte transfer has no byte order,
     so only the invert-endian sanity check matters here: */
  endian_little = ic->tme_sparc_asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
  if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
    if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
      endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
    }
    else {
      assert (FALSE);
    }
  }

  /* do the fast transfer: */
  memory += address;
  value8 = tme_memory_bus_read8((const tme_shared tme_uint8_t *) memory, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t), sizeof(tme_uint64_t));

  /* possibly sign-extend the loaded value (instruction bit 22
     selects the sign-extending form; presumably ldsb vs ldub --
     NOTE(review): confirm against the decoder): */
  value32 = value8;
  if (TME_SPARC_INSN & TME_BIT(22)) {
    value32 = (tme_uint32_t) (tme_int32_t) (tme_int8_t) value32;
  }
  TME_SPARC_FORMAT3_RD = (tme_uint64_t) (tme_int64_t) (tme_int32_t) value32;

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  /* log the value loaded: */
  tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
  tme_sparc_log(ic, 1000, TME_OK,
		(TME_SPARC_LOG_HANDLE(ic),
		 _("ldb	0x%02x:0x%016" TME_PRIx64 ":	0x%02" TME_PRIx64),
		 TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
		 address,
		 TME_SPARC_FORMAT3_RD));

  TME_SPARC_INSN_OK;
}
6324
/* this does a sparc64 stb: stores the low byte of rd at [rs1 + rs2].
   it first tries a fast in-memory transfer through a busied DTLB
   entry, falling back to the generic slow store path when the entry
   does not apply: */
TME_SPARC_FORMAT3(tme_sparc64_stb, tme_uint64_t)
{
  tme_uint64_t address;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint8_t value8;

  /* get the address, masked to the current addressing width: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
  address &= ic->tme_sparc_address_mask;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer (recode verification
     support); if this was a replay, the instruction is done: */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
			      ic->tme_sparc_asi_mask_data, address,
			      (TME_RECODE_SIZE_8
			       | TME_SPARC_RECODE_VERIFY_MEM_STORE));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* log the value stored: */
  tme_sparc_log(ic, 1000, TME_OK,
		(TME_SPARC_LOG_HANDLE(ic),
		 _("stb	0x%02x:0x%016" TME_PRIx64 ":	0x%02" TME_PRIx8),
		 TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
		 address,
		 (tme_uint8_t) TME_SPARC_FORMAT3_RD));

  /* assume that no DTLB ASI mask flags will require a slow store: */
  asi_mask_flags_slow = 0;

  /* a stb traps on no-fault addresses: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;

  /* get and busy the DTLB entry hashed from the default memory
     context and the address: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_write;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = ic->tme_sparc_memory_context_default;
  }

  /* we must call the slow store function if: */
  if (__tme_predict_false(

			  /* the DTLB entry is invalid: */
			  tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

			  /* the DTLB entry does not match the context: */
			  || dtlb_context != ic->tme_sparc_memory_context_default

			  /* the DTLB entry does not cover the needed addresses
			     ((8 / 8) - 1 is the transfer size in bytes, minus one): */
			  || (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
			  || ((address + ((8 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)

			  /* the DTLB entry does not cover the needed address space: */
			  || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))

			  /* the DTLB entry can't be used for a fast stb: */
			  || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

			  /* the DTLB entry does not allow fast transfers: */
			  || (memory == TME_EMULATOR_OFF_UNDEF)

			  )) {

    /* call the slow store function: */
    memory = tme_sparc64_ls(ic,
			    address,
			    &TME_SPARC_FORMAT3_RD,
			    (TME_SPARC_LSINFO_OP_ST
			     | (8 / 8)));

    /* if the slow store function did the transfer itself, there is
       nothing left to do but unbusy the TLB entry: */
    if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {

      /* unbusy the TLB entry; */
      tme_sparc_tlb_unbusy(dtlb);

      TME_SPARC_INSN_OK;
    }
  }

  /* get the byte order of this transfer.  note that endian_little is
     not read again below: a single-byte transfer has no byte order,
     so only the invert-endian sanity check matters here: */
  endian_little = ic->tme_sparc_asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
  if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
    if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
      endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
    }
    else {
      assert (FALSE);
    }
  }

  /* do the fast transfer of the low byte of rd: */
  memory += address;
  value8 = TME_SPARC_FORMAT3_RD;
  tme_memory_bus_write8((tme_shared tme_uint8_t *) memory, value8, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t), sizeof(tme_uint64_t));

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  TME_SPARC_INSN_OK;
}
6443
/* this does a sparc64 ldh: load a 16-bit halfword into rd.  the fast
   path reads host memory through a busied DTLB entry; any condition
   the fast path cannot handle (invalid entry, context mismatch,
   address range or ASI not covered, slow-only ASI flags, no fast
   mapping, misaligned address) routes the transfer through the slow
   load function tme_sparc64_ls(): */
TME_SPARC_FORMAT3(tme_sparc64_ldh, tme_uint64_t)
{
  tme_uint64_t address;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  const tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint16_t value16;
  tme_uint32_t value32;

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
  address &= ic->tme_sparc_address_mask;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer: */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
			      ic->tme_sparc_asi_mask_data, address,
			      (TME_RECODE_SIZE_16
			       | TME_SPARC_RECODE_VERIFY_MEM_LOAD));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* assume that no DTLB ASI mask flags will require a slow load: */
  asi_mask_flags_slow = 0;

  /* a ldh without a no-fault ASI traps on no-fault addresses: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;

  /* get and busy the DTLB entry; it stays busied until the transfer
     (fast or slow) has completed: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_read;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = ic->tme_sparc_memory_context_default;
  }

  /* we must call the slow load function if: */
  if (__tme_predict_false(

			  /* the DTLB entry is invalid: */
			  tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

			  /* the DTLB entry does not match the context: */
			  || dtlb_context != ic->tme_sparc_memory_context_default

			  /* the DTLB entry does not cover the needed addresses: */
			  || (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
			  || ((address + ((16 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)

			  /* the DTLB entry does not cover the needed address space: */
			  || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))

			  /* the DTLB entry can't be used for a fast ldh: */
			  || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

			  /* the DTLB entry does not allow fast transfers: */
			  || (memory == TME_EMULATOR_OFF_UNDEF)

			  /* the address is misaligned: */
			  || ((address % (16 / 8)) != 0)

			  )) {

    /* call the slow load function (the low bits of the lsinfo word
       encode the transfer size in bytes); NOTE(review): the returned
       value is assumed to remain a readable emulator offset for the
       fast transfer below — confirm against tme_sparc64_ls: */
    memory = tme_sparc64_ls(ic,
			    address,
			    &TME_SPARC_FORMAT3_RD,
			    (TME_SPARC_LSINFO_OP_LD
			     | (16 / 8)));
  }

  /* get the byte order of this transfer; a TLB entry that requests
     inverted byte order is only legal when the memory implementation
     advertises invert-endian support: */
  endian_little = ic->tme_sparc_asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
  if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
    if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
      endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
    }
    else {
      assert (FALSE);
    }
  }

  /* do the fast transfer: */
  memory += address;
  value16 = tme_memory_bus_read16((const tme_shared tme_uint16_t *) memory, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint16_t), sizeof(tme_uint64_t));
  value16 = (endian_little ? tme_letoh_u16(value16) : tme_betoh_u16(value16));

  /* possibly sign-extend the loaded value: bit 22 of the instruction
     set selects the sign-extending (ldsh) form; clear selects the
     zero-extending (lduh) form: */
  value32 = value16;
  if (TME_SPARC_INSN & TME_BIT(22)) {
    value32 = (tme_uint32_t) (tme_int32_t) (tme_int16_t) value32;
  }
  TME_SPARC_FORMAT3_RD = (tme_uint64_t) (tme_int64_t) (tme_int32_t) value32;

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  /* log the value loaded: */
  tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
  tme_sparc_log(ic, 1000, TME_OK,
		(TME_SPARC_LOG_HANDLE(ic),
		 _("ldh	0x%02x:0x%016" TME_PRIx64 ":	0x%04" TME_PRIx64),
		 TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
		 address,
		 TME_SPARC_FORMAT3_RD));

  TME_SPARC_INSN_OK;
}
6565
/* this does a sparc64 sth: store the low 16 bits of rd to memory.
   the fast path writes host memory through a busied DTLB entry; any
   condition the fast path cannot handle routes the transfer through
   the slow store function tme_sparc64_ls(): */
TME_SPARC_FORMAT3(tme_sparc64_sth, tme_uint64_t)
{
  tme_uint64_t address;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint16_t value16;

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
  address &= ic->tme_sparc_address_mask;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer: */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
			      ic->tme_sparc_asi_mask_data, address,
			      (TME_RECODE_SIZE_16
			       | TME_SPARC_RECODE_VERIFY_MEM_STORE));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* log the value stored (before the transfer, unlike loads, since
     the store may complete inside the slow path): */
  tme_sparc_log(ic, 1000, TME_OK,
		(TME_SPARC_LOG_HANDLE(ic),
		 _("sth	0x%02x:0x%016" TME_PRIx64 ":	0x%04" TME_PRIx16),
		 TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
		 address,
		 (tme_uint16_t) TME_SPARC_FORMAT3_RD));

  /* assume that no DTLB ASI mask flags will require a slow store: */
  asi_mask_flags_slow = 0;

  /* a sth traps on no-fault addresses: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;

  /* get and busy the DTLB entry: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_write;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = ic->tme_sparc_memory_context_default;
  }

  /* we must call the slow store function if: */
  if (__tme_predict_false(

			  /* the DTLB entry is invalid: */
			  tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

			  /* the DTLB entry does not match the context: */
			  || dtlb_context != ic->tme_sparc_memory_context_default

			  /* the DTLB entry does not cover the needed addresses: */
			  || (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
			  || ((address + ((16 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)

			  /* the DTLB entry does not cover the needed address space: */
			  || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))

			  /* the DTLB entry can't be used for a fast sth: */
			  || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

			  /* the DTLB entry does not allow fast transfers: */
			  || (memory == TME_EMULATOR_OFF_UNDEF)

			  /* the address is misaligned: */
			  || ((address % (16 / 8)) != 0)

			  )) {

    /* call the slow store function: */
    memory = tme_sparc64_ls(ic,
			    address,
			    &TME_SPARC_FORMAT3_RD,
			    (TME_SPARC_LSINFO_OP_ST
			     | (16 / 8)));

    /* if the slow store function did the transfer (signalled by an
       undefined emulator offset), there is nothing left to write: */
    if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {

      /* unbusy the TLB entry; */
      tme_sparc_tlb_unbusy(dtlb);

      TME_SPARC_INSN_OK;
    }
  }

  /* get the byte order of this transfer; an invert-endian TLB entry
     is only legal when the memory flags advertise that capability: */
  endian_little = ic->tme_sparc_asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
  if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
    if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
      endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
    }
    else {
      assert (FALSE);
    }
  }

  /* do the fast transfer: */
  memory += address;
  value16 = TME_SPARC_FORMAT3_RD;
  value16 = (endian_little ? tme_htole_u16(value16) : tme_htobe_u16(value16));
  tme_memory_bus_write16((tme_shared tme_uint16_t *) memory, value16, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint16_t), sizeof(tme_uint64_t));

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  TME_SPARC_INSN_OK;
}
6688
/* this does a sparc64 ld: load a 32-bit word into rd.  the fast path
   reads host memory through a busied DTLB entry; any condition the
   fast path cannot handle routes the transfer through the slow load
   function tme_sparc64_ls(): */
TME_SPARC_FORMAT3(tme_sparc64_ld, tme_uint64_t)
{
  tme_uint64_t address;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  const tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint32_t value32;
  tme_uint64_t value64;

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
  address &= ic->tme_sparc_address_mask;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer: */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
			      ic->tme_sparc_asi_mask_data, address,
			      (TME_RECODE_SIZE_32
			       | TME_SPARC_RECODE_VERIFY_MEM_LOAD));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* assume that no DTLB ASI mask flags will require a slow load: */
  asi_mask_flags_slow = 0;

  /* a ld without a no-fault ASI traps on no-fault addresses: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;

  /* get and busy the DTLB entry: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_read;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = ic->tme_sparc_memory_context_default;
  }

  /* we must call the slow load function if: */
  if (__tme_predict_false(

			  /* the DTLB entry is invalid: */
			  tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

			  /* the DTLB entry does not match the context: */
			  || dtlb_context != ic->tme_sparc_memory_context_default

			  /* the DTLB entry does not cover the needed addresses: */
			  || (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
			  || ((address + ((32 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)

			  /* the DTLB entry does not cover the needed address space: */
			  || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))

			  /* the DTLB entry can't be used for a fast ld: */
			  || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

			  /* the DTLB entry does not allow fast transfers: */
			  || (memory == TME_EMULATOR_OFF_UNDEF)

			  /* the address is misaligned: */
			  || ((address % (32 / 8)) != 0)

			  )) {

    /* call the slow load function: */
    memory = tme_sparc64_ls(ic,
			    address,
			    &TME_SPARC_FORMAT3_RD,
			    (TME_SPARC_LSINFO_OP_LD
			     | (32 / 8)));
  }

  /* get the byte order of this transfer; an invert-endian TLB entry
     is only legal when the memory flags advertise that capability: */
  endian_little = ic->tme_sparc_asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
  if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
    if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
      endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
    }
    else {
      assert (FALSE);
    }
  }

  /* do the fast transfer: */
  memory += address;
  value32 = tme_memory_bus_read32((const tme_shared tme_uint32_t *) memory, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t), sizeof(tme_uint64_t));
  value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));

  /* possibly sign-extend the loaded value: bit 22 of the instruction
     selects the sign-extending (ldsw) form over the zero-extending
     (lduw) form: */
  value64 = value32;
  if (TME_SPARC_INSN & TME_BIT(22)) {
    value64 = (tme_uint64_t) (tme_int64_t) (tme_int32_t) value64;
  }
  /* NOTE(review): the repeated (tme_int64_t) cast below is a harmless
     artifact of the script that generates this file: */
  TME_SPARC_FORMAT3_RD = (tme_uint64_t) (tme_int64_t) (tme_int64_t) value64;

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  /* log the value loaded: */
  tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
  tme_sparc_log(ic, 1000, TME_OK,
		(TME_SPARC_LOG_HANDLE(ic),
		 _("ld	0x%02x:0x%016" TME_PRIx64 ":	0x%08" TME_PRIx64),
		 TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
		 address,
		 TME_SPARC_FORMAT3_RD));

  TME_SPARC_INSN_OK;
}
6810
/* this does a sparc64 st: store the low 32 bits of rd to memory.
   the fast path writes host memory through a busied DTLB entry; any
   condition the fast path cannot handle routes the transfer through
   the slow store function tme_sparc64_ls(): */
TME_SPARC_FORMAT3(tme_sparc64_st, tme_uint64_t)
{
  tme_uint64_t address;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint32_t value32;

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
  address &= ic->tme_sparc_address_mask;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer: */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
			      ic->tme_sparc_asi_mask_data, address,
			      (TME_RECODE_SIZE_32
			       | TME_SPARC_RECODE_VERIFY_MEM_STORE));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* log the value stored (before the transfer, since the store may
     complete inside the slow path): */
  tme_sparc_log(ic, 1000, TME_OK,
		(TME_SPARC_LOG_HANDLE(ic),
		 _("st	0x%02x:0x%016" TME_PRIx64 ":	0x%08" TME_PRIx32),
		 TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
		 address,
		 (tme_uint32_t) TME_SPARC_FORMAT3_RD));

  /* assume that no DTLB ASI mask flags will require a slow store: */
  asi_mask_flags_slow = 0;

  /* a st traps on no-fault addresses: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;

  /* get and busy the DTLB entry: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_write;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = ic->tme_sparc_memory_context_default;
  }

  /* we must call the slow store function if: */
  if (__tme_predict_false(

			  /* the DTLB entry is invalid: */
			  tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

			  /* the DTLB entry does not match the context: */
			  || dtlb_context != ic->tme_sparc_memory_context_default

			  /* the DTLB entry does not cover the needed addresses: */
			  || (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
			  || ((address + ((32 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)

			  /* the DTLB entry does not cover the needed address space: */
			  || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))

			  /* the DTLB entry can't be used for a fast st: */
			  || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

			  /* the DTLB entry does not allow fast transfers: */
			  || (memory == TME_EMULATOR_OFF_UNDEF)

			  /* the address is misaligned: */
			  || ((address % (32 / 8)) != 0)

			  )) {

    /* call the slow store function: */
    memory = tme_sparc64_ls(ic,
			    address,
			    &TME_SPARC_FORMAT3_RD,
			    (TME_SPARC_LSINFO_OP_ST
			     | (32 / 8)));

    /* if the slow store function did the transfer (signalled by an
       undefined emulator offset), there is nothing left to write: */
    if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {

      /* unbusy the TLB entry; */
      tme_sparc_tlb_unbusy(dtlb);

      TME_SPARC_INSN_OK;
    }
  }

  /* get the byte order of this transfer; an invert-endian TLB entry
     is only legal when the memory flags advertise that capability: */
  endian_little = ic->tme_sparc_asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
  if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
    if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
      endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
    }
    else {
      assert (FALSE);
    }
  }

  /* do the fast transfer: */
  memory += address;
  value32 = TME_SPARC_FORMAT3_RD;
  value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
  tme_memory_bus_write32((tme_shared tme_uint32_t *) memory, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t), sizeof(tme_uint64_t));

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  TME_SPARC_INSN_OK;
}
6933
/* this does a sparc64 ldd: load a 64-bit doubleword as two 32-bit
   words into the even/odd register pair rd and rd|1.  the fast path
   reads host memory through a busied DTLB entry; any condition the
   fast path cannot handle (including an odd rd number) routes the
   transfer through the slow load function tme_sparc64_ls(): */
TME_SPARC_FORMAT3(tme_sparc64_ldd, tme_uint64_t)
{
  tme_uint64_t address;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  const tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint32_t value32;

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
  address &= ic->tme_sparc_address_mask;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer, once for each 32-bit
     half of the doubleword: */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
			      ic->tme_sparc_asi_mask_data, address,
			      (TME_RECODE_SIZE_32
			       | TME_SPARC_RECODE_VERIFY_MEM_LOAD));
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64),
			      ic->tme_sparc_asi_mask_data, address + sizeof(tme_uint32_t),
			      (TME_RECODE_SIZE_32
			       | TME_SPARC_RECODE_VERIFY_MEM_LOAD));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* assume that no DTLB ASI mask flags will require a slow load: */
  asi_mask_flags_slow = 0;

  /* a ldd without a no-fault ASI traps on no-fault addresses: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;

  /* get and busy the DTLB entry: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_read;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = ic->tme_sparc_memory_context_default;
  }

  /* we must call the slow load function if: */
  if (__tme_predict_false(

			  /* the DTLB entry is invalid: */
			  tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

			  /* the DTLB entry does not match the context: */
			  || dtlb_context != ic->tme_sparc_memory_context_default

			  /* the DTLB entry does not cover the needed addresses: */
			  || (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
			  || ((address + ((64 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)

			  /* the DTLB entry does not cover the needed address space: */
			  || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))

			  /* the DTLB entry can't be used for a fast ldd: */
			  || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

			  /* the DTLB entry does not allow fast transfers: */
			  || (memory == TME_EMULATOR_OFF_UNDEF)

			  /* the address is misaligned: */
			  || ((address % (64 / 8)) != 0)

			  /* the destination register number (bit 25) is odd: */
			  || ((TME_SPARC_INSN & TME_BIT(25)) != 0)

			  )) {

    /* call the slow load function: */
    memory = tme_sparc64_ls(ic,
			    address,
			    &TME_SPARC_FORMAT3_RD,
			    (TME_SPARC_LSINFO_OP_LD
			     | TME_SPARC_LSINFO_LDD_STD
			     | (64 / 8)));
  }

  /* get the byte order of this transfer; an invert-endian TLB entry
     is only legal when the memory flags advertise that capability: */
  endian_little = ic->tme_sparc_asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
  if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
    if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
      endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
    }
    else {
      assert (FALSE);
    }
  }

  /* do the fast transfer: the word at the lower address goes to the
     even register, the next word to the odd register: */
  memory += address;
  value32 = tme_memory_bus_read32(((const tme_shared tme_uint32_t *) memory) + 0, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 2, sizeof(tme_uint64_t));
  value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
  TME_SPARC_FORMAT3_RD = value32;
  value32 = tme_memory_bus_read32(((const tme_shared tme_uint32_t *) memory) + 1, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 1, sizeof(tme_uint64_t));
  value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
  TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64) = value32;

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  /* log the values loaded: */
  tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
  tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64));
  tme_sparc_log(ic, 1000, TME_OK,
		(TME_SPARC_LOG_HANDLE(ic),
		 _("ldd	0x%02x:0x%016" TME_PRIx64 ":	0x%016" TME_PRIx64 " 0x%016" TME_PRIx64 ""),
		 TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
		 address,
		 TME_SPARC_FORMAT3_RD,
		 TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64)));

  TME_SPARC_INSN_OK;
}
7061
/* this does a sparc64 std: store the even/odd register pair rd and
   rd|1 as two 32-bit words.  the fast path writes host memory through
   a busied DTLB entry; any condition the fast path cannot handle
   (including an odd rd number) routes the transfer through the slow
   store function tme_sparc64_ls(): */
TME_SPARC_FORMAT3(tme_sparc64_std, tme_uint64_t)
{
  tme_uint64_t address;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint32_t value32;

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
  address &= ic->tme_sparc_address_mask;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer, once for each 32-bit
     half of the doubleword: */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
			      ic->tme_sparc_asi_mask_data, address,
			      (TME_RECODE_SIZE_32
			       | TME_SPARC_RECODE_VERIFY_MEM_STORE));
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64),
			      ic->tme_sparc_asi_mask_data, address + sizeof(tme_uint32_t),
			      (TME_RECODE_SIZE_32
			       | TME_SPARC_RECODE_VERIFY_MEM_STORE));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* log the values stored (before the transfer, since the store may
     complete inside the slow path): */
  tme_sparc_log(ic, 1000, TME_OK,
		(TME_SPARC_LOG_HANDLE(ic),
		 _("std	0x%02x:0x%016" TME_PRIx64 ":	0x%08" TME_PRIx32 " 0x%08" TME_PRIx32),
		 TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
		 address,
		 (tme_uint32_t) TME_SPARC_FORMAT3_RD,
		 (tme_uint32_t) TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64)));

  /* assume that no DTLB ASI mask flags will require a slow store: */
  asi_mask_flags_slow = 0;

  /* a std traps on no-fault addresses: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;

  /* get and busy the DTLB entry: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_write;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = ic->tme_sparc_memory_context_default;
  }

  /* we must call the slow store function if: */
  if (__tme_predict_false(

			  /* the DTLB entry is invalid: */
			  tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

			  /* the DTLB entry does not match the context: */
			  || dtlb_context != ic->tme_sparc_memory_context_default

			  /* the DTLB entry does not cover the needed addresses: */
			  || (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
			  || ((address + ((64 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)

			  /* the DTLB entry does not cover the needed address space: */
			  || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))

			  /* the DTLB entry can't be used for a fast std: */
			  || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

			  /* the DTLB entry does not allow fast transfers: */
			  || (memory == TME_EMULATOR_OFF_UNDEF)

			  /* the address is misaligned: */
			  || ((address % (64 / 8)) != 0)

			  /* the destination register number (bit 25) is odd: */
			  || ((TME_SPARC_INSN & TME_BIT(25)) != 0)

			  )) {

    /* call the slow store function: */
    memory = tme_sparc64_ls(ic,
			    address,
			    &TME_SPARC_FORMAT3_RD,
			    (TME_SPARC_LSINFO_OP_ST
			     | TME_SPARC_LSINFO_LDD_STD
			     | (64 / 8)));

    /* if the slow store function did the transfer (signalled by an
       undefined emulator offset), there is nothing left to write: */
    if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {

      /* unbusy the TLB entry; */
      tme_sparc_tlb_unbusy(dtlb);

      TME_SPARC_INSN_OK;
    }
  }

  /* get the byte order of this transfer; an invert-endian TLB entry
     is only legal when the memory flags advertise that capability: */
  endian_little = ic->tme_sparc_asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
  if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
    if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
      endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
    }
    else {
      assert (FALSE);
    }
  }

  /* do the fast transfer: the even register goes to the lower
     address, the odd register to the next word: */
  memory += address;
  value32 = TME_SPARC_FORMAT3_RD;
  value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
  tme_memory_bus_write32(((tme_shared tme_uint32_t *) memory) + 0, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 2, sizeof(tme_uint64_t));
  value32 = TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64);
  value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
  tme_memory_bus_write32(((tme_shared tme_uint32_t *) memory) + 1, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 1, sizeof(tme_uint64_t));

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  TME_SPARC_INSN_OK;
}
7196
/* this does a sparc64 ldstub: atomically load the byte at the
   effective address into rd and store 0xff to that byte.  the fast
   path performs an atomic exchange in host memory through a busied
   DTLB entry; any condition the fast path cannot handle routes the
   operation through the slow function tme_sparc64_ls(): */
TME_SPARC_FORMAT3(tme_sparc64_ldstub, tme_uint64_t)
{
  tme_uint64_t address;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
  address &= ic->tme_sparc_address_mask;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer: */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
			      ic->tme_sparc_asi_mask_data, address,
			      (TME_RECODE_SIZE_8
			       | TME_SPARC_RECODE_VERIFY_MEM_LOAD));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* assume that no DTLB ASI mask flags will require a slow store: */
  asi_mask_flags_slow = 0;

  /* a ldstub traps on no-fault addresses: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;

  /* a ldstub traps on uncacheable addresses with side-effects: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_MASK_FLAG_TLB_UNCACHEABLE;

  /* get and busy the DTLB entry: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_write;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = ic->tme_sparc_memory_context_default;
  }

  /* we must call the slow store function if: */
  if (__tme_predict_false(

			  /* the DTLB entry is invalid: */
			  tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

			  /* the DTLB entry does not match the context: */
			  || dtlb_context != ic->tme_sparc_memory_context_default

			  /* the DTLB entry does not cover the needed addresses: */
			  || (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
			  || ((address + ((8 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)

			  /* the DTLB entry does not cover the needed address space: */
			  || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))

			  /* the DTLB entry can't be used for a fast ldstub: */
			  || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

			  /* the DTLB entry does not allow fast transfers (an
			     atomic needs the same read and write mapping): */
			  || (memory != dtlb->tme_sparc_tlb_emulator_off_read)
			  || (memory == TME_EMULATOR_OFF_UNDEF)

			  )) {

    /* call the slow store function: */
    memory = tme_sparc64_ls(ic,
			    address,
			    &TME_SPARC_FORMAT3_RD,
			    (TME_SPARC_LSINFO_OP_ATOMIC
			     | (8 / 8)));

    /* if the slow store function did the transfer (signalled by an
       undefined emulator offset), rd already holds the loaded byte: */
    if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {

      /* unbusy the TLB entry; */
      tme_sparc_tlb_unbusy(dtlb);

      /* log the value loaded: */
      tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
      tme_sparc_log(ic, 1000, TME_OK,
		    (TME_SPARC_LOG_HANDLE(ic),
		     _("ldstub	0x%02x:0x%016" TME_PRIx64 ":	0x%02" TME_PRIx64),
		     TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
		     address,
		     TME_SPARC_FORMAT3_RD));

      TME_SPARC_INSN_OK;
    }
  }

  /* get the byte order of this transfer (a single byte needs no
     swapping; an invert-endian TLB entry is still only legal when
     the memory flags advertise that capability): */
  endian_little = ic->tme_sparc_asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
  if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
    if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
      endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
    }
    else {
      assert (FALSE);
    }
  }

  /* do the fast transfer: atomically exchange the byte with 0xff: */
  memory += address;
  TME_SPARC_FORMAT3_RD = tme_memory_atomic_xchg8(memory, 0xff, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t));

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  /* log the value loaded: */
  tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
  tme_sparc_log(ic, 1000, TME_OK,
		(TME_SPARC_LOG_HANDLE(ic),
		 _("ldstub	0x%02x:0x%016" TME_PRIx64 ":	0x%02" TME_PRIx64),
		 TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
		 address,
		 TME_SPARC_FORMAT3_RD));

  TME_SPARC_INSN_OK;
}
7327
7328 /* this does a sparc64 ldstuba: */
/* this does a sparc64 ldstuba: atomically load the unsigned byte at
   the effective address in the alternate address space into rd, and
   store all-ones (0xff) back to that byte.  the fast path does a
   tme_memory_atomic_xchg8 through the DTLB's emulator-offset mapping;
   anything unusual falls back to the slow tme_sparc64_ls() path.
   (TME_SPARC_INSN_OK presumably terminates the instruction - it is
   generated control flow, not a plain statement.) */
TME_SPARC_FORMAT3(tme_sparc64_ldstuba, tme_uint64_t)
{
  tme_uint32_t asi_mask_data;       /* ASI mask decoded from the instruction's asi field */
  tme_uint64_t address;             /* the effective address, masked to the address width */
  tme_bus_context_t context;        /* memory context selected by the ASI */
  tme_uint32_t asi_mask_flags_slow; /* DTLB ASI-mask flags that force the slow path */
  struct tme_sparc_tlb *dtlb;
  tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;

  /* get the alternate ASI mask: */
  asi_mask_data = _tme_sparc64_alternate_asi_mask(ic);

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
  address &= ic->tme_sparc_address_mask;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer: */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
                              asi_mask_data, address,
                              (TME_RECODE_SIZE_8
                               | TME_SPARC_RECODE_VERIFY_MEM_LOAD));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* get the context: the default is the primary context; a secondary
     ASI selects the secondary context, and a nucleus ASI selects
     context zero when the implementation has a nucleus context: */
  context = ic->tme_sparc_memory_context_primary;
  if (__tme_predict_false(asi_mask_data
                          & (TME_SPARC64_ASI_FLAG_SECONDARY
                             + TME_SPARC64_ASI_MASK_FLAG_INSN_NUCLEUS))) {
    if (asi_mask_data & TME_SPARC64_ASI_FLAG_SECONDARY) {
      context = ic->tme_sparc_memory_context_secondary;
    }
    else if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) {
      context = 0;
    }
  }

  /* assume that no DTLB ASI mask flags will require a slow store: */
  asi_mask_flags_slow = 0;

  /* a ldstuba traps on no-fault addresses: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;

  /* a ldstuba traps on uncacheable addresses with side-effects: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_MASK_FLAG_TLB_UNCACHEABLE;

  /* if this ldstuba is using a no-fault ASI: */
  if (__tme_predict_false(asi_mask_data & TME_SPARC64_ASI_FLAG_NO_FAULT)) {

    /* a ldstuba with a no-fault ASI traps: an all-ones flags mask
       guarantees the slow-path test below matches any DTLB entry: */
    asi_mask_flags_slow = 0 - (tme_uint32_t) 1;
  }

  /* get and busy the DTLB entry: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, context, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_write;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = context;
  }

  /* we must call the slow store function if: */
  if (__tme_predict_false(

                          /* the DTLB entry is invalid: */
                          tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

                          /* the DTLB entry does not match the context: */
                          || dtlb_context != context

                          /* the DTLB entry does not cover the needed addresses: */
                          || (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
                          || ((address + ((8 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)

                          /* the DTLB entry does not cover the needed address space: */
                          || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))

                          /* the DTLB entry can't be used for a fast ldstuba: */
                          || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

                          /* the DTLB entry does not allow fast transfers: an
                             atomic needs both read and write mappings, and
                             they must be the same mapping: */
                          || (memory != dtlb->tme_sparc_tlb_emulator_off_read)
                          || (memory == TME_EMULATOR_OFF_UNDEF)

                          )) {

    /* call the slow store function: */
    memory = tme_sparc64_ls(ic,
                            address,
                            &TME_SPARC_FORMAT3_RD,
                            (TME_SPARC_LSINFO_OP_ATOMIC
                             | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
                             | TME_SPARC_LSINFO_A
                             | (8 / 8)));

    /* if the slow store function did the transfer: */
    if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {

      /* unbusy the TLB entry; */
      tme_sparc_tlb_unbusy(dtlb);

      /* log the value loaded: */
      tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
      tme_sparc_log(ic, 1000, TME_OK,
                    (TME_SPARC_LOG_HANDLE(ic),
                     _("ldstuba 0x%02x:0x%016" TME_PRIx64 ": 0x%02" TME_PRIx64),
                     TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                     address,
                     TME_SPARC_FORMAT3_RD));

      TME_SPARC_INSN_OK;
    }
  }

  /* get the byte order of this transfer.  NOTE(review): for this
     single-byte transfer the result does not affect the data; the
     computation is kept (as generated) for the invert-endian
     consistency assertion below: */
  endian_little = asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
  if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
    if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
      endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
    }
    else {
      assert (FALSE);
    }
  }

  /* do the fast transfer: exchange the byte at the address with 0xff,
     returning the old byte into rd: */
  memory += address;
  TME_SPARC_FORMAT3_RD = tme_memory_atomic_xchg8(memory, 0xff, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t));

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  /* log the value loaded: */
  tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
  tme_sparc_log(ic, 1000, TME_OK,
                (TME_SPARC_LOG_HANDLE(ic),
                 _("ldstuba 0x%02x:0x%016" TME_PRIx64 ": 0x%02" TME_PRIx64),
                 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                 address,
                 TME_SPARC_FORMAT3_RD));

  TME_SPARC_INSN_OK;
}
7485
7486 /* this does a sparc64 swap: */
/* this does a sparc64 swap: atomically exchange the 32-bit word at
   the effective address (in the current/default address space) with
   the low 32 bits of rd; the old memory word is zero-extended into
   rd.  the fast path uses tme_memory_atomic_xchg32 through the
   DTLB's emulator-offset mapping, with byte-swapping around the
   exchange when the effective byte order is little-endian: */
TME_SPARC_FORMAT3(tme_sparc64_swap, tme_uint64_t)
{
  tme_uint64_t address;             /* the effective address, masked to the address width */
  tme_uint32_t asi_mask_flags_slow; /* DTLB ASI-mask flags that force the slow path */
  struct tme_sparc_tlb *dtlb;
  tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint32_t value32;             /* the 32-bit value swapped with memory */

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
  address &= ic->tme_sparc_address_mask;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer (a swap is both a load and
     a store): */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
                              ic->tme_sparc_asi_mask_data, address,
                              (TME_RECODE_SIZE_32
                               | TME_SPARC_RECODE_VERIFY_MEM_LOAD | TME_SPARC_RECODE_VERIFY_MEM_STORE));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* log the value stored: */
  tme_sparc_log(ic, 1000, TME_OK,
                (TME_SPARC_LOG_HANDLE(ic),
                 _("swap 0x%02x:0x%016" TME_PRIx64 ": 0x%08" TME_PRIx32),
                 TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                 address,
                 (tme_uint32_t) TME_SPARC_FORMAT3_RD));

  /* assume that no DTLB ASI mask flags will require a slow store: */
  asi_mask_flags_slow = 0;

  /* a swap traps on no-fault addresses: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;

  /* a swap traps on uncacheable addresses with side-effects: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_MASK_FLAG_TLB_UNCACHEABLE;

  /* get and busy the DTLB entry, hashed on the default memory
     context (a plain swap uses the current/default ASI): */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_write;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = ic->tme_sparc_memory_context_default;
  }

  /* we must call the slow store function if: */
  if (__tme_predict_false(

                          /* the DTLB entry is invalid: */
                          tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

                          /* the DTLB entry does not match the context: */
                          || dtlb_context != ic->tme_sparc_memory_context_default

                          /* the DTLB entry does not cover the needed addresses: */
                          || (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
                          || ((address + ((32 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)

                          /* the DTLB entry does not cover the needed address space: */
                          || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))

                          /* the DTLB entry can't be used for a fast swap: */
                          || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

                          /* the DTLB entry does not allow fast transfers: an
                             atomic needs both read and write mappings, and
                             they must be the same mapping: */
                          || (memory != dtlb->tme_sparc_tlb_emulator_off_read)
                          || (memory == TME_EMULATOR_OFF_UNDEF)

                          /* the address is misaligned: */
                          || ((address % (32 / 8)) != 0)

                          )) {

    /* call the slow store function: */
    memory = tme_sparc64_ls(ic,
                            address,
                            &TME_SPARC_FORMAT3_RD,
                            (TME_SPARC_LSINFO_OP_ATOMIC
                             | (32 / 8)));

    /* if the slow store function did the transfer: */
    if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {

      /* unbusy the TLB entry; */
      tme_sparc_tlb_unbusy(dtlb);

      /* log the value loaded: */
      tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
      tme_sparc_log(ic, 1000, TME_OK,
                    (TME_SPARC_LOG_HANDLE(ic),
                     _("swap 0x%02x:0x%016" TME_PRIx64 ": 0x%08" TME_PRIx64),
                     TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                     address,
                     TME_SPARC_FORMAT3_RD));

      TME_SPARC_INSN_OK;
    }
  }

  /* get the byte order of this transfer: the ASI's little-endian
     flag, possibly inverted by the DTLB entry when the memory
     implementation supports endian inversion: */
  endian_little = ic->tme_sparc_asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
  if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
    if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
      endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
    }
    else {
      assert (FALSE);
    }
  }

  /* do the fast transfer: convert rd to the memory byte order,
     exchange, then convert the old memory word back to host order.
     NOTE(review): the alignment hint passed to the xchg is
     sizeof(tme_uint8_t) even though the address is 32-bit aligned
     here; presumably a conservative generator choice - confirm
     against tme_memory_atomic_xchg32's contract: */
  memory += address;
  value32 = TME_SPARC_FORMAT3_RD;
  value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
  value32 = tme_memory_atomic_xchg32((tme_shared tme_uint32_t *) memory, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t));
  value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
  TME_SPARC_FORMAT3_RD = value32;

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  /* log the value loaded: */
  tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
  tme_sparc_log(ic, 1000, TME_OK,
                (TME_SPARC_LOG_HANDLE(ic),
                 _("swap 0x%02x:0x%016" TME_PRIx64 ": 0x%08" TME_PRIx64),
                 TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                 address,
                 TME_SPARC_FORMAT3_RD));

  TME_SPARC_INSN_OK;
}
7632
7633 /* this does a sparc64 swapa: */
/* this does a sparc64 swapa: like swap, but in the alternate address
   space named by the instruction's asi field: atomically exchange
   the 32-bit word at the effective address with the low 32 bits of
   rd, zero-extending the old memory word into rd: */
TME_SPARC_FORMAT3(tme_sparc64_swapa, tme_uint64_t)
{
  tme_uint32_t asi_mask_data;       /* ASI mask decoded from the instruction's asi field */
  tme_uint64_t address;             /* the effective address, masked to the address width */
  tme_bus_context_t context;        /* memory context selected by the ASI */
  tme_uint32_t asi_mask_flags_slow; /* DTLB ASI-mask flags that force the slow path */
  struct tme_sparc_tlb *dtlb;
  tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint32_t value32;             /* the 32-bit value swapped with memory */

  /* get the alternate ASI mask: */
  asi_mask_data = _tme_sparc64_alternate_asi_mask(ic);

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
  address &= ic->tme_sparc_address_mask;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer (a swapa is both a load
     and a store): */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
                              asi_mask_data, address,
                              (TME_RECODE_SIZE_32
                               | TME_SPARC_RECODE_VERIFY_MEM_LOAD | TME_SPARC_RECODE_VERIFY_MEM_STORE));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* log the value stored: */
  tme_sparc_log(ic, 1000, TME_OK,
                (TME_SPARC_LOG_HANDLE(ic),
                 _("swapa 0x%02x:0x%016" TME_PRIx64 ": 0x%08" TME_PRIx32),
                 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                 address,
                 (tme_uint32_t) TME_SPARC_FORMAT3_RD));

  /* get the context: the default is the primary context; a secondary
     ASI selects the secondary context, and a nucleus ASI selects
     context zero when the implementation has a nucleus context: */
  context = ic->tme_sparc_memory_context_primary;
  if (__tme_predict_false(asi_mask_data
                          & (TME_SPARC64_ASI_FLAG_SECONDARY
                             + TME_SPARC64_ASI_MASK_FLAG_INSN_NUCLEUS))) {
    if (asi_mask_data & TME_SPARC64_ASI_FLAG_SECONDARY) {
      context = ic->tme_sparc_memory_context_secondary;
    }
    else if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) {
      context = 0;
    }
  }

  /* assume that no DTLB ASI mask flags will require a slow store: */
  asi_mask_flags_slow = 0;

  /* a swapa traps on no-fault addresses: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;

  /* a swapa traps on uncacheable addresses with side-effects: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_MASK_FLAG_TLB_UNCACHEABLE;

  /* if this swapa is using a no-fault ASI: */
  if (__tme_predict_false(asi_mask_data & TME_SPARC64_ASI_FLAG_NO_FAULT)) {

    /* a swapa with a no-fault ASI traps: an all-ones flags mask
       guarantees the slow-path test below matches any DTLB entry: */
    asi_mask_flags_slow = 0 - (tme_uint32_t) 1;
  }

  /* get and busy the DTLB entry: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, context, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_write;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = context;
  }

  /* we must call the slow store function if: */
  if (__tme_predict_false(

                          /* the DTLB entry is invalid: */
                          tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

                          /* the DTLB entry does not match the context: */
                          || dtlb_context != context

                          /* the DTLB entry does not cover the needed addresses: */
                          || (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
                          || ((address + ((32 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)

                          /* the DTLB entry does not cover the needed address space: */
                          || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))

                          /* the DTLB entry can't be used for a fast swapa: */
                          || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

                          /* the DTLB entry does not allow fast transfers: an
                             atomic needs both read and write mappings, and
                             they must be the same mapping: */
                          || (memory != dtlb->tme_sparc_tlb_emulator_off_read)
                          || (memory == TME_EMULATOR_OFF_UNDEF)

                          /* the address is misaligned: */
                          || ((address % (32 / 8)) != 0)

                          )) {

    /* call the slow store function: */
    memory = tme_sparc64_ls(ic,
                            address,
                            &TME_SPARC_FORMAT3_RD,
                            (TME_SPARC_LSINFO_OP_ATOMIC
                             | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
                             | TME_SPARC_LSINFO_A
                             | (32 / 8)));

    /* if the slow store function did the transfer: */
    if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {

      /* unbusy the TLB entry; */
      tme_sparc_tlb_unbusy(dtlb);

      /* log the value loaded: */
      tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
      tme_sparc_log(ic, 1000, TME_OK,
                    (TME_SPARC_LOG_HANDLE(ic),
                     _("swapa 0x%02x:0x%016" TME_PRIx64 ": 0x%08" TME_PRIx64),
                     TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                     address,
                     TME_SPARC_FORMAT3_RD));

      TME_SPARC_INSN_OK;
    }
  }

  /* get the byte order of this transfer: the ASI's little-endian
     flag, possibly inverted by the DTLB entry when the memory
     implementation supports endian inversion: */
  endian_little = asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
  if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
    if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
      endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
    }
    else {
      assert (FALSE);
    }
  }

  /* do the fast transfer: convert rd to the memory byte order,
     exchange, then convert the old memory word back to host order.
     NOTE(review): the alignment hint passed to the xchg is
     sizeof(tme_uint8_t) even though the address is 32-bit aligned
     here; presumably a conservative generator choice - confirm
     against tme_memory_atomic_xchg32's contract: */
  memory += address;
  value32 = TME_SPARC_FORMAT3_RD;
  value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
  value32 = tme_memory_atomic_xchg32((tme_shared tme_uint32_t *) memory, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t));
  value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
  TME_SPARC_FORMAT3_RD = value32;

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  /* log the value loaded: */
  tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
  tme_sparc_log(ic, 1000, TME_OK,
                (TME_SPARC_LOG_HANDLE(ic),
                 _("swapa 0x%02x:0x%016" TME_PRIx64 ": 0x%08" TME_PRIx64),
                 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                 address,
                 TME_SPARC_FORMAT3_RD));

  TME_SPARC_INSN_OK;
}
7806
7807 /* this does a sparc64 ldba: */
/* this does a sparc64 ldba: load a byte from the alternate address
   space named by the instruction's asi field into rd.  when bit 22
   of the instruction is set the byte is sign-extended into rd
   (presumably the ldsba form - confirm against the decoder),
   otherwise it is zero-extended: */
TME_SPARC_FORMAT3(tme_sparc64_ldba, tme_uint64_t)
{
  tme_uint32_t asi_mask_data;       /* ASI mask decoded from the instruction's asi field */
  tme_uint64_t address;             /* the effective address, masked to the address width */
  tme_bus_context_t context;        /* memory context selected by the ASI */
  tme_uint32_t asi_mask_flags_slow; /* DTLB ASI-mask flags that force the slow path */
  struct tme_sparc_tlb *dtlb;
  const tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint8_t value8;               /* the raw byte loaded */
  tme_uint32_t value32;             /* the byte after optional sign extension to 32 bits */

  /* get the alternate ASI mask: */
  asi_mask_data = _tme_sparc64_alternate_asi_mask(ic);

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
  address &= ic->tme_sparc_address_mask;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer: */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
                              asi_mask_data, address,
                              (TME_RECODE_SIZE_8
                               | TME_SPARC_RECODE_VERIFY_MEM_LOAD));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* get the context: the default is the primary context; a secondary
     ASI selects the secondary context, and a nucleus ASI selects
     context zero when the implementation has a nucleus context: */
  context = ic->tme_sparc_memory_context_primary;
  if (__tme_predict_false(asi_mask_data
                          & (TME_SPARC64_ASI_FLAG_SECONDARY
                             + TME_SPARC64_ASI_MASK_FLAG_INSN_NUCLEUS))) {
    if (asi_mask_data & TME_SPARC64_ASI_FLAG_SECONDARY) {
      context = ic->tme_sparc_memory_context_secondary;
    }
    else if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) {
      context = 0;
    }
  }

  /* assume that no DTLB ASI mask flags will require a slow load: */
  asi_mask_flags_slow = 0;

  /* a ldba without a no-fault ASI traps on no-fault addresses: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;

  /* if this ldba is using a no-fault ASI: */
  if (__tme_predict_false(asi_mask_data & TME_SPARC64_ASI_FLAG_NO_FAULT)) {

    /* a ldba with a no-fault ASI traps on addresses with side-effects
       (note this replaces, not adds to, the previous flags): */
    asi_mask_flags_slow = TME_SPARC64_ASI_MASK_FLAG_TLB_SIDE_EFFECTS;
  }

  /* get and busy the DTLB entry: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, context, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers
     (a load only needs the read mapping): */
  memory = dtlb->tme_sparc_tlb_emulator_off_read;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = context;
  }

  /* we must call the slow load function if: */
  if (__tme_predict_false(

                          /* the DTLB entry is invalid: */
                          tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

                          /* the DTLB entry does not match the context: */
                          || dtlb_context != context

                          /* the DTLB entry does not cover the needed addresses: */
                          || (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
                          || ((address + ((8 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)

                          /* the DTLB entry does not cover the needed address space: */
                          || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))

                          /* the DTLB entry can't be used for a fast ldba: */
                          || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

                          /* the DTLB entry does not allow fast transfers: */
                          || (memory == TME_EMULATOR_OFF_UNDEF)

                          )) {

    /* call the slow load function: */
    memory = tme_sparc64_ls(ic,
                            address,
                            &TME_SPARC_FORMAT3_RD,
                            (TME_SPARC_LSINFO_OP_LD
                             | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
                             | TME_SPARC_LSINFO_A
                             | (8 / 8)));

    /* if the slow load function did the transfer: */
    if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {

      /* unbusy the TLB entry; */
      tme_sparc_tlb_unbusy(dtlb);

      /* log the value loaded: */
      tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
      tme_sparc_log(ic, 1000, TME_OK,
                    (TME_SPARC_LOG_HANDLE(ic),
                     _("ldba 0x%02x:0x%016" TME_PRIx64 ": 0x%02" TME_PRIx64),
                     TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                     address,
                     TME_SPARC_FORMAT3_RD));

      TME_SPARC_INSN_OK;
    }
  }

  /* get the byte order of this transfer.  NOTE(review): for this
     single-byte transfer the result does not affect the data; the
     computation is kept (as generated) for the invert-endian
     consistency assertion below: */
  endian_little = asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
  if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
    if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
      endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
    }
    else {
      assert (FALSE);
    }
  }

  /* do the fast transfer: */
  memory += address;
  value8 = tme_memory_bus_read8((const tme_shared tme_uint8_t *) memory, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t), sizeof(tme_uint64_t));

  /* possibly sign-extend the loaded value: insn bit 22 selects the
     signed form; the final cast chain extends through 32 bits to the
     full 64-bit rd: */
  value32 = value8;
  if (TME_SPARC_INSN & TME_BIT(22)) {
    value32 = (tme_uint32_t) (tme_int32_t) (tme_int8_t) value32;
  }
  TME_SPARC_FORMAT3_RD = (tme_uint64_t) (tme_int64_t) (tme_int32_t) value32;

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  /* log the value loaded: */
  tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
  tme_sparc_log(ic, 1000, TME_OK,
                (TME_SPARC_LOG_HANDLE(ic),
                 _("ldba 0x%02x:0x%016" TME_PRIx64 ": 0x%02" TME_PRIx64),
                 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                 address,
                 TME_SPARC_FORMAT3_RD));

  TME_SPARC_INSN_OK;
}
7969
7970 /* this does a sparc64 stba: */
/* this does a sparc64 stba: store the low byte of rd to the
   effective address in the alternate address space named by the
   instruction's asi field.  the fast path writes through the DTLB's
   emulator-offset write mapping; anything unusual falls back to the
   slow tme_sparc64_ls() path: */
TME_SPARC_FORMAT3(tme_sparc64_stba, tme_uint64_t)
{
  tme_uint32_t asi_mask_data;       /* ASI mask decoded from the instruction's asi field */
  tme_uint64_t address;             /* the effective address, masked to the address width */
  tme_bus_context_t context;        /* memory context selected by the ASI */
  tme_uint32_t asi_mask_flags_slow; /* DTLB ASI-mask flags that force the slow path */
  struct tme_sparc_tlb *dtlb;
  tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint8_t value8;               /* the byte to store (low byte of rd) */

  /* get the alternate ASI mask: */
  asi_mask_data = _tme_sparc64_alternate_asi_mask(ic);

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
  address &= ic->tme_sparc_address_mask;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer: */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
                              asi_mask_data, address,
                              (TME_RECODE_SIZE_8
                               | TME_SPARC_RECODE_VERIFY_MEM_STORE));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* log the value stored: */
  tme_sparc_log(ic, 1000, TME_OK,
                (TME_SPARC_LOG_HANDLE(ic),
                 _("stba 0x%02x:0x%016" TME_PRIx64 ": 0x%02" TME_PRIx8),
                 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                 address,
                 (tme_uint8_t) TME_SPARC_FORMAT3_RD));

  /* get the context: the default is the primary context; a secondary
     ASI selects the secondary context, and a nucleus ASI selects
     context zero when the implementation has a nucleus context: */
  context = ic->tme_sparc_memory_context_primary;
  if (__tme_predict_false(asi_mask_data
                          & (TME_SPARC64_ASI_FLAG_SECONDARY
                             + TME_SPARC64_ASI_MASK_FLAG_INSN_NUCLEUS))) {
    if (asi_mask_data & TME_SPARC64_ASI_FLAG_SECONDARY) {
      context = ic->tme_sparc_memory_context_secondary;
    }
    else if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) {
      context = 0;
    }
  }

  /* assume that no DTLB ASI mask flags will require a slow store: */
  asi_mask_flags_slow = 0;

  /* a stba traps on no-fault addresses: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;

  /* if this stba is using a no-fault ASI: */
  if (__tme_predict_false(asi_mask_data & TME_SPARC64_ASI_FLAG_NO_FAULT)) {

    /* a stba with a no-fault ASI traps: an all-ones flags mask
       guarantees the slow-path test below matches any DTLB entry: */
    asi_mask_flags_slow = 0 - (tme_uint32_t) 1;
  }

  /* get and busy the DTLB entry: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, context, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers
     (a store only needs the write mapping): */
  memory = dtlb->tme_sparc_tlb_emulator_off_write;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = context;
  }

  /* we must call the slow store function if: */
  if (__tme_predict_false(

                          /* the DTLB entry is invalid: */
                          tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

                          /* the DTLB entry does not match the context: */
                          || dtlb_context != context

                          /* the DTLB entry does not cover the needed addresses: */
                          || (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
                          || ((address + ((8 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)

                          /* the DTLB entry does not cover the needed address space: */
                          || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))

                          /* the DTLB entry can't be used for a fast stba: */
                          || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

                          /* the DTLB entry does not allow fast transfers: */
                          || (memory == TME_EMULATOR_OFF_UNDEF)

                          )) {

    /* call the slow store function: */
    memory = tme_sparc64_ls(ic,
                            address,
                            &TME_SPARC_FORMAT3_RD,
                            (TME_SPARC_LSINFO_OP_ST
                             | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
                             | TME_SPARC_LSINFO_A
                             | (8 / 8)));

    /* if the slow store function did the transfer: */
    if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {

      /* unbusy the TLB entry; */
      tme_sparc_tlb_unbusy(dtlb);

      TME_SPARC_INSN_OK;
    }
  }

  /* get the byte order of this transfer.  NOTE(review): for this
     single-byte transfer the result does not affect the data; the
     computation is kept (as generated) for the invert-endian
     consistency assertion below: */
  endian_little = asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
  if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
    if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
      endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
    }
    else {
      assert (FALSE);
    }
  }

  /* do the fast transfer: */
  memory += address;
  value8 = TME_SPARC_FORMAT3_RD;
  tme_memory_bus_write8((tme_shared tme_uint8_t *) memory, value8, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t), sizeof(tme_uint64_t));

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  TME_SPARC_INSN_OK;
}
8115
8116 /* this does a sparc64 ldha: */
TME_SPARC_FORMAT3(tme_sparc64_ldha,tme_uint64_t)8117 TME_SPARC_FORMAT3(tme_sparc64_ldha, tme_uint64_t)
8118 {
8119 tme_uint32_t asi_mask_data;
8120 tme_uint64_t address;
8121 tme_bus_context_t context;
8122 tme_uint32_t asi_mask_flags_slow;
8123 struct tme_sparc_tlb *dtlb;
8124 const tme_shared tme_uint8_t *memory;
8125 tme_bus_context_t dtlb_context;
8126 tme_uint32_t endian_little;
8127 tme_uint16_t value16;
8128 tme_uint32_t value32;
8129
8130 /* get the alternate ASI mask: */
8131 asi_mask_data = _tme_sparc64_alternate_asi_mask(ic);
8132
8133 /* get the address: */
8134 address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
8135 address &= ic->tme_sparc_address_mask;
8136
8137 #ifdef _TME_SPARC_STATS
8138 /* track statistics: */
8139 ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
8140 #endif /* _TME_SPARC_STATS */
8141
8142 /* verify and maybe replay this transfer: */
8143 tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
8144 asi_mask_data, address,
8145 (TME_RECODE_SIZE_16
8146 | TME_SPARC_RECODE_VERIFY_MEM_LOAD));
8147 if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
8148 TME_SPARC_INSN_OK;
8149 }
8150
8151 /* get the context: */
8152 context = ic->tme_sparc_memory_context_primary;
8153 if (__tme_predict_false(asi_mask_data
8154 & (TME_SPARC64_ASI_FLAG_SECONDARY
8155 + TME_SPARC64_ASI_MASK_FLAG_INSN_NUCLEUS))) {
8156 if (asi_mask_data & TME_SPARC64_ASI_FLAG_SECONDARY) {
8157 context = ic->tme_sparc_memory_context_secondary;
8158 }
8159 else if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) {
8160 context = 0;
8161 }
8162 }
8163
8164 /* assume that no DTLB ASI mask flags will require a slow load: */
8165 asi_mask_flags_slow = 0;
8166
8167 /* a ldha without a no-fault ASI traps on no-fault addresses: */
8168 asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;
8169
8170 /* if this ldha is using a no-fault ASI: */
8171 if (__tme_predict_false(asi_mask_data & TME_SPARC64_ASI_FLAG_NO_FAULT)) {
8172
8173 /* a ldha with a no-fault ASI traps on addresses with side-effects: */
8174 asi_mask_flags_slow = TME_SPARC64_ASI_MASK_FLAG_TLB_SIDE_EFFECTS;
8175 }
8176
8177 /* get and busy the DTLB entry: */
8178 dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, context, address))];
8179 tme_sparc_tlb_busy(dtlb);
8180
8181 /* assume that this DTLB applies and allows fast transfers: */
8182 memory = dtlb->tme_sparc_tlb_emulator_off_read;
8183
8184 /* if this DTLB matches any context, it matches this context: */
8185 dtlb_context = dtlb->tme_sparc_tlb_context;
8186 if (dtlb_context > ic->tme_sparc_memory_context_max) {
8187 dtlb_context = context;
8188 }
8189
8190 /* we must call the slow load function if: */
8191 if (__tme_predict_false(
8192
8193 /* the DTLB entry is invalid: */
8194 tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
8195
8196 /* the DTLB entry does not match the context: */
8197 || dtlb_context != context
8198
8199 /* the DTLB entry does not cover the needed addresses: */
8200 || (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
8201 || ((address + ((16 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)
8202
8203 /* the DTLB entry does not cover the needed address space: */
8204 || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))
8205
8206 /* the DTLB entry can't be used for a fast ldha: */
8207 || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
8208
8209 /* the DTLB entry does not allow fast transfers: */
8210 || (memory == TME_EMULATOR_OFF_UNDEF)
8211
8212 /* the address is misaligned: */
8213 || ((address % (16 / 8)) != 0)
8214
8215 )) {
8216
8217 /* call the slow load function: */
8218 memory = tme_sparc64_ls(ic,
8219 address,
8220 &TME_SPARC_FORMAT3_RD,
8221 (TME_SPARC_LSINFO_OP_LD
8222 | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
8223 | TME_SPARC_LSINFO_A
8224 | (16 / 8)));
8225
8226 /* if the slow load function did the transfer: */
8227 if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
8228
8229 /* unbusy the TLB entry; */
8230 tme_sparc_tlb_unbusy(dtlb);
8231
8232 /* log the value loaded: */
8233 tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
8234 tme_sparc_log(ic, 1000, TME_OK,
8235 (TME_SPARC_LOG_HANDLE(ic),
8236 _("ldha 0x%02x:0x%016" TME_PRIx64 ": 0x%04" TME_PRIx64),
8237 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
8238 address,
8239 TME_SPARC_FORMAT3_RD));
8240
8241 TME_SPARC_INSN_OK;
8242 }
8243 }
8244
8245 /* get the byte order of this transfer: */
8246 endian_little = asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
8247 if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
8248 if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
8249 endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
8250 }
8251 else {
8252 assert (FALSE);
8253 }
8254 }
8255
8256 /* do the fast transfer: */
8257 memory += address;
8258 value16 = tme_memory_bus_read16((const tme_shared tme_uint16_t *) memory, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint16_t), sizeof(tme_uint64_t));
8259 value16 = (endian_little ? tme_letoh_u16(value16) : tme_betoh_u16(value16));
8260
8261 /* possibly sign-extend the loaded value: */
8262 value32 = value16;
8263 if (TME_SPARC_INSN & TME_BIT(22)) {
8264 value32 = (tme_uint32_t) (tme_int32_t) (tme_int16_t) value32;
8265 }
8266 TME_SPARC_FORMAT3_RD = (tme_uint64_t) (tme_int64_t) (tme_int32_t) value32;
8267
8268 /* unbusy the DTLB entry: */
8269 tme_sparc_tlb_unbusy(dtlb);
8270
8271 /* log the value loaded: */
8272 tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
8273 tme_sparc_log(ic, 1000, TME_OK,
8274 (TME_SPARC_LOG_HANDLE(ic),
8275 _("ldha 0x%02x:0x%016" TME_PRIx64 ": 0x%04" TME_PRIx64),
8276 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
8277 address,
8278 TME_SPARC_FORMAT3_RD));
8279
8280 TME_SPARC_INSN_OK;
8281 }
8282
/* this does a sparc64 stha: store a halfword (16 bits) to an
   alternate address space.  Flow: decode the alternate ASI, form the
   effective address, let the recode verifier replay the transfer if
   needed, then attempt the fast emulator-offset store through a
   hashed DTLB entry, falling back to the slow tme_sparc64_ls() path
   when the entry cannot be used.  (Signature and register accessors
   come from the TME_SPARC_FORMAT3 macro — see sparc-impl.h.) */
TME_SPARC_FORMAT3(tme_sparc64_stha, tme_uint64_t)
{
  tme_uint32_t asi_mask_data;
  tme_uint64_t address;
  tme_bus_context_t context;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint16_t value16;

  /* get the alternate ASI mask (privileged-check and i-bit decoding
     happen inside the helper): */
  asi_mask_data = _tme_sparc64_alternate_asi_mask(ic);

  /* get the effective address (rs1 + rs2, clipped to the
     implemented virtual address width): */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
  address &= ic->tme_sparc_address_mask;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer: */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
			      asi_mask_data, address,
			      (TME_RECODE_SIZE_16
			       | TME_SPARC_RECODE_VERIFY_MEM_STORE));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* log the value stored (stores log before the transfer; loads log
     after it, once the value is known): */
  tme_sparc_log(ic, 1000, TME_OK,
                (TME_SPARC_LOG_HANDLE(ic),
                 _("stha 0x%02x:0x%016" TME_PRIx64 ": 0x%04" TME_PRIx16),
                 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                 address,
                 (tme_uint16_t) TME_SPARC_FORMAT3_RD));

  /* get the context.  NOTE(review): '+' appears to be used as '|' on
     presumably-disjoint flag bits — confirm against the generator: */
  context = ic->tme_sparc_memory_context_primary;
  if (__tme_predict_false(asi_mask_data
                          & (TME_SPARC64_ASI_FLAG_SECONDARY
                             + TME_SPARC64_ASI_MASK_FLAG_INSN_NUCLEUS))) {
    if (asi_mask_data & TME_SPARC64_ASI_FLAG_SECONDARY) {
      context = ic->tme_sparc_memory_context_secondary;
    }
    else if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) {
      context = 0;
    }
  }

  /* assume that no DTLB ASI mask flags will require a slow store: */
  asi_mask_flags_slow = 0;

  /* a stha traps on no-fault addresses: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;

  /* if this stha is using a no-fault ASI: */
  if (__tme_predict_false(asi_mask_data & TME_SPARC64_ASI_FLAG_NO_FAULT)) {

    /* a stha with a no-fault ASI traps: all-bits-one makes any DTLB
       ASI mask flag force the slow path below: */
    asi_mask_flags_slow = 0 - (tme_uint32_t) 1;
  }

  /* get and busy the DTLB entry: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, context, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_write;

  /* if this DTLB matches any context (its context number is greater
     than the maximum valid context), it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = context;
  }

  /* we must call the slow store function if: */
  if (__tme_predict_false(

                          /* the DTLB entry is invalid: */
                          tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

                          /* the DTLB entry does not match the context: */
                          || dtlb_context != context

                          /* the DTLB entry does not cover the needed addresses: */
                          || (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
                          || ((address + ((16 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)

                          /* the DTLB entry does not cover the needed address space: */
                          || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))

                          /* the DTLB entry can't be used for a fast stha: */
                          || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

                          /* the DTLB entry does not allow fast transfers: */
                          || (memory == TME_EMULATOR_OFF_UNDEF)

                          /* the address is misaligned for a halfword: */
                          || ((address % (16 / 8)) != 0)

                          )) {

    /* call the slow store function: */
    memory = tme_sparc64_ls(ic,
                            address,
                            &TME_SPARC_FORMAT3_RD,
                            (TME_SPARC_LSINFO_OP_ST
                             | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
                             | TME_SPARC_LSINFO_A
                             | (16 / 8)));

    /* if the slow store function did the transfer: */
    if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {

      /* unbusy the TLB entry; */
      tme_sparc_tlb_unbusy(dtlb);

      TME_SPARC_INSN_OK;
    }
  }

  /* get the byte order of this transfer; a TLB entry may request an
     endianness inversion, which is only legal when the CPU supports
     it (otherwise it is a bug, hence the assert): */
  endian_little = asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
  if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
    if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
      endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
    }
    else {
      assert (FALSE);
    }
  }

  /* do the fast transfer, byte-swapping the register value into the
     selected bus order first: */
  memory += address;
  value16 = TME_SPARC_FORMAT3_RD;
  value16 = (endian_little ? tme_htole_u16(value16) : tme_htobe_u16(value16));
  tme_memory_bus_write16((tme_shared tme_uint16_t *) memory, value16, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint16_t), sizeof(tme_uint64_t));

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  TME_SPARC_INSN_OK;
}
8432
8433 /* this does a sparc64 lda: */
TME_SPARC_FORMAT3(tme_sparc64_lda,tme_uint64_t)8434 TME_SPARC_FORMAT3(tme_sparc64_lda, tme_uint64_t)
8435 {
8436 tme_uint32_t asi_mask_data;
8437 tme_uint64_t address;
8438 tme_bus_context_t context;
8439 tme_uint32_t asi_mask_flags_slow;
8440 struct tme_sparc_tlb *dtlb;
8441 const tme_shared tme_uint8_t *memory;
8442 tme_bus_context_t dtlb_context;
8443 tme_uint32_t endian_little;
8444 tme_uint32_t value32;
8445 tme_uint64_t value64;
8446
8447 /* get the alternate ASI mask: */
8448 asi_mask_data = _tme_sparc64_alternate_asi_mask(ic);
8449
8450 /* get the address: */
8451 address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
8452 address &= ic->tme_sparc_address_mask;
8453
8454 #ifdef _TME_SPARC_STATS
8455 /* track statistics: */
8456 ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
8457 #endif /* _TME_SPARC_STATS */
8458
8459 /* verify and maybe replay this transfer: */
8460 tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
8461 asi_mask_data, address,
8462 (TME_RECODE_SIZE_32
8463 | TME_SPARC_RECODE_VERIFY_MEM_LOAD));
8464 if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
8465 TME_SPARC_INSN_OK;
8466 }
8467
8468 /* get the context: */
8469 context = ic->tme_sparc_memory_context_primary;
8470 if (__tme_predict_false(asi_mask_data
8471 & (TME_SPARC64_ASI_FLAG_SECONDARY
8472 + TME_SPARC64_ASI_MASK_FLAG_INSN_NUCLEUS))) {
8473 if (asi_mask_data & TME_SPARC64_ASI_FLAG_SECONDARY) {
8474 context = ic->tme_sparc_memory_context_secondary;
8475 }
8476 else if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) {
8477 context = 0;
8478 }
8479 }
8480
8481 /* assume that no DTLB ASI mask flags will require a slow load: */
8482 asi_mask_flags_slow = 0;
8483
8484 /* a lda without a no-fault ASI traps on no-fault addresses: */
8485 asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;
8486
8487 /* if this lda is using a no-fault ASI: */
8488 if (__tme_predict_false(asi_mask_data & TME_SPARC64_ASI_FLAG_NO_FAULT)) {
8489
8490 /* a lda with a no-fault ASI traps on addresses with side-effects: */
8491 asi_mask_flags_slow = TME_SPARC64_ASI_MASK_FLAG_TLB_SIDE_EFFECTS;
8492 }
8493
8494 /* get and busy the DTLB entry: */
8495 dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, context, address))];
8496 tme_sparc_tlb_busy(dtlb);
8497
8498 /* assume that this DTLB applies and allows fast transfers: */
8499 memory = dtlb->tme_sparc_tlb_emulator_off_read;
8500
8501 /* if this DTLB matches any context, it matches this context: */
8502 dtlb_context = dtlb->tme_sparc_tlb_context;
8503 if (dtlb_context > ic->tme_sparc_memory_context_max) {
8504 dtlb_context = context;
8505 }
8506
8507 /* we must call the slow load function if: */
8508 if (__tme_predict_false(
8509
8510 /* the DTLB entry is invalid: */
8511 tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
8512
8513 /* the DTLB entry does not match the context: */
8514 || dtlb_context != context
8515
8516 /* the DTLB entry does not cover the needed addresses: */
8517 || (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
8518 || ((address + ((32 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)
8519
8520 /* the DTLB entry does not cover the needed address space: */
8521 || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))
8522
8523 /* the DTLB entry can't be used for a fast lda: */
8524 || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
8525
8526 /* the DTLB entry does not allow fast transfers: */
8527 || (memory == TME_EMULATOR_OFF_UNDEF)
8528
8529 /* the address is misaligned: */
8530 || ((address % (32 / 8)) != 0)
8531
8532 )) {
8533
8534 /* call the slow load function: */
8535 memory = tme_sparc64_ls(ic,
8536 address,
8537 &TME_SPARC_FORMAT3_RD,
8538 (TME_SPARC_LSINFO_OP_LD
8539 | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
8540 | TME_SPARC_LSINFO_A
8541 | (32 / 8)));
8542
8543 /* if the slow load function did the transfer: */
8544 if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
8545
8546 /* unbusy the TLB entry; */
8547 tme_sparc_tlb_unbusy(dtlb);
8548
8549 /* log the value loaded: */
8550 tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
8551 tme_sparc_log(ic, 1000, TME_OK,
8552 (TME_SPARC_LOG_HANDLE(ic),
8553 _("lda 0x%02x:0x%016" TME_PRIx64 ": 0x%08" TME_PRIx64),
8554 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
8555 address,
8556 TME_SPARC_FORMAT3_RD));
8557
8558 TME_SPARC_INSN_OK;
8559 }
8560 }
8561
8562 /* get the byte order of this transfer: */
8563 endian_little = asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
8564 if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
8565 if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
8566 endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
8567 }
8568 else {
8569 assert (FALSE);
8570 }
8571 }
8572
8573 /* do the fast transfer: */
8574 memory += address;
8575 value32 = tme_memory_bus_read32((const tme_shared tme_uint32_t *) memory, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t), sizeof(tme_uint64_t));
8576 value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
8577
8578 /* possibly sign-extend the loaded value: */
8579 value64 = value32;
8580 if (TME_SPARC_INSN & TME_BIT(22)) {
8581 value64 = (tme_uint64_t) (tme_int64_t) (tme_int32_t) value64;
8582 }
8583 TME_SPARC_FORMAT3_RD = (tme_uint64_t) (tme_int64_t) (tme_int64_t) value64;
8584
8585 /* unbusy the DTLB entry: */
8586 tme_sparc_tlb_unbusy(dtlb);
8587
8588 /* log the value loaded: */
8589 tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
8590 tme_sparc_log(ic, 1000, TME_OK,
8591 (TME_SPARC_LOG_HANDLE(ic),
8592 _("lda 0x%02x:0x%016" TME_PRIx64 ": 0x%08" TME_PRIx64),
8593 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
8594 address,
8595 TME_SPARC_FORMAT3_RD));
8596
8597 TME_SPARC_INSN_OK;
8598 }
8599
/* this does a sparc64 sta: store a word (32 bits) to an alternate
   address space.  Flow mirrors stha: decode the alternate ASI, form
   the effective address, let the recode verifier replay the transfer
   if needed, then attempt the fast emulator-offset store through a
   hashed DTLB entry, falling back to the slow tme_sparc64_ls() path
   when the entry cannot be used: */
TME_SPARC_FORMAT3(tme_sparc64_sta, tme_uint64_t)
{
  tme_uint32_t asi_mask_data;
  tme_uint64_t address;
  tme_bus_context_t context;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint32_t value32;

  /* get the alternate ASI mask: */
  asi_mask_data = _tme_sparc64_alternate_asi_mask(ic);

  /* get the effective address (rs1 + rs2, clipped to the
     implemented virtual address width): */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
  address &= ic->tme_sparc_address_mask;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer: */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
			      asi_mask_data, address,
			      (TME_RECODE_SIZE_32
			       | TME_SPARC_RECODE_VERIFY_MEM_STORE));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* log the value stored (stores log before the transfer): */
  tme_sparc_log(ic, 1000, TME_OK,
                (TME_SPARC_LOG_HANDLE(ic),
                 _("sta 0x%02x:0x%016" TME_PRIx64 ": 0x%08" TME_PRIx32),
                 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                 address,
                 (tme_uint32_t) TME_SPARC_FORMAT3_RD));

  /* get the context: */
  context = ic->tme_sparc_memory_context_primary;
  if (__tme_predict_false(asi_mask_data
                          & (TME_SPARC64_ASI_FLAG_SECONDARY
                             + TME_SPARC64_ASI_MASK_FLAG_INSN_NUCLEUS))) {
    if (asi_mask_data & TME_SPARC64_ASI_FLAG_SECONDARY) {
      context = ic->tme_sparc_memory_context_secondary;
    }
    else if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) {
      context = 0;
    }
  }

  /* assume that no DTLB ASI mask flags will require a slow store: */
  asi_mask_flags_slow = 0;

  /* a sta traps on no-fault addresses: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;

  /* if this sta is using a no-fault ASI: */
  if (__tme_predict_false(asi_mask_data & TME_SPARC64_ASI_FLAG_NO_FAULT)) {

    /* a sta with a no-fault ASI traps: all-bits-one makes any DTLB
       ASI mask flag force the slow path below: */
    asi_mask_flags_slow = 0 - (tme_uint32_t) 1;
  }

  /* get and busy the DTLB entry: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, context, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_write;

  /* if this DTLB matches any context (its context number is greater
     than the maximum valid context), it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = context;
  }

  /* we must call the slow store function if: */
  if (__tme_predict_false(

                          /* the DTLB entry is invalid: */
                          tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

                          /* the DTLB entry does not match the context: */
                          || dtlb_context != context

                          /* the DTLB entry does not cover the needed addresses: */
                          || (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
                          || ((address + ((32 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)

                          /* the DTLB entry does not cover the needed address space: */
                          || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))

                          /* the DTLB entry can't be used for a fast sta: */
                          || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

                          /* the DTLB entry does not allow fast transfers: */
                          || (memory == TME_EMULATOR_OFF_UNDEF)

                          /* the address is misaligned for a word: */
                          || ((address % (32 / 8)) != 0)

                          )) {

    /* call the slow store function: */
    memory = tme_sparc64_ls(ic,
                            address,
                            &TME_SPARC_FORMAT3_RD,
                            (TME_SPARC_LSINFO_OP_ST
                             | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
                             | TME_SPARC_LSINFO_A
                             | (32 / 8)));

    /* if the slow store function did the transfer: */
    if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {

      /* unbusy the TLB entry; */
      tme_sparc_tlb_unbusy(dtlb);

      TME_SPARC_INSN_OK;
    }
  }

  /* get the byte order of this transfer; a TLB entry may request an
     endianness inversion, which is only legal when the CPU supports
     it (otherwise it is a bug, hence the assert): */
  endian_little = asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
  if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
    if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
      endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
    }
    else {
      assert (FALSE);
    }
  }

  /* do the fast transfer, byte-swapping the register value into the
     selected bus order first: */
  memory += address;
  value32 = TME_SPARC_FORMAT3_RD;
  value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
  tme_memory_bus_write32((tme_shared tme_uint32_t *) memory, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t), sizeof(tme_uint64_t));

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  TME_SPARC_INSN_OK;
}
8749
/* this does a sparc64 ldda: load a doubleword (64 bits) from an
   alternate address space into an even/odd register pair, as two
   32-bit words (legacy SPARC V8 ldd semantics).  Flow: decode the
   alternate ASI, form the effective address, let the recode verifier
   replay both word transfers if needed, then attempt the fast
   emulator-offset path through a hashed DTLB entry, falling back to
   the slow tme_sparc64_ls() path when the entry cannot be used: */
TME_SPARC_FORMAT3(tme_sparc64_ldda, tme_uint64_t)
{
  tme_uint32_t asi_mask_data;
  tme_uint64_t address;
  tme_bus_context_t context;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  const tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint32_t value32;

  /* get the alternate ASI mask: */
  asi_mask_data = _tme_sparc64_alternate_asi_mask(ic);

  /* get the effective address (rs1 + rs2, clipped to the
     implemented virtual address width): */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
  address &= ic->tme_sparc_address_mask;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer: one 32-bit verify per
     register of the pair, the odd register at address + 4: */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
			      asi_mask_data, address,
			      (TME_RECODE_SIZE_32
			       | TME_SPARC_RECODE_VERIFY_MEM_LOAD));
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64),
			      asi_mask_data, address + sizeof(tme_uint32_t),
			      (TME_RECODE_SIZE_32
			       | TME_SPARC_RECODE_VERIFY_MEM_LOAD));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* get the context: */
  context = ic->tme_sparc_memory_context_primary;
  if (__tme_predict_false(asi_mask_data
                          & (TME_SPARC64_ASI_FLAG_SECONDARY
                             + TME_SPARC64_ASI_MASK_FLAG_INSN_NUCLEUS))) {
    if (asi_mask_data & TME_SPARC64_ASI_FLAG_SECONDARY) {
      context = ic->tme_sparc_memory_context_secondary;
    }
    else if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) {
      context = 0;
    }
  }

  /* assume that no DTLB ASI mask flags will require a slow load: */
  asi_mask_flags_slow = 0;

  /* a ldda without a no-fault ASI traps on no-fault addresses: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;

  /* if this ldda is using a no-fault ASI: */
  if (__tme_predict_false(asi_mask_data & TME_SPARC64_ASI_FLAG_NO_FAULT)) {

    /* a ldda with a no-fault ASI traps on addresses with side-effects: */
    asi_mask_flags_slow = TME_SPARC64_ASI_MASK_FLAG_TLB_SIDE_EFFECTS;
  }

  /* get and busy the DTLB entry: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, context, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_read;

  /* if this DTLB matches any context (its context number is greater
     than the maximum valid context), it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = context;
  }

  /* we must call the slow load function if: */
  if (__tme_predict_false(

                          /* the DTLB entry is invalid: */
                          tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

                          /* the DTLB entry does not match the context: */
                          || dtlb_context != context

                          /* the DTLB entry does not cover the needed addresses: */
                          || (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
                          || ((address + ((64 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)

                          /* the DTLB entry does not cover the needed address space: */
                          || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))

                          /* the DTLB entry can't be used for a fast ldda: */
                          || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

                          /* the DTLB entry does not allow fast transfers: */
                          || (memory == TME_EMULATOR_OFF_UNDEF)

                          /* the address is misaligned for a doubleword: */
                          || ((address % (64 / 8)) != 0)

                          /* the destination register number is odd (bit 25 is the
                             low bit of the rd field; an odd rd must take the slow
                             path — presumably to raise the architectural trap,
                             confirm in tme_sparc64_ls): */
                          || ((TME_SPARC_INSN & TME_BIT(25)) != 0)

                          )) {

    /* call the slow load function: */
    memory = tme_sparc64_ls(ic,
                            address,
                            &TME_SPARC_FORMAT3_RD,
                            (TME_SPARC_LSINFO_OP_LD
                             | TME_SPARC_LSINFO_LDD_STD
                             | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
                             | TME_SPARC_LSINFO_A
                             | (64 / 8)));

    /* if the slow load function did the transfer: */
    if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {

      /* unbusy the TLB entry; */
      tme_sparc_tlb_unbusy(dtlb);

      /* log the values loaded: */
      tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
      tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64));
      tme_sparc_log(ic, 1000, TME_OK,
                    (TME_SPARC_LOG_HANDLE(ic),
                     _("ldda 0x%02x:0x%016" TME_PRIx64 ": 0x%016" TME_PRIx64 " 0x%016" TME_PRIx64 ""),
                     TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                     address,
                     TME_SPARC_FORMAT3_RD,
                     TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64)));

      TME_SPARC_INSN_OK;
    }
  }

  /* get the byte order of this transfer; a TLB entry may request an
     endianness inversion, which is only legal when the CPU supports
     it (otherwise it is a bug, hence the assert): */
  endian_little = asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
  if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
    if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
      endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
    }
    else {
      assert (FALSE);
    }
  }

  /* do the fast transfer: two 32-bit reads; the first word is
     8-byte-aligned (alignment argument sizeof(tme_uint32_t) * 2),
     the second only 4-byte-aligned.  Each word is zero-extended
     into its 64-bit destination register: */
  memory += address;
  value32 = tme_memory_bus_read32(((const tme_shared tme_uint32_t *) memory) + 0, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 2, sizeof(tme_uint64_t));
  value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
  TME_SPARC_FORMAT3_RD = value32;
  value32 = tme_memory_bus_read32(((const tme_shared tme_uint32_t *) memory) + 1, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 1, sizeof(tme_uint64_t));
  value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
  TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64) = value32;

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  /* log the values loaded: */
  tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
  tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64));
  tme_sparc_log(ic, 1000, TME_OK,
                (TME_SPARC_LOG_HANDLE(ic),
                 _("ldda 0x%02x:0x%016" TME_PRIx64 ": 0x%016" TME_PRIx64 " 0x%016" TME_PRIx64 ""),
                 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                 address,
                 TME_SPARC_FORMAT3_RD,
                 TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64)));

  TME_SPARC_INSN_OK;
}
8924
/* this does a sparc64 stda: store a doubleword (64 bits) to an
   alternate address space from an even/odd register pair, as two
   32-bit words (legacy SPARC V8 std semantics).  Flow mirrors ldda:
   decode the alternate ASI, form the effective address, let the
   recode verifier replay both word transfers if needed, then attempt
   the fast emulator-offset path through a hashed DTLB entry, falling
   back to the slow tme_sparc64_ls() path when the entry cannot be
   used: */
TME_SPARC_FORMAT3(tme_sparc64_stda, tme_uint64_t)
{
  tme_uint32_t asi_mask_data;
  tme_uint64_t address;
  tme_bus_context_t context;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint32_t value32;

  /* get the alternate ASI mask: */
  asi_mask_data = _tme_sparc64_alternate_asi_mask(ic);

  /* get the effective address (rs1 + rs2, clipped to the
     implemented virtual address width): */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
  address &= ic->tme_sparc_address_mask;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer: one 32-bit verify per
     register of the pair, the odd register at address + 4: */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
			      asi_mask_data, address,
			      (TME_RECODE_SIZE_32
			       | TME_SPARC_RECODE_VERIFY_MEM_STORE));
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64),
			      asi_mask_data, address + sizeof(tme_uint32_t),
			      (TME_RECODE_SIZE_32
			       | TME_SPARC_RECODE_VERIFY_MEM_STORE));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* log the values stored (stores log before the transfer): */
  tme_sparc_log(ic, 1000, TME_OK,
                (TME_SPARC_LOG_HANDLE(ic),
                 _("stda 0x%02x:0x%016" TME_PRIx64 ": 0x%08" TME_PRIx32 " 0x%08" TME_PRIx32),
                 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                 address,
                 (tme_uint32_t) TME_SPARC_FORMAT3_RD,
                 (tme_uint32_t) TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64)));

  /* get the context: */
  context = ic->tme_sparc_memory_context_primary;
  if (__tme_predict_false(asi_mask_data
                          & (TME_SPARC64_ASI_FLAG_SECONDARY
                             + TME_SPARC64_ASI_MASK_FLAG_INSN_NUCLEUS))) {
    if (asi_mask_data & TME_SPARC64_ASI_FLAG_SECONDARY) {
      context = ic->tme_sparc_memory_context_secondary;
    }
    else if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) {
      context = 0;
    }
  }

  /* assume that no DTLB ASI mask flags will require a slow store: */
  asi_mask_flags_slow = 0;

  /* a stda traps on no-fault addresses: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;

  /* if this stda is using a no-fault ASI: */
  if (__tme_predict_false(asi_mask_data & TME_SPARC64_ASI_FLAG_NO_FAULT)) {

    /* a stda with a no-fault ASI traps: all-bits-one makes any DTLB
       ASI mask flag force the slow path below: */
    asi_mask_flags_slow = 0 - (tme_uint32_t) 1;
  }

  /* get and busy the DTLB entry: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, context, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_write;

  /* if this DTLB matches any context (its context number is greater
     than the maximum valid context), it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = context;
  }

  /* we must call the slow store function if: */
  if (__tme_predict_false(

                          /* the DTLB entry is invalid: */
                          tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

                          /* the DTLB entry does not match the context: */
                          || dtlb_context != context

                          /* the DTLB entry does not cover the needed addresses: */
                          || (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
                          || ((address + ((64 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)

                          /* the DTLB entry does not cover the needed address space: */
                          || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))

                          /* the DTLB entry can't be used for a fast stda: */
                          || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

                          /* the DTLB entry does not allow fast transfers: */
                          || (memory == TME_EMULATOR_OFF_UNDEF)

                          /* the address is misaligned for a doubleword: */
                          || ((address % (64 / 8)) != 0)

                          /* the destination register number is odd (bit 25 is the
                             low bit of the rd field; an odd rd must take the slow
                             path — presumably to raise the architectural trap,
                             confirm in tme_sparc64_ls): */
                          || ((TME_SPARC_INSN & TME_BIT(25)) != 0)

                          )) {

    /* call the slow store function: */
    memory = tme_sparc64_ls(ic,
                            address,
                            &TME_SPARC_FORMAT3_RD,
                            (TME_SPARC_LSINFO_OP_ST
                             | TME_SPARC_LSINFO_LDD_STD
                             | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
                             | TME_SPARC_LSINFO_A
                             | (64 / 8)));

    /* if the slow store function did the transfer: */
    if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {

      /* unbusy the TLB entry; */
      tme_sparc_tlb_unbusy(dtlb);

      TME_SPARC_INSN_OK;
    }
  }

  /* get the byte order of this transfer; a TLB entry may request an
     endianness inversion, which is only legal when the CPU supports
     it (otherwise it is a bug, hence the assert): */
  endian_little = asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
  if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
    if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
      endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
    }
    else {
      assert (FALSE);
    }
  }

  /* do the fast transfer: two 32-bit writes; the first word is
     8-byte-aligned (alignment argument sizeof(tme_uint32_t) * 2),
     the second only 4-byte-aligned.  Each register's low 32 bits are
     byte-swapped into the selected bus order before the write: */
  memory += address;
  value32 = TME_SPARC_FORMAT3_RD;
  value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
  tme_memory_bus_write32(((tme_shared tme_uint32_t *) memory) + 0, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 2, sizeof(tme_uint64_t));
  value32 = TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64);
  value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
  tme_memory_bus_write32(((tme_shared tme_uint32_t *) memory) + 1, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 1, sizeof(tme_uint64_t));

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  TME_SPARC_INSN_OK;
}
9086
/* this does a sparc64 jmpl: register-indirect delayed control transfer
   that also saves the current PC into r[rd]: */
TME_SPARC_FORMAT3(tme_sparc64_jmpl, tme_uint64_t)
{
  tme_uint64_t pc_next_next;
  tme_uint32_t ls_faults;

  /* "The JMPL instruction causes a register-indirect delayed control
     transfer to the address given by r[rs1] + r[rs2] if the i field is
     zero, or r[rs1] + sign_ext(simm13) if the i field is one. The JMPL
     instruction copies the PC, which contains the address of the JMPL
     instruction, into register r[rd]. If either of the low-order two
     bits of the jump address is nonzero, a mem_address_not_aligned
     trap occurs." */

  /* get the target address.  TME_SPARC_FORMAT3_RS2 presumably already
     expands to either r[rs2] or sign_ext(simm13) per the i field -
     decoded elsewhere: */
  pc_next_next = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
  pc_next_next &= ic->tme_sparc_address_mask;

  /* set the delayed control transfer (takes effect after the delay slot): */
  ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_PC_NEXT_NEXT) = pc_next_next;

  /* check the target address.  both faults are accumulated before
     trapping, so a target that is both in the VA hole and misaligned
     reports the combined fault code: */
  ls_faults = TME_SPARC_LS_FAULT_NONE;

  /* this expression flags a target inside the implementation's
     virtual-address hole (the unmapped middle of the 64-bit address
     space): adding va_hole_start wraps in-range addresses below
     (va_hole_start * 2): */
  if (__tme_predict_false((pc_next_next
                           + ic->tme_sparc64_ireg_va_hole_start)
                          > ((ic->tme_sparc64_ireg_va_hole_start * 2) - 1))) {
    ls_faults += TME_SPARC64_LS_FAULT_VA_RANGE_NNPC;
  }

  /* instruction addresses must be 32-bit aligned: */
  if (__tme_predict_false((pc_next_next % sizeof(tme_uint32_t)) != 0)) {
    ls_faults += TME_SPARC_LS_FAULT_ADDRESS_NOT_ALIGNED;
  }
  if (__tme_predict_false(ls_faults != TME_SPARC_LS_FAULT_NONE)) {
    tme_sparc_nnpc_trap(ic, ls_faults);
  }

  /* write the PC of the jmpl into r[rd]: */
  TME_SPARC_FORMAT3_RD = ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_PC);

  /* log an indirect call instruction, which has 15 (%o7) for rd: */
  if (TME_FIELD_MASK_EXTRACTU(TME_SPARC_INSN, TME_SPARC_FORMAT3_MASK_RD) == 15) {
    tme_sparc_log(ic, 250, TME_OK,
                  (TME_SPARC_LOG_HANDLE(ic),
                   _("call 0x%016" TME_PRIx64),
                   pc_next_next));
  }

  /* log a ret or retl instruction, which has 0 (%g0) for rd,
     either 31 (%i7) or 15 (%o7) for rs1, and 8 for simm13.  OR-ing
     (16 << 14) into the insn maps an rs1 of 15 onto 31, so one
     comparison matches both encodings: */
  else if ((TME_SPARC_INSN | (16 << 14))
           == ((tme_uint32_t) (0x2 << 30) | (0 << 25) | (0x38 << 19) | (31 << 14) | (0x1 << 13) | 8)) {
    tme_sparc_log(ic, 250, TME_OK,
                  (TME_SPARC_LOG_HANDLE(ic),
                   _("retl 0x%016" TME_PRIx64),
                   pc_next_next));
  }

  TME_SPARC_INSN_OK;
}
9145
/* this does a sparc64 ldf: load a 32-bit value into a single-precision
   floating-point register: */
TME_SPARC_FORMAT3(tme_sparc64_ldf, tme_uint64_t)
{
  tme_uint32_t misaligned;
  struct tme_float float_buffer;
  struct tme_float *fpreg;

  /* get the least significant 32 bits of the address (only used for
     the alignment check in _tme_sparc64_fpu_mem_fpreg): */
  misaligned = TME_SPARC_FORMAT3_RS1;
  misaligned += (tme_uint32_t) TME_SPARC_FORMAT3_RS2;

  /* decode rd: */
  float_buffer.tme_float_format = TME_FLOAT_FORMAT_IEEE754_SINGLE;
  fpreg
    = _tme_sparc64_fpu_mem_fpreg(ic,
                                 misaligned,
                                 &float_buffer);

  /* do the load, staging the value in the FPX memory staging register: */
  tme_sparc64_ld(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX));

  /* set the floating-point register value.  a single-precision access
     is never redirected to the misalignment buffer, hence the assert
     (NOTE(review): presumably _tme_sparc64_fpu_mem_fpreg only returns
     the buffer for misaligned double accesses - confirm): */
  assert (fpreg != &float_buffer);
  fpreg->tme_float_format = TME_FLOAT_FORMAT_IEEE754_SINGLE;
  fpreg->tme_float_value_ieee754_single
    = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX << 1);

  TME_SPARC_INSN_OK;
}
9175
/* this does a sparc64 lddf: load a 64-bit value into a double-precision
   floating-point register, with support for 32-bit-aligned operands: */
TME_SPARC_FORMAT3(tme_sparc64_lddf, tme_uint64_t)
{
  tme_uint64_t address;
  tme_uint32_t misaligned;
  struct tme_float float_buffer;
  struct tme_float *fpreg;
  tme_uint64_t offset;

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;

  /* get the least significant 32 bits of the address (used for the
     alignment checks): */
  misaligned = address;

  /* decode rd: */
  float_buffer.tme_float_format = TME_FLOAT_FORMAT_IEEE754_DOUBLE;
  fpreg
    = _tme_sparc64_fpu_mem_fpreg(ic,
                                 misaligned,
                                 &float_buffer);

  /* if bit two of the address is set, and this SPARC supports
     32-bit-aligned lddf instructions (fpreg != &float_buffer signals
     that the misaligned access was accepted): */
  if ((misaligned & sizeof(tme_uint32_t))
      && fpreg != &float_buffer) {

    /* do two 32-bit loads, since the address is not 64-bit aligned: */
    offset = sizeof(tme_uint32_t) * 0;
    tme_sparc64_ld(ic, &address, &offset, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX + 0));
    offset = sizeof(tme_uint32_t) * 1;
    tme_sparc64_ld(ic, &address, &offset, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX + 1));

    /* set the double floating-point register value from the two
       staged 32-bit halves (big-endian order: first word is the
       most significant): */
    fpreg->tme_float_format = TME_FLOAT_FORMAT_IEEE754_DOUBLE;
    fpreg->tme_float_value_ieee754_double.tme_value64_uint32_hi
      = ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX + 0);
    fpreg->tme_float_value_ieee754_double.tme_value64_uint32_lo
      = ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX + 1);
  }

  /* otherwise, bit two of the address is not set, or this SPARC
     doesn't support 32-bit-aligned lddf instructions: */
  else {

    /* do an ldx-style load: */
    tme_sparc64_ldx(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX));

    /* set the double floating-point register value: */
    assert (fpreg != &float_buffer);
    fpreg->tme_float_format = TME_FLOAT_FORMAT_IEEE754_DOUBLE;
    fpreg->tme_float_value_ieee754_double.tme_value64_uint
      = ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX);
  }

  TME_SPARC_INSN_OK;
}
9233
/* this does a sparc64 ldfsr: load the floating-point state register
   (rd == 0 selects the 32-bit ldfsr, rd == 1 the 64-bit ldxfsr): */
TME_SPARC_FORMAT3(tme_sparc64_ldfsr, tme_uint64_t)
{
  tme_uint32_t fsr;
  tme_uint32_t reg_rd;

  /* see if this is an ldfsr or an ldxfsr.  any other rd value is an
     illegal instruction: */
  reg_rd = TME_FIELD_MASK_EXTRACTU(TME_SPARC_INSN, TME_SPARC_FORMAT3_MASK_RD);
  if (__tme_predict_false(reg_rd > 1)) {
    TME_SPARC_INSN_ILL(ic);
  }

  /* common FPU memory-access checks: */
  _tme_sparc64_fpu_mem(ic);

  /* if this is an ldxfsr: */
  if (reg_rd == 1) {

    /* do the load: */
    tme_sparc64_ldx(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX));

    /* update the extended FSR, keeping only the fcc3..fcc1 fields from
       the upper 32 bits of the loaded value: */
    ic->tme_sparc_fpu_xfsr
      = (ic->tme_sparc_ireg_uint32((TME_SPARC_IREG_FPX << 1) + 1)
         & 0x3f /* fcc3 .. fcc1 */);
  }

  /* otherwise, this is an ldfsr. do the load: */
  else
    tme_sparc64_ld(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX));

  /* update the FSR, preserving the fields that a load must not touch: */
  fsr = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX << 1);
  /* "An LDFSR instruction does not affect ftt." */
  /* "The LDFSR instruction does not affect qne." */
  fsr &= ~(TME_SPARC_FSR_VER | TME_SPARC_FSR_FTT | TME_SPARC_FSR_QNE);
  ic->tme_sparc_fpu_fsr = (ic->tme_sparc_fpu_fsr & (TME_SPARC_FSR_VER | TME_SPARC_FSR_FTT | TME_SPARC_FSR_QNE)) | fsr;

  TME_SPARC_INSN_OK;
}
9273
/* this does a sparc64 stf: store a single-precision floating-point
   register as a 32-bit value: */
TME_SPARC_FORMAT3(tme_sparc64_stf, tme_uint64_t)
{
  tme_uint32_t misaligned;
  struct tme_float float_buffer;
  const struct tme_float *fpreg;
  const tme_uint32_t *value_single;

  /* get the least significant 32 bits of the address (only used for
     the alignment check in _tme_sparc64_fpu_mem_fpreg): */
  misaligned = TME_SPARC_FORMAT3_RS1;
  misaligned += (tme_uint32_t) TME_SPARC_FORMAT3_RS2;

  /* decode rd: */
  float_buffer.tme_float_format = TME_FLOAT_FORMAT_IEEE754_SINGLE;
  fpreg
    = _tme_sparc64_fpu_mem_fpreg(ic,
                                 misaligned,
                                 &float_buffer);

  /* get this single floating-point register in IEEE754 single-precision format: */
  value_single = tme_ieee754_single_value_get(fpreg, &float_buffer.tme_float_value_ieee754_single);

  /* stage the value to store in the FPX memory staging register: */
  ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX << 1) = *value_single;

  /* do the store: */
  tme_sparc64_st(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX));

  /* a single-precision access is never redirected to the misalignment
     buffer: */
  assert (fpreg != &float_buffer);
  TME_SPARC_INSN_OK;
}
9305
9306 /* this does a sparc64 stdf: */
TME_SPARC_FORMAT3(tme_sparc64_stdf,tme_uint64_t)9307 TME_SPARC_FORMAT3(tme_sparc64_stdf, tme_uint64_t)
9308 {
9309 tme_uint64_t address;
9310 tme_uint32_t misaligned;
9311 struct tme_float float_buffer;
9312 struct tme_float *fpreg;
9313 const union tme_value64 *value_double;
9314 tme_uint64_t offset;
9315
9316 /* get the address: */
9317 address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
9318
9319 /* get the least significant 32 bits of the address: */
9320 misaligned = address;
9321
9322 /* decode rd: */
9323 float_buffer.tme_float_format = TME_FLOAT_FORMAT_IEEE754_DOUBLE;
9324 fpreg
9325 = _tme_sparc64_fpu_mem_fpreg(ic,
9326 misaligned,
9327 &float_buffer);
9328
9329 /* get this double floating-point register in IEEE754 double-precision format: */
9330 value_double = tme_ieee754_double_value_get(fpreg, &float_buffer.tme_float_value_ieee754_double);
9331
9332 /* if bit two of the address is set, and this SPARC supports
9333 32-bit-aligned stdf instructions: */
9334 if ((misaligned & sizeof(tme_uint32_t))
9335 && fpreg != &float_buffer) {
9336
9337 /* set the floating-point register value: */
9338 ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX + 0)
9339 = value_double->tme_value64_uint32_hi;
9340 ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX + 1)
9341 = value_double->tme_value64_uint32_lo;
9342
9343 /* do two 32-bit stores: */
9344 offset = sizeof(tme_uint32_t) * 0;
9345 tme_sparc64_st(ic, &address, &offset, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX + 0));
9346 offset = sizeof(tme_uint32_t) * 1;
9347 tme_sparc64_st(ic, &address, &offset, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX + 1));
9348 }
9349
9350 /* otherwise, bit two of the address is not set, or this SPARC
9351 doesn't support 32-bit-aligned stdf instructions: */
9352 else {
9353
9354 /* set the floating-point register value: */
9355 ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX)
9356 = value_double->tme_value64_uint;
9357
9358 /* do an stx-style store: */
9359 tme_sparc64_stx(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX));
9360 }
9361
9362 assert (fpreg != &float_buffer);
9363 TME_SPARC_INSN_OK;
9364 }
9365
/* this does a sparc64 stfsr: store the floating-point state register
   (rd == 0 selects the 32-bit stfsr, rd == 1 the 64-bit stxfsr): */
TME_SPARC_FORMAT3(tme_sparc64_stfsr, tme_uint64_t)
{
  tme_uint32_t reg_rd;

  /* see if this is an stfsr or an stxfsr.  any other rd value is an
     illegal instruction: */
  reg_rd = TME_FIELD_MASK_EXTRACTU(TME_SPARC_INSN, TME_SPARC_FORMAT3_MASK_RD);
  if (__tme_predict_false(reg_rd > 1)) {
    TME_SPARC_INSN_ILL(ic);
  }

  /* common FPU memory-access checks: */
  _tme_sparc64_fpu_mem(ic);

  /* set the FSR value to store in the low half of the FPX staging
     register: */
  ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX << 1) = ic->tme_sparc_fpu_fsr;

  /* if this is an stxfsr: */
  if (reg_rd == 1) {

    /* set in the extended FSR to store and do the store: */
    ic->tme_sparc_ireg_uint32((TME_SPARC_IREG_FPX << 1) + 1) = ic->tme_sparc_fpu_xfsr;
    tme_sparc64_stx(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX));
  }

  /* otherwise, this is a stfsr. do the store: */
  else
    tme_sparc64_st(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX));

  TME_SPARC_INSN_OK;
}
9396
/* this does a sparc64 fpop1: check FPU availability, then dispatch
   the FPop1-class instruction to the common FPU implementation: */
TME_SPARC_FORMAT3(tme_sparc64_fpop1, tme_uint64_t)
{
  TME_SPARC_INSN_FPU;
  tme_sparc_fpu_fpop1(ic);
  TME_SPARC_INSN_OK;
}
9404
/* this does a sparc64 fpop2: check FPU availability, then dispatch
   the FPop2-class instruction to the common FPU implementation: */
TME_SPARC_FORMAT3(tme_sparc64_fpop2, tme_uint64_t)
{
  TME_SPARC_INSN_FPU;
  tme_sparc_fpu_fpop2(ic);
  TME_SPARC_INSN_OK;
}
9412
/* this does a sparc64 "mulscc SRC1, SRC2, DST": one step of a
   shift-and-add multiply, updating Y and both the icc and xcc halves
   of the condition-code register: */
TME_SPARC_FORMAT3(tme_sparc64_mulscc, tme_uint64_t)
{
  tme_uint32_t src1;
  tme_uint32_t src2;
  tme_uint32_t dst;
  tme_uint32_t y;
  tme_uint32_t cc;

  /* get the operands (truncated to their low 32 bits): */
  src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
  src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;

  /* perform the operation: */

  /* "(1) The multiplier is established as r[rs2] if the i field is zero, or
     sign_ext(simm13) if the i field is one."

     "(3) If the least significant bit of the Y register = 1, the shifted
     value from step (2) is added to the multiplier.  If the LSB of the
     Y register = 0, then 0 is added to the shifted value from step (2)." */
  y = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y << 1);
  if ((y & 1) == 0) {
    src2 = 0;
  }

  /* "(6) The Y register is shifted right by one bit, with the LSB of the
     unshifted r[rs1] replacing the MSB of Y." */
  y >>= 1;
  if (src1 & 1) {
    y += 0x80000000;
  }
  ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y << 1) = y;

  /* "(2) A 32-bit value is computed by shifting r[rs1] right by one
     bit with (N xor V) from the PSR replacing the high-order bit.
     (This is the proper sign for the previous partial product.)"

     multiplying ccr by (ICC_N / ICC_V) shifts the icc V bit into the
     icc N bit position, so the XOR computes (N xor V) in place: */
  src1 >>= 1;
  if (((ic->tme_sparc64_ireg_ccr ^ (ic->tme_sparc64_ireg_ccr * (TME_SPARC64_CCR_ICC_N / TME_SPARC64_CCR_ICC_V))) & TME_SPARC64_CCR_ICC_N) != 0) {
    src1 += 0x80000000;
  }

  /* "(4) The sum from step (3) is written into r[rd]." */
  dst = src1 + src2;

  /* "(5) The integer condition codes, icc, are updated according to the
     addition performed in step (3)."

     the flag computations below are branch-free: each boolean
     condition is multiplied by the corresponding flag bit: */

  /* store the destination (zero-extended): */
  TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;

  /* set Z if the destination is zero: */
  cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);

  /* set N if the destination is negative: */
  cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);

  /* if the operands are the same sign, and the destination has
     a different sign, set V: */
  cc += ((((tme_int32_t) ((src2 ^ dst) & (src1 ^ (src2 ^ (((tme_uint32_t) 0) - 1))))) < 0) * TME_SPARC64_CCR_ICC_V);

  /* if src1 and src2 both have the high bit set, or if dst does
     not have the high bit set and either src1 or src2 does, set C: */
  cc += (((tme_int32_t) (((tme_uint32_t) (src1 & src2)) | ((((tme_uint32_t) dst) ^ (((tme_uint32_t) 0) - 1)) & ((tme_uint32_t) (src1 | src2))))) < 0) * TME_SPARC64_CCR_ICC_C;

  /* the xcc flags are computed over the (zero-extended) 64-bit view of
     the same values.  xcc N is never set, since dst is zero-extended: */

  /* set Z if the destination is zero: */
  cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);

  /* if the operands are the same sign, and the destination has
     a different sign, set V: */
  cc += ((((tme_int64_t) ((src2 ^ dst) & (src1 ^ (src2 ^ (((tme_uint64_t) 0) - 1))))) < 0) * TME_SPARC64_CCR_XCC_V);

  /* if src1 and src2 both have the high bit set, or if dst does
     not have the high bit set and either src1 or src2 does, set C: */
  cc += (((tme_int64_t) (((tme_uint64_t) (src1 & src2)) | ((((tme_uint64_t) dst) ^ (((tme_uint64_t) 0) - 1)) & ((tme_uint64_t) (src1 | src2))))) < 0) * TME_SPARC64_CCR_XCC_C;

  /* set the condition codes: */
  ic->tme_sparc64_ireg_ccr = cc;

  TME_SPARC_INSN_OK;
}
9494
/* this does a sparc64 ldx: load a 64-bit value, using a fast DTLB
   path when possible and falling back to the slow load function: */
TME_SPARC_FORMAT3(tme_sparc64_ldx, tme_uint64_t)
{
  tme_uint64_t address;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  const tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint64_t value64;

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
  address &= ic->tme_sparc_address_mask;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer (recode verification support;
     if the transfer was replayed, we're done): */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
                              ic->tme_sparc_asi_mask_data, address,
                              (TME_RECODE_SIZE_64
                               | TME_SPARC_RECODE_VERIFY_MEM_LOAD));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* assume that no DTLB ASI mask flags will require a slow load: */
  asi_mask_flags_slow = 0;

  /* a ldx without a no-fault ASI traps on no-fault addresses: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;

  /* get and busy the DTLB entry.  it stays busied for the whole
     transfer and is unbusied on every exit path below: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_read;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = ic->tme_sparc_memory_context_default;
  }

  /* we must call the slow load function if: */
  if (__tme_predict_false(

                          /* the DTLB entry is invalid: */
                          tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

                          /* the DTLB entry does not match the context: */
                          || dtlb_context != ic->tme_sparc_memory_context_default

                          /* the DTLB entry does not cover the needed addresses: */
                          || (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
                          || ((address + ((64 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)

                          /* the DTLB entry does not cover the needed address space: */
                          || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))

                          /* the DTLB entry can't be used for a fast ldx: */
                          || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

                          /* the DTLB entry does not allow fast transfers: */
                          || (memory == TME_EMULATOR_OFF_UNDEF)

                          /* the address is misaligned: */
                          || ((address % (64 / 8)) != 0)

                          )) {

    /* call the slow load function.  NOTE(review): unlike the ldxa
       path, the result is not checked against TME_EMULATOR_OFF_UNDEF,
       so the slow path presumably always returns a usable pointer for
       plain loads - confirm against tme_sparc64_ls: */
    memory = tme_sparc64_ls(ic,
                            address,
                            &TME_SPARC_FORMAT3_RD,
                            (TME_SPARC_LSINFO_OP_LD
                             | (64 / 8)));
  }

  /* get the byte order of this transfer, inverting it when the TLB
     says the bus is opposite-endian (only legal when the memory
     implementation supports endian inversion): */
  endian_little = ic->tme_sparc_asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
  if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
    if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
      endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
    }
    else {
      assert (FALSE);
    }
  }

  /* do the fast transfer (the repeated (tme_int64_t) cast is a
     harmless artifact of the code generator): */
  memory += address;
  value64 = tme_memory_bus_read64((const tme_shared tme_uint64_t *) memory, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint64_t), sizeof(tme_uint64_t));
  value64 = (endian_little ? tme_letoh_u64(value64) : tme_betoh_u64(value64));
  TME_SPARC_FORMAT3_RD = (tme_uint64_t) (tme_int64_t) (tme_int64_t) value64;

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  /* log the value loaded: */
  tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
  tme_sparc_log(ic, 1000, TME_OK,
                (TME_SPARC_LOG_HANDLE(ic),
                 _("ldx 0x%02x:0x%016" TME_PRIx64 ": 0x%016" TME_PRIx64),
                 TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                 address,
                 TME_SPARC_FORMAT3_RD));

  TME_SPARC_INSN_OK;
}
9609
/* this does a sparc64 stx: store a 64-bit value, using a fast DTLB
   path when possible and falling back to the slow store function: */
TME_SPARC_FORMAT3(tme_sparc64_stx, tme_uint64_t)
{
  tme_uint64_t address;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint64_t value64;

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
  address &= ic->tme_sparc_address_mask;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer (recode verification support;
     if the transfer was replayed, we're done): */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
                              ic->tme_sparc_asi_mask_data, address,
                              (TME_RECODE_SIZE_64
                               | TME_SPARC_RECODE_VERIFY_MEM_STORE));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* log the value stored (before the store, so a faulting store is
     still logged): */
  tme_sparc_log(ic, 1000, TME_OK,
                (TME_SPARC_LOG_HANDLE(ic),
                 _("stx 0x%02x:0x%016" TME_PRIx64 ": 0x%016" TME_PRIx64),
                 TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                 address,
                 (tme_uint64_t) TME_SPARC_FORMAT3_RD));

  /* assume that no DTLB ASI mask flags will require a slow store: */
  asi_mask_flags_slow = 0;

  /* a stx traps on no-fault addresses: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;

  /* get and busy the DTLB entry.  it stays busied for the whole
     transfer and is unbusied on every exit path below: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_write;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = ic->tme_sparc_memory_context_default;
  }

  /* we must call the slow store function if: */
  if (__tme_predict_false(

                          /* the DTLB entry is invalid: */
                          tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

                          /* the DTLB entry does not match the context: */
                          || dtlb_context != ic->tme_sparc_memory_context_default

                          /* the DTLB entry does not cover the needed addresses: */
                          || (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
                          || ((address + ((64 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)

                          /* the DTLB entry does not cover the needed address space: */
                          || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))

                          /* the DTLB entry can't be used for a fast stx: */
                          || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

                          /* the DTLB entry does not allow fast transfers: */
                          || (memory == TME_EMULATOR_OFF_UNDEF)

                          /* the address is misaligned: */
                          || ((address % (64 / 8)) != 0)

                          )) {

    /* call the slow store function: */
    memory = tme_sparc64_ls(ic,
                            address,
                            &TME_SPARC_FORMAT3_RD,
                            (TME_SPARC_LSINFO_OP_ST
                             | (64 / 8)));

    /* if the slow store function did the transfer: */
    if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {

      /* unbusy the TLB entry; */
      tme_sparc_tlb_unbusy(dtlb);

      TME_SPARC_INSN_OK;
    }
  }

  /* get the byte order of this transfer, inverting it when the TLB
     says the bus is opposite-endian (only legal when the memory
     implementation supports endian inversion): */
  endian_little = ic->tme_sparc_asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
  if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
    if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
      endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
    }
    else {
      assert (FALSE);
    }
  }

  /* do the fast transfer: */
  memory += address;
  value64 = TME_SPARC_FORMAT3_RD;
  value64 = (endian_little ? tme_htole_u64(value64) : tme_htobe_u64(value64));
  tme_memory_bus_write64((tme_shared tme_uint64_t *) memory, value64, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint64_t), sizeof(tme_uint64_t));

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  TME_SPARC_INSN_OK;
}
9732
/* this does a sparc64 ldxa: load a 64-bit value through an explicit
   alternate ASI, selecting the memory context from the ASI and using
   a fast DTLB path when possible: */
TME_SPARC_FORMAT3(tme_sparc64_ldxa, tme_uint64_t)
{
  tme_uint32_t asi_mask_data;
  tme_uint64_t address;
  tme_bus_context_t context;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  const tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint64_t value64;

  /* get the alternate ASI mask (also performs the privilege and
     i-field checks for alternate-space instructions): */
  asi_mask_data = _tme_sparc64_alternate_asi_mask(ic);

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
  address &= ic->tme_sparc_address_mask;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer (recode verification support;
     if the transfer was replayed, we're done): */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
                              asi_mask_data, address,
                              (TME_RECODE_SIZE_64
                               | TME_SPARC_RECODE_VERIFY_MEM_LOAD));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* get the context: primary by default, secondary or nucleus
     (context zero) when the ASI selects them: */
  context = ic->tme_sparc_memory_context_primary;
  if (__tme_predict_false(asi_mask_data
                          & (TME_SPARC64_ASI_FLAG_SECONDARY
                             + TME_SPARC64_ASI_MASK_FLAG_INSN_NUCLEUS))) {
    if (asi_mask_data & TME_SPARC64_ASI_FLAG_SECONDARY) {
      context = ic->tme_sparc_memory_context_secondary;
    }
    else if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) {
      context = 0;
    }
  }

  /* assume that no DTLB ASI mask flags will require a slow load: */
  asi_mask_flags_slow = 0;

  /* a ldxa without a no-fault ASI traps on no-fault addresses: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;

  /* if this ldxa is using a no-fault ASI: */
  if (__tme_predict_false(asi_mask_data & TME_SPARC64_ASI_FLAG_NO_FAULT)) {

    /* a ldxa with a no-fault ASI traps on addresses with side-effects
       (note that this replaces, not augments, the mask above): */
    asi_mask_flags_slow = TME_SPARC64_ASI_MASK_FLAG_TLB_SIDE_EFFECTS;
  }

  /* get and busy the DTLB entry.  it stays busied for the whole
     transfer and is unbusied on every exit path below: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, context, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_read;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = context;
  }

  /* we must call the slow load function if: */
  if (__tme_predict_false(

                          /* the DTLB entry is invalid: */
                          tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

                          /* the DTLB entry does not match the context: */
                          || dtlb_context != context

                          /* the DTLB entry does not cover the needed addresses: */
                          || (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
                          || ((address + ((64 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)

                          /* the DTLB entry does not cover the needed address space: */
                          || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))

                          /* the DTLB entry can't be used for a fast ldxa: */
                          || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

                          /* the DTLB entry does not allow fast transfers: */
                          || (memory == TME_EMULATOR_OFF_UNDEF)

                          /* the address is misaligned: */
                          || ((address % (64 / 8)) != 0)

                          )) {

    /* call the slow load function: */
    memory = tme_sparc64_ls(ic,
                            address,
                            &TME_SPARC_FORMAT3_RD,
                            (TME_SPARC_LSINFO_OP_LD
                             | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
                             | TME_SPARC_LSINFO_A
                             | (64 / 8)));

    /* if the slow load function did the transfer: */
    if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {

      /* unbusy the TLB entry; */
      tme_sparc_tlb_unbusy(dtlb);

      /* log the value loaded: */
      tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
      tme_sparc_log(ic, 1000, TME_OK,
                    (TME_SPARC_LOG_HANDLE(ic),
                     _("ldxa 0x%02x:0x%016" TME_PRIx64 ": 0x%016" TME_PRIx64),
                     TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                     address,
                     TME_SPARC_FORMAT3_RD));

      TME_SPARC_INSN_OK;
    }
  }

  /* get the byte order of this transfer, inverting it when the TLB
     says the bus is opposite-endian (only legal when the memory
     implementation supports endian inversion): */
  endian_little = asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
  if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
    if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
      endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
    }
    else {
      assert (FALSE);
    }
  }

  /* do the fast transfer (the repeated (tme_int64_t) cast is a
     harmless artifact of the code generator): */
  memory += address;
  value64 = tme_memory_bus_read64((const tme_shared tme_uint64_t *) memory, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint64_t), sizeof(tme_uint64_t));
  value64 = (endian_little ? tme_letoh_u64(value64) : tme_betoh_u64(value64));
  TME_SPARC_FORMAT3_RD = (tme_uint64_t) (tme_int64_t) (tme_int64_t) value64;

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  /* log the value loaded: */
  tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
  tme_sparc_log(ic, 1000, TME_OK,
                (TME_SPARC_LOG_HANDLE(ic),
                 _("ldxa 0x%02x:0x%016" TME_PRIx64 ": 0x%016" TME_PRIx64),
                 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                 address,
                 TME_SPARC_FORMAT3_RD));

  TME_SPARC_INSN_OK;
}
9892
9893 /* this does a sparc64 stxa: */
/* stxa: store the 64-bit rd register to an alternate address space.
   The effective address is rs1 + rs2; the ASI comes from the
   instruction (via _tme_sparc64_alternate_asi_mask, which also does
   the privilege/illegal-instruction checks).  The common case is a
   fast store directly into mapped host memory through a busy DTLB
   entry; anything unusual falls back to tme_sparc64_ls. */
TME_SPARC_FORMAT3(tme_sparc64_stxa, tme_uint64_t)
{
  tme_uint32_t asi_mask_data;
  tme_uint64_t address;
  tme_bus_context_t context;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint64_t value64;

  /* get the alternate ASI mask: */
  asi_mask_data = _tme_sparc64_alternate_asi_mask(ic);

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
  address &= ic->tme_sparc_address_mask;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer: */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
			      asi_mask_data, address,
			      (TME_RECODE_SIZE_64
			       | TME_SPARC_RECODE_VERIFY_MEM_STORE));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* log the value stored: */
  tme_sparc_log(ic, 1000, TME_OK,
		(TME_SPARC_LOG_HANDLE(ic),
		 _("stxa 0x%02x:0x%016" TME_PRIx64 ": 0x%016" TME_PRIx64),
		 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
		 address,
		 (tme_uint64_t) TME_SPARC_FORMAT3_RD));

  /* get the context: default is the primary context; the secondary or
     nucleus (context zero) contexts are selected by ASI flags: */
  context = ic->tme_sparc_memory_context_primary;
  if (__tme_predict_false(asi_mask_data
			  & (TME_SPARC64_ASI_FLAG_SECONDARY
			     + TME_SPARC64_ASI_MASK_FLAG_INSN_NUCLEUS))) {
    if (asi_mask_data & TME_SPARC64_ASI_FLAG_SECONDARY) {
      context = ic->tme_sparc_memory_context_secondary;
    }
    else if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) {
      context = 0;
    }
  }

  /* assume that no DTLB ASI mask flags will require a slow store: */
  asi_mask_flags_slow = 0;

  /* a stxa traps on no-fault addresses: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;

  /* if this stxa is using a no-fault ASI: */
  if (__tme_predict_false(asi_mask_data & TME_SPARC64_ASI_FLAG_NO_FAULT)) {

    /* a stxa with a no-fault ASI traps.  0 - 1 is the all-ones mask,
       which makes the slow-path test below always true: */
    asi_mask_flags_slow = 0 - (tme_uint32_t) 1;
  }

  /* get and busy the DTLB entry: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, context, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_write;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = context;
  }

  /* we must call the slow store function if: */
  if (__tme_predict_false(

			  /* the DTLB entry is invalid: */
			  tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

			  /* the DTLB entry does not match the context: */
			  || dtlb_context != context

			  /* the DTLB entry does not cover the needed addresses: */
			  || (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
			  || ((address + ((64 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)

			  /* the DTLB entry does not cover the needed address space: */
			  || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))

			  /* the DTLB entry can't be used for a fast stxa: */
			  || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

			  /* the DTLB entry does not allow fast transfers: */
			  || (memory == TME_EMULATOR_OFF_UNDEF)

			  /* the address is misaligned: */
			  || ((address % (64 / 8)) != 0)

			  )) {

    /* call the slow store function: */
    memory = tme_sparc64_ls(ic,
			    address,
			    &TME_SPARC_FORMAT3_RD,
			    (TME_SPARC_LSINFO_OP_ST
			     | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
			     | TME_SPARC_LSINFO_A
			     | (64 / 8)));

    /* if the slow store function did the transfer: */
    if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {

      /* unbusy the TLB entry; */
      tme_sparc_tlb_unbusy(dtlb);

      TME_SPARC_INSN_OK;
    }
  }

  /* get the byte order of this transfer.  if the DTLB inverts
     endianness, this SPARC must support endian inversion: */
  endian_little = asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
  if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
    if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
      endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
    }
    else {
      assert (FALSE);
    }
  }

  /* do the fast transfer, byte-swapping rd as needed: */
  memory += address;
  value64 = TME_SPARC_FORMAT3_RD;
  value64 = (endian_little ? tme_htole_u64(value64) : tme_htobe_u64(value64));
  tme_memory_bus_write64((tme_shared tme_uint64_t *) memory, value64, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint64_t), sizeof(tme_uint64_t));

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  TME_SPARC_INSN_OK;
}
10042
10043 /* this does a sparc64 ldfa: */
TME_SPARC_FORMAT3(tme_sparc64_ldfa,tme_uint64_t)10044 TME_SPARC_FORMAT3(tme_sparc64_ldfa, tme_uint64_t)
10045 {
10046 tme_uint32_t misaligned;
10047 struct tme_float float_buffer;
10048 struct tme_float *fpreg;
10049
10050 /* get the least significant 32 bits of the address: */
10051 misaligned = TME_SPARC_FORMAT3_RS1;
10052 misaligned += (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
10053
10054 /* see if the address is misaligned for the ASI: */
10055 misaligned = (*ic->_tme_sparc_ls_asi_misaligned)(ic, misaligned);
10056
10057 /* decode rd: */
10058 float_buffer.tme_float_format = TME_FLOAT_FORMAT_IEEE754_SINGLE;
10059 fpreg
10060 = _tme_sparc64_fpu_mem_fpreg(ic,
10061 misaligned,
10062 &float_buffer);
10063
10064 /* do the load: */
10065 tme_sparc64_lda(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX));
10066
10067 /* set the floating-point register value: */
10068 assert (fpreg != &float_buffer);
10069 fpreg->tme_float_format = TME_FLOAT_FORMAT_IEEE754_SINGLE;
10070 fpreg->tme_float_value_ieee754_single
10071 = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX << 1);
10072
10073 TME_SPARC_INSN_OK;
10074 }
10075
10076 /* this does a sparc64 lddfa: */
TME_SPARC_FORMAT3(tme_sparc64_lddfa,tme_uint64_t)10077 TME_SPARC_FORMAT3(tme_sparc64_lddfa, tme_uint64_t)
10078 {
10079 tme_uint64_t address;
10080 tme_uint32_t misaligned;
10081 struct tme_float float_buffer;
10082 struct tme_float *fpreg;
10083 tme_uint64_t offset;
10084
10085 /* get the address: */
10086 address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
10087
10088 /* get the least significant 32 bits of the address: */
10089 misaligned = address;
10090
10091 /* see if the address is misaligned for the ASI: */
10092 misaligned = (*ic->_tme_sparc_ls_asi_misaligned)(ic, misaligned);
10093
10094 /* decode rd: */
10095 float_buffer.tme_float_format = TME_FLOAT_FORMAT_IEEE754_DOUBLE;
10096 fpreg
10097 = _tme_sparc64_fpu_mem_fpreg(ic,
10098 misaligned,
10099 &float_buffer);
10100
10101 /* if bit two of the address is set, and this SPARC supports
10102 32-bit-aligned lddfa instructions: */
10103 if ((misaligned & sizeof(tme_uint32_t))
10104 && fpreg != &float_buffer) {
10105
10106 /* do two 32-bit loads: */
10107 offset = sizeof(tme_uint32_t) * 0;
10108 tme_sparc64_lda(ic, &address, &offset, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX + 0));
10109 offset = sizeof(tme_uint32_t) * 1;
10110 tme_sparc64_lda(ic, &address, &offset, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX + 1));
10111
10112 /* set the double floating-point register value: */
10113 fpreg->tme_float_format = TME_FLOAT_FORMAT_IEEE754_DOUBLE;
10114 fpreg->tme_float_value_ieee754_double.tme_value64_uint32_hi
10115 = ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX + 0);
10116 fpreg->tme_float_value_ieee754_double.tme_value64_uint32_lo
10117 = ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX + 1);
10118 }
10119
10120 /* otherwise, bit two of the address is not set, or this SPARC
10121 doesn't support 32-bit-aligned lddfa instructions: */
10122 else {
10123
10124 /* do an ldxa-style load: */
10125 tme_sparc64_ldxa(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX));
10126
10127 /* set the double floating-point register value: */
10128 assert (fpreg != &float_buffer);
10129 fpreg->tme_float_format = TME_FLOAT_FORMAT_IEEE754_DOUBLE;
10130 fpreg->tme_float_value_ieee754_double.tme_value64_uint
10131 = ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX);
10132 }
10133
10134 TME_SPARC_INSN_OK;
10135 }
10136
10137 /* this does a sparc64 stfa: */
TME_SPARC_FORMAT3(tme_sparc64_stfa,tme_uint64_t)10138 TME_SPARC_FORMAT3(tme_sparc64_stfa, tme_uint64_t)
10139 {
10140 tme_uint32_t misaligned;
10141 struct tme_float float_buffer;
10142 const struct tme_float *fpreg;
10143 const tme_uint32_t *value_single;
10144
10145 /* get the least significant 32 bits of the address: */
10146 misaligned = TME_SPARC_FORMAT3_RS1;
10147 misaligned += (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
10148
10149 /* see if the address is misaligned for the ASI: */
10150 misaligned = (*ic->_tme_sparc_ls_asi_misaligned)(ic, misaligned);
10151
10152 /* decode rd: */
10153 float_buffer.tme_float_format = TME_FLOAT_FORMAT_IEEE754_SINGLE;
10154 fpreg
10155 = _tme_sparc64_fpu_mem_fpreg(ic,
10156 misaligned,
10157 &float_buffer);
10158
10159 /* get this single floating-point register in IEEE754 single-precision format: */
10160 value_single = tme_ieee754_single_value_get(fpreg, &float_buffer.tme_float_value_ieee754_single);
10161
10162 /* set the floating-point register value: */
10163 ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX << 1) = *value_single;
10164
10165 /* do the store: */
10166 tme_sparc64_sta(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX));
10167
10168 assert (fpreg != &float_buffer);
10169 TME_SPARC_INSN_OK;
10170 }
10171
10172 /* this does a sparc64 stdfa: */
TME_SPARC_FORMAT3(tme_sparc64_stdfa,tme_uint64_t)10173 TME_SPARC_FORMAT3(tme_sparc64_stdfa, tme_uint64_t)
10174 {
10175 tme_uint64_t address;
10176 tme_uint32_t misaligned;
10177 struct tme_float float_buffer;
10178 struct tme_float *fpreg;
10179 const union tme_value64 *value_double;
10180 tme_uint64_t offset;
10181
10182 /* get the address: */
10183 address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
10184
10185 /* get the least significant 32 bits of the address: */
10186 misaligned = address;
10187
10188 /* see if the address is misaligned for the ASI: */
10189 misaligned = (*ic->_tme_sparc_ls_asi_misaligned)(ic, misaligned);
10190
10191 /* decode rd: */
10192 float_buffer.tme_float_format = TME_FLOAT_FORMAT_IEEE754_DOUBLE;
10193 fpreg
10194 = _tme_sparc64_fpu_mem_fpreg(ic,
10195 misaligned,
10196 &float_buffer);
10197
10198 /* get this double floating-point register in IEEE754 double-precision format: */
10199 value_double = tme_ieee754_double_value_get(fpreg, &float_buffer.tme_float_value_ieee754_double);
10200
10201 /* if bit two of the address is set, and this SPARC supports
10202 32-bit-aligned stdfa instructions: */
10203 if ((misaligned & sizeof(tme_uint32_t))
10204 && fpreg != &float_buffer) {
10205
10206 /* set the floating-point register value: */
10207 ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX + 0)
10208 = value_double->tme_value64_uint32_hi;
10209 ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX + 1)
10210 = value_double->tme_value64_uint32_lo;
10211
10212 /* do two 32-bit stores: */
10213 offset = sizeof(tme_uint32_t) * 0;
10214 tme_sparc64_sta(ic, &address, &offset, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX + 0));
10215 offset = sizeof(tme_uint32_t) * 1;
10216 tme_sparc64_sta(ic, &address, &offset, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX + 1));
10217 }
10218
10219 /* otherwise, bit two of the address is not set, or this SPARC
10220 doesn't support 32-bit-aligned stdfa instructions: */
10221 else {
10222
10223 /* set the floating-point register value: */
10224 ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX)
10225 = value_double->tme_value64_uint;
10226
10227 /* do an stxa-style store: */
10228 tme_sparc64_stxa(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX));
10229 }
10230
10231 assert (fpreg != &float_buffer);
10232 TME_SPARC_INSN_OK;
10233 }
10234
10235 /* this does a sparc64 casa: */
/* casa: 32-bit compare-and-swap in an alternate address space.  The
   effective address is rs1 alone (rs2 is the comparison value, not an
   address addend); rd supplies the value to store and receives the
   value loaded from memory. */
TME_SPARC_FORMAT3(tme_sparc64_casa, tme_uint64_t)
{
  tme_uint32_t asi_mask_data;
  tme_uint64_t address;
  tme_bus_context_t context;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  unsigned int reg_rs2;
  tme_uint32_t value_compare32;
  tme_uint32_t value_swap32;
  tme_uint32_t value_read32;

  /* get the alternate ASI mask: */
  asi_mask_data = _tme_sparc64_alternate_asi_mask(ic);

  /* get the address (rs1 only): */
  address = TME_SPARC_FORMAT3_RS1;
  address &= ic->tme_sparc_address_mask;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer: */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
			      asi_mask_data, address,
			      (TME_RECODE_SIZE_32
			       | TME_SPARC_RECODE_VERIFY_MEM_LOAD | TME_SPARC_RECODE_VERIFY_MEM_STORE));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* log the value stored: */
  tme_sparc_log(ic, 1000, TME_OK,
		(TME_SPARC_LOG_HANDLE(ic),
		 _("casa 0x%02x:0x%016" TME_PRIx64 ": 0x%08" TME_PRIx32),
		 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
		 address,
		 (tme_uint32_t) TME_SPARC_FORMAT3_RD));

  /* get the context: default is the primary context; the secondary or
     nucleus (context zero) contexts are selected by ASI flags: */
  context = ic->tme_sparc_memory_context_primary;
  if (__tme_predict_false(asi_mask_data
			  & (TME_SPARC64_ASI_FLAG_SECONDARY
			     + TME_SPARC64_ASI_MASK_FLAG_INSN_NUCLEUS))) {
    if (asi_mask_data & TME_SPARC64_ASI_FLAG_SECONDARY) {
      context = ic->tme_sparc_memory_context_secondary;
    }
    else if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) {
      context = 0;
    }
  }

  /* assume that no DTLB ASI mask flags will require a slow store: */
  asi_mask_flags_slow = 0;

  /* a casa traps on no-fault addresses: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;

  /* a casa traps on uncacheable addresses with side-effects: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_MASK_FLAG_TLB_UNCACHEABLE;

  /* if this casa is using a no-fault ASI: */
  if (__tme_predict_false(asi_mask_data & TME_SPARC64_ASI_FLAG_NO_FAULT)) {

    /* a casa with a no-fault ASI traps.  0 - 1 is the all-ones mask,
       which makes the slow-path test below always true: */
    asi_mask_flags_slow = 0 - (tme_uint32_t) 1;
  }

  /* get and busy the DTLB entry: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, context, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_write;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = context;
  }

  /* we must call the slow store function if: */
  if (__tme_predict_false(

			  /* the DTLB entry is invalid: */
			  tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

			  /* the DTLB entry does not match the context: */
			  || dtlb_context != context

			  /* the DTLB entry does not cover the needed addresses: */
			  || (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
			  || ((address + ((32 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)

			  /* the DTLB entry does not cover the needed address space: */
			  || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))

			  /* the DTLB entry can't be used for a fast casa: */
			  || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

			  /* the DTLB entry does not allow fast transfers.  an
			     atomic needs both read and write mapped to the
			     same host memory: */
			  || (memory != dtlb->tme_sparc_tlb_emulator_off_read)
			  || (memory == TME_EMULATOR_OFF_UNDEF)

			  /* the address is misaligned: */
			  || ((address % (32 / 8)) != 0)

			  )) {

    /* call the slow store function: */
    memory = tme_sparc64_ls(ic,
			    address,
			    &TME_SPARC_FORMAT3_RD,
			    (TME_SPARC_LSINFO_OP_ATOMIC
			     | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
			     | TME_SPARC_LSINFO_A
			     | (32 / 8)));

    /* if the slow store function did the transfer: */
    if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {

      /* unbusy the TLB entry; */
      tme_sparc_tlb_unbusy(dtlb);

      /* log the value loaded: */
      tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
      tme_sparc_log(ic, 1000, TME_OK,
		    (TME_SPARC_LOG_HANDLE(ic),
		     _("casa 0x%02x:0x%016" TME_PRIx64 ": 0x%08" TME_PRIx64),
		     TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
		     address,
		     TME_SPARC_FORMAT3_RD));

      TME_SPARC_INSN_OK;
    }
  }

  /* get the byte order of this transfer.  if the DTLB inverts
     endianness, this SPARC must support endian inversion: */
  endian_little = asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
  if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
    if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
      endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
    }
    else {
      assert (FALSE);
    }
  }

  /* do the fast transfer: compare memory against rs2, swap in rd,
     and return the loaded value in rd: */
  memory += address;
  reg_rs2 = TME_FIELD_MASK_EXTRACTU(TME_SPARC_INSN, TME_SPARC_FORMAT3_MASK_RS2);
  TME_SPARC_REG_INDEX(ic, reg_rs2);
  value_compare32 = ic->tme_sparc_ireg_uint64(reg_rs2);
  value_compare32 = (endian_little ? tme_htole_u32(value_compare32) : tme_htobe_u32(value_compare32));
  value_swap32 = TME_SPARC_FORMAT3_RD;
  value_swap32 = (endian_little ? tme_htole_u32(value_swap32) : tme_htobe_u32(value_swap32));
  value_read32 = tme_memory_atomic_cx32((tme_shared tme_uint32_t *) memory, value_compare32, value_swap32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t));
  value_read32 = (endian_little ? tme_letoh_u32(value_read32) : tme_betoh_u32(value_read32));
  TME_SPARC_FORMAT3_RD = value_read32;

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  /* log the value loaded: */
  tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
  tme_sparc_log(ic, 1000, TME_OK,
		(TME_SPARC_LOG_HANDLE(ic),
		 _("casa 0x%02x:0x%016" TME_PRIx64 ": 0x%08" TME_PRIx64),
		 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
		 address,
		 TME_SPARC_FORMAT3_RD));

  TME_SPARC_INSN_OK;
}
10415
10416 /* this does a sparc64 casxa: */
/* casxa: 64-bit compare-and-swap in an alternate address space.  The
   effective address is rs1 alone (rs2 is the comparison value, not an
   address addend); rd supplies the value to store and receives the
   value loaded from memory. */
TME_SPARC_FORMAT3(tme_sparc64_casxa, tme_uint64_t)
{
  tme_uint32_t asi_mask_data;
  tme_uint64_t address;
  tme_bus_context_t context;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  unsigned int reg_rs2;
  tme_uint64_t value_compare64;
  tme_uint64_t value_swap64;
  tme_uint64_t value_read64;

  /* get the alternate ASI mask: */
  asi_mask_data = _tme_sparc64_alternate_asi_mask(ic);

  /* get the address (rs1 only): */
  address = TME_SPARC_FORMAT3_RS1;
  address &= ic->tme_sparc_address_mask;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer: */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
			      asi_mask_data, address,
			      (TME_RECODE_SIZE_64
			       | TME_SPARC_RECODE_VERIFY_MEM_LOAD | TME_SPARC_RECODE_VERIFY_MEM_STORE));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* log the value stored: */
  tme_sparc_log(ic, 1000, TME_OK,
		(TME_SPARC_LOG_HANDLE(ic),
		 _("casxa 0x%02x:0x%016" TME_PRIx64 ": 0x%016" TME_PRIx64),
		 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
		 address,
		 (tme_uint64_t) TME_SPARC_FORMAT3_RD));

  /* get the context: default is the primary context; the secondary or
     nucleus (context zero) contexts are selected by ASI flags: */
  context = ic->tme_sparc_memory_context_primary;
  if (__tme_predict_false(asi_mask_data
			  & (TME_SPARC64_ASI_FLAG_SECONDARY
			     + TME_SPARC64_ASI_MASK_FLAG_INSN_NUCLEUS))) {
    if (asi_mask_data & TME_SPARC64_ASI_FLAG_SECONDARY) {
      context = ic->tme_sparc_memory_context_secondary;
    }
    else if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) {
      context = 0;
    }
  }

  /* assume that no DTLB ASI mask flags will require a slow store: */
  asi_mask_flags_slow = 0;

  /* a casxa traps on no-fault addresses: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;

  /* a casxa traps on uncacheable addresses with side-effects: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_MASK_FLAG_TLB_UNCACHEABLE;

  /* if this casxa is using a no-fault ASI: */
  if (__tme_predict_false(asi_mask_data & TME_SPARC64_ASI_FLAG_NO_FAULT)) {

    /* a casxa with a no-fault ASI traps.  0 - 1 is the all-ones mask,
       which makes the slow-path test below always true: */
    asi_mask_flags_slow = 0 - (tme_uint32_t) 1;
  }

  /* get and busy the DTLB entry: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, context, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_write;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = context;
  }

  /* we must call the slow store function if: */
  if (__tme_predict_false(

			  /* the DTLB entry is invalid: */
			  tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

			  /* the DTLB entry does not match the context: */
			  || dtlb_context != context

			  /* the DTLB entry does not cover the needed addresses: */
			  || (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
			  || ((address + ((64 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)

			  /* the DTLB entry does not cover the needed address space: */
			  || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))

			  /* the DTLB entry can't be used for a fast casxa: */
			  || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

			  /* the DTLB entry does not allow fast transfers.  an
			     atomic needs both read and write mapped to the
			     same host memory: */
			  || (memory != dtlb->tme_sparc_tlb_emulator_off_read)
			  || (memory == TME_EMULATOR_OFF_UNDEF)

			  /* the address is misaligned: */
			  || ((address % (64 / 8)) != 0)

			  )) {

    /* call the slow store function: */
    memory = tme_sparc64_ls(ic,
			    address,
			    &TME_SPARC_FORMAT3_RD,
			    (TME_SPARC_LSINFO_OP_ATOMIC
			     | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
			     | TME_SPARC_LSINFO_A
			     | (64 / 8)));

    /* if the slow store function did the transfer: */
    if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {

      /* unbusy the TLB entry; */
      tme_sparc_tlb_unbusy(dtlb);

      /* log the value loaded: */
      tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
      tme_sparc_log(ic, 1000, TME_OK,
		    (TME_SPARC_LOG_HANDLE(ic),
		     _("casxa 0x%02x:0x%016" TME_PRIx64 ": 0x%016" TME_PRIx64),
		     TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
		     address,
		     TME_SPARC_FORMAT3_RD));

      TME_SPARC_INSN_OK;
    }
  }

  /* get the byte order of this transfer.  if the DTLB inverts
     endianness, this SPARC must support endian inversion: */
  endian_little = asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
  if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
    if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
      endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
    }
    else {
      assert (FALSE);
    }
  }

  /* do the fast transfer: compare memory against rs2, swap in rd,
     and return the loaded value in rd: */
  memory += address;
  reg_rs2 = TME_FIELD_MASK_EXTRACTU(TME_SPARC_INSN, TME_SPARC_FORMAT3_MASK_RS2);
  TME_SPARC_REG_INDEX(ic, reg_rs2);
  value_compare64 = ic->tme_sparc_ireg_uint64(reg_rs2);
  value_compare64 = (endian_little ? tme_htole_u64(value_compare64) : tme_htobe_u64(value_compare64));
  value_swap64 = TME_SPARC_FORMAT3_RD;
  value_swap64 = (endian_little ? tme_htole_u64(value_swap64) : tme_htobe_u64(value_swap64));
  value_read64 = tme_memory_atomic_cx64((tme_shared tme_uint64_t *) memory, value_compare64, value_swap64, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint64_t));
  value_read64 = (endian_little ? tme_letoh_u64(value_read64) : tme_betoh_u64(value_read64));
  TME_SPARC_FORMAT3_RD = value_read64;

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  /* log the value loaded: */
  tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
  tme_sparc_log(ic, 1000, TME_OK,
		(TME_SPARC_LOG_HANDLE(ic),
		 _("casxa 0x%02x:0x%016" TME_PRIx64 ": 0x%016" TME_PRIx64),
		 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
		 address,
		 TME_SPARC_FORMAT3_RD));

  TME_SPARC_INSN_OK;
}
10596
10597 /* the sparc64 mulx function: */
TME_SPARC_FORMAT3(tme_sparc64_mulx,tme_uint64_t)10598 TME_SPARC_FORMAT3(tme_sparc64_mulx, tme_uint64_t)
10599 {
10600 tme_uint64_t src1;
10601 tme_uint64_t src2;
10602 tme_uint64_t dst;
10603
10604 /* get the operands: */
10605 src1 = TME_SPARC_FORMAT3_RS1;
10606 src2 = TME_SPARC_FORMAT3_RS2;
10607
10608 /* do the mulx: */
10609 dst = src1 * src2;
10610
10611 TME_SPARC_FORMAT3_RD = dst;
10612 TME_SPARC_INSN_OK;
10613 }
10614
10615 /* the sparc64 sdivx function: */
TME_SPARC_FORMAT3(tme_sparc64_sdivx,tme_uint64_t)10616 TME_SPARC_FORMAT3(tme_sparc64_sdivx, tme_uint64_t)
10617 {
10618 tme_int64_t src1;
10619 tme_int64_t src2;
10620 tme_int64_t dst;
10621
10622 /* get the operands: */
10623 src1 = TME_SPARC_FORMAT3_RS1;
10624 src2 = TME_SPARC_FORMAT3_RS2;
10625
10626 /* do the sdivx: */
10627 if (__tme_predict_false(src2 == 0)) {
10628 tme_sparc64_trap(ic, TME_SPARC64_TRAP_division_by_zero);
10629 }
10630 dst = (src2 == -1 && src1 == (((tme_int64_t) 1) << 63) ? src1 : src1 / src2);
10631
10632 TME_SPARC_FORMAT3_RD = dst;
10633 TME_SPARC_INSN_OK;
10634 }
10635
10636 /* the sparc64 udivx function: */
TME_SPARC_FORMAT3(tme_sparc64_udivx,tme_uint64_t)10637 TME_SPARC_FORMAT3(tme_sparc64_udivx, tme_uint64_t)
10638 {
10639 tme_uint64_t src1;
10640 tme_uint64_t src2;
10641 tme_uint64_t dst;
10642
10643 /* get the operands: */
10644 src1 = TME_SPARC_FORMAT3_RS1;
10645 src2 = TME_SPARC_FORMAT3_RS2;
10646
10647 /* do the udivx: */
10648 if (__tme_predict_false(src2 == 0)) {
10649 tme_sparc64_trap(ic, TME_SPARC64_TRAP_division_by_zero);
10650 }
10651 dst = src1 / src2;
10652
10653 TME_SPARC_FORMAT3_RD = dst;
10654 TME_SPARC_INSN_OK;
10655 }
10656
10657 /* this does a slow atomic operation: */
10658 void
tme_sparc64_atomic(struct tme_sparc * ic,struct tme_sparc_ls * ls)10659 tme_sparc64_atomic(struct tme_sparc *ic, struct tme_sparc_ls *ls)
10660 {
10661 tme_uint32_t endian_little;
10662 tme_uint32_t insn;
10663 tme_uint64_t value64;
10664 tme_uint64_t value_swap64;
10665 unsigned int reg_rs2;
10666 tme_uint32_t value32;
10667 tme_uint32_t value_swap32;
10668 tme_uint32_t size;
10669
10670 /* if this is the beginning of the operation: */
10671 if (ls->tme_sparc_ls_state == 0) {
10672
10673 /* start the load part of the operation: */
10674 ls->tme_sparc_ls_state = ls->tme_sparc_ls_size;
10675 assert (ls->tme_sparc_ls_state != 0
10676 && (ls->tme_sparc_ls_state & TME_BIT(7)) == 0);
10677
10678 /* the load must start at the beginning of the buffer: */
10679 assert (ls->tme_sparc_ls_buffer_offset == 0);
10680 }
10681
10682 /* if this is the load part of the operation: */
10683 if ((ls->tme_sparc_ls_state & TME_BIT(7)) == 0) {
10684
10685 /* do one slow load cycle: */
10686 tme_sparc64_load(ic, ls);
10687
10688 /* if the slow load cycle did not load all of the data: */
10689 if (__tme_predict_false(ls->tme_sparc_ls_size != 0)) {
10690 return;
10691 }
10692
10693 /* get the byte order of this transfer: */
10694 endian_little = ls->tme_sparc_ls_lsinfo & TME_SPARC_LSINFO_ENDIAN_LITTLE;
10695
10696 /* dispatch on the op3 of the instruction: */
10697 insn = TME_SPARC_INSN;
10698 switch ((insn >> 19) & 0x3f) {
10699
10700 case 0x3c: /* casa */
10701
10702 /* finish the load part of the compare and swap: */
10703 assert (ls->tme_sparc_ls_state == sizeof(tme_uint32_t));
10704 value32 = ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer32s[0];
10705 value_swap32 = *ls->tme_sparc_ls_rd64;
10706 if (endian_little) {
10707 value32 = tme_letoh_u32(value32);
10708 value_swap32 = tme_htole_u32(value_swap32);
10709 }
10710 else {
10711 value32 = tme_betoh_u32(value32);
10712 value_swap32 = tme_htobe_u32(value_swap32);
10713 }
10714 *ls->tme_sparc_ls_rd64 = value32;
10715
10716 /* if the comparison fails: */
10717 reg_rs2 = TME_FIELD_MASK_EXTRACTU(insn, TME_SPARC_FORMAT3_MASK_RS2);
10718 TME_SPARC_REG_INDEX(ic, reg_rs2);
10719 if (value32 != (tme_uint32_t) ic->tme_sparc_ireg_uint64(reg_rs2)) {
10720 return;
10721 }
10722
10723 /* start the store part of the compare and swap: */
10724 ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer32s[0] = value_swap32;
10725 break;
10726
10727 case 0x3e: /* casxa */
10728
10729 /* finish the load part of the compare and swap: */
10730 assert (ls->tme_sparc_ls_state == sizeof(tme_uint64_t));
10731 value64 = ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer64s[0];
10732 value_swap64 = *ls->tme_sparc_ls_rd64;
10733 if (endian_little) {
10734 value64 = tme_letoh_u64(value64);
10735 value_swap64 = tme_htole_u64(value_swap64);
10736 }
10737 else {
10738 value64 = tme_betoh_u64(value64);
10739 value_swap64 = tme_htobe_u64(value_swap64);
10740 }
10741 *ls->tme_sparc_ls_rd64 = value64;
10742
10743 /* if the comparison fails: */
10744 reg_rs2 = TME_FIELD_MASK_EXTRACTU(insn, TME_SPARC_FORMAT3_MASK_RS2);
10745 TME_SPARC_REG_INDEX(ic, reg_rs2);
10746 if (value64 != (tme_uint64_t) ic->tme_sparc_ireg_uint64(reg_rs2)) {
10747 return;
10748 }
10749
10750 /* start the store part of the compare and swap: */
10751 ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer64s[0] = value_swap64;
10752 break;
10753
10754 case 0x0d: /* ldstub */
10755 case 0x1d: /* ldstuba */
10756
10757 /* finish the load part of the ldstub: */
10758 assert (ls->tme_sparc_ls_state == sizeof(tme_uint8_t));
10759 *ls->tme_sparc_ls_rd64 = ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer8s[0];
10760
10761 /* start the store part of the ldstub: */
10762 ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer8s[0] = 0xff;
10763 break;
10764
10765 /* otherwise, this must be swap: */
10766 default:
10767 assert (((insn >> 19) & 0x2f) == 0x0f /* swap, swapa */);
10768
10769 /* finish the load part of the swap: */
10770 assert (ls->tme_sparc_ls_state == sizeof(tme_uint32_t));
10771 value32 = ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer32s[0];
10772 value_swap32 = *ls->tme_sparc_ls_rd64;
10773 if (endian_little) {
10774 value32 = tme_letoh_u32(value32);
10775 value_swap32 = tme_htole_u32(value32);
10776 }
10777 else {
10778 value32 = tme_betoh_u32(value32);
10779 value_swap32 = tme_htobe_u32(value32);
10780 }
10781 *ls->tme_sparc_ls_rd64 = value32;
10782
10783 /* start the store part of the swap: */
10784 ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer32s[0] = value_swap32;
10785 break;
10786 }
10787
10788 /* start the store part of the operation: */
10789 size = ls->tme_sparc_ls_state;
10790 ls->tme_sparc_ls_address64 -= size;
10791 ls->tme_sparc_ls_size = size;
10792 ls->tme_sparc_ls_buffer_offset = 0;
10793 ls->tme_sparc_ls_state = size | TME_BIT(7);
10794 }
10795
10796 /* this is the store part of the operation: */
10797
10798 /* do one slow store cycle: */
10799 tme_sparc64_store(ic, ls);
10800
10801 /* if the slow store cycle did not store all of the data: */
10802 if (__tme_predict_false(ls->tme_sparc_ls_size != 0)) {
10803 return;
10804 }
10805 }
10806
/* this does one slow load cycle: */
/* NB(doc): transfers the next chunk of a slow load into the CPU's
   memory buffer at the current buffer offset, then advances the
   address, buffer offset, and remaining size in *ls.  the caller
   loops until ls->tme_sparc_ls_size reaches zero: */
void
tme_sparc64_load(struct tme_sparc *ic,
                 struct tme_sparc_ls *ls)
{
  struct tme_sparc_tlb *tlb;
  tme_uint64_t address;
  unsigned int cycle_size;
  tme_bus_addr_t physical_address;
  int shift;
  int err;

  /* get the TLB entry: */
  tlb = ls->tme_sparc_ls_tlb;

  /* the TLB entry must be busy and valid: */
  assert (tme_bus_tlb_is_valid(&tlb->tme_sparc_tlb_bus_tlb));

  /* start the bus cycle structure: */
  ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_type = TME_BUS_CYCLE_READ;

  /* get the buffer: the read lands in the memory buffer at the
     operation's current buffer offset: */
  ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_buffer = &ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer8s[ls->tme_sparc_ls_buffer_offset];
  ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_buffer_increment = 1;

  /* get the current address: */
  address = ls->tme_sparc_ls_address64;
  ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address = address;

  /* start the cycle size, clamping it so the cycle does not run past
     the last address covered by this TLB entry: */
  cycle_size = ls->tme_sparc_ls_size;
  assert (cycle_size > 0);
  cycle_size--;
  cycle_size = TME_MIN(cycle_size, (((tme_bus_addr64_t) tlb->tme_sparc_tlb_addr_last) - address)) + 1;

  /* if this TLB entry allows fast reads: */
  if (__tme_predict_true(tlb->tme_sparc_tlb_emulator_off_read != TME_EMULATOR_OFF_UNDEF)) {

    /* do a read directly from emulator memory: */
    ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size = cycle_size;
    tme_memory_bus_read_buffer((tlb->tme_sparc_tlb_emulator_off_read + (tme_uint64_t) ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address),
                               ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_buffer,
                               ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size,
                               tlb->tme_sparc_tlb_bus_rwlock,
                               sizeof(tme_uint8_t),
                               sizeof(tme_uint64_t));
  }

  /* otherwise, this TLB entry does not allow fast reads: */
  else {

    /* finish the cycle size: additionally keep a single slow cycle
       from crossing an aligned 64-bit word boundary: */
    cycle_size = TME_MIN(cycle_size, 1 + ((~ (unsigned int) address) % sizeof(tme_uint64_t)));
    ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size = cycle_size;

    /* form the physical address for the bus cycle handler, by
       applying the TLB entry's address offset and shift: */
    physical_address = ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address;
    physical_address += tlb->tme_sparc_tlb_addr_offset;
    shift = tlb->tme_sparc_tlb_addr_shift;
    if (shift < 0) {
      physical_address <<= (0 - shift);
    }
    else if (shift > 0) {
      physical_address >>= shift;
    }
    ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address = physical_address;

    /* finish the bus cycle structure: */
    (*ic->_tme_sparc_ls_bus_cycle)(ic, ls);
    tme_sparc_log(ic, 10000, TME_OK,
                  (TME_SPARC_LOG_HANDLE(ic),
                   _("cycle-load%u 0x%016" TME_PRIx64),
                   (unsigned int) (ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size * 8),
                   (tme_bus_addr64_t) ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address));

    /* callout the bus cycle.  NB: the TLB must be unbusied and the
       callout lock released around the callout, and reacquired in the
       opposite order afterwards: */
    tme_sparc_tlb_unbusy(tlb);
    tme_sparc_callout_unlock(ic);
    err = (*tlb->tme_sparc_tlb_bus_tlb.tme_bus_tlb_cycle)
      (tlb->tme_sparc_tlb_bus_tlb.tme_bus_tlb_cycle_private,
       &ls->tme_sparc_ls_bus_cycle);
    tme_sparc_callout_relock(ic);
    tme_sparc_tlb_busy(tlb);

    /* the TLB entry can't have been invalidated before the load: */
    assert (err != EBADF);

    /* if the bus cycle didn't complete normally: */
    if (err != TME_OK) {

      /* if a real bus fault may have happened, instead of
         some synchronous event: */
      if (err != TME_BUS_CYCLE_SYNCHRONOUS_EVENT) {

        /* call the bus fault handlers (they may resolve the fault
           into TME_OK, a synchronous event, or a real fault): */
        err = tme_bus_tlb_fault(&tlb->tme_sparc_tlb_bus_tlb, &ls->tme_sparc_ls_bus_cycle, err);
      }

      /* if some synchronous event has happened: */
      if (err == TME_BUS_CYCLE_SYNCHRONOUS_EVENT) {

        /* after the currently executing instruction finishes, check
           for external resets, halts, or interrupts: */
        ic->_tme_sparc_instruction_burst_remaining = 0;
        ic->_tme_sparc_instruction_burst_other = TRUE;
      }

      /* otherwise, if no real bus fault happened: */
      else if (err == TME_OK) {

        /* nothing to do */
      }

      /* otherwise, a real bus fault happened: */
      else {
        (*ic->_tme_sparc_ls_bus_fault)(ic, ls, err);
        return;
      }
    }
  }

  /* some data must have been transferred: */
  assert (ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size > 0);

  /* update the operation state by the number of bytes actually
     transferred (which may be less than requested): */
  cycle_size = ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size;
  ls->tme_sparc_ls_address64 += cycle_size;
  ls->tme_sparc_ls_buffer_offset += cycle_size;
  ls->tme_sparc_ls_size -= cycle_size;
}
10937
10938 /* this does one slow store cycle: */
10939 void
tme_sparc64_store(struct tme_sparc * ic,struct tme_sparc_ls * ls)10940 tme_sparc64_store(struct tme_sparc *ic,
10941 struct tme_sparc_ls *ls)
10942 {
10943 struct tme_sparc_tlb *tlb;
10944 tme_uint64_t address;
10945 unsigned int cycle_size;
10946 tme_bus_addr_t physical_address;
10947 int shift;
10948 int err;
10949
10950 /* get the TLB entry: */
10951 tlb = ls->tme_sparc_ls_tlb;
10952
10953 /* the TLB entry must be busy and valid: */
10954 assert (tme_bus_tlb_is_valid(&tlb->tme_sparc_tlb_bus_tlb));
10955
10956 /* start the bus cycle structure: */
10957 ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_type = TME_BUS_CYCLE_WRITE;
10958
10959 /* get the buffer: */
10960 ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_buffer = &ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer8s[ls->tme_sparc_ls_buffer_offset];
10961 ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_buffer_increment = 1;
10962
10963 /* get the current address: */
10964 address = ls->tme_sparc_ls_address64;
10965 ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address = address;
10966
10967 /* start the cycle size: */
10968 cycle_size = ls->tme_sparc_ls_size;
10969 assert (cycle_size > 0);
10970 cycle_size--;
10971 cycle_size = TME_MIN(cycle_size, (((tme_bus_addr64_t) tlb->tme_sparc_tlb_addr_last) - address)) + 1;
10972
10973 /* if this TLB entry allows fast writes: */
10974 if (__tme_predict_true(tlb->tme_sparc_tlb_emulator_off_write != TME_EMULATOR_OFF_UNDEF)) {
10975
10976 /* do a write: */
10977 ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size = cycle_size;
10978 tme_memory_bus_write_buffer((tlb->tme_sparc_tlb_emulator_off_write + (tme_uint64_t) ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address),
10979 ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_buffer,
10980 ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size,
10981 tlb->tme_sparc_tlb_bus_rwlock,
10982 sizeof(tme_uint8_t),
10983 sizeof(tme_uint64_t));
10984 }
10985
10986 /* otherwise, this TLB entry does not allow fast writes: */
10987 else {
10988
10989 /* finish the cycle size: */
10990 cycle_size = TME_MIN(cycle_size, 1 + ((~ (unsigned int) address) % sizeof(tme_uint64_t)));
10991 ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size = cycle_size;
10992
10993 /* form the physical address for the bus cycle handler: */
10994 physical_address = ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address;
10995 physical_address += tlb->tme_sparc_tlb_addr_offset;
10996 shift = tlb->tme_sparc_tlb_addr_shift;
10997 if (shift < 0) {
10998 physical_address <<= (0 - shift);
10999 }
11000 else if (shift > 0) {
11001 physical_address >>= shift;
11002 }
11003 ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address = physical_address;
11004
11005 /* finish the bus cycle structure: */
11006 (*ic->_tme_sparc_ls_bus_cycle)(ic, ls);
11007 tme_sparc_log(ic, 10000, TME_OK,
11008 (TME_SPARC_LOG_HANDLE(ic),
11009 _("cycle-store%u 0x%016" TME_PRIx64),
11010 (unsigned int) (ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size * 8),
11011 (tme_bus_addr64_t) ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address));
11012
11013 /* callout the bus cycle: */
11014 tme_sparc_tlb_unbusy(tlb);
11015 tme_sparc_callout_unlock(ic);
11016 err = (*tlb->tme_sparc_tlb_bus_tlb.tme_bus_tlb_cycle)
11017 (tlb->tme_sparc_tlb_bus_tlb.tme_bus_tlb_cycle_private,
11018 &ls->tme_sparc_ls_bus_cycle);
11019 tme_sparc_callout_relock(ic);
11020 tme_sparc_tlb_busy(tlb);
11021
11022 /* the TLB entry can't have been invalidated before the store: */
11023 assert (err != EBADF);
11024
11025 /* if the bus cycle didn't complete normally: */
11026 if (err != TME_OK) {
11027
11028 /* if a real bus fault may have happened, instead of
11029 some synchronous event: */
11030 if (err != TME_BUS_CYCLE_SYNCHRONOUS_EVENT) {
11031
11032 /* call the bus fault handlers: */
11033 err = tme_bus_tlb_fault(&tlb->tme_sparc_tlb_bus_tlb, &ls->tme_sparc_ls_bus_cycle, err);
11034 }
11035
11036 /* if some synchronous event has happened: */
11037 if (err == TME_BUS_CYCLE_SYNCHRONOUS_EVENT) {
11038
11039 /* after the currently executing instruction finishes, check
11040 for external resets, halts, or interrupts: */
11041 ic->_tme_sparc_instruction_burst_remaining = 0;
11042 ic->_tme_sparc_instruction_burst_other = TRUE;
11043 }
11044
11045 /* otherwise, if no real bus fault happened: */
11046 else if (err == TME_OK) {
11047
11048 /* nothing to do */
11049 }
11050
11051 /* otherwise, a real bus fault happened: */
11052 else {
11053 (*ic->_tme_sparc_ls_bus_fault)(ic, ls, err);
11054 return;
11055 }
11056 }
11057 }
11058
11059 /* some data must have been transferred: */
11060 assert (ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size > 0);
11061
11062 /* if this was an atomic operation: */
11063 if (__tme_predict_false(ls->tme_sparc_ls_lsinfo & TME_SPARC_LSINFO_OP_ATOMIC)) {
11064
11065 /* we do not support atomic operations in TLB entries that
11066 do not support both fast reads and fast writes. assuming
11067 that all atomic operations are to regular memory, we
11068 should always get fast read and fast write TLBs. when
11069 we do not, it should only be because the memory has been
11070 made read-only in the MMU. the write above was supposed
11071 to cause a fault (with the instruction rerun later with
11072 a fast read and fast write TLB entry), but instead it
11073 succeeded and transferred some data. we have modified
11074 memory and cannot recover: */
11075 abort();
11076 }
11077
11078 /* update: */
11079 cycle_size = ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size;
11080 ls->tme_sparc_ls_address64 += cycle_size;
11081 ls->tme_sparc_ls_buffer_offset += cycle_size;
11082 ls->tme_sparc_ls_size -= cycle_size;
11083 }
11084
/* this does a slow load or store: */
/* NB(doc): this is the slow path for a 64-bit load, store, or atomic.
   address_first is the first address of the transfer, _rd points to
   the rd register, and lsinfo packs the size, any ASI, and the
   operation flags.  returns TME_EMULATOR_OFF_UNDEF when the caller
   must not complete a fast transfer (store, atomic, or a load that
   already completed); otherwise returns a base pointer that, indexed
   by address, yields the bytes for the caller to finish the transfer
   (either fast emulator memory or the CPU's memory buffer): */
tme_shared tme_uint8_t *
tme_sparc64_ls(struct tme_sparc *ic,
               tme_uint64_t const address_first,
               tme_uint64_t *_rd,
               tme_uint32_t lsinfo)
{
  struct tme_sparc_ls ls;
  tme_uint32_t size;
  tme_uint32_t asi;
  tme_uint32_t asi_mask_flags;
  tme_uint32_t asi_mask;
  tme_bus_context_t context;
  tme_uint32_t tlb_hash;
  unsigned long tlb_i;
  unsigned long handler_i;
  struct tme_sparc_tlb *tlb;
  unsigned int cycle_type;
  tme_uint64_t address;
  void (*address_map) _TME_P((struct tme_sparc *, struct tme_sparc_ls *));
  tme_bus_addr_t address_bus;
  int rc;
  const tme_shared tme_uint8_t *emulator_off;
  unsigned int buffer_offset;
  tme_uint64_t value;
  tme_uint32_t value32;

  /* we must not be replaying instructions: */
  assert (tme_sparc_recode_verify_replay_last_pc(ic) == 0);

  /* initialize the pointer to the rd register: */
  ls.tme_sparc_ls_rd64 = _rd;

#ifndef NDEBUG

  /* initialize the cycle function: */
  ls.tme_sparc_ls_cycle = NULL;

  /* initialize the TLB entry pointer: */
  ls.tme_sparc_ls_tlb = NULL;

#endif /* NDEBUG */

  /* initialize the faults: */
  ls.tme_sparc_ls_faults = TME_SPARC_LS_FAULT_NONE;

  /* initialize the address: */
  ls.tme_sparc_ls_address64 = address_first;

  /* initialize the size: */
  size = TME_SPARC_LSINFO_WHICH_SIZE(lsinfo);
  ls.tme_sparc_ls_size = size;

  /* initialize the info: */
  ls.tme_sparc_ls_lsinfo = lsinfo;

  /* if the address is not aligned (size is a power of two, so
     (size - 1) is the alignment mask): */
  if (__tme_predict_false(((size - 1) & (tme_uint32_t) address_first) != 0)) {
    ls.tme_sparc_ls_faults |= TME_SPARC_LS_FAULT_ADDRESS_NOT_ALIGNED;
  }

  /* otherwise, the address is aligned: */
  else {

    /* the transfer must not cross a 32-bit boundary: */
    assert ((size - 1) <= (tme_uint32_t) ~address_first);
  }

  /* initialize the address map: */
  ls.tme_sparc_ls_address_map = ic->_tme_sparc_ls_address_map;

  /* if this is a ldd, ldda, std, or stda, or an instruction
     that loads or stores in the same way: */
  if (lsinfo & TME_SPARC_LSINFO_LDD_STD) {

    /* if the rd register is odd: */
    /* NB: we don't check the rd field in the instruction,
       because the register number there might be encoded
       in some way, or the architecture might ignore bit
       zero in the rd field (for example, the sparc32 lddf).
       instead, we test the rd register pointer: */
    if (__tme_predict_false((ls.tme_sparc_ls_rd64
                             - ic->tme_sparc_ic.tme_ic_iregs.tme_ic_iregs_uint64s)
                            % 2)) {
      ls.tme_sparc_ls_faults |= TME_SPARC_LS_FAULT_LDD_STD_RD_ODD;
    }
  }

  /* if the ASI has been specified: */
  if (lsinfo & TME_SPARC_LSINFO_A) {

    /* get the ASI: */
    asi = TME_SPARC_LSINFO_WHICH_ASI(lsinfo);

    /* get the flags for this ASI: */
    asi_mask_flags = ic->tme_sparc_asis[asi].tme_sparc_asi_mask_flags;

    /* if this is a nonprivileged access: */
    if (!TME_SPARC_PRIV(ic)) {

      /* if this is a restricted ASI: */
      if (__tme_predict_false((asi & TME_SPARC64_ASI_FLAG_UNRESTRICTED) == 0)) {
        ls.tme_sparc_ls_faults |= TME_SPARC64_LS_FAULT_PRIVILEGED_ASI;
      }

      /* force a nonprivileged access with the ASI: */
      asi_mask_flags |= TME_SPARC64_ASI_MASK_FLAG_INSN_AS_IF_USER;
    }

    /* make the ASI mask: */
    if (asi_mask_flags & TME_SPARC64_ASI_MASK_FLAG_SPECIAL) {
      asi_mask
        = (asi_mask_flags
           + TME_SPARC_ASI_MASK_SPECIAL(asi,
                                        (asi_mask_flags & TME_SPARC64_ASI_MASK_FLAG_INSN_AS_IF_USER) == 0));
    }
    else {
      asi_mask = TME_SPARC64_ASI_MASK(asi, asi_mask_flags);
    }
    ls.tme_sparc_ls_asi_mask = asi_mask;

    /* if this is a no-fault ASI with a non-load instruction: */
    if (asi_mask & TME_SPARC64_ASI_FLAG_NO_FAULT) {
      if (__tme_predict_false(lsinfo & (TME_SPARC_LSINFO_OP_ST | TME_SPARC_LSINFO_OP_ATOMIC))) {
        ls.tme_sparc_ls_faults |= TME_SPARC64_LS_FAULT_NO_FAULT_NON_LOAD;
      }
    }

    /* get the context for the alternate address space (primary,
       secondary, or nucleus): */
    context = ic->tme_sparc_memory_context_primary;
    if (asi_mask & TME_SPARC64_ASI_FLAG_SECONDARY) {
      context = ic->tme_sparc_memory_context_secondary;
    }
    if (__tme_predict_false(asi_mask & TME_SPARC64_ASI_MASK_FLAG_INSN_NUCLEUS)) {
      if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) {
        context = 0;
      }
    }
    ls.tme_sparc_ls_context = context;

    /* get the default TLB entry index: */
    tlb_hash = TME_SPARC_TLB_HASH(ic, context, address_first);
    if (lsinfo & TME_SPARC_LSINFO_OP_FETCH) {
      tlb_i = TME_SPARC_ITLB_ENTRY(ic, tlb_hash);
    }
    else {
      tlb_i = TME_SPARC_DTLB_ENTRY(ic, tlb_hash);
    }
    ls.tme_sparc_ls_tlb_i = tlb_i;

    /* call any special handler for this ASI (it may change the
       faults, TLB entry index, or other operation state): */
    handler_i = ic->tme_sparc_asis[TME_SPARC_ASI_MASK_WHICH(asi_mask)].tme_sparc_asi_handler;
    if (__tme_predict_false(handler_i != 0)) {
      (*ic->_tme_sparc_ls_asi_handlers[handler_i])(ic, &ls);
    }

    /* get the final TLB entry index: */
    tlb_i = ls.tme_sparc_ls_tlb_i;
  }

  /* otherwise, the ASI has not been specified: */
  else {

    /* get the ASI mask: */
    asi_mask = ic->tme_sparc_asi_mask_data;

    /* add in any ASI mask flags from the instruction: */
    /* NB: initially, TME_SPARC64_ASI_FLAG_NO_FAULT is the
       only flag allowed, and only the flush instruction
       can use it: */
    assert (TME_SPARC_LSINFO_WHICH_ASI_FLAGS(lsinfo) == 0
            || (TME_SPARC_LSINFO_WHICH_ASI_FLAGS(lsinfo) == TME_SPARC64_ASI_FLAG_NO_FAULT
                && ((ic->_tme_sparc_insn >> 19) & 0x3f) == 0x3b)
            );
    asi_mask |= TME_SPARC_LSINFO_WHICH_ASI_FLAGS(lsinfo);

    /* set the ASI mask: */
    ls.tme_sparc_ls_asi_mask = asi_mask;

    /* get the context: */
    context = ic->tme_sparc_memory_context_default;
    ls.tme_sparc_ls_context = context;

    /* this must not be a fetch: */
    assert ((lsinfo & TME_SPARC_LSINFO_OP_FETCH) == 0);

    /* get the TLB entry index: */
    tlb_hash = TME_SPARC_TLB_HASH(ic, context, address_first);
    tlb_i = TME_SPARC_DTLB_ENTRY(ic, tlb_hash);
    ls.tme_sparc_ls_tlb_i = tlb_i;
  }

  /* get the TLB entry pointer: */
  tlb = &ic->tme_sparc_tlbs[tlb_i];
  ls.tme_sparc_ls_tlb = tlb;

  /* get the cycle type: */
  /* NB: we deliberately set this once, now, since the lsinfo
     may change once we start transferring: */
  cycle_type
    = ((lsinfo
        & (TME_SPARC_LSINFO_OP_ST
           | TME_SPARC_LSINFO_OP_ATOMIC))
       ? TME_BUS_CYCLE_WRITE
       : TME_BUS_CYCLE_READ);

  /* loop until the transfer is complete: */
  for (;;) {

    /* if we have faulted: */
    if (__tme_predict_false(ls.tme_sparc_ls_faults != TME_SPARC_LS_FAULT_NONE)) {

      /* unbusy this TLB, since the trap function may not return: */
      tme_bus_tlb_unbusy(&tlb->tme_sparc_tlb_bus_tlb);

      /* call the trap function, which will not return if it traps: */
      (*ic->_tme_sparc_ls_trap)(ic, &ls);

      /* rebusy this TLB: */
      tme_bus_tlb_busy(&tlb->tme_sparc_tlb_bus_tlb);

      /* since the trap function returned, it must have cleared the fault: */
      assert (ls.tme_sparc_ls_faults == TME_SPARC_LS_FAULT_NONE);
    }

    /* if the transfer is complete, stop now: */
    if (__tme_predict_false(ls.tme_sparc_ls_size == 0)) {
      break;
    }

    /* get the current address: */
    address = ls.tme_sparc_ls_address64;

    /* if this TLB entry does not apply or is invalid: */
    /* NB(doc): a TLB context greater than the maximum context is a
       wildcard that matches any context: */
    if ((tlb->tme_sparc_tlb_context != ls.tme_sparc_ls_context
         && tlb->tme_sparc_tlb_context <= ic->tme_sparc_memory_context_max)
        || address < (tme_bus_addr64_t) tlb->tme_sparc_tlb_addr_first
        || address > (tme_bus_addr64_t) tlb->tme_sparc_tlb_addr_last
        || !TME_SPARC_TLB_ASI_MASK_OK(tlb, ls.tme_sparc_ls_asi_mask)
        || ((tlb->tme_sparc_tlb_cycles_ok & cycle_type) == 0
            && (cycle_type == TME_BUS_CYCLE_READ
                ? tlb->tme_sparc_tlb_emulator_off_read
                : tlb->tme_sparc_tlb_emulator_off_write) == TME_EMULATOR_OFF_UNDEF)
        || tme_bus_tlb_is_invalid(&tlb->tme_sparc_tlb_bus_tlb)) {

      /* unbusy this TLB entry for filling: */
      tme_bus_tlb_unbusy_fill(&tlb->tme_sparc_tlb_bus_tlb);

      /* if we haven't mapped this address yet: */
      address_map = ls.tme_sparc_ls_address_map;
      if (address_map != NULL) {
        ls.tme_sparc_ls_address_map = NULL;

        /* count this mapping: */
        if (ls.tme_sparc_ls_lsinfo & TME_SPARC_LSINFO_OP_FETCH) {
          TME_SPARC_STAT(ic, tme_sparc_stats_itlb_map);
        }
        else {
          TME_SPARC_STAT(ic, tme_sparc_stats_dtlb_map);
        }

        /* initialize the ASI mask and context on this TLB entry: */
        /* NB that the ASI mask will likely be updated by either the
           address mapping or the TLB fill: */
        tlb->tme_sparc_tlb_asi_mask
          = (ls.tme_sparc_ls_asi_mask
             & ~TME_SPARC_ASI_MASK_FLAGS_AVAIL);
        tlb->tme_sparc_tlb_context = ls.tme_sparc_ls_context;

        /* NB: if the address mapping traps, we won't get a chance
           to finish updating this TLB entry, which is currently in
           an inconsistent state - but not necessarily an unusable
           state.  poison it to be unusable, including any recode
           TLB entry: */
        tlb->tme_sparc_tlb_addr_first = 1;
        tlb->tme_sparc_tlb_addr_last = 0;
#if TME_SPARC_HAVE_RECODE(ic)
        if (ls.tme_sparc_ls_lsinfo & TME_SPARC_LSINFO_OP_FETCH) {
          tme_sparc64_recode_chain_tlb_update(ic, &ls);
        }
        else {
          tme_sparc64_recode_ls_tlb_update(ic, &ls);
        }
#endif /* TME_SPARC_HAVE_RECODE(ic) */

#ifndef NDEBUG

        /* initialize the mapping TLB entry with poison values so the
           asserts below can tell whether the mapping touched it: */
        ls.tme_sparc_ls_tlb_map.tme_bus_tlb_addr_first = 0 - (tme_bus_addr_t) 1;
        ls.tme_sparc_ls_tlb_map.tme_bus_tlb_addr_last = 0 - (tme_bus_addr_t) 2;
        ls.tme_sparc_ls_tlb_map.tme_bus_tlb_cycles_ok = 0;
        ls.tme_sparc_ls_tlb_map.tme_bus_tlb_addr_offset = 0 - (tme_bus_addr_t) 1;

#endif /* !NDEBUG */

        /* map the address: */
        (*address_map)(ic, &ls);

        /* the address mapping must do any trapping itself: */
        assert (ls.tme_sparc_ls_faults == TME_SPARC_LS_FAULT_NONE);

        /* if the address mapping completed the transfer: */
        if (__tme_predict_false(ls.tme_sparc_ls_size == 0)) {

          /* rebusy the TLB entry: */
          tme_sparc_tlb_busy(tlb);

          /* stop now: */
          break;
        }

        /* the mapping must have actually made a mapping: */
        assert (ls.tme_sparc_ls_tlb_map.tme_bus_tlb_addr_first != 0 - (tme_bus_addr_t) 1);
        assert (ls.tme_sparc_ls_tlb_map.tme_bus_tlb_addr_last != 0 - (tme_bus_addr_t) 2);
        assert (ls.tme_sparc_ls_tlb_map.tme_bus_tlb_cycles_ok != 0);
        assert (ls.tme_sparc_ls_tlb_map.tme_bus_tlb_addr_offset != 0 - (tme_bus_addr_t) 1);
      }

      /* count this fill: */
      if (ls.tme_sparc_ls_lsinfo & TME_SPARC_LSINFO_OP_FETCH) {
        TME_SPARC_STAT(ic, tme_sparc_stats_itlb_fill);
      }
      else {
        TME_SPARC_STAT(ic, tme_sparc_stats_dtlb_fill);
      }

      /* get the bus address: */
      address_bus = ls.tme_sparc_ls_address64 + ls.tme_sparc_ls_tlb_map.tme_bus_tlb_addr_offset;

      /* fill the TLB entry (the callout lock must be released
         around the fill): */
      tme_sparc_callout_unlock(ic);
      rc = (*ic->_tme_sparc_bus_connection->tme_sparc_bus_tlb_fill)
        (ic->_tme_sparc_bus_connection,
         tlb,
         ls.tme_sparc_ls_asi_mask,
         address_bus,
         cycle_type);
      assert (rc == TME_OK);
      tme_sparc_callout_relock(ic);

      /* map the TLB entry: */
      tme_bus_tlb_map(&tlb->tme_sparc_tlb_bus_tlb, address_bus,
                      &ls.tme_sparc_ls_tlb_map, ls.tme_sparc_ls_address64);

      /* update any recode TLB entry: */
#if TME_SPARC_HAVE_RECODE(ic)
      if (ls.tme_sparc_ls_lsinfo & TME_SPARC_LSINFO_OP_FETCH) {
        tme_sparc64_recode_chain_tlb_update(ic, &ls);
      }
      else {
        tme_sparc64_recode_ls_tlb_update(ic, &ls);
      }
#endif /* TME_SPARC_HAVE_RECODE(ic) */

      /* rebusy the TLB entry: */
      tme_sparc_tlb_busy(tlb);

      /* if this TLB entry is already invalid, retry from the top: */
      if (tme_bus_tlb_is_invalid(&tlb->tme_sparc_tlb_bus_tlb)) {
        continue;
      }
    }

    /* this TLB entry must apply: */
    assert ((tlb->tme_sparc_tlb_context == ls.tme_sparc_ls_context
             || tlb->tme_sparc_tlb_context > ic->tme_sparc_memory_context_max)
            && ls.tme_sparc_ls_address64 >= (tme_bus_addr64_t) tlb->tme_sparc_tlb_addr_first
            && ls.tme_sparc_ls_address64 <= (tme_bus_addr64_t) tlb->tme_sparc_tlb_addr_last
            && ((tlb->tme_sparc_tlb_cycles_ok & cycle_type)
                || (cycle_type == TME_BUS_CYCLE_READ
                    ? tlb->tme_sparc_tlb_emulator_off_read
                    : tlb->tme_sparc_tlb_emulator_off_write) != TME_EMULATOR_OFF_UNDEF)
            && TME_SPARC_TLB_ASI_MASK_OK(tlb, ls.tme_sparc_ls_asi_mask));

    /* get the current lsinfo: */
    lsinfo = ls.tme_sparc_ls_lsinfo;

    /* if we have to check the TLB: */
    if (__tme_predict_true((lsinfo & TME_SPARC_LSINFO_NO_CHECK_TLB) == 0)) {

      /* get the ASI mask for this TLB entry: */
      asi_mask = tlb->tme_sparc_tlb_asi_mask;

      /* if this TLB entry is for no-fault accesses only: */
      if (__tme_predict_false(asi_mask & TME_SPARC64_ASI_FLAG_NO_FAULT)) {

        /* if this access is not using a no-fault ASI: */
        if (__tme_predict_false((ls.tme_sparc_ls_asi_mask & TME_SPARC64_ASI_FLAG_NO_FAULT) == 0)) {
          ls.tme_sparc_ls_faults |= TME_SPARC64_LS_FAULT_NO_FAULT_FAULT;
          continue;
        }
      }

      /* if this TLB entry is for addresses with side effects: */
      if (asi_mask & TME_SPARC64_ASI_MASK_FLAG_TLB_SIDE_EFFECTS) {

        /* if this access is using a no-fault ASI: */
        /* NB: a flush may be implemented as a load with a no-fault ASI: */
        if (__tme_predict_false(ls.tme_sparc_ls_asi_mask & TME_SPARC64_ASI_FLAG_NO_FAULT)) {
          ls.tme_sparc_ls_faults |= TME_SPARC64_LS_FAULT_SIDE_EFFECTS;
          continue;
        }
      }

      /* if this TLB entry is for uncacheable addresses: */
      if (asi_mask & TME_SPARC64_ASI_MASK_FLAG_TLB_UNCACHEABLE) {

        /* if this is an atomic access: */
        if (__tme_predict_false(lsinfo & TME_SPARC_LSINFO_OP_ATOMIC)) {
          ls.tme_sparc_ls_faults |= TME_SPARC64_LS_FAULT_UNCACHEABLE;
          continue;
        }
      }

      /* see if this is a little-endian instruction (copy the ASI
         little-endian flag into the lsinfo endian bit): */
      lsinfo
        = ((lsinfo
            & ~TME_SPARC_LSINFO_ENDIAN_LITTLE)
           + ((ls.tme_sparc_ls_asi_mask
               & TME_SPARC64_ASI_FLAG_LITTLE)
#if TME_SPARC_LSINFO_ENDIAN_LITTLE < TME_SPARC64_ASI_FLAG_LITTLE
#error "TME_SPARC_LSINFO_ENDIAN_ values changed"
#endif
              * (TME_SPARC_LSINFO_ENDIAN_LITTLE
                 / TME_SPARC64_ASI_FLAG_LITTLE)));

      /* if this TLB entry has its little-endian bit set: */
      if (__tme_predict_false(asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
        assert (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN);
        /* NB(review): generated code - this condition is trivially
           true; the TLB little-endian bit always inverts the
           instruction's byte order here: */
        if (TRUE) {
          lsinfo ^= TME_SPARC_LSINFO_ENDIAN_LITTLE;
        }
      }
    }

    /* if we might not have to call a slow cycle function: */
    if (__tme_predict_true((lsinfo & TME_SPARC_LSINFO_SLOW_CYCLES) == 0)) {

      /* if this TLB entry allows fast transfer of all of the addresses: */
      if (__tme_predict_true(((tme_bus_addr64_t) tlb->tme_sparc_tlb_addr_last) >= (address_first + (ls.tme_sparc_ls_size - 1)))) {
        emulator_off = tlb->tme_sparc_tlb_emulator_off_read;
        if (lsinfo & TME_SPARC_LSINFO_OP_ST) {
          emulator_off = tlb->tme_sparc_tlb_emulator_off_write;
        }
        /* NB(doc): an atomic additionally requires that the fast read
           and fast write windows be the same memory: */
        if (__tme_predict_true(emulator_off != TME_EMULATOR_OFF_UNDEF
                               && (((lsinfo & TME_SPARC_LSINFO_OP_ATOMIC) == 0)
                                   || emulator_off == tlb->tme_sparc_tlb_emulator_off_write))) {

          /* return and let our caller do the transfer: */
          /* NB: we break const here: */
          return ((tme_shared tme_uint8_t *) emulator_off);
        }
      }

      /* we have to call a slow cycle function: */
      lsinfo |= TME_SPARC_LSINFO_SLOW_CYCLES;
      assert (ls.tme_sparc_ls_cycle == NULL);

      /* assume that this operation will transfer the start of the buffer: */
      buffer_offset = 0;

      /* assume that this is a load or a fetch: */
      ls.tme_sparc_ls_cycle = tme_sparc64_load;

      /* if this is a store: */
      if (lsinfo & TME_SPARC_LSINFO_OP_ST) {

        /* put the (first) register to store in the memory buffer: */
        value = TME_SPARC_FORMAT3_RD;
        value = ((lsinfo & TME_SPARC_LSINFO_ENDIAN_LITTLE) ? tme_htole_u64(value) : tme_htobe_u64(value));
        ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer64s[0] = value;

        /* find the offset in the memory buffer corresponding to the
           first address: */
        buffer_offset = sizeof(tme_uint64_t) - ls.tme_sparc_ls_size;
        if ((lsinfo & TME_SPARC_LSINFO_ENDIAN_LITTLE)) {
          buffer_offset = 0;
        }

        /* if this is a std or stda: */
        if (lsinfo & TME_SPARC_LSINFO_LDD_STD) {

          /* put the odd 32-bit register to store in the memory buffer
             after the even 32-bit register.  exactly where this is depends
             on the architecture and on the byte order of the store: */
          value32 = TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64);
          if ((lsinfo & TME_SPARC_LSINFO_ENDIAN_LITTLE)) {
            value32 = tme_htole_u32(value32);
            ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer32s[1] = value32;
            buffer_offset = 0;
          }
          else {
            value32 = tme_htobe_u32(value32);
            ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer32s[(64 / 32)] = value32;
            buffer_offset = sizeof(tme_uint64_t) - sizeof(tme_uint32_t);
          }
        }

        /* set the cycle function: */
        ls.tme_sparc_ls_cycle = tme_sparc64_store;
      }

      /* otherwise, if this is an atomic: */
      else if (lsinfo & TME_SPARC_LSINFO_OP_ATOMIC) {

        /* set the cycle function: */
        ls.tme_sparc_ls_cycle = tme_sparc64_atomic;
      }

      /* set the buffer offset for the (first) slow cycle: */
      ls.tme_sparc_ls_buffer_offset = buffer_offset;

      /* clear the state for this operation: */
      ls.tme_sparc_ls_state = 0;
    }

    /* assume that we won't have to check the TLB again: */
    ls.tme_sparc_ls_lsinfo = lsinfo | TME_SPARC_LSINFO_NO_CHECK_TLB;
    /* call the slow cycle function: */
    (*ls.tme_sparc_ls_cycle)(ic, &ls);
  }

  /* if this was a load that has already completed, a store,
     or an atomic, make sure our caller doesn't try to complete
     a fast transfer: */
  if (ls.tme_sparc_ls_lsinfo
      & (TME_SPARC_LSINFO_LD_COMPLETED
         | TME_SPARC_LSINFO_OP_ST
         | TME_SPARC_LSINFO_OP_ATOMIC)) {
    return (TME_EMULATOR_OFF_UNDEF);
  }

  /* otherwise, this was a load that did slow cycles into the
     memory buffer and hasn't updated rd yet.  return a pointer
     to the memory buffer so our caller can complete the load: */
  return (ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer8s
          - address_first);
}
11623
11624 #undef TME_SPARC_VERSION
11625 #define TME_SPARC_VERSION(ic) _TME_SPARC_VERSION(ic)
11626
11627 #endif /* TME_HAVE_INT64_T */
11628 /* automatically generated by sparc-misc-auto.sh, do not edit! */
11629
11630 /* the icc->conditions mapping: */
11631 const tme_uint8_t _tme_sparc_conds_icc[16] = {
11632 0,
11633 0 | TME_BIT(4) | TME_BIT(5),
11634 0 | TME_BIT(2) | TME_BIT(3) | TME_BIT(7),
11635 0 | TME_BIT(2) | TME_BIT(3) | TME_BIT(4) | TME_BIT(5) | TME_BIT(7),
11636 0 | TME_BIT(1) | TME_BIT(2) | TME_BIT(4),
11637 0 | TME_BIT(1) | TME_BIT(2) | TME_BIT(4) | TME_BIT(5),
11638 0 | TME_BIT(1) | TME_BIT(2) | TME_BIT(3) | TME_BIT(4) | TME_BIT(7),
11639 0 | TME_BIT(1) | TME_BIT(2) | TME_BIT(3) | TME_BIT(4) | TME_BIT(5) | TME_BIT(7),
11640 0 | TME_BIT(2) | TME_BIT(3) | TME_BIT(6),
11641 0 | TME_BIT(2) | TME_BIT(3) | TME_BIT(4) | TME_BIT(5) | TME_BIT(6),
11642 0 | TME_BIT(6) | TME_BIT(7),
11643 0 | TME_BIT(4) | TME_BIT(5) | TME_BIT(6) | TME_BIT(7),
11644 0 | TME_BIT(1) | TME_BIT(2) | TME_BIT(3) | TME_BIT(4) | TME_BIT(6),
11645 0 | TME_BIT(1) | TME_BIT(2) | TME_BIT(3) | TME_BIT(4) | TME_BIT(5) | TME_BIT(6),
11646 0 | TME_BIT(1) | TME_BIT(2) | TME_BIT(4) | TME_BIT(6) | TME_BIT(7),
11647 0 | TME_BIT(1) | TME_BIT(2) | TME_BIT(4) | TME_BIT(5) | TME_BIT(6) | TME_BIT(7),
11648 };
11649