1
2 /*---------------------------------------------------------------*/
3 /*--- begin guest_ppc_helpers.c ---*/
4 /*---------------------------------------------------------------*/
5
6 /*
7 This file is part of Valgrind, a dynamic binary instrumentation
8 framework.
9
10 Copyright (C) 2004-2017 OpenWorks LLP
11 info@open-works.net
12
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
17
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
26 02110-1301, USA.
27
28 The GNU General Public License is contained in the file COPYING.
29
30 Neither the names of the U.S. Department of Energy nor the
31 University of California nor the names of its contributors may be
32 used to endorse or promote products derived from this software
33 without prior written permission.
34 */
35
36 #include "libvex_basictypes.h"
37 #include "libvex_emnote.h"
38 #include "libvex_guest_ppc32.h"
39 #include "libvex_guest_ppc64.h"
40 #include "libvex_ir.h"
41 #include "libvex.h"
42
43 #include "main_util.h"
44 #include "main_globals.h"
45 #include "guest_generic_bb_to_IR.h"
46 #include "guest_ppc_defs.h"
47
48
49 /* This file contains helper functions for ppc32 and ppc64 guest code.
50 Calls to these functions are generated by the back end. These
51 calls are of course in the host machine code and this file will be
52 compiled to host machine code, so that all makes sense.
53
54 Only change the signatures of these helper functions very
55 carefully. If you change the signature here, you'll have to change
56 the parameters passed to it in the IR calls constructed by
57 guest-ppc/toIR.c.
58 */
59
60
61 /*---------------------------------------------------------------*/
62 /*--- Misc integer helpers. ---*/
63 /*---------------------------------------------------------------*/
64
65 /* CALLED FROM GENERATED CODE */
66 /* DIRTY HELPER (non-referentially-transparent) */
67 /* Horrible hack. On non-ppc platforms, return 1. */
68 /* Reads a complete, consistent 64-bit TB value. */
ppcg_dirtyhelper_MFTB(void)69 ULong ppcg_dirtyhelper_MFTB ( void )
70 {
71 # if defined(__powerpc__)
72 ULong res;
73 UInt lo, hi1, hi2;
74 while (1) {
75 __asm__ __volatile__ ("\n"
76 "\tmftbu %0\n"
77 "\tmftb %1\n"
78 "\tmftbu %2\n"
79 : "=r" (hi1), "=r" (lo), "=r" (hi2)
80 );
81 if (hi1 == hi2) break;
82 }
83 res = ((ULong)hi1) << 32;
84 res |= (ULong)lo;
85 return res;
86 # else
87 return 1ULL;
88 # endif
89 }
90
91
92 /* CALLED FROM GENERATED CODE */
93 /* DIRTY HELPER (non-referentially transparent) */
UInt ppc32g_dirtyhelper_MFSPR_268_269 ( UInt r269 )
{
#  if defined(__powerpc__)
   /* Read SPR 268 (TBL) or 269 (TBU), selected by the flag. */
   UInt spr;
   if (r269 == 0) {
      __asm__ __volatile__("mfspr %0,268" : "=b"(spr));
   } else {
      __asm__ __volatile__("mfspr %0,269" : "=b"(spr));
   }
   return spr;
#  else
   /* Not on ppc hardware: nothing to read. */
   return 0;
#  endif
}
108
109
110 /* CALLED FROM GENERATED CODE */
111 /* DIRTY HELPER (I'm not really sure what the side effects are) */
ppc32g_dirtyhelper_MFSPR_287(void)112 UInt ppc32g_dirtyhelper_MFSPR_287 ( void )
113 {
114 # if defined(__powerpc__)
115 UInt spr;
116 __asm__ __volatile__("mfspr %0,287" : "=b"(spr));
117 return spr;
118 # else
119 return 0;
120 # endif
121 }
122
123
124 /* CALLED FROM GENERATED CODE */
125 /* DIRTY HELPER (reads guest state, writes guest mem) */
void ppc32g_dirtyhelper_LVS ( VexGuestPPC32State* gst,
                              UInt vD_off, UInt sh, UInt shift_right )
{
   /* Byte values 0x00..0x1F; a 16-byte window starting at 'sh' is the
      lvsl/lvsr permute-control vector. */
   static
   UChar ref[32] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                     0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
                     0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
                     0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F };
   U128* srcv;
   U128* dstv;
   Int   w;

   vassert( vD_off <= sizeof(VexGuestPPC32State)-8 );
   vassert( sh <= 15 );
   vassert( shift_right <= 1 );
   if (shift_right)
      sh = 16-sh;        /* lvsr: window starts at 16-sh */
   /* else lvsl: window starts at sh */

   srcv = (U128*)&ref[sh];
   dstv = (U128*)( ((UChar*)gst) + vD_off );

   /* Copy the selected 16-byte window into the guest vector reg. */
   for (w = 0; w < 4; w++)
      (*dstv)[w] = (*srcv)[w];
}
152
153 /* CALLED FROM GENERATED CODE */
154 /* DIRTY HELPER (reads guest state, writes guest mem) */
void ppc64g_dirtyhelper_LVS ( VexGuestPPC64State* gst,
                              UInt vD_off, UInt sh, UInt shift_right,
                              UInt endness )
{
   UChar ref[32];
   ULong i;
   Int k;
   /* ref[] is built at run time rather than being a static const
      table.  VEX's 64-bit ppc code generation does not establish a
      valid TOC pointer for helper calls, so the address of a static
      table would be garbage.  This (vex r2073) is a kludgearound for
      that codegen limitation.  See bug 250038. */
   for (i = 0; i < 32; i++) ref[i] = i;

   U128* srcv;
   U128* dstv;

   vassert( vD_off <= sizeof(VexGuestPPC64State)-8 );
   vassert( sh <= 15 );
   vassert( shift_right <= 1 );
   if (shift_right)
      sh = 16-sh;        /* lvsr: window starts at 16-sh */
   /* else lvsl: window starts at sh */

   srcv = (U128*)&ref[sh];
   dstv = (U128*)( ((UChar*)gst) + vD_off );

   if ((0x1 & endness) == 0x0) {
      /* Little endian: store the 16 bytes in reversed order. */
      unsigned char *s, *d;
      s = (unsigned char *)srcv;
      d = (unsigned char *)dstv;
      for (k = 15; k >= 0; k--, s++)
         d[k] = *s;
   } else {
      /* Big endian: straight 16-byte copy, word at a time. */
      Int w;
      for (w = 0; w < 4; w++)
         (*dstv)[w] = (*srcv)[w];
   }
}
198
199
200 /* Helper-function specialiser. */
201
IRExpr* guest_ppc32_spechelper ( const HChar* function_name,
                                 IRExpr** args,
                                 IRStmt** precedingStmts,
                                 Int n_precedingStmts )
{
   /* No clean-helper calls are specialised on ppc32; returning NULL
      tells iropt to leave the call as-is. */
   return NULL;
}
209
IRExpr* guest_ppc64_spechelper ( const HChar* function_name,
                                 IRExpr** args,
                                 IRStmt** precedingStmts,
                                 Int n_precedingStmts )
{
   /* No clean-helper calls are specialised on ppc64; returning NULL
      tells iropt to leave the call as-is. */
   return NULL;
}
217
218
219 /* 16-bit floating point number is stored in the lower 16-bits of 32-bit value */
220 #define I16_EXP_MASK 0x7C00
221 #define I16_FRACTION_MASK 0x03FF
222 #define I32_EXP_MASK 0x7F800000
223 #define I32_FRACTION_MASK 0x007FFFFF
224 #define I64_EXP_MASK 0x7FF0000000000000ULL
225 #define I64_FRACTION_MASK 0x000FFFFFFFFFFFFFULL
226 #define V128_EXP_MASK 0x7FFF000000000000ULL
227 #define V128_FRACTION_MASK 0x0000FFFFFFFFFFFFULL /* upper 64-bit fractional mask */
228
/* Classify a floating-point value of the given IR width and compute
   the FPCC (4 bits) and C (1 bit) condition fields.  C is returned in
   the upper 32 bits of the result, FPCC in the lower 32 bits. */
ULong generate_C_FPCC_helper( ULong irType, ULong src_hi, ULong src )
{
   UInt NaN, inf, zero, norm, dnorm, pos;
   UInt bit0, bit1, bit2, bit3;
   UInt sign_bit = 0;
   ULong exp_mask = 0, exp_part = 0, frac_part = 0;
   ULong fpcc, c;

   /* Extract sign, exponent and fraction fields for the source width. */
   if ( irType == Ity_I16 ) {
      frac_part = I16_FRACTION_MASK & src;
      exp_mask  = I16_EXP_MASK;
      exp_part  = exp_mask & src;
      sign_bit  = src >> 15;

   } else if ( irType == Ity_I32 ) {
      frac_part = I32_FRACTION_MASK & src;
      exp_mask  = I32_EXP_MASK;
      exp_part  = exp_mask & src;
      sign_bit  = src >> 31;

   } else if ( irType == Ity_I64 ) {
      frac_part = I64_FRACTION_MASK & src;
      exp_mask  = I64_EXP_MASK;
      exp_part  = exp_mask & src;
      sign_bit  = src >> 63;

   } else if ( irType == Ity_F128 ) {
      /* Only whether the fraction is zero/non-zero matters, so the
         two halves can be OR'd together. */
      frac_part = (V128_FRACTION_MASK & src_hi) | src;
      exp_mask  = V128_EXP_MASK;
      exp_part  = exp_mask & src_hi;
      sign_bit  = src_hi >> 63;
   } else {
      vassert(0); // Unknown value of irType
   }

   /* Classify the value:
        NaN   - exponent all ones, fraction non-zero
        inf   - exponent all ones, fraction zero
        zero  - exponent zero, fraction zero
        norm  - exponent neither zero nor all ones
        dnorm - exponent zero, fraction non-zero
        pos   - sign bit clear                                       */
   NaN   = (exp_part == exp_mask) && (frac_part != 0);
   inf   = (exp_part == exp_mask) && (frac_part == 0);
   zero  = (exp_part == 0)        && (frac_part == 0);
   norm  = (exp_part != 0)        && (exp_part != exp_mask);
   dnorm = (exp_part == 0)        && (frac_part != 0);
   pos   = (sign_bit == 0);

   /* Calculate FPCC.  If the result is a NaN, bits 1, 2 and 3 must be
      forced to zero to get the correct result. */
   bit0 = NaN | inf;
   bit1 = (!NaN) & zero;
   bit2 = (!NaN) & (!zero) & pos    & (dnorm | norm | inf);
   bit3 = (!NaN) & (!zero) & (!pos) & (dnorm | norm | inf);

   fpcc = (bit3 << 3) | (bit2 << 2) | (bit1 << 1) | bit0;

   /* Calculate C: NaN, any denormal, or negative zero. */
   c = NaN | dnorm | ((!pos) & zero);

   /* C in the upper 32 bits, FPCC in the lower 32 bits. */
   return (c << 32) | fpcc;
}
321
322
323 /*---------------------------------------------------------------*/
324 /*--- Misc BCD clean helpers. ---*/
325 /*---------------------------------------------------------------*/
326
327 /* NOTE, the clean and dirty helpers need to called using the
328 * fnptr_to_fnentry() function wrapper to handle the Big Endian
329 * pointer-to-function ABI and the Little Endian ABI.
330 */
331
332 /* This C-helper takes a 128-bit BCD value as two 64-bit pieces.
333 * It checks the string to see if it is a valid 128-bit BCD value.
334 * A valid BCD value has a sign value in bits [3:0] between 0xA
335 * and 0xF inclusive. each of the BCD digits represented as a 4-bit
 * hex number in bits BCD value[128:4] must be between 0 and 9
337 * inclusive. Returns an unsigned 64-bit value if valid.
338 */
/* Validate a 128-bit BCD string passed as two 64-bit halves.  For a
 * signed string the sign nibble (bits [3:0]) must be 0xA..0xF; every
 * digit nibble must be 0..9.  Returns non-zero iff valid.
 */
ULong is_BCDstring128_helper( ULong Signed, ULong bcd_string_hi,
                              ULong bcd_string_low ) {
   Int i;
   ULong valid_bcd, sign_valid = False;
   ULong digit;
   UInt  sign;

   if ( Signed == True ) {
      sign = bcd_string_low & 0xF;
      if( ( sign >= 0xA ) && ( sign <= 0xF ) )
         sign_valid = True;

      /* Change the sign digit to a zero
       * so the loop below works the same
       * for signed and unsigned BCD strings.
       */
      bcd_string_low &= 0xFFFFFFFFFFFFFFF0ULL;

   } else {
      sign_valid = True; /* set sign to True so result is only
                            based on the validity of the digits */
   }

   valid_bcd = True; // Assume true to start
   /* Each 64-bit half holds 16 4-bit digits; check one digit from each
    * half per iteration.  (Previously this looped 32 times, but after
    * 16 shifts both operands are zero, so the extra iterations were
    * dead work.)
    */
   for( i = 0; i < 16; i++ ) {
      /* check high and low 64-bit strings in parallel */
      digit = bcd_string_low & 0xF;
      if ( digit > 0x9 )
         valid_bcd = False;
      bcd_string_low = bcd_string_low >> 4;

      digit = bcd_string_hi & 0xF;
      if ( digit > 0x9 )
         valid_bcd = False;
      bcd_string_hi = bcd_string_hi >> 4;
   }

   return valid_bcd & sign_valid;
}
378
379 /* This clean helper takes a signed 32-bit BCD value and a carry in
380 * and adds 1 to the value of the BCD value. The BCD value is passed
381 * in as a single 64-bit value. The incremented value is returned in
382 * the lower 32 bits of the result. If the input was signed the sign of
383 * the result is the same as the input. The carry out is returned in
384 * bits [35:32] of the result.
385 */
/* Add carry_in (assumed 0 or 1 -- see header comment) to a 32-bit BCD
 * string held in the low bits of bcd_string.  If Signed, the sign
 * nibble in bits [3:0] is preserved unchanged.  The incremented value
 * is returned in the low 32 bits; the carry out appears in bits
 * [35:32].
 */
ULong increment_BCDstring32_helper( ULong Signed,
                                    ULong bcd_string, ULong carry_in ) {
   UInt i, num_digits = 8;
   ULong bcd_value, result = 0;
   ULong carry, digit, new_digit;

   carry = carry_in;

   if ( Signed == True ) {
      bcd_value = bcd_string >> 4; /* remove sign */
      num_digits = num_digits - 1;
   } else {
      bcd_value = bcd_string;
   }

   for( i = 0; i < num_digits; i++ ) {
      digit = bcd_value & 0xF;
      bcd_value = bcd_value >> 4;
      new_digit = digit + carry;

      /* A decimal digit overflows at 10, not 11.  The previous test
       * (new_digit > 10) could never fire here since digit <= 9 and
       * carry <= 1, so 9+1 was stored as the invalid BCD nibble 0xA
       * and the carry was lost.
       */
      if ( new_digit >= 10 ) {
         carry = 1;
         new_digit = new_digit - 10;

      } else {
         carry = 0;
      }
      result = result | (new_digit << (i*4) );
   }

   if ( Signed == True ) {
      /* reattach the original sign nibble */
      result = ( carry << 32) | ( result << 4 ) | ( bcd_string & 0xF );
   } else {
      result = ( carry << 32) | result;
   }

   return result;
}
424
425 /*---------------------------------------------------------------*/
426 /*--- Misc packed decimal clean helpers. ---*/
427 /*---------------------------------------------------------------*/
428
429 /* This C-helper takes a 64-bit packed decimal value stored in a
430 * 64-bit value. It converts the zoned decimal format. The lower
431 * byte may contain a sign value, set it to zero. If return_upper
432 * is zero, return lower 64 bits of result, otherwise return upper
433 * 64 bits of the result.
434 */
/* Convert a packed-decimal source to zoned format, one 64-bit half of
 * the result per call.  Each digit nibble is OR'd with upper_byte to
 * form one zoned byte.  The sign is inserted later by the caller.
 */
ULong convert_to_zoned_helper( ULong src_hi, ULong src_low,
                               ULong upper_byte, ULong return_upper ) {
   UInt i, sh;
   ULong result = 0, zoned_byte;

   if ( return_upper == 0 ) { /* produce the lower 64 bits */
      for( i = 0; i < 7; i++ ) {
         sh = ( 8 - i ) * 4;
         zoned_byte = ( ( src_low >> sh ) & 0xf ) | upper_byte;
         result |= zoned_byte << ( ( 7 - i ) * 8 );
      }

   } else {
      /* The digit for i == 0 lives in the upper 64 bits of the
         source; handle it separately. */
      zoned_byte = ( src_hi & 0xf ) | upper_byte;
      result |= zoned_byte << 56;

      for( i = 1; i < 8; i++ ) {
         sh = ( 16 - i ) * 4;
         zoned_byte = ( ( src_low >> sh ) & 0xf ) | upper_byte;
         result |= zoned_byte << ( ( 7 - i ) * 8 );
      }
   }
   return result;
}
463
464 /* This C-helper takes the lower 64-bits of the 128-bit packed decimal
465 * src value. It converts the src value to a 128-bit national format.
466 * If return_upper is zero, the helper returns lower 64 bits of result,
467 * otherwise it returns the upper 64-bits of the result.
468 */
/* Convert the low 64 bits of a packed-decimal source to 128-bit
 * national format, returning one 64-bit half per call as selected by
 * return_upper.
 */
ULong convert_to_national_helper( ULong src, ULong return_upper ) {

   UInt i;
   UInt sh = 3, max = 4, min = 0; /* defaults select the upper half */
   ULong result = 0, hword;

   if ( return_upper == 0 ) {     /* produce the lower half instead */
      min = 4;
      max = 7;
      sh  = 7;
   }

   for( i = min; i < max; i++ ) {
      /* 0x0030 is the national-format zone for digit characters. */
      hword = ( ( src >> ( ( 7 - i ) * 4 ) ) & 0xf ) | 0x0030;
      result |= hword << ( ( sh - i ) * 16 );
   }
   return result;
}
487
488 /* This C-helper takes a 128-bit zoned value stored in a 128-bit
489 * value. It converts it to the packed 64-bit decimal format without a
490 * a sign value. The sign is supposed to be in bits [3:0] and the packed
491 * value in bits [67:4]. This helper leaves it to the caller to put the
492 * result into a V128 and shift the returned value over and put the sign
493 * in.
494 */
/* Convert a 128-bit zoned value (two 64-bit halves) to signless packed
 * decimal.  The caller shifts the result and inserts the sign.
 */
ULong convert_from_zoned_helper( ULong src_hi, ULong src_low ) {
   UInt i;
   ULong packed = 0, nib;

   /* Peel off the i == 0 step so the per-iteration shift arithmetic is
    * uniform for the upper and lower extractions.  The sign in the
    * least significant byte is skipped.
    */
   nib = ( src_hi >> 56 ) & 0xF;
   packed |= nib << 60;

   for( i = 1; i < 8; i++ ) {
      /* digit from the upper source half */
      nib = ( src_hi >> ( ( 7 - i ) * 8 ) ) & 0xF;
      packed |= nib << ( ( 15 - i ) * 4 );

      /* digit from the lower source half */
      nib = ( src_low >> ( ( 8 - i ) * 8 ) ) & 0xF;
      packed |= nib << ( ( 8 - i ) * 4 );
   }
   return packed;
}
516
517 /* This C-helper takes a 128-bit national value stored in a 128-bit
518 * value. It converts it to a signless packed 64-bit decimal format.
519 */
/* Convert a 128-bit national value (two 64-bit halves) to a signless
 * packed 64-bit decimal; the sign halfword is masked off first.
 */
ULong convert_from_national_helper( ULong src_hi, ULong src_low ) {
   UInt i;
   ULong packed = 0, hword;

   src_low &= 0xFFFFFFFFFFFFFFF0ULL;   /* remove the sign */

   for( i = 0; i < 4; i++ ) {
      /* digit from the upper source half */
      hword = ( src_hi >> ( ( 3 - i ) * 16 ) ) & 0xF;
      packed |= hword << ( ( 7 - i ) * 4 );

      /* digit from the lower source half */
      hword = ( src_low >> ( ( 3 - i ) * 16 ) ) & 0xF;
      packed |= hword << ( ( 3 - i ) * 4 );
   }
   return packed;
}
537
538 /*----------------------------------------------*/
539 /*--- The exported fns .. ---*/
540 /*----------------------------------------------*/
541
542 /* VISIBLE TO LIBVEX CLIENT */
LibVEX_GuestPPC32_get_CR(const VexGuestPPC32State * vex_state)543 UInt LibVEX_GuestPPC32_get_CR ( /*IN*/const VexGuestPPC32State* vex_state )
544 {
545 # define FIELD(_n) \
546 ( ( (UInt) \
547 ( (vex_state->guest_CR##_n##_321 & (7<<1)) \
548 | (vex_state->guest_CR##_n##_0 & 1) \
549 ) \
550 ) \
551 << (4 * (7-(_n))) \
552 )
553
554 return
555 FIELD(0) | FIELD(1) | FIELD(2) | FIELD(3)
556 | FIELD(4) | FIELD(5) | FIELD(6) | FIELD(7);
557
558 # undef FIELD
559 }
560
561
562 /* VISIBLE TO LIBVEX CLIENT */
563 /* Note: %CR is 32 bits even for ppc64 */
LibVEX_GuestPPC64_get_CR(const VexGuestPPC64State * vex_state)564 UInt LibVEX_GuestPPC64_get_CR ( /*IN*/const VexGuestPPC64State* vex_state )
565 {
566 # define FIELD(_n) \
567 ( ( (UInt) \
568 ( (vex_state->guest_CR##_n##_321 & (7<<1)) \
569 | (vex_state->guest_CR##_n##_0 & 1) \
570 ) \
571 ) \
572 << (4 * (7-(_n))) \
573 )
574
575 return
576 FIELD(0) | FIELD(1) | FIELD(2) | FIELD(3)
577 | FIELD(4) | FIELD(5) | FIELD(6) | FIELD(7);
578
579 # undef FIELD
580 }
581
582
583 /* VISIBLE TO LIBVEX CLIENT */
void LibVEX_GuestPPC32_put_CR ( UInt cr_native,
                                /*OUT*/VexGuestPPC32State* vex_state )
{
   /* Scatter a native 32-bit CR image into the per-field _321 and _0
      guest-state bytes. */
   UInt f;

#  define CR_FIELD(_n) \
      do { \
         f = cr_native >> (4*(7-(_n))); \
         vex_state->guest_CR##_n##_0   = toUChar(f & 1); \
         vex_state->guest_CR##_n##_321 = toUChar(f & (7<<1)); \
      } while (0)

   CR_FIELD(0); CR_FIELD(1); CR_FIELD(2); CR_FIELD(3);
   CR_FIELD(4); CR_FIELD(5); CR_FIELD(6); CR_FIELD(7);

#  undef CR_FIELD
}
607
608
609 /* VISIBLE TO LIBVEX CLIENT */
610 /* Note: %CR is 32 bits even for ppc64 */
void LibVEX_GuestPPC64_put_CR ( UInt cr_native,
                                /*OUT*/VexGuestPPC64State* vex_state )
{
   /* Scatter a native 32-bit CR image into the per-field _321 and _0
      guest-state bytes. */
   UInt f;

#  define CR_FIELD(_n) \
      do { \
         f = cr_native >> (4*(7-(_n))); \
         vex_state->guest_CR##_n##_0   = toUChar(f & 1); \
         vex_state->guest_CR##_n##_321 = toUChar(f & (7<<1)); \
      } while (0)

   CR_FIELD(0); CR_FIELD(1); CR_FIELD(2); CR_FIELD(3);
   CR_FIELD(4); CR_FIELD(5); CR_FIELD(6); CR_FIELD(7);

#  undef CR_FIELD
}
634
635
636 /* VISIBLE TO LIBVEX CLIENT */
LibVEX_GuestPPC32_get_XER(const VexGuestPPC32State * vex_state)637 UInt LibVEX_GuestPPC32_get_XER ( /*IN*/const VexGuestPPC32State* vex_state )
638 {
639 UInt w = 0;
640 w |= ( ((UInt)vex_state->guest_XER_BC) & 0xFF );
641 w |= ( (((UInt)vex_state->guest_XER_SO) & 0x1) << 31 );
642 w |= ( (((UInt)vex_state->guest_XER_OV) & 0x1) << 30 );
643 w |= ( (((UInt)vex_state->guest_XER_CA) & 0x1) << 29 );
644 w |= ( (((UInt)vex_state->guest_XER_OV32) & 0x1) << 19 );
645 w |= ( (((UInt)vex_state->guest_XER_CA32) & 0x1) << 18 );
646 return w;
647 }
648
649
650 /* VISIBLE TO LIBVEX CLIENT */
651 /* Note: %XER is 32 bits even for ppc64 */
LibVEX_GuestPPC64_get_XER(const VexGuestPPC64State * vex_state)652 UInt LibVEX_GuestPPC64_get_XER ( /*IN*/const VexGuestPPC64State* vex_state )
653 {
654 UInt w = 0;
655 w |= ( ((UInt)vex_state->guest_XER_BC) & 0xFF );
656 w |= ( (((UInt)vex_state->guest_XER_SO) & 0x1) << 31 );
657 w |= ( (((UInt)vex_state->guest_XER_OV) & 0x1) << 30 );
658 w |= ( (((UInt)vex_state->guest_XER_CA) & 0x1) << 29 );
659 w |= ( (((UInt)vex_state->guest_XER_OV32) & 0x1) << 19 );
660 w |= ( (((UInt)vex_state->guest_XER_CA32) & 0x1) << 18 );
661 return w;
662 }
663
664
665 /* VISIBLE TO LIBVEX CLIENT */
void LibVEX_GuestPPC32_put_XER ( UInt xer_native,
                                 /*OUT*/VexGuestPPC32State* vex_state )
{
   /* Split a native 32-bit XER image into its separately-stored
      guest-state components. */
   UInt x = xer_native;
   vex_state->guest_XER_BC   = toUChar(x & 0xFF);
   vex_state->guest_XER_SO   = toUChar((x >> 31) & 0x1);
   vex_state->guest_XER_OV   = toUChar((x >> 30) & 0x1);
   vex_state->guest_XER_CA   = toUChar((x >> 29) & 0x1);
   vex_state->guest_XER_OV32 = toUChar((x >> 19) & 0x1);
   vex_state->guest_XER_CA32 = toUChar((x >> 18) & 0x1);
}
676
677 /* VISIBLE TO LIBVEX CLIENT */
678 /* Note: %XER is 32 bits even for ppc64 */
void LibVEX_GuestPPC64_put_XER ( UInt xer_native,
                                 /*OUT*/VexGuestPPC64State* vex_state )
{
   /* Split a native 32-bit XER image into its separately-stored
      guest-state components. */
   UInt x = xer_native;
   vex_state->guest_XER_BC   = toUChar(x & 0xFF);
   vex_state->guest_XER_SO   = toUChar((x >> 31) & 0x1);
   vex_state->guest_XER_OV   = toUChar((x >> 30) & 0x1);
   vex_state->guest_XER_CA   = toUChar((x >> 29) & 0x1);
   vex_state->guest_XER_OV32 = toUChar((x >> 19) & 0x1);
   vex_state->guest_XER_CA32 = toUChar((x >> 18) & 0x1);
}
689
690 /* VISIBLE TO LIBVEX CLIENT */
void LibVEX_GuestPPC32_initialise ( /*OUT*/VexGuestPPC32State* vex_state )
{
   /* Zero/reset every field of the ppc32 guest state to a sane
      power-on default. */
   Int i;
   /* Event-check fields used by the translation dispatcher. */
   vex_state->host_EvC_FAILADDR = 0;
   vex_state->host_EvC_COUNTER = 0;
   vex_state->pad3 = 0;
   vex_state->pad4 = 0;

   /* Integer registers. */
   vex_state->guest_GPR0  = 0;
   vex_state->guest_GPR1  = 0;
   vex_state->guest_GPR2  = 0;
   vex_state->guest_GPR3  = 0;
   vex_state->guest_GPR4  = 0;
   vex_state->guest_GPR5  = 0;
   vex_state->guest_GPR6  = 0;
   vex_state->guest_GPR7  = 0;
   vex_state->guest_GPR8  = 0;
   vex_state->guest_GPR9  = 0;
   vex_state->guest_GPR10 = 0;
   vex_state->guest_GPR11 = 0;
   vex_state->guest_GPR12 = 0;
   vex_state->guest_GPR13 = 0;
   vex_state->guest_GPR14 = 0;
   vex_state->guest_GPR15 = 0;
   vex_state->guest_GPR16 = 0;
   vex_state->guest_GPR17 = 0;
   vex_state->guest_GPR18 = 0;
   vex_state->guest_GPR19 = 0;
   vex_state->guest_GPR20 = 0;
   vex_state->guest_GPR21 = 0;
   vex_state->guest_GPR22 = 0;
   vex_state->guest_GPR23 = 0;
   vex_state->guest_GPR24 = 0;
   vex_state->guest_GPR25 = 0;
   vex_state->guest_GPR26 = 0;
   vex_state->guest_GPR27 = 0;
   vex_state->guest_GPR28 = 0;
   vex_state->guest_GPR29 = 0;
   vex_state->guest_GPR30 = 0;
   vex_state->guest_GPR31 = 0;

   /* Initialise the vector state. */
#  define VECZERO(_vr) _vr[0]=_vr[1]=_vr[2]=_vr[3] = 0;

   VECZERO(vex_state->guest_VSR0 );
   VECZERO(vex_state->guest_VSR1 );
   VECZERO(vex_state->guest_VSR2 );
   VECZERO(vex_state->guest_VSR3 );
   VECZERO(vex_state->guest_VSR4 );
   VECZERO(vex_state->guest_VSR5 );
   VECZERO(vex_state->guest_VSR6 );
   VECZERO(vex_state->guest_VSR7 );
   VECZERO(vex_state->guest_VSR8 );
   VECZERO(vex_state->guest_VSR9 );
   VECZERO(vex_state->guest_VSR10);
   VECZERO(vex_state->guest_VSR11);
   VECZERO(vex_state->guest_VSR12);
   VECZERO(vex_state->guest_VSR13);
   VECZERO(vex_state->guest_VSR14);
   VECZERO(vex_state->guest_VSR15);
   VECZERO(vex_state->guest_VSR16);
   VECZERO(vex_state->guest_VSR17);
   VECZERO(vex_state->guest_VSR18);
   VECZERO(vex_state->guest_VSR19);
   VECZERO(vex_state->guest_VSR20);
   VECZERO(vex_state->guest_VSR21);
   VECZERO(vex_state->guest_VSR22);
   VECZERO(vex_state->guest_VSR23);
   VECZERO(vex_state->guest_VSR24);
   VECZERO(vex_state->guest_VSR25);
   VECZERO(vex_state->guest_VSR26);
   VECZERO(vex_state->guest_VSR27);
   VECZERO(vex_state->guest_VSR28);
   VECZERO(vex_state->guest_VSR29);
   VECZERO(vex_state->guest_VSR30);
   VECZERO(vex_state->guest_VSR31);
   VECZERO(vex_state->guest_VSR32);
   VECZERO(vex_state->guest_VSR33);
   VECZERO(vex_state->guest_VSR34);
   VECZERO(vex_state->guest_VSR35);
   VECZERO(vex_state->guest_VSR36);
   VECZERO(vex_state->guest_VSR37);
   VECZERO(vex_state->guest_VSR38);
   VECZERO(vex_state->guest_VSR39);
   VECZERO(vex_state->guest_VSR40);
   VECZERO(vex_state->guest_VSR41);
   VECZERO(vex_state->guest_VSR42);
   VECZERO(vex_state->guest_VSR43);
   VECZERO(vex_state->guest_VSR44);
   VECZERO(vex_state->guest_VSR45);
   VECZERO(vex_state->guest_VSR46);
   VECZERO(vex_state->guest_VSR47);
   VECZERO(vex_state->guest_VSR48);
   VECZERO(vex_state->guest_VSR49);
   VECZERO(vex_state->guest_VSR50);
   VECZERO(vex_state->guest_VSR51);
   VECZERO(vex_state->guest_VSR52);
   VECZERO(vex_state->guest_VSR53);
   VECZERO(vex_state->guest_VSR54);
   VECZERO(vex_state->guest_VSR55);
   VECZERO(vex_state->guest_VSR56);
   VECZERO(vex_state->guest_VSR57);
   VECZERO(vex_state->guest_VSR58);
   VECZERO(vex_state->guest_VSR59);
   VECZERO(vex_state->guest_VSR60);
   VECZERO(vex_state->guest_VSR61);
   VECZERO(vex_state->guest_VSR62);
   VECZERO(vex_state->guest_VSR63);

#  undef VECZERO

   /* Control-flow registers. */
   vex_state->guest_CIA  = 0;
   vex_state->guest_LR   = 0;
   vex_state->guest_CTR  = 0;

   /* XER, stored as separate components. */
   vex_state->guest_XER_SO = 0;
   vex_state->guest_XER_OV = 0;
   vex_state->guest_XER_CA = 0;
   vex_state->guest_XER_BC = 0;

   vex_state->guest_XER_OV32 = 0;
   vex_state->guest_XER_CA32 = 0;

   /* CR, stored as separate per-field components. */
   vex_state->guest_CR0_321 = 0;
   vex_state->guest_CR0_0   = 0;
   vex_state->guest_CR1_321 = 0;
   vex_state->guest_CR1_0   = 0;
   vex_state->guest_CR2_321 = 0;
   vex_state->guest_CR2_0   = 0;
   vex_state->guest_CR3_321 = 0;
   vex_state->guest_CR3_0   = 0;
   vex_state->guest_CR4_321 = 0;
   vex_state->guest_CR4_0   = 0;
   vex_state->guest_CR5_321 = 0;
   vex_state->guest_CR5_0   = 0;
   vex_state->guest_CR6_321 = 0;
   vex_state->guest_CR6_0   = 0;
   vex_state->guest_CR7_321 = 0;
   vex_state->guest_CR7_0   = 0;

   /* Rounding modes default to round-to-nearest. */
   vex_state->guest_FPROUND  = PPCrm_NEAREST;
   vex_state->guest_DFPROUND = PPCrm_NEAREST;
   vex_state->guest_C_FPCC   = 0;
   vex_state->pad2 = 0;

   vex_state->guest_VRSAVE = 0;

   vex_state->guest_VSCR = 0x0; // Non-Java mode = 0

   vex_state->guest_EMNOTE = EmNote_NONE;

   vex_state->guest_CMSTART = 0;
   vex_state->guest_CMLEN   = 0;

   vex_state->guest_NRADDR = 0;
   vex_state->guest_NRADDR_GPR2 = 0;

   /* Function-redirection stack used by the core. */
   vex_state->guest_REDIR_SP = -1;
   for (i = 0; i < VEX_GUEST_PPC32_REDIR_STACK_SIZE; i++)
      vex_state->guest_REDIR_STACK[i] = 0;

   vex_state->guest_IP_AT_SYSCALL = 0;
   vex_state->guest_SPRG3_RO = 0;
   vex_state->guest_PPR = 0x4ULL << 50;  // medium priority
   vex_state->guest_PSPB = 0x100;  // an arbitrary non-zero value to start with

   vex_state->padding1 = 0;
   /* vex_state->padding2 = 0;  currently not used */
}
860
861
862 /* VISIBLE TO LIBVEX CLIENT */
void LibVEX_GuestPPC64_initialise ( /*OUT*/VexGuestPPC64State* vex_state )
{
   /* Zero/reset every field of the ppc64 guest state to a sane
      power-on default. */
   Int i;
   /* Event-check fields used by the translation dispatcher. */
   vex_state->host_EvC_FAILADDR = 0;
   vex_state->host_EvC_COUNTER = 0;
   vex_state->pad0 = 0;
   /* Integer registers. */
   vex_state->guest_GPR0  = 0;
   vex_state->guest_GPR1  = 0;
   vex_state->guest_GPR2  = 0;
   vex_state->guest_GPR3  = 0;
   vex_state->guest_GPR4  = 0;
   vex_state->guest_GPR5  = 0;
   vex_state->guest_GPR6  = 0;
   vex_state->guest_GPR7  = 0;
   vex_state->guest_GPR8  = 0;
   vex_state->guest_GPR9  = 0;
   vex_state->guest_GPR10 = 0;
   vex_state->guest_GPR11 = 0;
   vex_state->guest_GPR12 = 0;
   vex_state->guest_GPR13 = 0;
   vex_state->guest_GPR14 = 0;
   vex_state->guest_GPR15 = 0;
   vex_state->guest_GPR16 = 0;
   vex_state->guest_GPR17 = 0;
   vex_state->guest_GPR18 = 0;
   vex_state->guest_GPR19 = 0;
   vex_state->guest_GPR20 = 0;
   vex_state->guest_GPR21 = 0;
   vex_state->guest_GPR22 = 0;
   vex_state->guest_GPR23 = 0;
   vex_state->guest_GPR24 = 0;
   vex_state->guest_GPR25 = 0;
   vex_state->guest_GPR26 = 0;
   vex_state->guest_GPR27 = 0;
   vex_state->guest_GPR28 = 0;
   vex_state->guest_GPR29 = 0;
   vex_state->guest_GPR30 = 0;
   vex_state->guest_GPR31 = 0;

   /* Initialise the vector state. */
#  define VECZERO(_vr) _vr[0]=_vr[1]=_vr[2]=_vr[3] = 0;

   VECZERO(vex_state->guest_VSR0 );
   VECZERO(vex_state->guest_VSR1 );
   VECZERO(vex_state->guest_VSR2 );
   VECZERO(vex_state->guest_VSR3 );
   VECZERO(vex_state->guest_VSR4 );
   VECZERO(vex_state->guest_VSR5 );
   VECZERO(vex_state->guest_VSR6 );
   VECZERO(vex_state->guest_VSR7 );
   VECZERO(vex_state->guest_VSR8 );
   VECZERO(vex_state->guest_VSR9 );
   VECZERO(vex_state->guest_VSR10);
   VECZERO(vex_state->guest_VSR11);
   VECZERO(vex_state->guest_VSR12);
   VECZERO(vex_state->guest_VSR13);
   VECZERO(vex_state->guest_VSR14);
   VECZERO(vex_state->guest_VSR15);
   VECZERO(vex_state->guest_VSR16);
   VECZERO(vex_state->guest_VSR17);
   VECZERO(vex_state->guest_VSR18);
   VECZERO(vex_state->guest_VSR19);
   VECZERO(vex_state->guest_VSR20);
   VECZERO(vex_state->guest_VSR21);
   VECZERO(vex_state->guest_VSR22);
   VECZERO(vex_state->guest_VSR23);
   VECZERO(vex_state->guest_VSR24);
   VECZERO(vex_state->guest_VSR25);
   VECZERO(vex_state->guest_VSR26);
   VECZERO(vex_state->guest_VSR27);
   VECZERO(vex_state->guest_VSR28);
   VECZERO(vex_state->guest_VSR29);
   VECZERO(vex_state->guest_VSR30);
   VECZERO(vex_state->guest_VSR31);
   VECZERO(vex_state->guest_VSR32);
   VECZERO(vex_state->guest_VSR33);
   VECZERO(vex_state->guest_VSR34);
   VECZERO(vex_state->guest_VSR35);
   VECZERO(vex_state->guest_VSR36);
   VECZERO(vex_state->guest_VSR37);
   VECZERO(vex_state->guest_VSR38);
   VECZERO(vex_state->guest_VSR39);
   VECZERO(vex_state->guest_VSR40);
   VECZERO(vex_state->guest_VSR41);
   VECZERO(vex_state->guest_VSR42);
   VECZERO(vex_state->guest_VSR43);
   VECZERO(vex_state->guest_VSR44);
   VECZERO(vex_state->guest_VSR45);
   VECZERO(vex_state->guest_VSR46);
   VECZERO(vex_state->guest_VSR47);
   VECZERO(vex_state->guest_VSR48);
   VECZERO(vex_state->guest_VSR49);
   VECZERO(vex_state->guest_VSR50);
   VECZERO(vex_state->guest_VSR51);
   VECZERO(vex_state->guest_VSR52);
   VECZERO(vex_state->guest_VSR53);
   VECZERO(vex_state->guest_VSR54);
   VECZERO(vex_state->guest_VSR55);
   VECZERO(vex_state->guest_VSR56);
   VECZERO(vex_state->guest_VSR57);
   VECZERO(vex_state->guest_VSR58);
   VECZERO(vex_state->guest_VSR59);
   VECZERO(vex_state->guest_VSR60);
   VECZERO(vex_state->guest_VSR61);
   VECZERO(vex_state->guest_VSR62);
   VECZERO(vex_state->guest_VSR63);

#  undef VECZERO

   /* Control-flow registers. */
   vex_state->guest_CIA  = 0;
   vex_state->guest_LR   = 0;
   vex_state->guest_CTR  = 0;

   /* XER, stored as separate components. */
   vex_state->guest_XER_SO = 0;
   vex_state->guest_XER_OV = 0;
   vex_state->guest_XER_CA = 0;
   vex_state->guest_XER_BC = 0;

   /* Previously left uninitialised, although LibVEX_GuestPPC64_get_XER
      reads both fields; zero them, matching the ppc32 initialiser. */
   vex_state->guest_XER_OV32 = 0;
   vex_state->guest_XER_CA32 = 0;

   /* CR, stored as separate per-field components. */
   vex_state->guest_CR0_321 = 0;
   vex_state->guest_CR0_0   = 0;
   vex_state->guest_CR1_321 = 0;
   vex_state->guest_CR1_0   = 0;
   vex_state->guest_CR2_321 = 0;
   vex_state->guest_CR2_0   = 0;
   vex_state->guest_CR3_321 = 0;
   vex_state->guest_CR3_0   = 0;
   vex_state->guest_CR4_321 = 0;
   vex_state->guest_CR4_0   = 0;
   vex_state->guest_CR5_321 = 0;
   vex_state->guest_CR5_0   = 0;
   vex_state->guest_CR6_321 = 0;
   vex_state->guest_CR6_0   = 0;
   vex_state->guest_CR7_321 = 0;
   vex_state->guest_CR7_0   = 0;

   /* Rounding modes default to round-to-nearest. */
   vex_state->guest_FPROUND  = PPCrm_NEAREST;
   vex_state->guest_DFPROUND = PPCrm_NEAREST;
   vex_state->guest_C_FPCC   = 0;
   vex_state->pad2 = 0;

   vex_state->guest_VRSAVE = 0;

   vex_state->guest_VSCR = 0x0; // Non-Java mode = 0

   vex_state->guest_EMNOTE = EmNote_NONE;

   vex_state->padding = 0;

   vex_state->guest_CMSTART = 0;
   vex_state->guest_CMLEN   = 0;

   vex_state->guest_NRADDR = 0;
   vex_state->guest_NRADDR_GPR2 = 0;

   /* Function-redirection stack used by the core. */
   vex_state->guest_REDIR_SP = -1;
   for (i = 0; i < VEX_GUEST_PPC64_REDIR_STACK_SIZE; i++)
      vex_state->guest_REDIR_STACK[i] = 0;

   vex_state->guest_IP_AT_SYSCALL = 0;
   vex_state->guest_SPRG3_RO = 0;
   /* Transactional-memory SPRs. */
   vex_state->guest_TFHAR  = 0;
   vex_state->guest_TFIAR  = 0;
   vex_state->guest_TEXASR = 0;
   vex_state->guest_PPR = 0x4ULL << 50;  // medium priority
   vex_state->guest_PSPB = 0x100;  // an arbitrary non-zero value to start with
   vex_state->guest_DSCR = 0;
}
1030
1031
1032 /*-----------------------------------------------------------*/
1033 /*--- Describing the ppc guest state, for the benefit ---*/
1034 /*--- of iropt and instrumenters. ---*/
1035 /*-----------------------------------------------------------*/
1036
1037 /* Figure out if any part of the guest state contained in minoff
1038 .. maxoff requires precise memory exceptions. If in doubt return
   True (but this generates significantly slower code).
1040
1041 By default we enforce precise exns for guest R1 (stack pointer),
1042 CIA (current insn address) and LR (link register). These are the
1043 minimum needed to extract correct stack backtraces from ppc
1044 code. [[NB: not sure if keeping LR up to date is actually
1045 necessary.]]
1046
1047 Only R1 is needed in mode VexRegUpdSpAtMemAccess.
1048 */
guest_ppc32_state_requires_precise_mem_exns(Int minoff,Int maxoff,VexRegisterUpdates pxControl)1049 Bool guest_ppc32_state_requires_precise_mem_exns (
1050 Int minoff, Int maxoff, VexRegisterUpdates pxControl
1051 )
1052 {
1053 Int lr_min = offsetof(VexGuestPPC32State, guest_LR);
1054 Int lr_max = lr_min + 4 - 1;
1055 Int r1_min = offsetof(VexGuestPPC32State, guest_GPR1);
1056 Int r1_max = r1_min + 4 - 1;
1057 Int cia_min = offsetof(VexGuestPPC32State, guest_CIA);
1058 Int cia_max = cia_min + 4 - 1;
1059
1060 if (maxoff < r1_min || minoff > r1_max) {
1061 /* no overlap with R1 */
1062 if (pxControl == VexRegUpdSpAtMemAccess)
1063 return False; // We only need to check stack pointer.
1064 } else {
1065 return True;
1066 }
1067
1068 if (maxoff < lr_min || minoff > lr_max) {
1069 /* no overlap with LR */
1070 } else {
1071 return True;
1072 }
1073
1074 if (maxoff < cia_min || minoff > cia_max) {
1075 /* no overlap with CIA */
1076 } else {
1077 return True;
1078 }
1079
1080 return False;
1081 }
1082
guest_ppc64_state_requires_precise_mem_exns(Int minoff,Int maxoff,VexRegisterUpdates pxControl)1083 Bool guest_ppc64_state_requires_precise_mem_exns (
1084 Int minoff, Int maxoff, VexRegisterUpdates pxControl
1085 )
1086 {
1087 /* Given that R2 is a Big Deal in the ELF ppc64 ABI, it seems
1088 prudent to be conservative with it, even though thus far there
1089 is no evidence to suggest that it actually needs to be kept up
1090 to date wrt possible exceptions. */
1091 Int lr_min = offsetof(VexGuestPPC64State, guest_LR);
1092 Int lr_max = lr_min + 8 - 1;
1093 Int r1_min = offsetof(VexGuestPPC64State, guest_GPR1);
1094 Int r1_max = r1_min + 8 - 1;
1095 Int r2_min = offsetof(VexGuestPPC64State, guest_GPR2);
1096 Int r2_max = r2_min + 8 - 1;
1097 Int cia_min = offsetof(VexGuestPPC64State, guest_CIA);
1098 Int cia_max = cia_min + 8 - 1;
1099
1100 if (maxoff < r1_min || minoff > r1_max) {
1101 /* no overlap with R1 */
1102 if (pxControl == VexRegUpdSpAtMemAccess)
1103 return False; // We only need to check stack pointer.
1104 } else {
1105 return True;
1106 }
1107
1108 if (maxoff < lr_min || minoff > lr_max) {
1109 /* no overlap with LR */
1110 } else {
1111 return True;
1112 }
1113
1114 if (maxoff < r2_min || minoff > r2_max) {
1115 /* no overlap with R2 */
1116 } else {
1117 return True;
1118 }
1119
1120 if (maxoff < cia_min || minoff > cia_max) {
1121 /* no overlap with CIA */
1122 } else {
1123 return True;
1124 }
1125
1126 return False;
1127 }
1128
1129
/* Expand to one { byte-offset, byte-size } pair describing a field of
   the 32-bit guest state, for use in the alwaysDefd list below. */
#define ALWAYSDEFD32(field)                           \
    { offsetof(VexGuestPPC32State, field),            \
      (sizeof ((VexGuestPPC32State*)0)->field) }

/* Layout descriptor for the ppc32 guest state, consumed by iropt and
   by instrumenters such as Memcheck: locations of the stack pointer,
   frame pointer and instruction pointer, plus the state slices that
   are always to be regarded as defined. */
VexGuestLayout
   ppc32Guest_layout
      = {
          /* Total size of the guest state, in bytes. */
          .total_sizeB = sizeof(VexGuestPPC32State),

          /* Describe the stack pointer. */
          .offset_SP = offsetof(VexGuestPPC32State,guest_GPR1),
          .sizeof_SP = 4,

          /* Describe the frame pointer.  ppc32 has no dedicated frame
             pointer register, so R1 doubles as both SP and FP. */
          .offset_FP = offsetof(VexGuestPPC32State,guest_GPR1),
          .sizeof_FP = 4,

          /* Describe the instruction pointer. */
          .offset_IP = offsetof(VexGuestPPC32State,guest_CIA),
          .sizeof_IP = 4,

          /* Describe any sections to be regarded by Memcheck as
             'always-defined'.  NOTE: must equal the number of
             entries in .alwaysDefd below. */
          .n_alwaysDefd = 12,

          .alwaysDefd
             = { /* 0 */ ALWAYSDEFD32(guest_CIA),
                 /* 1 */ ALWAYSDEFD32(guest_EMNOTE),
                 /* 2 */ ALWAYSDEFD32(guest_CMSTART),
                 /* 3 */ ALWAYSDEFD32(guest_CMLEN),
                 /* 4 */ ALWAYSDEFD32(guest_VSCR),
                 /* 5 */ ALWAYSDEFD32(guest_FPROUND),
                 /* 6 */ ALWAYSDEFD32(guest_NRADDR),
                 /* 7 */ ALWAYSDEFD32(guest_NRADDR_GPR2),
                 /* 8 */ ALWAYSDEFD32(guest_REDIR_SP),
                 /* 9 */ ALWAYSDEFD32(guest_REDIR_STACK),
                 /* 10 */ ALWAYSDEFD32(guest_IP_AT_SYSCALL),
                 /* 11 */ ALWAYSDEFD32(guest_C_FPCC)
               }
        };
1171
/* Expand to one { byte-offset, byte-size } pair describing a field of
   the 64-bit guest state, for use in the alwaysDefd list below. */
#define ALWAYSDEFD64(field)                           \
    { offsetof(VexGuestPPC64State, field),            \
      (sizeof ((VexGuestPPC64State*)0)->field) }

/* Layout descriptor for the ppc64 guest state, consumed by iropt and
   by instrumenters such as Memcheck: locations of the stack pointer,
   frame pointer and instruction pointer, plus the state slices that
   are always to be regarded as defined. */
VexGuestLayout
   ppc64Guest_layout
      = {
          /* Total size of the guest state, in bytes. */
          .total_sizeB = sizeof(VexGuestPPC64State),

          /* Describe the stack pointer. */
          .offset_SP = offsetof(VexGuestPPC64State,guest_GPR1),
          .sizeof_SP = 8,

          /* Describe the frame pointer.  ppc64 has no dedicated frame
             pointer register, so R1 doubles as both SP and FP. */
          .offset_FP = offsetof(VexGuestPPC64State,guest_GPR1),
          .sizeof_FP = 8,

          /* Describe the instruction pointer. */
          .offset_IP = offsetof(VexGuestPPC64State,guest_CIA),
          .sizeof_IP = 8,

          /* Describe any sections to be regarded by Memcheck as
             'always-defined'.  NOTE: must equal the number of
             entries in .alwaysDefd below. */
          .n_alwaysDefd = 12,

          .alwaysDefd
             = { /* 0 */ ALWAYSDEFD64(guest_CIA),
                 /* 1 */ ALWAYSDEFD64(guest_EMNOTE),
                 /* 2 */ ALWAYSDEFD64(guest_CMSTART),
                 /* 3 */ ALWAYSDEFD64(guest_CMLEN),
                 /* 4 */ ALWAYSDEFD64(guest_VSCR),
                 /* 5 */ ALWAYSDEFD64(guest_FPROUND),
                 /* 6 */ ALWAYSDEFD64(guest_NRADDR),
                 /* 7 */ ALWAYSDEFD64(guest_NRADDR_GPR2),
                 /* 8 */ ALWAYSDEFD64(guest_REDIR_SP),
                 /* 9 */ ALWAYSDEFD64(guest_REDIR_STACK),
                 /* 10 */ ALWAYSDEFD64(guest_IP_AT_SYSCALL),
                 /* 11 */ ALWAYSDEFD64(guest_C_FPCC)
               }
        };
1213
1214 /*---------------------------------------------------------------*/
1215 /*--- end guest_ppc_helpers.c ---*/
1216 /*---------------------------------------------------------------*/
1217