/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2014 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS

#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif

int AbstractAssembler::code_fill_byte() {
  return 0x00;                  // illegal instruction 0x00000000
}

void Assembler::print_instruction(int inst) {
  Unimplemented();
}

// Patch instruction `inst' at offset `inst_pos' to refer to
// `dest_pos' and return the resulting instruction.  We should have
// pcs, not offsets, but since all is relative, it will work out fine.
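// For example (illustrative): patching a `b' at offset 0x100 to target
// offset 0x200 computes disp = 0x100; the old LI field is cleared via the
// mask li(-1), and li(0x100) is ORed in.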
int Assembler::patched_branch(int dest_pos, int inst, int inst_pos) {
  int m = 0; // mask for displacement field
  int v = 0; // new value for displacement field

  switch (inv_op_ppc(inst)) {
  case b_op:  m = li(-1); v = li(disp(dest_pos, inst_pos)); break;
  case bc_op: m = bd(-1); v = bd(disp(dest_pos, inst_pos)); break;
  default: ShouldNotReachHere();
  }
  return (inst & ~m) | v;
}

// Return the offset, relative to _code_begin, of the destination of
// the branch inst at offset pos.
int Assembler::branch_destination(int inst, int pos) {
  int r = 0;
  switch (inv_op_ppc(inst)) {
    case b_op:  r = bxx_destination_offset(inst, pos); break;
    case bc_op: r = inv_bd_field(inst, pos); break;
    default: ShouldNotReachHere();
  }
  return r;
}

// Low-level andi-one-instruction-macro.
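// PPC has only the record form andi., which always sets CR0. Where the mask
// allows it, this macro strength-reduces to a rotate/clear instruction that
// leaves the condition register untouched. Examples (illustrative):
//   andi(a, s, 0x00ff) -> clrldi(a, s, 56)          (mask is 2^8 - 1)
//   andi(a, s, 0x0100) -> rlwinm(a, s, 0, 23, 23)   (mask is a single bit)
//   otherwise          -> andi_(a, s, ui16)         (clobbers CR0)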
void Assembler::andi(Register a, Register s, const int ui16) {
  assert(is_uimm(ui16, 16), "must be 16-bit unsigned immediate");
  if (is_power_of_2_long(((jlong) ui16)+1)) {
    // pow2minus1
    clrldi(a, s, 64-log2_long((((jlong) ui16)+1)));
  } else if (is_power_of_2_long((jlong) ui16)) {
    // pow2
    rlwinm(a, s, 0, 31-log2_long((jlong) ui16), 31-log2_long((jlong) ui16));
  } else if (is_power_of_2_long((jlong)-ui16)) {
    // negpow2
    clrrdi(a, s, log2_long((jlong)-ui16));
  } else {
    andi_(a, s, ui16);
  }
}

// RegisterOrConstant version.
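// Three operand shapes are handled (illustrative summary):
//   constant, no base register: materialize the high bits, fold the low
//                               16 bits into the displacement;
//   simm16 constant + base:     a single d-form access;
//   large constant + base:      materialize the constant, then use the
//                               indexed (x-form) access.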
void Assembler::ld(Register d, RegisterOrConstant roc, Register s1) {
  if (roc.is_constant()) {
    if (s1 == noreg) {
      int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
      Assembler::ld(d, simm16_rest, d);
    } else if (is_simm(roc.as_constant(), 16)) {
      Assembler::ld(d, roc.as_constant(), s1);
    } else {
      load_const_optimized(d, roc.as_constant());
      Assembler::ldx(d, d, s1);
    }
  } else {
    if (s1 == noreg)
      Assembler::ld(d, 0, roc.as_register());
    else
      Assembler::ldx(d, roc.as_register(), s1);
  }
}

void Assembler::lwa(Register d, RegisterOrConstant roc, Register s1) {
  if (roc.is_constant()) {
    if (s1 == noreg) {
      int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
      Assembler::lwa(d, simm16_rest, d);
    } else if (is_simm(roc.as_constant(), 16)) {
      Assembler::lwa(d, roc.as_constant(), s1);
    } else {
      load_const_optimized(d, roc.as_constant());
      Assembler::lwax(d, d, s1);
    }
  } else {
    if (s1 == noreg)
      Assembler::lwa(d, 0, roc.as_register());
    else
      Assembler::lwax(d, roc.as_register(), s1);
  }
}

void Assembler::lwz(Register d, RegisterOrConstant roc, Register s1) {
  if (roc.is_constant()) {
    if (s1 == noreg) {
      int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
      Assembler::lwz(d, simm16_rest, d);
    } else if (is_simm(roc.as_constant(), 16)) {
      Assembler::lwz(d, roc.as_constant(), s1);
    } else {
      load_const_optimized(d, roc.as_constant());
      Assembler::lwzx(d, d, s1);
    }
  } else {
    if (s1 == noreg)
      Assembler::lwz(d, 0, roc.as_register());
    else
      Assembler::lwzx(d, roc.as_register(), s1);
  }
}

void Assembler::lha(Register d, RegisterOrConstant roc, Register s1) {
  if (roc.is_constant()) {
    if (s1 == noreg) {
      int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
      Assembler::lha(d, simm16_rest, d);
    } else if (is_simm(roc.as_constant(), 16)) {
      Assembler::lha(d, roc.as_constant(), s1);
    } else {
      load_const_optimized(d, roc.as_constant());
      Assembler::lhax(d, d, s1);
    }
  } else {
    if (s1 == noreg)
      Assembler::lha(d, 0, roc.as_register());
    else
      Assembler::lhax(d, roc.as_register(), s1);
  }
}

void Assembler::lhz(Register d, RegisterOrConstant roc, Register s1) {
  if (roc.is_constant()) {
    if (s1 == noreg) {
      int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
      Assembler::lhz(d, simm16_rest, d);
    } else if (is_simm(roc.as_constant(), 16)) {
      Assembler::lhz(d, roc.as_constant(), s1);
    } else {
      load_const_optimized(d, roc.as_constant());
      Assembler::lhzx(d, d, s1);
    }
  } else {
    if (s1 == noreg)
      Assembler::lhz(d, 0, roc.as_register());
    else
      Assembler::lhzx(d, roc.as_register(), s1);
  }
}

void Assembler::lbz(Register d, RegisterOrConstant roc, Register s1) {
  if (roc.is_constant()) {
    if (s1 == noreg) {
      int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
      Assembler::lbz(d, simm16_rest, d);
    } else if (is_simm(roc.as_constant(), 16)) {
      Assembler::lbz(d, roc.as_constant(), s1);
    } else {
      load_const_optimized(d, roc.as_constant());
      Assembler::lbzx(d, d, s1);
    }
  } else {
    if (s1 == noreg)
      Assembler::lbz(d, 0, roc.as_register());
    else
      Assembler::lbzx(d, roc.as_register(), s1);
  }
}

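// Store variants. Unlike the loads above, these take an explicit tmp
// register: d holds the value being stored, so it cannot double as the
// scratch register needed to materialize a large constant.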
void Assembler::std(Register d, RegisterOrConstant roc, Register s1, Register tmp) {
  if (roc.is_constant()) {
    if (s1 == noreg) {
      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
      int simm16_rest = load_const_optimized(tmp, roc.as_constant(), noreg, true);
      Assembler::std(d, simm16_rest, tmp);
    } else if (is_simm(roc.as_constant(), 16)) {
      Assembler::std(d, roc.as_constant(), s1);
    } else {
      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
      load_const_optimized(tmp, roc.as_constant());
      Assembler::stdx(d, tmp, s1);
    }
  } else {
    if (s1 == noreg)
      Assembler::std(d, 0, roc.as_register());
    else
      Assembler::stdx(d, roc.as_register(), s1);
  }
}

void Assembler::stw(Register d, RegisterOrConstant roc, Register s1, Register tmp) {
  if (roc.is_constant()) {
    if (s1 == noreg) {
      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
      int simm16_rest = load_const_optimized(tmp, roc.as_constant(), noreg, true);
      Assembler::stw(d, simm16_rest, tmp);
    } else if (is_simm(roc.as_constant(), 16)) {
      Assembler::stw(d, roc.as_constant(), s1);
    } else {
      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
      load_const_optimized(tmp, roc.as_constant());
      Assembler::stwx(d, tmp, s1);
    }
  } else {
    if (s1 == noreg)
      Assembler::stw(d, 0, roc.as_register());
    else
      Assembler::stwx(d, roc.as_register(), s1);
  }
}

void Assembler::sth(Register d, RegisterOrConstant roc, Register s1, Register tmp) {
  if (roc.is_constant()) {
    if (s1 == noreg) {
      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
      int simm16_rest = load_const_optimized(tmp, roc.as_constant(), noreg, true);
      Assembler::sth(d, simm16_rest, tmp);
    } else if (is_simm(roc.as_constant(), 16)) {
      Assembler::sth(d, roc.as_constant(), s1);
    } else {
      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
      load_const_optimized(tmp, roc.as_constant());
      Assembler::sthx(d, tmp, s1);
    }
  } else {
    if (s1 == noreg)
      Assembler::sth(d, 0, roc.as_register());
    else
      Assembler::sthx(d, roc.as_register(), s1);
  }
}

void Assembler::stb(Register d, RegisterOrConstant roc, Register s1, Register tmp) {
  if (roc.is_constant()) {
    if (s1 == noreg) {
      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
      int simm16_rest = load_const_optimized(tmp, roc.as_constant(), noreg, true);
      Assembler::stb(d, simm16_rest, tmp);
    } else if (is_simm(roc.as_constant(), 16)) {
      Assembler::stb(d, roc.as_constant(), s1);
    } else {
      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
      load_const_optimized(tmp, roc.as_constant());
      Assembler::stbx(d, tmp, s1);
    }
  } else {
    if (s1 == noreg)
      Assembler::stb(d, 0, roc.as_register());
    else
      Assembler::stbx(d, roc.as_register(), s1);
  }
}

void Assembler::add(Register d, RegisterOrConstant roc, Register s1) {
  if (roc.is_constant()) {
    intptr_t c = roc.as_constant();
    assert(is_simm(c, 16), "too big");
    addi(d, s1, (int)c);
  }
  else add(d, roc.as_register(), s1);
}

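// d = s1 - roc (on PPC, subf subtracts its second operand from its third).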
void Assembler::subf(Register d, RegisterOrConstant roc, Register s1) {
  if (roc.is_constant()) {
    intptr_t c = roc.as_constant();
    assert(is_simm(-c, 16), "too big");
    addi(d, s1, (int)-c);
  }
  else subf(d, roc.as_register(), s1);
}

void Assembler::cmpd(ConditionRegister d, RegisterOrConstant roc, Register s1) {
  if (roc.is_constant()) {
    intptr_t c = roc.as_constant();
    assert(is_simm(c, 16), "too big");
    cmpdi(d, s1, (int)c);
  }
  else cmpd(d, roc.as_register(), s1);
}

// Load a 64 bit constant. Patchable.
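// For example (illustrative), load_const(d, 0x1122334455667788, noreg) expands to:
//   lis  d, 0x1122      // d = 0x0000000011220000
//   ori  d, d, 0x3344   // d = 0x0000000011223344
//   sldi d, d, 32       // d = 0x1122334400000000
//   oris d, d, 0x5566   // d = 0x1122334455660000
//   ori  d, d, 0x7788   // d = 0x1122334455667788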
void Assembler::load_const(Register d, long x, Register tmp) {
  // 64-bit value: x = xa xb xc xd
  int xa = (x >> 48) & 0xffff;
  int xb = (x >> 32) & 0xffff;
  int xc = (x >> 16) & 0xffff;
  int xd = (x >>  0) & 0xffff;
  if (tmp == noreg) {
    Assembler::lis( d, (int)(short)xa);
    Assembler::ori( d, d, (unsigned int)xb);
    Assembler::sldi(d, d, 32);
    Assembler::oris(d, d, (unsigned int)xc);
    Assembler::ori( d, d, (unsigned int)xd);
  } else {
    // Exploit instruction-level parallelism if we have a tmp register.
    assert_different_registers(d, tmp);
    Assembler::lis(tmp, (int)(short)xa);
    Assembler::lis(d, (int)(short)xc);
    Assembler::ori(tmp, tmp, (unsigned int)xb);
    Assembler::ori(d, d, (unsigned int)xd);
    Assembler::insrdi(d, tmp, 32, 0);
  }
}

// Load a 64 bit constant, optimized, not identifiable (the emitted sequence
// depends on the value, so, in contrast to load_const above, it cannot be
// found and patched later).
// Tmp can be used to increase ILP. Set return_simm16_rest = true to get a
// 16 bit immediate offset.
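// For example (illustrative): load_const_optimized(d, 0x12345678, noreg, true)
// emits just "lis d, 0x1234" and returns 0x5678; the caller folds the return
// value into the displacement of the following memory access (see the
// RegisterOrConstant load/store variants above).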
int Assembler::load_const_optimized(Register d, long x, Register tmp, bool return_simm16_rest) {
  // Avoid accidentally trying to use R0 for indexed addressing.
  assert(d != R0, "R0 not allowed");
  assert_different_registers(d, tmp);

  short xa, xb, xc, xd; // Four 16-bit chunks of const.
  long rem = x;         // Remaining part of const.

  xd = rem & 0xFFFF;    // Lowest 16-bit chunk.
  rem = (rem >> 16) + ((unsigned short)xd >> 15); // Compensation for sign extend.

  if (rem == 0) { // opt 1: simm16
    li(d, xd);
    return 0;
  }

  xc = rem & 0xFFFF; // Next 16-bit chunk.
  rem = (rem >> 16) + ((unsigned short)xc >> 15); // Compensation for sign extend.

  if (rem == 0) { // opt 2: simm32
    lis(d, xc);
  } else { // High 32 bits needed.

    if (tmp != noreg) { // opt 3: We have a temp reg.
      // No carry propagation between xc and higher chunks here (use logical instructions).
      xa = (x >> 48) & 0xffff;
      xb = (x >> 32) & 0xffff; // No sign compensation, we use lis+ori or li to allow usage of R0.
      bool load_xa = (xa != 0) || (xb < 0);
      bool return_xd = false;

      if (load_xa) { lis(tmp, xa); }
      if (xc) { lis(d, xc); }
      if (load_xa) {
        if (xb) { ori(tmp, tmp, (unsigned short)xb); } // No addi, we support tmp == R0.
      } else {
        li(tmp, xb); // non-negative
      }
      if (xc) {
        if (return_simm16_rest && xd >= 0) { return_xd = true; } // >= 0 to avoid carry propagation after insrdi/rldimi.
        else if (xd) { addi(d, d, xd); }
      } else {
        li(d, xd);
      }
      insrdi(d, tmp, 32, 0);
      return return_xd ? xd : 0; // non-negative
    }

    xb = rem & 0xFFFF; // Next 16-bit chunk.
    rem = (rem >> 16) + ((unsigned short)xb >> 15); // Compensation for sign extend.

    xa = rem & 0xFFFF; // Highest 16-bit chunk.

    // opt 4: avoid adding 0
    if (xa) { // Highest 16-bit needed?
      lis(d, xa);
      if (xb) { addi(d, d, xb); }
    } else {
      li(d, xb);
    }
    sldi(d, d, 32);
    if (xc) { addis(d, d, xc); }
  }

  // opt 5: Return offset to be inserted into following instruction.
  if (return_simm16_rest) return xd;

  if (xd) { addi(d, d, xd); }
  return 0;
}

#ifndef PRODUCT
// Self-test of the PPC assembler: emit one of each instruction, then
// disassemble the generated code buffer.
void Assembler::test_asm() {
  // PPC 1, section 3.3.8, Fixed-Point Arithmetic Instructions
  addi(   R0,  R1,  10);
  addis(  R5,  R2,  11);
  addic_( R3,  R31, 42);
  subfic( R21, R12, 2112);
  add(    R3,  R2,  R1);
  add_(   R11, R22, R30);
  subf(   R7,  R6,  R5);
  subf_(  R8,  R9,  R4);
  addc(   R11, R12, R13);
  addc_(  R14, R14, R14);
  subfc(  R15, R16, R17);
  subfc_( R18, R20, R19);
  adde(   R20, R22, R24);
  adde_(  R29, R27, R26);
  subfe(  R28, R1,  R0);
  subfe_( R21, R11, R29);
  neg(    R21, R22);
  neg_(   R13, R23);
  mulli(  R0,  R11, -31);
  mulld(  R1,  R18, R21);
  mulld_( R2,  R17, R22);
  mullw(  R3,  R16, R23);
  mullw_( R4,  R15, R24);
  divd(   R5,  R14, R25);
  divd_(  R6,  R13, R26);
  divw(   R7,  R12, R27);
  divw_(  R8,  R11, R28);

  li(     R3, -4711);

  // PPC 1, section 3.3.9, Fixed-Point Compare Instructions
  cmpi(   CCR7, 0, R27, 4711);
  cmp(    CCR0, 1, R14, R11);
  cmpli(  CCR5, 1, R17, 45);
  cmpl(   CCR3, 0, R9,  R10);

  cmpwi(  CCR7, R27, 4711);
  cmpw(   CCR0, R14, R11);
  cmplwi( CCR5, R17, 45);
  cmplw(  CCR3, R9,  R10);

  cmpdi(  CCR7, R27, 4711);
  cmpd(   CCR0, R14, R11);
  cmpldi( CCR5, R17, 45);
  cmpld(  CCR3, R9,  R10);

  // PPC 1, section 3.3.11, Fixed-Point Logical Instructions
  andi_(  R4,  R5,  0xff);
  andis_( R12, R13, 0x7b51);
  ori(    R1,  R4,  13);
  oris(   R3,  R5,  177);
  xori(   R7,  R6,  51);
  xoris(  R29, R0,  1);
  andr(   R17, R21, R16);
  and_(   R3,  R5,  R15);
  orr(    R2,  R1,  R9);
  or_(    R17, R15, R11);
  xorr(   R19, R18, R10);
  xor_(   R31, R21, R11);
  nand(   R5,  R7,  R3);
  nand_(  R3,  R1,  R0);
  nor(    R2,  R3,  R5);
  nor_(   R3,  R6,  R8);
  andc(   R25, R12, R11);
  andc_(  R24, R22, R21);
  orc(    R20, R10, R12);
  orc_(   R22, R2,  R13);

  nop();

  // PPC 1, section 3.3.12, Fixed-Point Rotate and Shift Instructions
  sld(    R5,  R6,  R8);
  sld_(   R3,  R5,  R9);
  slw(    R2,  R1,  R10);
  slw_(   R6,  R26, R16);
  srd(    R16, R24, R8);
  srd_(   R21, R14, R7);
  srw(    R22, R25, R29);
  srw_(   R5,  R18, R17);
  srad(   R7,  R11, R0);
  srad_(  R9,  R13, R1);
  sraw(   R7,  R15, R2);
  sraw_(  R4,  R17, R3);
  sldi(   R3,  R18, 63);
  sldi_(  R2,  R20, 30);
  slwi(   R1,  R21, 30);
  slwi_(  R7,  R23, 8);
  srdi(   R0,  R19, 2);
  srdi_(  R12, R24, 5);
  srwi(   R13, R27, 6);
  srwi_(  R14, R29, 7);
  sradi(  R15, R30, 9);
  sradi_( R16, R31, 19);
  srawi(  R17, R31, 15);
  srawi_( R18, R31, 12);

  clrrdi( R3, R30, 5);
  clrldi( R9, R10, 11);

  rldicr( R19, R20, 13, 15);
  rldicr_(R20, R20, 16, 14);
  rldicl( R21, R21, 30, 33);
  rldicl_(R22, R1,  20, 25);
  rlwinm( R23, R2,  25, 10, 11);
  rlwinm_(R24, R3,  12, 13, 14);

  // PPC 1, section 3.3.2, Fixed-Point Load Instructions
  lwzx(   R3,  R5, R7);
  lwz(    R11,  0, R1);
  lwzu(   R31, -4, R11);

  lwax(   R3,  R5, R7);
  lwa(    R31, -4, R11);
  lhzx(   R3,  R5, R7);
  lhz(    R31, -4, R11);
  lhzu(   R31, -4, R11);

  lhax(   R3,  R5, R7);
  lha(    R31, -4, R11);
  lhau(   R11,  0, R1);

  lbzx(   R3,  R5, R7);
  lbz(    R31, -4, R11);
  lbzu(   R11,  0, R1);

  ld(     R31, -4, R11);
  ldx(    R3,  R5, R7);
  ldu(    R31, -4, R11);

  // PPC 1, section 3.3.3, Fixed-Point Store Instructions
  stwx(   R3,  R5, R7);
  stw(    R31, -4, R11);
  stwu(   R11,  0, R1);

  sthx(   R3,  R5, R7);
  sth(    R31, -4, R11);
  sthu(   R31, -4, R11);

  stbx(   R3,  R5, R7);
  stb(    R31, -4, R11);
  stbu(   R31, -4, R11);

  std(    R31, -4, R11);
  stdx(   R3,  R5, R7);
  stdu(   R31, -4, R11);

  // PPC 1, section 3.3.13, Move To/From System Register Instructions
  mtlr(   R3);
  mflr(   R3);
  mtctr(  R3);
  mfctr(  R3);
  mtcrf(  0xff, R15);
  mtcr(   R15);
  mtcrf(  0x03, R15);
  mtcr(   R15);
  mfcr(   R15);

  // PPC 1, section 2.4.1, Branch Instructions
  Label lbl1, lbl2, lbl3;
  bind(lbl1);

  b(pc());
  b(pc() - 8);
  b(lbl1);
  b(lbl2);
  b(lbl3);

  bl(pc() - 8);
  bl(lbl1);
  bl(lbl2);

  bcl(4, 10, pc() - 8);
  bcl(4, 10, lbl1);
  bcl(4, 10, lbl2);

  bclr( 4, 6, 0);
  bclrl(4, 6, 0);

  bind(lbl2);

  bcctr( 4, 6, 0);
  bcctrl(4, 6, 0);

  blt(CCR0, lbl2);
  bgt(CCR1, lbl2);
  beq(CCR2, lbl2);
  bso(CCR3, lbl2);
  bge(CCR4, lbl2);
  ble(CCR5, lbl2);
  bne(CCR6, lbl2);
  bns(CCR7, lbl2);

  bltl(CCR0, lbl2);
  bgtl(CCR1, lbl2);
  beql(CCR2, lbl2);
  bsol(CCR3, lbl2);
  bgel(CCR4, lbl2);
  blel(CCR5, lbl2);
  bnel(CCR6, lbl2);
  bnsl(CCR7, lbl2);
  blr();

  sync();
  icbi( R1, R2);
  dcbst(R2, R3);

  // FLOATING POINT instructions, PPC.
  // PPC 1, section 4.6.2, Floating-Point Load Instructions
  lfs( F1, -11, R3);
  lfsu(F2, 123, R4);
  lfsx(F3, R5,  R6);
  lfd( F4, 456, R7);
  lfdu(F5, 789, R8);
  lfdx(F6, R10, R11);

  // PPC 1, section 4.6.3, Floating-Point Store Instructions
  stfs(  F7,  876, R12);
  stfsu( F8,  543, R13);
  stfsx( F9,  R14, R15);
  stfd(  F10, 210, R16);
  stfdu( F11, 111, R17);
  stfdx( F12, R18, R19);

  // PPC 1, section 4.6.4, Floating-Point Move Instructions
  fmr(   F13, F14);
  fmr_(  F14, F15);
  fneg(  F16, F17);
  fneg_( F18, F19);
  fabs(  F20, F21);
  fabs_( F22, F23);
  fnabs( F24, F25);
  fnabs_(F26, F27);

  // PPC 1, section 4.6.5.1, Floating-Point Elementary Arithmetic
  // Instructions
  fadd(  F28, F29, F30);
  fadd_( F31, F0,  F1);
  fadds( F2,  F3,  F4);
  fadds_(F5,  F6,  F7);
  fsub(  F8,  F9,  F10);
  fsub_( F11, F12, F13);
  fsubs( F14, F15, F16);
  fsubs_(F17, F18, F19);
  fmul(  F20, F21, F22);
  fmul_( F23, F24, F25);
  fmuls( F26, F27, F28);
  fmuls_(F29, F30, F31);
  fdiv(  F0,  F1,  F2);
  fdiv_( F3,  F4,  F5);
  fdivs( F6,  F7,  F8);
  fdivs_(F9,  F10, F11);

  // PPC 1, section 4.6.6, Floating-Point Rounding and Conversion
  // Instructions
  frsp(  F12, F13);
  fctid( F14, F15);
  fctidz(F16, F17);
  fctiw( F18, F19);
  fctiwz(F20, F21);
  fcfid( F22, F23);

  // PPC 1, section 4.6.7, Floating-Point Compare Instructions
  fcmpu( CCR7, F24, F25);

  tty->print_cr("\ntest_asm disassembly (0x%lx 0x%lx):", p2i(code()->insts_begin()), p2i(code()->insts_end()));
  code()->decode();
}

#endif // !PRODUCT