1# e500 core instructions, for PSIM, the PowerPC simulator.
2
3# Copyright 2003-2013 Free Software Foundation, Inc.
4
5# Contributed by Red Hat Inc; developed under contract from Motorola.
6# Written by matthew green <mrg@redhat.com>.
7
8# This file is part of GDB.
9
10# This program is free software; you can redistribute it and/or modify
11# it under the terms of the GNU General Public License as published by
12# the Free Software Foundation; either version 3 of the License, or
13# (at your option) any later version.
14
15# This program is distributed in the hope that it will be useful,
16# but WITHOUT ANY WARRANTY; without even the implied warranty of
17# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18# GNU General Public License for more details.
19
20# You should have received a copy of the GNU General Public License
21# along with this program.  If not, see <http://www.gnu.org/licenses/>.
22
23#
24# e500 Core Complex Instructions
25#
26
# GPR high-half accessors.  The e500 SPE unit views each 64-bit GPR as two
# 32-bit halves: rA/rB/rS (defined by the core) name the architected low
# word, and these cached pointers expose the upper 32 bits (the gprh array).
:cache:e500::signed_word *:rAh:RA:(cpu_registers(processor)->e500.gprh + RA)
:cache:e500::signed_word *:rSh:RS:(cpu_registers(processor)->e500.gprh + RS)
:cache:e500::signed_word *:rBh:RB:(cpu_registers(processor)->e500.gprh + RB)
30
31# Flags for model.h
::model-macro:::
	/* Schedule an instruction that reads the GPRs in IN_MASK, writes the
	   GPRs in OUT_MASK and also writes special purpose register SPR.
	   Expands to nothing unless instruction-issue modelling is on.  */
	#define PPC_INSN_INT_SPR(OUT_MASK, IN_MASK, SPR) \
		do { \
		  if (CURRENT_MODEL_ISSUE > 0) \
		    ppc_insn_int_spr(MY_INDEX, cpu_model(processor), OUT_MASK, IN_MASK, SPR); \
		} while (0)
38
39# Schedule an instruction that takes 2 integer register and produces a special purpose output register plus an integer output register
void::model-function::ppc_insn_int_spr:itable_index index, model_data *model_ptr, const unsigned32 out_mask, const unsigned32 in_mask, const unsigned nSPR
	const unsigned32 int_mask = out_mask | in_mask;
	model_busy *busy_ptr;

	/* Stall, one modelled cycle at a time, until every source/dest GPR
	   and the target SPR are free of earlier in-flight writers.  */
	while ((model_ptr->int_busy & int_mask) != 0 || model_ptr->spr_busy[nSPR] != 0) {
	  if (WITH_TRACE && ppc_trace[trace_model])
	    model_trace_busy_p(model_ptr, int_mask, 0, 0, nSPR);

	  model_ptr->nr_stalls_data++;
	  model_new_cycle(model_ptr);
	}

	/* Claim a function unit, then mark the destination GPRs and the SPR
	   busy until this instruction's writeback completes.  */
	busy_ptr = model_wait_for_unit(index, model_ptr, &model_ptr->timing[index]);
	busy_ptr->int_busy |= out_mask;
	model_ptr->int_busy |= out_mask;
	busy_ptr->spr_busy = nSPR;
	model_ptr->spr_busy[nSPR] = 1;
	/* NOTE(review): writeback-slot accounting — 3 when exactly one GPR is
	   written, else 2; presumably mirrors the core model's convention of
	   counting the SPR as an extra writeback.  Confirm against model.c.  */
	busy_ptr->nr_writebacks = (PPC_ONE_BIT_SET_P(out_mask)) ? 3 : 2;
	TRACE(trace_model,("Making register %s busy.\n", spr_name(nSPR)));
59
60#
61# SPE Modulo Fractional Multiplication handling support
62#
:function:e500::unsigned64:ev_multiply16_smf:signed16 a, signed16 b, int *sat
	/* 16x16 signed modulo-fractional multiply: the 32-bit product is
	   shifted left one place, dropping the redundant sign bit.  *sat is
	   set when the two top product bits are both ones.  NOTE(review):
	   a32/b32 look intended for the multiply but `a * b' is used; both
	   give the same int product after the usual promotions.  */
	signed32 a32 = a, b32 = b, rv32;
	rv32 = a * b;
	*sat = (rv32 & (3<<30)) == (3<<30);
	return (signed64)rv32 << 1;
68
:function:e500::unsigned64:ev_multiply32_smf:signed32 a, signed32 b, int *sat
	/* 32x32 signed modulo-fractional multiply; returns the 64-bit
	   product shifted left one place.  *sat is set when the two most
	   significant bits of the product are both ones.  */
	signed64 rv64, a64 = a, b64 = b;
	rv64 = a64 * b64;
	*sat = (rv64 & ((signed64)3<<62)) == ((signed64)3<<62);
	/* Loses top sign bit.  */
	return rv64 << 1;
75#
76# SPE Saturation handling support
77#
:function:e500::signed32:ev_multiply16_ssf:signed16 a, signed16 b, int *sat
	/* 16x16 signed saturate-fractional multiply.  -1.0 * -1.0 is not
	   representable and saturates to the largest positive fraction.
	   The 0xffff8000 comparison relies on the usual arithmetic
	   conversions to match a == -0x8000.  */
	signed32 rv32;
	if (a == 0xffff8000 && b == 0xffff8000)
	  {
	    rv32 = 0x7fffffffL;
	    * sat = 1;
	    return rv32;
	  }
	else
	  {
	    signed32 a32 = a, b32 = b;

	    rv32 = a * b;
	    * sat = (rv32 & (3<<30)) == (3<<30);
	    /* NOTE(review): the shifted value is truncated back to the
	       signed32 return type; callers use only the low 32 bits.  */
	    return (signed64)rv32 << 1;
	  }
94
:function:e500::signed64:ev_multiply32_ssf:signed32 a, signed32 b, int *sat
	/* 32x32 signed saturate-fractional multiply.  The single
	   unrepresentable case, -1.0 * -1.0, saturates to the largest
	   positive 64-bit fraction; all other products go through the
	   modulo path (shift left one, dropping the redundant sign bit).  */
	signed64 rv64;
	if (a == 0x80000000 && b == 0x80000000)
	  {
	    rv64 = 0x7fffffffffffffffLL;
	    * sat = 1;
	    return rv64;
	  }
	else
	  {
	    signed64 a64 = a, b64 = b;
	    rv64 = a64 * b64;
	    *sat = (rv64 & ((signed64)3<<62)) == ((signed64)3<<62);
	    /* Loses top sign bit.  */
	    return rv64 << 1;
	  }
111
112#
113# SPE FP handling support
114#
115
:function:e500::void:ev_check_guard:sim_fpu *a, int fg, int fx, cpu *processor
	/* Inspect the bits discarded when A is packed to 32 bits: if the
	   guard bit (bit 0 of sim_fpu_guard's result) is set raise FG, and
	   if any lower sticky bits are set raise FX in the SPEFSCR.  */
	unsigned64 guard;
	guard = sim_fpu_guard(a, 0);
	if (guard & 1)
	  EV_SET_SPEFSCR_BITS(fg);
	if (guard & ~1)
	  EV_SET_SPEFSCR_BITS(fx);
123
:function:e500::void:booke_sim_fpu_32to:sim_fpu *dst, unsigned32 packed
	/* Unpack a raw single-precision bit pattern into DST, then patch up
	   infinities and NaNs (exponent 128, implicit fraction bit set) so
	   they can flow through the booke-style arithmetic as if they were
	   ordinary numbers.  */
	sim_fpu_32to (dst, packed);

	/* Set normally unused fields to allow booke arithmetic.  */
	if (dst->class == sim_fpu_class_infinity)
	  {
	    dst->normal_exp = 128;
	    dst->fraction = ((unsigned64)1 << 60);
	  }
	else if (dst->class == sim_fpu_class_qnan
		 || dst->class == sim_fpu_class_snan)
	  {
	    dst->normal_exp = 128;
	    /* This is set, but without the implicit bit, so we have to or
	       in the implicit bit.  */
	    dst->fraction |= ((unsigned64)1 << 60);
	  }
141
:function:e500::int:booke_sim_fpu_add:sim_fpu *d, sim_fpu *a, sim_fpu *b, int inv, int over, int under, cpu *processor
	/* Single-precision add with e500/booke semantics.  A and B may be
	   mutated (Inf/NaN/denorm operands are downgraded to plain numbers
	   before the add).  INV/OVER/UNDER are the SPEFSCR bit masks to
	   raise.  Returns nonzero when any exception bit was raised, so the
	   caller can skip guard-bit processing.  */
	int invalid_operand, overflow_result, underflow_result;
	int dest_exp;

	invalid_operand = 0;
	overflow_result = 0;
	underflow_result = 0;

	/* Treat NaN, Inf, and denorm like normal numbers, and signal invalid
	   operand if it hasn't already been done.  */
	if (EV_IS_INFDENORMNAN (a))
	  {
	    a->class = sim_fpu_class_number;

	    EV_SET_SPEFSCR_BITS (inv);
	    invalid_operand = 1;
	  }
	if (EV_IS_INFDENORMNAN (b))
	  {
	    b->class = sim_fpu_class_number;

	    if (! invalid_operand)
	      {
		EV_SET_SPEFSCR_BITS (inv);
		invalid_operand = 1;
	      }
	  }

	sim_fpu_add (d, a, b);

	dest_exp = booke_sim_fpu_exp (d);
	/* If this is a denorm, force to zero, and signal underflow if
	   we haven't already indicated invalid operand.  */
	if (dest_exp <= -127)
	  {
	    int sign = d->sign;

	    *d = sim_fpu_zero;
	    d->sign = sign;
	    if (! invalid_operand)
	      {
		EV_SET_SPEFSCR_BITS (under);
		underflow_result = 1;
	      }
	  }
	/* If this is Inf/NaN, force to pmax/nmax, and signal overflow if
	   we haven't already indicated invalid operand.  */
	else if (dest_exp >= 127)
	  {
	    int sign = d->sign;

	    *d = sim_fpu_max32;
	    d->sign = sign;
	    if (! invalid_operand)
	      {
		EV_SET_SPEFSCR_BITS (over);
		overflow_result = 1;
	      }
	  }
	/* Destination sign is sign of operand with larger magnitude, or
	   the sign of the first operand if operands have the same
	   magnitude.  Thus if the result is zero, we force it to have
	   the sign of the first operand.  */
	else if (d->fraction == 0)
	  d->sign = a->sign;

	return invalid_operand || overflow_result || underflow_result;
209
:function:e500::unsigned32:ev_fs_add:unsigned32 aa, unsigned32 bb, int inv, int over, int under, int fg, int fx, cpu *processor
	/* One 32-bit lane of an SPE FP add: unpack, add with booke
	   semantics, repack.  Guard/sticky bits are only reported when no
	   invalid/overflow/underflow exception was raised by the add.  */
	sim_fpu a, b, d;
	unsigned32 w;
	int exception;

	booke_sim_fpu_32to (&a, aa);
	booke_sim_fpu_32to (&b, bb);

	exception = booke_sim_fpu_add (&d, &a, &b, inv, over, under,
				       processor);

	sim_fpu_to32 (&w, &d);
	if (! exception)
	  ev_check_guard(&d, fg, fx, processor);
	return w;
225
:function:e500::unsigned32:ev_fs_sub:unsigned32 aa, unsigned32 bb, int inv, int over, int under, int fg, int fx, cpu *processor
	/* One 32-bit lane of an SPE FP subtract, implemented as
	   aa + (-bb) through the same booke add path as ev_fs_add.  */
	sim_fpu a, b, d;
	unsigned32 w;
	int exception;

	booke_sim_fpu_32to (&a, aa);
	booke_sim_fpu_32to (&b, bb);

	/* Invert sign of second operand, and add.  */
	b.sign = ! b.sign;
	exception = booke_sim_fpu_add (&d, &a, &b, inv, over, under,
				       processor);

	sim_fpu_to32 (&w, &d);
	if (! exception)
	  ev_check_guard(&d, fg, fx, processor);
	return w;
243
244# sim_fpu_exp leaves the normal_exp field undefined for Inf and NaN.
245# The booke algorithms require exp values, so we fake them here.
246# fixme: It also apparently does the same for zero, but should not.
:function:e500::unsigned32:booke_sim_fpu_exp:sim_fpu *x
	/* Return X's unbiased exponent, faking 0 for zeros and 128 for
	   Inf/NaN where sim_fpu_exp's result would be undefined.  */
	int y = sim_fpu_is (x);
	if (y == SIM_FPU_IS_PZERO || y == SIM_FPU_IS_NZERO)
	  return 0;
	else if (y == SIM_FPU_IS_SNAN || y == SIM_FPU_IS_QNAN
		 || y == SIM_FPU_IS_NINF || y == SIM_FPU_IS_PINF)
	  return 128;
	else
	  return sim_fpu_exp (x);
256
:function:e500::unsigned32:ev_fs_mul:unsigned32 aa, unsigned32 bb, int inv, int over, int under, int fg, int fx, cpu *processor
	/* One 32-bit lane of an SPE FP multiply.  The biased result exponent
	   estimate (ea + eb + 127) is computed up front so overflow can be
	   forced to +/-max and underflow flushed to signed zero before any
	   real multiply is attempted.  NOTE(review): the UNDER mask is
	   accepted but never raised here -- confirm against hardware.  */
	sim_fpu a, b, d;
	unsigned32 w;
	int sa, sb, ea, eb, ei;
	sim_fpu_32to (&a, aa);
	sim_fpu_32to (&b, bb);
	sa = sim_fpu_sign(&a);
	sb = sim_fpu_sign(&b);
	ea = booke_sim_fpu_exp(&a);
	eb = booke_sim_fpu_exp(&b);
	ei = ea + eb + 127;
	if (sim_fpu_is_zero (&a) || sim_fpu_is_zero (&b))
	  w = 0;
	else if (sa == sb) {
	  if (ei >= 254) {
	    w = EV_PMAX;
	    EV_SET_SPEFSCR_BITS(over);
	  } else if (ei < 1) {
	    d = sim_fpu_zero;
	    sim_fpu_to32 (&w, &d);
	    w &= 0x7fffffff;	/* Clear sign bit.  */
	  } else {
	    goto normal_mul;
	  }
	} else {
	  if (ei >= 254) {
	    w = EV_NMAX;
	    EV_SET_SPEFSCR_BITS(over);
	  } else if (ei < 1) {
	    d = sim_fpu_zero;
	    sim_fpu_to32 (&w, &d);
	    w |= 0x80000000;	/* Set sign bit.  */
	  } else {
	/* In-range path shared by both sign combinations (the same-sign
	   branch jumps here).  */
	normal_mul:
	    if (EV_IS_INFDENORMNAN(&a) || EV_IS_INFDENORMNAN(&b))
	      EV_SET_SPEFSCR_BITS(inv);
	    sim_fpu_mul (&d, &a, &b);
	    sim_fpu_to32 (&w, &d);
	  }
	}
	return w;
298
:function:e500::unsigned32:ev_fs_div:unsigned32 aa, unsigned32 bb, int inv, int over, int under, int dbz, int fg, int fx, cpu *processor
	/* One 32-bit lane of an SPE FP divide.  As in ev_fs_mul, the biased
	   result exponent estimate (ea - eb + 127) is used to force
	   overflow/underflow results before dividing; NaN, zero and
	   infinity operands get the quirky e500-hardware treatment noted
	   below.  NOTE(review): UNDER is accepted but never raised.  */
	sim_fpu a, b, d;
	unsigned32 w;
	int sa, sb, ea, eb, ei;

	sim_fpu_32to (&a, aa);
	sim_fpu_32to (&b, bb);
	sa = sim_fpu_sign(&a);
	sb = sim_fpu_sign(&b);
	ea = booke_sim_fpu_exp(&a);
	eb = booke_sim_fpu_exp(&b);
	ei = ea - eb + 127;

	/* Special cases to handle behaviour of e500 hardware.
	   cf case 107543.  */
	if (sim_fpu_is_nan (&a) || sim_fpu_is_nan (&b)
	  || sim_fpu_is_zero (&a) || sim_fpu_is_zero (&b))
	{
	  if (sim_fpu_is_snan (&a) || sim_fpu_is_snan (&b))
	    {
	      if (bb == 0x3f800000)
	        w = EV_PMAX;
	      else if (aa == 0x7fc00001)
	        w = 0x3fbffffe;
	      else
	        goto normal_div;
	    }
	  else
	    goto normal_div;
	}
	else if (sim_fpu_is_infinity (&a) && sim_fpu_is_infinity (&b))
	{
	  /* Inf/Inf yields +/-1.0 depending on the operand signs.  */
	  if (sa == sb)
	    sim_fpu_32to (&d, 0x3f800000);
	  else
	    sim_fpu_32to (&d, 0xbf800000);
	  sim_fpu_to32 (&w, &d);
	}
	else if (sa == sb) {
	  if (ei > 254) {
	    w = EV_PMAX;
	    EV_SET_SPEFSCR_BITS(over);
	  } else if (ei <= 1) {
	    d = sim_fpu_zero;
	    sim_fpu_to32 (&w, &d);
	    w &= 0x7fffffff;	/* Clear sign bit.  */
	  } else {
	    goto normal_div;
	  }
	} else {
	  if (ei > 254) {
	    w = EV_NMAX;
	    EV_SET_SPEFSCR_BITS(over);
	  } else if (ei <= 1) {
	    d = sim_fpu_zero;
	    sim_fpu_to32 (&w, &d);
	    w |= 0x80000000;	/* Set sign bit.  */
	  } else {
	/* Shared in-range path, also the target of the special-case gotos
	   above.  Division by zero reports dbz/inv and returns +/-max.  */
	normal_div:
	    if (EV_IS_INFDENORMNAN(&a) || EV_IS_INFDENORMNAN(&b))
	      EV_SET_SPEFSCR_BITS(inv);
	    if (sim_fpu_is_zero (&b))
	      {
	        if (sim_fpu_is_zero (&a))
	          EV_SET_SPEFSCR_BITS(dbz);
	        else
	          EV_SET_SPEFSCR_BITS(inv);
	        w = sa ? EV_NMAX : EV_PMAX;
	      }
	    else
	      {
	        sim_fpu_div (&d, &a, &b);
	        sim_fpu_to32 (&w, &d);
	        ev_check_guard(&d, fg, fx, processor);
	      }
	  }
	}
	return w;
377
378
379#
380# A.2.7 Integer SPE Simple Instructions
381#
382
0.4,6.RS,11.RA,16.RB,21.512:X:e500:evaddw %RS,%RA,%RB:Vector Add Word
	/* Independent modulo-2^32 adds on the high and low 32-bit halves.  */
	unsigned32 w1, w2;
	w1 = *rBh + *rAh;
	w2 = *rB + *rA;
	EV_SET_REG2(*rSh, *rS, w1, w2);
		//printf("evaddw: *rSh = %08x; *rS = %08x; w1 = %08x w2 = %08x\n", *rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
390
0.4,6.RS,11.IMM,16.RB,21.514:X:e500:evaddiw %RS,%RB,%IMM:Vector Add Immediate Word
	/* Add the 5-bit immediate to both halves of rB.  NOTE(review): the
	   architecture zero-extends UIMM; confirm the IMM field decode is
	   unsigned.  */
	unsigned32 w1, w2;
	w1 = *rBh + IMM;
	w2 = *rB + IMM;
	EV_SET_REG2(*rSh, *rS, w1, w2);
		//printf("evaddiw: *rSh = %08x; *rS = %08x; w1 = %08x w2 = %08x\n", *rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
398
0.4,6.RS,11.RA,16.RB,21.516:X:e500:evsubfw %RS,%RA,%RB:Vector Subtract from Word
	/* Per-half subtract-from: rS = rB - rA in each 32-bit lane.  */
	unsigned32 w1, w2;
	w1 = *rBh - *rAh;
	w2 = *rB - *rA;
	EV_SET_REG2(*rSh, *rS, w1, w2);
		//printf("evsubfw: *rSh = %08x; *rS = %08x; w1 = %08x w2 = %08x\n", *rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
406
0.4,6.RS,11.IMM,16.RB,21.518:X:e500:evsubifw %RS,%RB,%IMM:Vector Subtract Immediate from Word
	/* Per-half subtract of the 5-bit immediate from rB.  */
	unsigned32 w1, w2;
	w1 = *rBh - IMM;
	w2 = *rB - IMM;
	EV_SET_REG2(*rSh, *rS, w1, w2);
		//printf("evsubifw: *rSh = %08x; *rS = %08x; IMM = %d\n", *rSh, *rS, IMM);
	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
414
0.4,6.RS,11.RA,16.0,21.520:X:e500:evabs %RS,%RA:Vector Absolute Value
	/* Per-half absolute value; INT_MIN stays INT_MIN (no saturation).
	   The signed-vs-0x80000000 comparison relies on the usual
	   arithmetic conversions.  */
	signed32 w1, w2;
	w1 = *rAh;
	if (w1 < 0 && w1 != 0x80000000)
	  w1 = -w1;
	w2 = *rA;
	if (w2 < 0 && w2 != 0x80000000)
	  w2 = -w2;
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
425
0.4,6.RS,11.RA,16.0,21.521:X:e500:evneg %RS,%RA:Vector Negate
	/* Per-half two's-complement negate.  */
	signed32 w1, w2;
	w1 = *rAh;
	/* the negative most negative number is the most negative number */
	if (w1 != 0x80000000)
	  w1 = -w1;
	w2 = *rA;
	if (w2 != 0x80000000)
	  w2 = -w2;
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
437
0.4,6.RS,11.RA,16.0,21.522:X:e500:evextsb %RS,%RA:Vector Extend Signed Byte
	/* Sign-extend the low byte of each 32-bit half of rA to 32 bits.  */
	unsigned64 w1, w2;
	w1 = *rAh & 0xff;
	if (w1 & 0x80)
	  w1 |= 0xffffff00;
	w2 = *rA & 0xff;
	if (w2 & 0x80)
	  w2 |= 0xffffff00;
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK , 0);
448
0.4,6.RS,11.RA,16.0,21.523:X:e500:evextsh %RS,%RA:Vector Extend Signed Half Word
	/* Sign-extend the low 16 bits of each 32-bit half of rA.  The
	   mnemonic previously read "evextsb", duplicating opcode 522's
	   name; opcode 523 is evextsh, matching the description.  */
	unsigned64 w1, w2;
	w1 = *rAh & 0xffff;
	if (w1 & 0x8000)
	  w1 |= 0xffff0000;
	w2 = *rA & 0xffff;
	if (w2 & 0x8000)
	  w2 |= 0xffff0000;
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
459
# Per-half bitwise logical operations: each applies the same boolean
# function independently to the high (gprh) and low 32-bit halves.
0.4,6.RS,11.RA,16.RB,21.529:X:e500:evand %RS,%RA,%RB:Vector AND
	unsigned32 w1, w2;
	w1 = *rBh & *rAh;
	w2 = *rB & *rA;
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);

0.4,6.RS,11.RA,16.RB,21.535:X:e500:evor %RS,%RA,%RB:Vector OR
	unsigned32 w1, w2;
	w1 = *rBh | *rAh;
	w2 = *rB | *rA;
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);

0.4,6.RS,11.RA,16.RB,21.534:X:e500:evxor %RS,%RA,%RB:Vector XOR
	unsigned32 w1, w2;
	w1 = *rBh ^ *rAh;
	w2 = *rB ^ *rA;
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);

0.4,6.RS,11.RA,16.RB,21.542:X:e500:evnand %RS,%RA,%RB:Vector NAND
	unsigned32 w1, w2;
	w1 = ~(*rBh & *rAh);
	w2 = ~(*rB & *rA);
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);

0.4,6.RS,11.RA,16.RB,21.536:X:e500:evnor %RS,%RA,%RB:Vector NOR
	unsigned32 w1, w2;
	w1 = ~(*rBh | *rAh);
	w2 = ~(*rB | *rA);
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);

0.4,6.RS,11.RA,16.RB,21.537:X:e500:eveqv %RS,%RA,%RB:Vector Equivalent
	/* Equivalence is XNOR: ~(a ^ b), written here as (~b) ^ a.  */
	unsigned32 w1, w2;
	w1 = (~*rBh) ^ *rAh;
	w2 = (~*rB) ^ *rA;
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
501
0.4,6.RS,11.RA,16.RB,21.530:X:e500:evandc %RS,%RA,%RB:Vector AND with Complement
	/* Per-half rA AND (NOT rB).  Description spelling fixed
	   ("Compliment" -> "Complement").  */
	unsigned32 w1, w2;
	w1 = (~*rBh) & *rAh;
	w2 = (~*rB) & *rA;
	EV_SET_REG2(*rSh, *rS, w1, w2);
		//printf("evandc: *rSh = %08x; *rS = %08x\n", *rSh, *rS);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
509
0.4,6.RS,11.RA,16.RB,21.539:X:e500:evorc %RS,%RA,%RB:Vector OR with Complement
	/* Per-half rA OR (NOT rB).  Description spelling fixed
	   ("Compliment" -> "Complement").  */
	unsigned32 w1, w2;
	w1 = (~*rBh) | *rAh;
	w2 = (~*rB) | *rA;
	EV_SET_REG2(*rSh, *rS, w1, w2);
		//printf("evorc: *rSh = %08x; *rS = %08x\n", *rSh, *rS);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
517
0.4,6.RS,11.RA,16.RB,21.552:X:e500:evrlw %RS,%RA,%RB:Vector Rotate Left Word
	/* Rotate each 32-bit half of rA left by the low 5 bits of the
	   corresponding half of rB.  A zero rotate count must be special
	   cased: the old `>> (32 - n)' form shifted by 32, which is
	   undefined behaviour in C.  */
	unsigned32 nh, nl, w1, w2;
	nh = *rBh & 0x1f;
	nl = *rB & 0x1f;
	w1 = nh ? (((unsigned32)*rAh) << nh | ((unsigned32)*rAh) >> (32 - nh))
		: (unsigned32)*rAh;
	w2 = nl ? (((unsigned32)*rA) << nl | ((unsigned32)*rA) >> (32 - nl))
		: (unsigned32)*rA;
	EV_SET_REG2(*rSh, *rS, w1, w2);
		//printf("evrlw: nh %d nl %d *rSh = %08x; *rS = %08x\n", nh, nl, *rSh, *rS);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
527
0.4,6.RS,11.RA,16.UIMM,21.554:X:e500:evrlwi %RS,%RA,%UIMM:Vector Rotate Left Word Immediate
	/* Rotate each 32-bit half of rA left by the 5-bit immediate.  A
	   zero count must be special cased: the old `>> (32 - imm)' form
	   shifted by 32, which is undefined behaviour in C.  */
	unsigned32 w1, w2, imm;
	imm = (unsigned32)UIMM;
	w1 = imm ? (((unsigned32)*rAh) << imm | ((unsigned32)*rAh) >> (32 - imm))
		 : (unsigned32)*rAh;
	w2 = imm ? (((unsigned32)*rA) << imm | ((unsigned32)*rA) >> (32 - imm))
		 : (unsigned32)*rA;
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
535
0.4,6.RS,11.RA,16.RB,21.548:X:e500:evslw %RS,%RA,%RB:Vector Shift Left Word
	/* Per-half logical shift left by the low 5 bits of rB's halves.
	   NOTE(review): the SPE spec uses a 6-bit count with counts >= 32
	   producing zero; this masks to 5 bits instead -- confirm.  */
	unsigned32 nh, nl, w1, w2;
	nh = *rBh & 0x1f;
	nl = *rB & 0x1f;
	w1 = ((unsigned32)*rAh) << nh;
	w2 = ((unsigned32)*rA) << nl;
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
544
0.4,6.RS,11.RA,16.UIMM,21.550:X:e500:evslwi %RS,%RA,%UIMM:Vector Shift Left Word Immediate
	/* Per-half logical shift left by the 5-bit immediate (0..31).  */
	unsigned32 w1, w2, imm = UIMM;
	w1 = ((unsigned32)*rAh) << imm;
	w2 = ((unsigned32)*rA) << imm;
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
551
0.4,6.RS,11.RA,16.RB,21.545:X:e500:evsrws %RS,%RA,%RB:Vector Shift Right Word Signed
	/* Per-half arithmetic shift right by the low 5 bits of rB's halves.
	   Relies on the host compiler implementing signed >> as an
	   arithmetic shift (implementation-defined in C).  */
	signed32 w1, w2;
	unsigned32 nh, nl;
	nh = *rBh & 0x1f;
	nl = *rB & 0x1f;
	w1 = ((signed32)*rAh) >> nh;
	w2 = ((signed32)*rA) >> nl;
	EV_SET_REG2(*rSh, *rS, w1, w2);
		//printf("evsrws: nh %d nl %d *rSh = %08x; *rS = %08x\n", nh, nl, *rSh, *rS);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
562
0.4,6.RS,11.RA,16.RB,21.544:X:e500:evsrwu %RS,%RA,%RB:Vector Shift Right Word Unsigned
	/* Per-half logical shift right by the low 5 bits of rB's halves.  */
	unsigned32 w1, w2, nh, nl;
	nh = *rBh & 0x1f;
	nl = *rB & 0x1f;
	w1 = ((unsigned32)*rAh) >> nh;
	w2 = ((unsigned32)*rA) >> nl;
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
571
0.4,6.RS,11.RA,16.UIMM,21.547:X:e500:evsrwis %RS,%RA,%UIMM:Vector Shift Right Word Immediate Signed
	/* Per-half arithmetic shift right by the 5-bit immediate; depends
	   on the host's signed >> being arithmetic.  */
	signed32 w1, w2;
	unsigned32 imm = UIMM;
	w1 = ((signed32)*rAh) >> imm;
	w2 = ((signed32)*rA) >> imm;
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);

0.4,6.RS,11.RA,16.UIMM,21.546:X:e500:evsrwiu %RS,%RA,%UIMM:Vector Shift Right Word Immediate Unsigned
	/* Per-half logical shift right by the 5-bit immediate.  */
	unsigned32 w1, w2, imm = UIMM;
	w1 = ((unsigned32)*rAh) >> imm;
	w2 = ((unsigned32)*rA) >> imm;
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
586
0.4,6.RS,11.RA,16.0,21.525:X:e500:evcntlzw %RS,%RA:Vector Count Leading Zeros Word
	/* Count leading zero bits in each half by walking a single-bit mask
	   down from the MSB; an all-zero half counts 32 (mask reaches 0).  */
	unsigned32 w1, w2, mask, c1, c2;
	for (c1 = 0, mask = 0x80000000, w1 = *rAh;
	      !(w1 & mask) && mask != 0; mask >>= 1)
	  c1++;
	for (c2 = 0, mask = 0x80000000, w2 = *rA;
	      !(w2 & mask) && mask != 0; mask >>= 1)
	  c2++;
	EV_SET_REG2(*rSh, *rS, c1, c2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
597
0.4,6.RS,11.RA,16.0,21.526:X:e500:evcntlsw %RS,%RA:Vector Count Leading Sign Bits Word
	/* Count leading bits equal to the sign bit in each half; sign_bit
	   is shifted down in lockstep with mask so the comparison tracks
	   a run of copies of the MSB.  */
	unsigned32 w1, w2, mask, sign_bit, c1, c2;
	for (c1 = 0, mask = 0x80000000, w1 = *rAh, sign_bit = w1 & mask;
	     ((w1 & mask) == sign_bit) && mask != 0;
	     mask >>= 1, sign_bit >>= 1)
	  c1++;
	for (c2 = 0, mask = 0x80000000, w2 = *rA, sign_bit = w2 & mask;
	     ((w2 & mask) == sign_bit) && mask != 0;
	     mask >>= 1, sign_bit >>= 1)
	  c2++;
	EV_SET_REG2(*rSh, *rS, c1, c2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
610
0.4,6.RS,11.RA,16.0,21.524:X:e500:evrndw %RS,%RA:Vector Round Word
	/* Round each half to the nearest multiple of 2^16: add half (0x8000)
	   then clear the low 16 bits; wraps modulo 2^32.  */
	unsigned32 w1, w2;
	w1 = ((unsigned32)*rAh + 0x8000) & 0xffff0000;
	w2 = ((unsigned32)*rA + 0x8000) & 0xffff0000;
	EV_SET_REG2(*rSh, *rS, w1, w2);
		//printf("evrndw: *rSh = %08x; *rS = %08x\n", *rSh, *rS);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
618
# Merge instructions: build rS from one 32-bit half of rA (into the high
# word) and one half of rB (into the low word), in the four hi/lo
# combinations.
0.4,6.RS,11.RA,16.RB,21.556:X:e500:evmergehi %RS,%RA,%RB:Vector Merge Hi
	unsigned32 w1, w2;
	w1 = *rAh;
	w2 = *rBh;
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);

0.4,6.RS,11.RA,16.RB,21.557:X:e500:evmergelo %RS,%RA,%RB:Vector Merge Low
	unsigned32 w1, w2;
	w1 = *rA;
	w2 = *rB;
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);

0.4,6.RS,11.RA,16.RB,21.559:X:e500:evmergelohi %RS,%RA,%RB:Vector Merge Low Hi
	unsigned32 w1, w2;
	w1 = *rA;
	w2 = *rBh;
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);

0.4,6.RS,11.RA,16.RB,21.558:X:e500:evmergehilo %RS,%RA,%RB:Vector Merge Hi Low
	unsigned32 w1, w2;
	w1 = *rAh;
	w2 = *rB;
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
646
0.4,6.RS,11.SIMM,16.0,21.553:X:e500:evsplati %RS,%SIMM:Vector Splat Immediate
	/* Sign-extend the 5-bit immediate and replicate it into both
	   halves of rS.  */
	unsigned32 w;
	w = SIMM & 0x1f;
	if (w & 0x10)
	  w |= 0xffffffe0;
	EV_SET_REG2(*rSh, *rS, w, w);
	PPC_INSN_INT(RS_BITMASK, 0, 0);

0.4,6.RS,11.SIMM,16.0,21.555:X:e500:evsplatfi %RS,%SIMM:Vector Splat Fractional Immediate
	/* Place the 5-bit immediate in the top bits of each half
	   (fractional interpretation: value << 27, low bits zero).  */
	unsigned32 w;
	w = SIMM << 27;
	EV_SET_REG2(*rSh, *rS, w, w);
	PPC_INSN_INT(RS_BITMASK, 0, 0);
660
# Vector compares: each sets CR field BF to a 4-bit result where bit 3 is
# the high-half compare, bit 2 the low-half compare, bit 1 their OR and
# bit 0 their AND.
0.4,6.BF,9.0,11.RA,16.RB,21.561:X:e500:evcmpgts %BF,%RA,%RB:Vector Compare Greater Than Signed
	signed32 ah, al, bh, bl;
	int w, ch, cl;
	ah = *rAh;
	al = *rA;
	bh = *rBh;
	bl = *rB;
	if (ah > bh)
	  ch = 1;
	else
	  ch = 0;
	if (al > bl)
	  cl = 1;
	else
	  cl = 0;
	/* Pack: high, low, OR, AND.  */
	w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
	CR_SET(BF, w);
	PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);

0.4,6.BF,9.0,11.RA,16.RB,21.560:X:e500:evcmpgtu %BF,%RA,%RB:Vector Compare Greater Than Unsigned
	unsigned32 ah, al, bh, bl;
	int w, ch, cl;
	ah = *rAh;
	al = *rA;
	bh = *rBh;
	bl = *rB;
	if (ah > bh)
	  ch = 1;
	else
	  ch = 0;
	if (al > bl)
	  cl = 1;
	else
	  cl = 0;
	w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
	CR_SET(BF, w);
	PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);

0.4,6.BF,9.0,11.RA,16.RB,21.563:X:e500:evcmplts %BF,%RA,%RB:Vector Compare Less Than Signed
	signed32 ah, al, bh, bl;
	int w, ch, cl;
	ah = *rAh;
	al = *rA;
	bh = *rBh;
	bl = *rB;
	if (ah < bh)
	  ch = 1;
	else
	  ch = 0;
	if (al < bl)
	  cl = 1;
	else
	  cl = 0;
	w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
	CR_SET(BF, w);
	PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);

0.4,6.BF,9.0,11.RA,16.RB,21.562:X:e500:evcmpltu %BF,%RA,%RB:Vector Compare Less Than Unsigned
	unsigned32 ah, al, bh, bl;
	int w, ch, cl;
	ah = *rAh;
	al = *rA;
	bh = *rBh;
	bl = *rB;
	if (ah < bh)
	  ch = 1;
	else
	  ch = 0;
	if (al < bl)
	  cl = 1;
	else
	  cl = 0;
	w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
	CR_SET(BF, w);
	PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);

0.4,6.BF,9.0,11.RA,16.RB,21.564:X:e500:evcmpeq %BF,%RA,%RB:Vector Compare Equal
	unsigned32 ah, al, bh, bl;
	int w, ch, cl;
	ah = *rAh;
	al = *rA;
	bh = *rBh;
	bl = *rB;
	if (ah == bh)
	  ch = 1;
	else
	  ch = 0;
	if (al == bl)
	  cl = 1;
	else
	  cl = 0;
	w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
	CR_SET(BF, w);
		//printf("evcmpeq: ch %d cl %d BF %d, CR is now %08x\n", ch, cl, BF, CR);
	PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
756
0.4,6.RS,11.RA,16.RB,21.79,29.CRFS:X:e500:evsel %RS,%RA,%RB,%CRFS:Vector Select
	/* Pick each half of rS from rA or rB according to the two upper
	   bits of CR field CRFS (bit 3 selects the high half's source,
	   bit 2 the low half's).  */
	unsigned32 w1, w2;
	int cr;
	cr = CR_FIELD(CRFS);
	if (cr & 8)
	  w1 = *rAh;
	else
	  w1 = *rBh;
	if (cr & 4)
	  w2 = *rA;
	else
	  w2 = *rB;
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
771
0.4,6.RS,11.RA,16.RB,21.527:X:e500:brinc %RS,%RA,%RB:Bit Reversed Increment
	/* Bit-reversed increment of the low 16 bits of rA under the mask in
	   rB's low 16 bits: reverse, add one, reverse back.  Only the low
	   word of rS is written (high word of rA is preserved in place);
	   *rSh is deliberately untouched.  NOTE(review): w1/w2/a are
	   declared but unused except a -- candidates for cleanup.  */
	unsigned32 w1, w2, a, d, mask;
	mask = (*rB) & 0xffff;
	a = (*rA) & 0xffff;
	d = EV_BITREVERSE16(1 + EV_BITREVERSE16(a | ~mask));
	*rS = ((*rA) & 0xffff0000) | (d & 0xffff);
		//printf("brinc: *rS = %08x\n", *rS);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
780
781#
782# A.2.8 Integer SPE Complex Instructions
783#
784
0.4,6.RS,11.RA,16.RB,21.1031:EVX:e500:evmhossf %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Fractional
	/* Saturating fractional multiply of the odd (low) 16-bit halfwords
	   of each 32-bit lane; overflow per lane saturates to 0x7fffffff
	   and is reported in the SPEFSCR.  */
	signed16 al, ah, bl, bh;
	signed32 tl, th;
	int movl, movh;

	al = (signed16) EV_LOHALF (*rA);
	ah = (signed16) EV_LOHALF (*rAh);
	bl = (signed16) EV_LOHALF (*rB);
	bh = (signed16) EV_LOHALF (*rBh);
	tl = ev_multiply16_ssf (al, bl, &movl);
	th = ev_multiply16_ssf (ah, bh, &movh);
	EV_SET_REG2 (*rSh, *rS, EV_SATURATE (movh, 0x7fffffff, th),
			        EV_SATURATE (movl, 0x7fffffff, tl));
	EV_SET_SPEFSCR_OV (movl, movh);
	PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
800
0.4,6.RS,11.RA,16.RB,21.1063:EVX:e500:evmhossfa %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Fractional Accumulate
	/* Same as evmhossf, but the result is also written to the
	   accumulator.  Fixed to use EV_SET_REG2_ACC: the plain EV_SET_REG2
	   used before never updated the accumulator, unlike every other
	   "Accumulate" variant in this file.  */
	signed16 al, ah, bl, bh;
	signed32 tl, th;
	int movl, movh;

	al = (signed16) EV_LOHALF (*rA);
	ah = (signed16) EV_LOHALF (*rAh);
	bl = (signed16) EV_LOHALF (*rB);
	bh = (signed16) EV_LOHALF (*rBh);
	tl = ev_multiply16_ssf (al, bl, &movl);
	th = ev_multiply16_ssf (ah, bh, &movh);
	EV_SET_REG2_ACC (*rSh, *rS, EV_SATURATE (movh, 0x7fffffff, th),
				    EV_SATURATE (movl, 0x7fffffff, tl));
	EV_SET_SPEFSCR_OV (movl, movh);
	PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
816
0.4,6.RS,11.RA,16.RB,21.1039:EVX:e500:evmhosmf %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Fractional
	/* Modulo (non-saturating) fractional multiply of the odd halfwords;
	   the helper's saturation flag is ignored.  */
	signed16 al, ah, bl, bh;
	signed32 tl, th;
	int dummy;

	al = (signed16) EV_LOHALF (*rA);
	ah = (signed16) EV_LOHALF (*rAh);
	bl = (signed16) EV_LOHALF (*rB);
	bh = (signed16) EV_LOHALF (*rBh);
	tl = ev_multiply16_smf (al, bl, & dummy);
	th = ev_multiply16_smf (ah, bh, & dummy);
	EV_SET_REG2 (*rSh, *rS, th, tl);
	PPC_INSN_INT (RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);

0.4,6.RS,11.RA,16.RB,21.1071:EVX:e500:evmhosmfa %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Fractional Accumulate
	/* As evmhosmf but the result also goes to the accumulator.
	   NOTE(review): al..bh are declared signed32 here, signed16 in the
	   sibling entries; behaviour matches since the helper takes
	   signed16 parameters, but the declarations could be unified.  */
	signed32 al, ah, bl, bh;
	signed32 tl, th;
	int dummy;

	al = (signed16) EV_LOHALF (*rA);
	ah = (signed16) EV_LOHALF (*rAh);
	bl = (signed16) EV_LOHALF (*rB);
	bh = (signed16) EV_LOHALF (*rBh);
	tl = ev_multiply16_smf (al, bl, & dummy);
	th = ev_multiply16_smf (ah, bh, & dummy);
	EV_SET_REG2_ACC (*rSh, *rS, th, tl);
	PPC_INSN_INT (RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
844
# Odd-halfword modulo integer multiplies: 16x16 -> 32 per lane, signed or
# unsigned, with and without accumulator update.
0.4,6.RS,11.RA,16.RB,21.1037:EVX:e500:evmhosmi %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Integer
	signed32 al, ah, bl, bh, tl, th;
	al = (signed32)(signed16)EV_LOHALF(*rA);
	ah = (signed32)(signed16)EV_LOHALF(*rAh);
	bl = (signed32)(signed16)EV_LOHALF(*rB);
	bh = (signed32)(signed16)EV_LOHALF(*rBh);
	tl = al * bl;
	th = ah * bh;
	EV_SET_REG2(*rSh, *rS, th, tl);
		//printf("evmhosmi: *rSh = %08x; *rS = %08x\n", *rSh, *rS);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);

0.4,6.RS,11.RA,16.RB,21.1069:EVX:e500:evmhosmia %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Integer Accumulate
	signed32 al, ah, bl, bh, tl, th;
	al = (signed32)(signed16)EV_LOHALF(*rA);
	ah = (signed32)(signed16)EV_LOHALF(*rAh);
	bl = (signed32)(signed16)EV_LOHALF(*rB);
	bh = (signed32)(signed16)EV_LOHALF(*rBh);
	tl = al * bl;
	th = ah * bh;
	EV_SET_REG2_ACC(*rSh, *rS, th, tl);
		//printf("evmhosmia: ACC = %08x; *rSh = %08x; *rS = %08x\n", ACC, *rSh, *rS);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);

0.4,6.RS,11.RA,16.RB,21.1036:EVX:e500:evmhoumi %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Modulo Integer
	unsigned32 al, ah, bl, bh, tl, th;
	al = (unsigned32)(unsigned16)EV_LOHALF(*rA);
	ah = (unsigned32)(unsigned16)EV_LOHALF(*rAh);
	bl = (unsigned32)(unsigned16)EV_LOHALF(*rB);
	bh = (unsigned32)(unsigned16)EV_LOHALF(*rBh);
	tl = al * bl;
	th = ah * bh;
	EV_SET_REG2(*rSh, *rS, th, tl);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);

0.4,6.RS,11.RA,16.RB,21.1068:EVX:e500:evmhoumia %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Modulo Integer Accumulate
	unsigned32 al, ah, bl, bh, tl, th;
	al = (unsigned32)(unsigned16)EV_LOHALF(*rA);
	ah = (unsigned32)(unsigned16)EV_LOHALF(*rAh);
	bl = (unsigned32)(unsigned16)EV_LOHALF(*rB);
	bh = (unsigned32)(unsigned16)EV_LOHALF(*rBh);
	tl = al * bl;
	th = ah * bh;
	EV_SET_REG2_ACC(*rSh, *rS, th, tl);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
890
# Even-halfword saturating fractional multiplies (high 16 bits of each
# 32-bit lane), plain and accumulating.
0.4,6.RS,11.RA,16.RB,21.1027:EVX:e500:evmhessf %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Fractional
	signed16 al, ah, bl, bh;
	signed32 tl, th;
	int movl, movh;

	al = (signed16) EV_HIHALF (*rA);
	ah = (signed16) EV_HIHALF (*rAh);
	bl = (signed16) EV_HIHALF (*rB);
	bh = (signed16) EV_HIHALF (*rBh);
	tl = ev_multiply16_ssf (al, bl, &movl);
	th = ev_multiply16_ssf (ah, bh, &movh);
	EV_SET_REG2 (*rSh, *rS, EV_SATURATE (movh, 0x7fffffff, th),
			       EV_SATURATE (movl, 0x7fffffff, tl));
	EV_SET_SPEFSCR_OV (movl, movh);
	PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);

0.4,6.RS,11.RA,16.RB,21.1059:EVX:e500:evmhessfa %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Fractional Accumulate
	signed16 al, ah, bl, bh;
	signed32 tl, th;
	int movl, movh;

	al = (signed16) EV_HIHALF (*rA);
	ah = (signed16) EV_HIHALF (*rAh);
	bl = (signed16) EV_HIHALF (*rB);
	bh = (signed16) EV_HIHALF (*rBh);
	tl = ev_multiply16_ssf (al, bl, &movl);
	th = ev_multiply16_ssf (ah, bh, &movh);
	EV_SET_REG2_ACC (*rSh, *rS, EV_SATURATE (movh, 0x7fffffff, th),
				    EV_SATURATE (movl, 0x7fffffff, tl));
	EV_SET_SPEFSCR_OV (movl, movh);
	PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
922
0.4,6.RS,11.RA,16.RB,21.1035:EVX:e500:evmhesmf %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Fractional
	/* Even-halfword modulo fractional multiply.  NOTE(review): unlike
	   the odd variant evmhosmf, this one reports the helper's overflow
	   flags to the SPEFSCR and schedules via PPC_INSN_INT_SPR -- the
	   odd/even pair disagree; confirm which matches hardware.  */
	signed16 al, ah, bl, bh;
	signed64 tl, th;
	int movl, movh;

	al = (signed16) EV_HIHALF (*rA);
	ah = (signed16) EV_HIHALF (*rAh);
	bl = (signed16) EV_HIHALF (*rB);
	bh = (signed16) EV_HIHALF (*rBh);
	tl = ev_multiply16_smf (al, bl, &movl);
	th = ev_multiply16_smf (ah, bh, &movh);
	EV_SET_REG2 (*rSh, *rS, th, tl);
	EV_SET_SPEFSCR_OV (movl, movh);
	PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);

0.4,6.RS,11.RA,16.RB,21.1067:EVX:e500:evmhesmfa %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Fractional Accumulate
	/* Accumulating form; ignores the saturation flag.  NOTE(review):
	   tl/th are signed32 here but signed64 in evmhesmf; the macro only
	   consumes 32-bit halves, so results agree.  */
	signed16 al, ah, bl, bh;
	signed32 tl, th;
	int dummy;

	al = (signed16) EV_HIHALF (*rA);
	ah = (signed16) EV_HIHALF (*rAh);
	bl = (signed16) EV_HIHALF (*rB);
	bh = (signed16) EV_HIHALF (*rBh);
	tl = ev_multiply16_smf (al, bl, & dummy);
	th = ev_multiply16_smf (ah, bh, & dummy);
	EV_SET_REG2_ACC (*rSh, *rS, th, tl);
	PPC_INSN_INT (RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
951
# evmhesmi: signed integer (non-fractional) multiply of the even halfwords;
# 32-bit products written to the RS register pair, no accumulator update.
0.4,6.RS,11.RA,16.RB,21.1033:EVX:e500:evmhesmi %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Integer
	signed16 al, ah, bl, bh;
	signed32 tl, th;

	al = (signed16) EV_HIHALF (*rA);
	ah = (signed16) EV_HIHALF (*rAh);
	bl = (signed16) EV_HIHALF (*rB);
	bh = (signed16) EV_HIHALF (*rBh);
	tl = al * bl;
	th = ah * bh;
	EV_SET_REG2 (*rSh, *rS, th, tl);
	PPC_INSN_INT (RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
964
# evmhesmia: as evmhesmi, additionally copying the result into the accumulator.
0.4,6.RS,11.RA,16.RB,21.1065:EVX:e500:evmhesmia %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Integer Accumulate
	signed32 al, ah, bl, bh, tl, th;
	al = (signed32)(signed16)EV_HIHALF(*rA);
	ah = (signed32)(signed16)EV_HIHALF(*rAh);
	bl = (signed32)(signed16)EV_HIHALF(*rB);
	bh = (signed32)(signed16)EV_HIHALF(*rBh);
	tl = al * bl;
	th = ah * bh;
	EV_SET_REG2_ACC(*rSh, *rS, th, tl);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
975
# evmheumi: unsigned integer multiply of the even halfwords; 32-bit products
# to the RS register pair.
0.4,6.RS,11.RA,16.RB,21.1032:EVX:e500:evmheumi %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Modulo Integer
	unsigned32 al, ah, bl, bh, tl, th;
	al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
	ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
	bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
	bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
	tl = al * bl;
	th = ah * bh;
	EV_SET_REG2(*rSh, *rS, th, tl);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
986
# evmheumia: as evmheumi, additionally copying the result into the accumulator.
0.4,6.RS,11.RA,16.RB,21.1064:EVX:e500:evmheumia %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Modulo Integer Accumulate
	unsigned32 al, ah, bl, bh, tl, th;
	al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
	ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
	bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
	bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
	tl = al * bl;
	th = ah * bh;
	EV_SET_REG2_ACC(*rSh, *rS, th, tl);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
997
# evmhossfaaw: saturating fractional multiply of the odd (low) signed
# halfwords, each saturated product ADDED to the matching accumulator word;
# the 64-bit sums are re-saturated to 32 bits before being written back.
0.4,6.RS,11.RA,16.RB,21.1287:EVX:e500:evmhossfaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Fractional and Accumulate into Words
	signed16 al, ah, bl, bh;
	signed32 t1, t2;
	signed64 tl, th;
	int movl, movh, ovl, ovh;

	al = (signed16) EV_LOHALF (*rA);
	ah = (signed16) EV_LOHALF (*rAh);
	bl = (signed16) EV_LOHALF (*rB);
	bh = (signed16) EV_LOHALF (*rBh);
	t1 = ev_multiply16_ssf (ah, bh, &movh);
	t2 = ev_multiply16_ssf (al, bl, &movl);
	th = EV_ACCHIGH + EV_SATURATE (movh, 0x7fffffff, t1);
	tl = EV_ACCLOW  + EV_SATURATE (movl, 0x7fffffff, t2);
	ovh = EV_SAT_P_S32 (th);
	ovl = EV_SAT_P_S32 (tl);
	EV_SET_REG2_ACC (*rSh, *rS, EV_SATURATE_ACC (ovh, th, 0x80000000, 0x7fffffff, th),
			            EV_SATURATE_ACC (ovl, tl, 0x80000000, 0x7fffffff, tl));
	EV_SET_SPEFSCR_OV (movl | ovl, movh | ovh);
	PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1018
# evmhossiaaw: signed integer multiply of the odd halfwords, products added
# to the accumulator words with signed-32 saturation of each sum.
0.4,6.RS,11.RA,16.RB,21.1285:EVX:e500:evmhossiaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Integer and Accumulate into Words
	signed32 al, ah, bl, bh;
	signed64 t1, t2, tl, th;
	int ovl, ovh;
	al = (signed32)(signed16)EV_LOHALF(*rA);
	ah = (signed32)(signed16)EV_LOHALF(*rAh);
	bl = (signed32)(signed16)EV_LOHALF(*rB);
	bh = (signed32)(signed16)EV_LOHALF(*rBh);
	t1 = ah * bh;
	t2 = al * bl;
	th = EV_ACCHIGH + t1;
	tl = EV_ACCLOW + t2;
	ovh = EV_SAT_P_S32(th);
	ovl = EV_SAT_P_S32(tl);
	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
			           EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
		//printf("evmhossiaaw: ovh %d ovl %d al %d ah %d bl %d bh %d t1 %qd t2 %qd tl %qd th %qd\n", ovh, ovl, al, ah, bl, bh, t1, t2, tl, th);
		//printf("evmhossiaaw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
	EV_SET_SPEFSCR_OV(ovl, ovh);
	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1039
# evmhosmfaaw: fractional modulo multiply (product doubled by the << 1) of
# the odd halfwords, low 32 bits of each product added to the accumulator
# words; no saturation, results truncated to 32 bits.
0.4,6.RS,11.RA,16.RB,21.1295:EVX:e500:evmhosmfaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Fractional and Accumulate into Words
	signed32 al, ah, bl, bh;
	signed64 t1, t2, tl, th;
	al = (signed32)(signed16)EV_LOHALF(*rA);
	ah = (signed32)(signed16)EV_LOHALF(*rAh);
	bl = (signed32)(signed16)EV_LOHALF(*rB);
	bh = (signed32)(signed16)EV_LOHALF(*rBh);
	t1 = ((signed64)ah * bh) << 1;
	t2 = ((signed64)al * bl) << 1;
	th = EV_ACCHIGH + (t1 & 0xffffffff);
	tl = EV_ACCLOW + (t2 & 0xffffffff);
	EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1053
# evmhosmiaaw: signed integer multiply of the odd halfwords, products added
# to the accumulator words modulo 2^32 (no saturation).
0.4,6.RS,11.RA,16.RB,21.1293:EVX:e500:evmhosmiaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Integer and Accumulate into Words
	signed32 al, ah, bl, bh;
	signed64 t1, t2, tl, th;
	al = (signed32)(signed16)EV_LOHALF(*rA);
	ah = (signed32)(signed16)EV_LOHALF(*rAh);
	bl = (signed32)(signed16)EV_LOHALF(*rB);
	bh = (signed32)(signed16)EV_LOHALF(*rBh);
	t1 = ah * bh;
	t2 = al * bl;
	th = EV_ACCHIGH + t1;
	tl = EV_ACCLOW + t2;
	EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
		//printf("evmhosmiaaw: al %d ah %d bl %d bh %d t1 %qd t2 %qd tl %qd th %qd\n", al, ah, bl, bh, t1, t2, tl, th);
		//printf("evmhosmiaaw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1069
# evmhousiaaw: unsigned integer multiply of the odd halfwords, products added
# to the accumulator words with unsigned-32 saturation of each sum.
0.4,6.RS,11.RA,16.RB,21.1284:EVX:e500:evmhousiaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Saturate Integer and Accumulate into Words
	unsigned32 al, ah, bl, bh;
	unsigned64 t1, t2;
	signed64 tl, th;
	int ovl, ovh;
	al = (unsigned32)(unsigned16)EV_LOHALF(*rA);
	ah = (unsigned32)(unsigned16)EV_LOHALF(*rAh);
	bl = (unsigned32)(unsigned16)EV_LOHALF(*rB);
	bh = (unsigned32)(unsigned16)EV_LOHALF(*rBh);
	t1 = ah * bh;
	t2 = al * bl;
	th = (signed64)EV_ACCHIGH + (signed64)t1;
	tl = (signed64)EV_ACCLOW + (signed64)t2;
	ovh = EV_SAT_P_U32(th);
	ovl = EV_SAT_P_U32(tl);
	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0, 0xffffffff, th),
			           EV_SATURATE_ACC(ovl, tl, 0, 0xffffffff, tl));
		//printf("evmhousiaaw: al %u ah %u bl %u bh %u t1 %qu t2 %qu tl %qu th %qu\n", al, ah, bl, bh, t1, t2, tl, th);
		//printf("evmhousiaaw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
	EV_SET_SPEFSCR_OV(ovl, ovh);
	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1091
# evmhoumiaaw: unsigned integer multiply of the odd halfwords, products added
# to the accumulator words modulo 2^32 (no saturation).
0.4,6.RS,11.RA,16.RB,21.1292:EVX:e500:evmhoumiaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Modulo Integer and Accumulate into Words
	unsigned32 al, ah, bl, bh;
	unsigned32 t1, t2;
	signed64 tl, th;
	al = (unsigned32)(unsigned16)EV_LOHALF(*rA);
	ah = (unsigned32)(unsigned16)EV_LOHALF(*rAh);
	bl = (unsigned32)(unsigned16)EV_LOHALF(*rB);
	bh = (unsigned32)(unsigned16)EV_LOHALF(*rBh);
	t1 = ah * bh;
	t2 = al * bl;
	th = EV_ACCHIGH + t1;
	tl = EV_ACCLOW + t2;
	EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
		//printf("evmhoumiaaw: al %u ah %u bl %u bh %u t1 %qu t2 %qu tl %qu th %qu\n", al, ah, bl, bh, t1, t2, tl, th);
		//printf("evmhoumiaaw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1108
# evmhessfaaw: even-halfword counterpart of evmhossfaaw — saturating
# fractional multiply, saturated products added to the accumulator words,
# sums re-saturated to 32 bits.
0.4,6.RS,11.RA,16.RB,21.1283:EVX:e500:evmhessfaaw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Fractional and Accumulate into Words
	signed16 al, ah, bl, bh;
	signed32 t1, t2;
	signed64 tl, th;
	int movl, movh, ovl, ovh;

	al = (signed16) EV_HIHALF (*rA);
	ah = (signed16) EV_HIHALF (*rAh);
	bl = (signed16) EV_HIHALF (*rB);
	bh = (signed16) EV_HIHALF (*rBh);
	t1 = ev_multiply16_ssf (ah, bh, &movh);
	t2 = ev_multiply16_ssf (al, bl, &movl);
	th = EV_ACCHIGH + EV_SATURATE (movh, 0x7fffffff, t1);
	tl = EV_ACCLOW  + EV_SATURATE (movl, 0x7fffffff, t2);
	ovh = EV_SAT_P_S32 (th);
	ovl = EV_SAT_P_S32 (tl);
	EV_SET_REG2_ACC (*rSh, *rS, EV_SATURATE_ACC (ovh, th, 0x80000000, 0x7fffffff, th),
			            EV_SATURATE_ACC (ovl, tl, 0x80000000, 0x7fffffff, tl));
	EV_SET_SPEFSCR_OV (movl | ovl, movh | ovh);
	PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1129
# evmhessiaaw: even-halfword counterpart of evmhossiaaw — signed integer
# multiply, sums saturated to signed 32 bits, overflow flags to SPEFSCR.
0.4,6.RS,11.RA,16.RB,21.1281:EVX:e500:evmhessiaaw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Integer and Accumulate into Words
	signed32 al, ah, bl, bh;
	signed64 t1, t2, tl, th;
	int ovl, ovh;
	al = (signed32)(signed16)EV_HIHALF(*rA);
	ah = (signed32)(signed16)EV_HIHALF(*rAh);
	bl = (signed32)(signed16)EV_HIHALF(*rB);
	bh = (signed32)(signed16)EV_HIHALF(*rBh);
	t1 = ah * bh;
	t2 = al * bl;
	th = EV_ACCHIGH + t1;
	tl = EV_ACCLOW + t2;
	ovh = EV_SAT_P_S32(th);
	ovl = EV_SAT_P_S32(tl);
	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
			           EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
		//printf("evmhessiaaw: ovh %d ovl %d al %d ah %d bl %d bh %d t1 %qd t2 %qd tl %qd th %qd\n", ovh, ovl, al, ah, bl, bh, t1, t2, tl, th);
		//printf("evmhessiaaw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
	EV_SET_SPEFSCR_OV(ovl, ovh);
	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1150
# evmhesmfaaw: fractional modulo multiply of the even halfwords, products
# added to the accumulator words; overflow indication discarded (dummy).
0.4,6.RS,11.RA,16.RB,21.1291:EVX:e500:evmhesmfaaw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Fractional and Accumulate into Words
	signed16 al, ah, bl, bh;
	signed32 t1, t2, th, tl;
	int dummy;

	al = (signed16)EV_HIHALF(*rA);
	ah = (signed16)EV_HIHALF(*rAh);
	bl = (signed16)EV_HIHALF(*rB);
	bh = (signed16)EV_HIHALF(*rBh);
	t1 = ev_multiply16_smf (ah, bh, &dummy);
	t2 = ev_multiply16_smf (al, bl, &dummy);
	th = EV_ACCHIGH + t1;
	tl = EV_ACCLOW + t2;
	EV_SET_REG2_ACC(*rSh, *rS, th, tl);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1166
# evmhesmiaaw: signed integer multiply of the even halfwords, products added
# to the accumulator words modulo 2^32 (no saturation).
0.4,6.RS,11.RA,16.RB,21.1289:EVX:e500:evmhesmiaaw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Integer and Accumulate into Words
	signed32 al, ah, bl, bh;
	signed64 t1, t2, tl, th;
	al = (signed32)(signed16)EV_HIHALF(*rA);
	ah = (signed32)(signed16)EV_HIHALF(*rAh);
	bl = (signed32)(signed16)EV_HIHALF(*rB);
	bh = (signed32)(signed16)EV_HIHALF(*rBh);
	t1 = ah * bh;
	t2 = al * bl;
	th = EV_ACCHIGH + t1;
	tl = EV_ACCLOW + t2;
	EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1180
# evmheusiaaw: unsigned integer multiply of the even halfwords, products added
# to the accumulator words with unsigned-32 saturation of each sum.
0.4,6.RS,11.RA,16.RB,21.1280:EVX:e500:evmheusiaaw %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Saturate Integer and Accumulate into Words
	unsigned32 al, ah, bl, bh;
	unsigned64 t1, t2;
	signed64 tl, th;
	int ovl, ovh;
	al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
	ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
	bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
	bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
	t1 = ah * bh;
	t2 = al * bl;
	th = (signed64)EV_ACCHIGH + (signed64)t1;
	tl = (signed64)EV_ACCLOW + (signed64)t2;
	ovh = EV_SAT_P_U32(th);
	ovl = EV_SAT_P_U32(tl);
	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0, 0xffffffff, th),
			           EV_SATURATE_ACC(ovl, tl, 0, 0xffffffff, tl));
	EV_SET_SPEFSCR_OV(ovl, ovh);
	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1200
# evmheumiaaw: unsigned integer multiply of the even halfwords, products
# added to the accumulator words modulo 2^32 (no saturation).
0.4,6.RS,11.RA,16.RB,21.1288:EVX:e500:evmheumiaaw %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Modulo Integer and Accumulate into Words
	unsigned32 al, ah, bl, bh;
	unsigned32 t1, t2;
	unsigned64 tl, th;
	al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
	ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
	bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
	bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
	t1 = ah * bh;
	t2 = al * bl;
	th = EV_ACCHIGH + t1;
	tl = EV_ACCLOW + t2;
	EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1215
1216
# evmhossfanw: negative-accumulate form of evmhossfaaw — the saturated
# fractional products of the odd halfwords are SUBTRACTED from the
# accumulator words, and each difference is re-saturated to 32 bits.
0.4,6.RS,11.RA,16.RB,21.1415:EVX:e500:evmhossfanw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Fractional and Accumulate Negative into Words
	signed16 al, ah, bl, bh;
	signed32 t1, t2;
	signed64 tl, th;
	int movl, movh, ovl, ovh;

	al = (signed16) EV_LOHALF (*rA);
	ah = (signed16) EV_LOHALF (*rAh);
	bl = (signed16) EV_LOHALF (*rB);
	bh = (signed16) EV_LOHALF (*rBh);
	t1 = ev_multiply16_ssf (ah, bh, &movh);
	t2 = ev_multiply16_ssf (al, bl, &movl);
	th = EV_ACCHIGH - EV_SATURATE (movh, 0x7fffffff, t1);
	tl = EV_ACCLOW  - EV_SATURATE (movl, 0x7fffffff, t2);
	ovh = EV_SAT_P_S32 (th);
	ovl = EV_SAT_P_S32 (tl);
	EV_SET_REG2_ACC (*rSh, *rS, EV_SATURATE_ACC (ovh, th, 0x80000000, 0x7fffffff, th),
			            EV_SATURATE_ACC (ovl, tl, 0x80000000, 0x7fffffff, tl));
	EV_SET_SPEFSCR_OV (movl | ovl, movh | ovh);
	PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1237
# evmhossianw: signed integer multiply of the odd halfwords, products
# subtracted from the accumulator words with signed-32 saturation.
0.4,6.RS,11.RA,16.RB,21.1413:EVX:e500:evmhossianw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Integer and Accumulate Negative into Words
	signed32 al, ah, bl, bh;
	signed64 t1, t2, tl, th;
	int ovl, ovh;
	al = (signed32)(signed16)EV_LOHALF(*rA);
	ah = (signed32)(signed16)EV_LOHALF(*rAh);
	bl = (signed32)(signed16)EV_LOHALF(*rB);
	bh = (signed32)(signed16)EV_LOHALF(*rBh);
	t1 = ah * bh;
	t2 = al * bl;
	th = EV_ACCHIGH - t1;
	tl = EV_ACCLOW - t2;
	ovh = EV_SAT_P_S32(th);
	ovl = EV_SAT_P_S32(tl);
	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
			           EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
	EV_SET_SPEFSCR_OV(ovl, ovh);
		//printf("evmhossianw: ACC = %08x; *rSh = %08x; *rS = %08x\n", ACC, *rSh, *rS);
	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1257
# evmhosmfanw: fractional modulo multiply (doubled product) of the odd
# halfwords, low 32 bits of each product subtracted from the accumulator
# words; no saturation.
0.4,6.RS,11.RA,16.RB,21.1423:EVX:e500:evmhosmfanw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Fractional and Accumulate Negative into Words
	signed32 al, ah, bl, bh;
	signed64 t1, t2, tl, th;
	al = (signed32)(signed16)EV_LOHALF(*rA);
	ah = (signed32)(signed16)EV_LOHALF(*rAh);
	bl = (signed32)(signed16)EV_LOHALF(*rB);
	bh = (signed32)(signed16)EV_LOHALF(*rBh);
	t1 = ((signed64)ah * bh) << 1;
	t2 = ((signed64)al * bl) << 1;
	th = EV_ACCHIGH - (t1 & 0xffffffff);
	tl = EV_ACCLOW - (t2 & 0xffffffff);
	EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1271
# evmhosmianw: signed integer multiply of the odd halfwords, products
# subtracted from the accumulator words modulo 2^32 (no saturation).
0.4,6.RS,11.RA,16.RB,21.1421:EVX:e500:evmhosmianw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Integer and Accumulate Negative into Words
	signed32 al, ah, bl, bh;
	signed64 t1, t2, tl, th;
	al = (signed32)(signed16)EV_LOHALF(*rA);
	ah = (signed32)(signed16)EV_LOHALF(*rAh);
	bl = (signed32)(signed16)EV_LOHALF(*rB);
	bh = (signed32)(signed16)EV_LOHALF(*rBh);
	t1 = ah * bh;
	t2 = al * bl;
	th = EV_ACCHIGH - t1;
	tl = EV_ACCLOW - t2;
	EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1285
# evmhousianw: unsigned integer multiply of the odd halfwords, products
# subtracted from the accumulator words with unsigned-32 saturation.
0.4,6.RS,11.RA,16.RB,21.1412:EVX:e500:evmhousianw %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Saturate Integer and Accumulate Negative into Words
	unsigned32 al, ah, bl, bh;
	unsigned64 t1, t2;
	signed64 tl, th;
	int ovl, ovh;
	al = (unsigned32)(unsigned16)EV_LOHALF(*rA);
	ah = (unsigned32)(unsigned16)EV_LOHALF(*rAh);
	bl = (unsigned32)(unsigned16)EV_LOHALF(*rB);
	bh = (unsigned32)(unsigned16)EV_LOHALF(*rBh);
	t1 = ah * bh;
	t2 = al * bl;
	th = (signed64)EV_ACCHIGH - (signed64)t1;
	tl = (signed64)EV_ACCLOW - (signed64)t2;
	ovl = EV_SAT_P_U32(tl);
	ovh = EV_SAT_P_U32(th);
	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0, 0xffffffff, th),
			           EV_SATURATE_ACC(ovl, tl, 0, 0xffffffff, tl));
		//printf("evmhousianw: ovh %d ovl %d al %d ah %d bl %d bh %d t1 %qd t2 %qd tl %qd th %qd\n", ovh, ovl, al, ah, bl, bh, t1, t2, tl, th);
		//printf("evmoussianw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
	EV_SET_SPEFSCR_OV(ovl, ovh);
	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1307
# evmhoumianw: unsigned integer multiply of the odd halfwords, products
# subtracted from the accumulator words modulo 2^32 (no saturation).
0.4,6.RS,11.RA,16.RB,21.1420:EVX:e500:evmhoumianw %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Modulo Integer and Accumulate Negative into Words
	unsigned32 al, ah, bl, bh;
	unsigned32 t1, t2;
	unsigned64 tl, th;
	al = (unsigned32)(unsigned16)EV_LOHALF(*rA);
	ah = (unsigned32)(unsigned16)EV_LOHALF(*rAh);
	bl = (unsigned32)(unsigned16)EV_LOHALF(*rB);
	bh = (unsigned32)(unsigned16)EV_LOHALF(*rBh);
	t1 = ah * bh;
	t2 = al * bl;
	th = EV_ACCHIGH - t1;
	tl = EV_ACCLOW - t2;
	EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1322
# evmhessfanw: even-halfword counterpart of evmhossfanw — saturating
# fractional multiply, saturated products subtracted from the accumulator
# words, differences re-saturated to 32 bits.
0.4,6.RS,11.RA,16.RB,21.1411:EVX:e500:evmhessfanw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Fractional and Accumulate Negative into Words
	signed16 al, ah, bl, bh;
	signed32 t1, t2;
	signed64 tl, th;
	int movl, movh, ovl, ovh;

	al = (signed16) EV_HIHALF (*rA);
	ah = (signed16) EV_HIHALF (*rAh);
	bl = (signed16) EV_HIHALF (*rB);
	bh = (signed16) EV_HIHALF (*rBh);
	t1 = ev_multiply16_ssf (ah, bh, &movh);
	t2 = ev_multiply16_ssf (al, bl, &movl);
	th = EV_ACCHIGH - EV_SATURATE (movh, 0x7fffffff, t1);
	tl = EV_ACCLOW  - EV_SATURATE (movl, 0x7fffffff, t2);
	ovh = EV_SAT_P_S32 (th);
	ovl = EV_SAT_P_S32 (tl);
	EV_SET_REG2_ACC (*rSh, *rS, EV_SATURATE_ACC (ovh, th, 0x80000000, 0x7fffffff, th),
			            EV_SATURATE_ACC (ovl, tl, 0x80000000, 0x7fffffff, tl));
	EV_SET_SPEFSCR_OV (movl | ovl, movh | ovh);
	PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1343
# evmhessianw: signed integer multiply of the even halfwords, products
# subtracted from the accumulator words with signed-32 saturation.
0.4,6.RS,11.RA,16.RB,21.1409:EVX:e500:evmhessianw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Integer and Accumulate Negative into Words
	signed32 al, ah, bl, bh;
	signed64 t1, t2, tl, th;
	int ovl, ovh;
	al = (signed32)(signed16)EV_HIHALF(*rA);
	ah = (signed32)(signed16)EV_HIHALF(*rAh);
	bl = (signed32)(signed16)EV_HIHALF(*rB);
	bh = (signed32)(signed16)EV_HIHALF(*rBh);
	t1 = ah * bh;
	t2 = al * bl;
	th = EV_ACCHIGH - t1;
	tl = EV_ACCLOW - t2;
	ovh = EV_SAT_P_S32(th);
	ovl = EV_SAT_P_S32(tl);
	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
			           EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
	EV_SET_SPEFSCR_OV(ovl, ovh);
	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1362
# evmhesmfanw: fractional modulo multiply (doubled product) of the even
# halfwords, low 32 bits of each product subtracted from the accumulator
# words; no saturation.
0.4,6.RS,11.RA,16.RB,21.1419:EVX:e500:evmhesmfanw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Fractional and Accumulate Negative into Words
	signed32 al, ah, bl, bh;
	signed64 t1, t2, tl, th;
	/* Sign-extend the operands: this is a *signed* fractional multiply.
	   Previously the halfwords were zero-extended ((unsigned32)(unsigned16)),
	   which gave wrong products for negative halfwords; the odd-halfword
	   sibling evmhosmfanw sign-extends, and so does this one now.  */
	al = (signed32)(signed16)EV_HIHALF(*rA);
	ah = (signed32)(signed16)EV_HIHALF(*rAh);
	bl = (signed32)(signed16)EV_HIHALF(*rB);
	bh = (signed32)(signed16)EV_HIHALF(*rBh);
	t1 = ((signed64)ah * bh) << 1;
	t2 = ((signed64)al * bl) << 1;
	th = EV_ACCHIGH - (t1 & 0xffffffff);
	tl = EV_ACCLOW - (t2 & 0xffffffff);
	EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1376
# evmhesmianw: signed integer multiply of the even halfwords, products
# subtracted from the accumulator words modulo 2^32 (no saturation).
0.4,6.RS,11.RA,16.RB,21.1417:EVX:e500:evmhesmianw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Integer and Accumulate Negative into Words
	signed32 al, ah, bl, bh;
	signed64 t1, t2, tl, th;
	al = (signed32)(signed16)EV_HIHALF(*rA);
	ah = (signed32)(signed16)EV_HIHALF(*rAh);
	bl = (signed32)(signed16)EV_HIHALF(*rB);
	bh = (signed32)(signed16)EV_HIHALF(*rBh);
	t1 = ah * bh;
	t2 = al * bl;
	th = EV_ACCHIGH - t1;
	tl = EV_ACCLOW - t2;
	EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
		//printf("evmhesmianw: al %d ah %d bl %d bh %d t1 %qd t2 %qd tl %qd th %qd\n", al, ah, bl, bh, t1, t2, tl, th);
		//printf("evmhesmianw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1392
# evmheusianw: unsigned integer multiply of the even halfwords, products
# subtracted from the accumulator words with unsigned-32 saturation.
0.4,6.RS,11.RA,16.RB,21.1408:EVX:e500:evmheusianw %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Saturate Integer and Accumulate Negative into Words
	unsigned32 al, ah, bl, bh;
	unsigned64 t1, t2;
	signed64 tl, th;
	int ovl, ovh;
	al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
	ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
	bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
	bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
	t1 = ah * bh;
	t2 = al * bl;
	th = (signed64)EV_ACCHIGH - (signed64)t1;
	tl = (signed64)EV_ACCLOW - (signed64)t2;
	ovl = EV_SAT_P_U32(tl);
	ovh = EV_SAT_P_U32(th);
	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0, 0xffffffff, th),
			           EV_SATURATE_ACC(ovl, tl, 0, 0xffffffff, tl));
		//printf("evmheusianw: ovh %d ovl %d al %u ah %u bl %u bh %u t1 %qu t2 %qu tl %qd th %qd\n", ovh, ovl, al, ah, bl, bh, t1, t2, tl, th);
		//printf("evmheusianw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
	EV_SET_SPEFSCR_OV(ovl, ovh);
	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1414
# evmheumianw: unsigned integer multiply of the even halfwords, products
# subtracted from the accumulator words modulo 2^32 (no saturation).
0.4,6.RS,11.RA,16.RB,21.1416:EVX:e500:evmheumianw %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Modulo Integer and Accumulate Negative into Words
	unsigned32 al, ah, bl, bh;
	unsigned32 t1, t2;
	unsigned64 tl, th;
	al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
	ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
	bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
	bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
	t1 = ah * bh;
	t2 = al * bl;
	th = EV_ACCHIGH - t1;
	tl = EV_ACCLOW - t2;
	EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1429
# evmhogsmfaa: guarded form — single fractional multiply of the odd halfwords
# of the low GPR words, product added to the full 64-bit accumulator.
0.4,6.RS,11.RA,16.RB,21.1327:EVX:e500:evmhogsmfaa %RS,%RA,%RB:Multiply Half Words Odd Guarded Signed Modulo Fractional and Accumulate
	signed32 a, b;
	signed64 t1, t2;
	a = (signed32)(signed16)EV_LOHALF(*rA);
	b = (signed32)(signed16)EV_LOHALF(*rB);
	t1 = EV_MUL16_SSF(a, b);
	/* If bit 32 of the fractional product is set, propagate it through the
	   upper bits, i.e. sign-extend the 33-bit product to 64 bits.  */
	if (t1 & ((unsigned64)1 << 32))
	  t1 |= 0xfffffffe00000000;
	t2 = ACC + t1;
	EV_SET_REG1_ACC(*rSh, *rS, t2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1441
# evmhogsmiaa: guarded form — single signed integer multiply of the odd
# halfwords, 64-bit product added to the full 64-bit accumulator.
0.4,6.RS,11.RA,16.RB,21.1325:EVX:e500:evmhogsmiaa %RS,%RA,%RB:Multiply Half Words Odd Guarded Signed Modulo Integer and Accumulate
	signed32 a, b;
	signed64 t1, t2;
	a = (signed32)(signed16)EV_LOHALF(*rA);
	b = (signed32)(signed16)EV_LOHALF(*rB);
	t1 = (signed64)a * (signed64)b;
	t2 = (signed64)ACC + t1;
	EV_SET_REG1_ACC(*rSh, *rS, t2);
		//printf("evmhogsmiaa: a %d b %d t1 %qd t2 %qd\n", a, b, t1, t2);
		//printf("evmhogsmiaa: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1453
# evmhogumiaa: guarded form — single unsigned integer multiply of the odd
# halfwords, product added to the full 64-bit accumulator.
0.4,6.RS,11.RA,16.RB,21.1324:EVX:e500:evmhogumiaa %RS,%RA,%RB:Multiply Half Words Odd Guarded Unsigned Modulo Integer and Accumulate
	unsigned32 a, b;
	unsigned64 t1, t2;
	a = (unsigned32)(unsigned16)EV_LOHALF(*rA);
	b = (unsigned32)(unsigned16)EV_LOHALF(*rB);
	t1 = a * b;
	t2 = ACC + t1;
	EV_SET_REG1_ACC(*rSh, *rS, t2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1463
# evmhegsmfaa: guarded form — single fractional multiply of the even (high)
# halfwords, product added to the full 64-bit accumulator.
0.4,6.RS,11.RA,16.RB,21.1323:EVX:e500:evmhegsmfaa %RS,%RA,%RB:Multiply Half Words Even Guarded Signed Modulo Fractional and Accumulate
	signed32 a, b;
	signed64 t1, t2;
	a = (signed32)(signed16)EV_HIHALF(*rA);
	b = (signed32)(signed16)EV_HIHALF(*rB);
	t1 = EV_MUL16_SSF(a, b);
	/* Sign-extend the 33-bit fractional product to 64 bits.  */
	if (t1 & ((unsigned64)1 << 32))
	  t1 |= 0xfffffffe00000000;
	t2 = ACC + t1;
	EV_SET_REG1_ACC(*rSh, *rS, t2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1475
# evmhegsmiaa: guarded form — single signed integer multiply of the even
# halfwords, 64-bit product added to the full 64-bit accumulator.
0.4,6.RS,11.RA,16.RB,21.1321:EVX:e500:evmhegsmiaa %RS,%RA,%RB:Multiply Half Words Even Guarded Signed Modulo Integer and Accumulate
	signed32 a, b;
	signed64 t1, t2;
	a = (signed32)(signed16)EV_HIHALF(*rA);
	b = (signed32)(signed16)EV_HIHALF(*rB);
	/* Widen before multiplying, matching the odd-halfword sibling
	   evmhogsmiaa.  (The product of two sign-extended halfwords fits in
	   32 bits, so this is also safe against 32-bit overflow.)  */
	t1 = (signed64)a * (signed64)b;
	t2 = (signed64)ACC + t1;
	EV_SET_REG1_ACC(*rSh, *rS, t2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1485
# evmhegumiaa: guarded form — single unsigned integer multiply of the even
# halfwords, product added to the full 64-bit accumulator.
0.4,6.RS,11.RA,16.RB,21.1320:EVX:e500:evmhegumiaa %RS,%RA,%RB:Multiply Half Words Even Guarded Unsigned Modulo Integer and Accumulate
	unsigned32 a, b;
	unsigned64 t1, t2;
	a = (unsigned32)(unsigned16)EV_HIHALF(*rA);
	b = (unsigned32)(unsigned16)EV_HIHALF(*rB);
	t1 = a * b;
	t2 = ACC + t1;
	EV_SET_REG1_ACC(*rSh, *rS, t2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1495
1496
# evmhogsmfan: guarded negative form — fractional multiply of the odd
# halfwords, product subtracted from the full 64-bit accumulator.
0.4,6.RS,11.RA,16.RB,21.1455:EVX:e500:evmhogsmfan %RS,%RA,%RB:Multiply Half Words Odd Guarded Signed Modulo Fractional and Accumulate Negative
	signed32 a, b;
	signed64 t1, t2;
	a = (signed32)(signed16)EV_LOHALF(*rA);
	b = (signed32)(signed16)EV_LOHALF(*rB);
	t1 = EV_MUL16_SSF(a, b);
	/* Sign-extend the 33-bit fractional product to 64 bits.  */
	if (t1 & ((unsigned64)1 << 32))
	  t1 |= 0xfffffffe00000000;
	t2 = ACC - t1;
	EV_SET_REG1_ACC(*rSh, *rS, t2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1508
# evmhogsmian: guarded negative form — signed integer multiply of the odd
# halfwords, product subtracted from the full 64-bit accumulator.
0.4,6.RS,11.RA,16.RB,21.1453:EVX:e500:evmhogsmian %RS,%RA,%RB:Multiply Half Words Odd Guarded Signed Modulo Integer and Accumulate Negative
	signed32 a, b;
	signed64 t1, t2;
	a = (signed32)(signed16)EV_LOHALF(*rA);
	b = (signed32)(signed16)EV_LOHALF(*rB);
	t1 = (signed64)a * (signed64)b;
	t2 = ACC - t1;
	EV_SET_REG1_ACC(*rSh, *rS, t2);
		//printf("evmhogsmian: a %d b %d t1 %qd t2 %qd\n", a, b, t1, t2);
		//printf("evmhogsmian: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1520
# evmhogumian: guarded negative form — unsigned integer multiply of the odd
# halfwords, product subtracted from the full 64-bit accumulator.
0.4,6.RS,11.RA,16.RB,21.1452:EVX:e500:evmhogumian %RS,%RA,%RB:Multiply Half Words Odd Guarded Unsigned Modulo Integer and Accumulate Negative
	unsigned32 a, b;
	unsigned64 t1, t2;
	a = (unsigned32)(unsigned16)EV_LOHALF(*rA);
	b = (unsigned32)(unsigned16)EV_LOHALF(*rB);
	t1 = (unsigned64)a * (unsigned64)b;
	t2 = ACC - t1;
	EV_SET_REG1_ACC(*rSh, *rS, t2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1530
# evmhegsmfan: guarded negative form — fractional multiply of the even
# halfwords, product subtracted from the full 64-bit accumulator.
0.4,6.RS,11.RA,16.RB,21.1451:EVX:e500:evmhegsmfan %RS,%RA,%RB:Multiply Half Words Even Guarded Signed Modulo Fractional and Accumulate Negative
	signed32 a, b;
	signed64 t1, t2;
	a = (signed32)(signed16)EV_HIHALF(*rA);
	b = (signed32)(signed16)EV_HIHALF(*rB);
	t1 = EV_MUL16_SSF(a, b);
	/* Sign-extend the 33-bit fractional product to 64 bits.  */
	if (t1 & ((unsigned64)1 << 32))
	  t1 |= 0xfffffffe00000000;
	t2 = ACC - t1;
	EV_SET_REG1_ACC(*rSh, *rS, t2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1542
# evmhegsmian: guarded negative form — signed integer multiply of the even
# halfwords, product subtracted from the full 64-bit accumulator.
0.4,6.RS,11.RA,16.RB,21.1449:EVX:e500:evmhegsmian %RS,%RA,%RB:Multiply Half Words Even Guarded Signed Modulo Integer and Accumulate Negative
	signed32 a, b;
	signed64 t1, t2;
	a = (signed32)(signed16)EV_HIHALF(*rA);
	b = (signed32)(signed16)EV_HIHALF(*rB);
	t1 = (signed64)a * (signed64)b;
	t2 = ACC - t1;
	EV_SET_REG1_ACC(*rSh, *rS, t2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1552
# evmhegumian: guarded negative form — unsigned integer multiply of the even
# halfwords, product subtracted from the full 64-bit accumulator.
0.4,6.RS,11.RA,16.RB,21.1448:EVX:e500:evmhegumian %RS,%RA,%RB:Multiply Half Words Even Guarded Unsigned Modulo Integer and Accumulate Negative
	unsigned32 a, b;
	unsigned64 t1, t2;
	a = (unsigned32)(unsigned16)EV_HIHALF(*rA);
	b = (unsigned32)(unsigned16)EV_HIHALF(*rB);
	t1 = (unsigned64)a * (unsigned64)b;
	t2 = ACC - t1;
	EV_SET_REG1_ACC(*rSh, *rS, t2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1562
1563
# evmwhssf: saturating fractional multiply of the full 32-bit words of each
# GPR half; the HIGH 32 bits of each 64-bit product (saturated on overflow)
# go to the RS register pair.
0.4,6.RS,11.RA,16.RB,21.1095:EVX:e500:evmwhssf %RS,%RA,%RB:Vector Multiply Word High Signed Saturate Fractional
	signed32 al, ah, bl, bh;
	signed64 t1, t2;
	int movl, movh;
	al = *rA;
	ah = *rAh;
	bl = *rB;
	bh = *rBh;
	t1 = ev_multiply32_ssf(al, bl, &movl);
	t2 = ev_multiply32_ssf(ah, bh, &movh);
	EV_SET_REG2(*rSh, *rS, EV_SATURATE(movh, 0x7fffffff, t2 >> 32),
			       EV_SATURATE(movl, 0x7fffffff, t1 >> 32));
	EV_SET_SPEFSCR_OV(movl, movh);
	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1578
# evmwhssfa: as evmwhssf, additionally writing the result to the accumulator.
0.4,6.RS,11.RA,16.RB,21.1127:EVX:e500:evmwhssfa %RS,%RA,%RB:Vector Multiply Word High Signed Saturate Fractional and Accumulate
	signed32 al, ah, bl, bh;
	signed64 t1, t2;
	int movl, movh;
	al = *rA;
	ah = *rAh;
	bl = *rB;
	bh = *rBh;
	t1 = ev_multiply32_ssf(al, bl, &movl);
	t2 = ev_multiply32_ssf(ah, bh, &movh);
	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(movh, 0x7fffffff, t2 >> 32),
			           EV_SATURATE(movl, 0x7fffffff, t1 >> 32));
	EV_SET_SPEFSCR_OV(movl, movh);
	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1593
# evmwhsmf: fractional modulo multiply of the full words; high 32 bits of
# each product to the RS register pair, no saturation.
0.4,6.RS,11.RA,16.RB,21.1103:EVX:e500:evmwhsmf %RS,%RA,%RB:Vector Multiply Word High Signed Modulo Fractional
	signed32 al, ah, bl, bh;
	signed64 t1, t2;
	al = *rA;
	ah = *rAh;
	bl = *rB;
	bh = *rBh;
	t1 = EV_MUL32_SSF(al, bl);
	t2 = EV_MUL32_SSF(ah, bh);
	EV_SET_REG2(*rSh, *rS, t2 >> 32, t1 >> 32);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1605
# evmwhsmfa: as evmwhsmf, additionally writing the result to the accumulator.
0.4,6.RS,11.RA,16.RB,21.1135:EVX:e500:evmwhsmfa %RS,%RA,%RB:Vector Multiply Word High Signed Modulo Fractional and Accumulate
	signed32 al, ah, bl, bh;
	signed64 t1, t2;
	al = *rA;
	ah = *rAh;
	bl = *rB;
	bh = *rBh;
	t1 = EV_MUL32_SSF(al, bl);
	t2 = EV_MUL32_SSF(ah, bh);
	EV_SET_REG2_ACC(*rSh, *rS, t2 >> 32, t1 >> 32);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1617
# evmwhsmi: signed integer multiply of the full words; high 32 bits of each
# 64-bit product to the RS register pair.
0.4,6.RS,11.RA,16.RB,21.1101:EVX:e500:evmwhsmi %RS,%RA,%RB:Vector Multiply Word High Signed Modulo Integer
	signed32 al, ah, bl, bh;
	signed64 t1, t2;
	al = *rA;
	ah = *rAh;
	bl = *rB;
	bh = *rBh;
	t1 = (signed64)al * (signed64)bl;
	t2 = (signed64)ah * (signed64)bh;
	EV_SET_REG2(*rSh, *rS, t2 >> 32, t1 >> 32);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1629
# evmwhsmia: as evmwhsmi, additionally writing the result to the accumulator.
0.4,6.RS,11.RA,16.RB,21.1133:EVX:e500:evmwhsmia %RS,%RA,%RB:Vector Multiply Word High Signed Modulo Integer and Accumulate
	signed32 al, ah, bl, bh;
	signed64 t1, t2;
	al = *rA;
	ah = *rAh;
	bl = *rB;
	bh = *rBh;
	t1 = (signed64)al * (signed64)bl;
	t2 = (signed64)ah * (signed64)bh;
	EV_SET_REG2_ACC(*rSh, *rS, t2 >> 32, t1 >> 32);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1641
# evmwhumi: unsigned integer multiply of the full words; high 32 bits of
# each 64-bit product to the RS register pair.
0.4,6.RS,11.RA,16.RB,21.1100:EVX:e500:evmwhumi %RS,%RA,%RB:Vector Multiply Word High Unsigned Modulo Integer
	unsigned32 al, ah, bl, bh;
	unsigned64 t1, t2;
	al = *rA;
	ah = *rAh;
	bl = *rB;
	bh = *rBh;
	t1 = (unsigned64)al * (unsigned64)bl;
	t2 = (unsigned64)ah * (unsigned64)bh;
	EV_SET_REG2(*rSh, *rS, t2 >> 32, t1 >> 32);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1653
# evmwhumia: as evmwhumi, additionally writing the result to the accumulator.
0.4,6.RS,11.RA,16.RB,21.1132:EVX:e500:evmwhumia %RS,%RA,%RB:Vector Multiply Word High Unsigned Modulo Integer and Accumulate
	unsigned32 al, ah, bl, bh;
	unsigned64 t1, t2;
	al = *rA;
	ah = *rAh;
	bl = *rB;
	bh = *rBh;
	t1 = (unsigned64)al * (unsigned64)bl;
	t2 = (unsigned64)ah * (unsigned64)bh;
	EV_SET_REG2_ACC(*rSh, *rS, t2 >> 32, t1 >> 32);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1665
1666
16670.4,6.RS,11.RA,16.RB,21.1091:EVX:e500:evmwlssf %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Fractional
1668	signed32 al, ah, bl, bh;
1669	signed64 t1, t2;
1670	int movl, movh;
1671	al = *rA;
1672	ah = *rAh;
1673	bl = *rB;
1674	bh = *rBh;
1675	t1 = ev_multiply32_ssf(al, bl, &movl);
1676	t2 = ev_multiply32_ssf(ah, bh, &movh);
1677	EV_SET_REG2(*rSh, *rS, EV_SATURATE(movh, 0xffffffff, t2),
1678			       EV_SATURATE(movl, 0xffffffff, t1));
1679	EV_SET_SPEFSCR_OV(movl, movh);
1680	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1681
16820.4,6.RS,11.RA,16.RB,21.1123:EVX:e500:evmwlssfa %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Fractional and Accumulate
1683	signed32 al, ah, bl, bh;
1684	signed64 t1, t2;
1685	int movl, movh;
1686	al = *rA;
1687	ah = *rAh;
1688	bl = *rB;
1689	bh = *rBh;
1690	t1 = ev_multiply32_ssf(al, bl, &movl);
1691	t2 = ev_multiply32_ssf(ah, bh, &movh);
1692	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(movh, 0xffffffff, t2),
1693			           EV_SATURATE(movl, 0xffffffff, t1));
1694	EV_SET_SPEFSCR_OV(movl, movh);
1695	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1696
16970.4,6.RS,11.RA,16.RB,21.1099:EVX:e500:evmwlsmf %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Fractional
1698	signed32 al, ah, bl, bh;
1699	signed64 t1, t2;
1700	al = *rA;
1701	ah = *rAh;
1702	bl = *rB;
1703	bh = *rBh;
1704	t1 = EV_MUL32_SSF(al, bl);
1705	t2 = EV_MUL32_SSF(ah, bh);
1706	EV_SET_REG2(*rSh, *rS, t2, t1);
1707	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1708
17090.4,6.RS,11.RA,16.RB,21.1131:EVX:e500:evmwlsmfa %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Fractional and Accumulate
1710	signed32 al, ah, bl, bh;
1711	signed64 t1, t2;
1712	al = *rA;
1713	ah = *rAh;
1714	bl = *rB;
1715	bh = *rBh;
1716	t1 = EV_MUL32_SSF(al, bl);
1717	t2 = EV_MUL32_SSF(ah, bh);
1718	EV_SET_REG2_ACC(*rSh, *rS, t2, t1);
1719	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1720
17210.4,6.RS,11.RA,16.RB,21.1096:EVX:e500:evmwlumi %RS,%RA,%RB:Vector Multiply Word Low Unsigned Modulo Integer
1722	unsigned32 al, ah, bl, bh;
1723	unsigned64 t1, t2;
1724	al = *rA;
1725	ah = *rAh;
1726	bl = *rB;
1727	bh = *rBh;
1728	t1 = (unsigned64)al * (unsigned64)bl;
1729	t2 = (unsigned64)ah * (unsigned64)bh;
1730	EV_SET_REG2(*rSh, *rS, t2, t1);
1731	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1732
17330.4,6.RS,11.RA,16.RB,21.1128:EVX:e500:evmwlumia %RS,%RA,%RB:Vector Multiply Word Low Unsigned Modulo Integer and Accumulate
1734	unsigned32 al, ah, bl, bh;
1735	unsigned64 t1, t2;
1736	al = *rA;
1737	ah = *rAh;
1738	bl = *rB;
1739	bh = *rBh;
1740	t1 = (unsigned64)al * (unsigned64)bl;
1741	t2 = (unsigned64)ah * (unsigned64)bh;
1742	EV_SET_REG2_ACC(*rSh, *rS, t2, t1);
1743	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1744
1745
17460.4,6.RS,11.RA,16.RB,21.1347:EVX:e500:evmwlssfaaw %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Fractional and Accumulate in Words
1747	signed32 al, ah, bl, bh;
1748	signed64 t1, t2, tl, th;
1749	int movl, movh, ovl, ovh;
1750	al = *rA;
1751	ah = *rAh;
1752	bl = *rB;
1753	bh = *rBh;
1754	t1 = ev_multiply32_ssf(ah, bh, &movh);
1755	t2 = ev_multiply32_ssf(al, bl, &movl);
1756	th = EV_ACCHIGH + EV_SATURATE(movh, 0xffffffff, t1);
1757	tl = EV_ACCLOW + EV_SATURATE(movl, 0xffffffff, t2);
1758	ovh = EV_SAT_P_S32(th);
1759	ovl = EV_SAT_P_S32(tl);
1760	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
1761			           EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
1762	EV_SET_SPEFSCR_OV(movl | ovl, movh | ovh);
1763	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1764
17650.4,6.RS,11.RA,16.RB,21.1345:EVX:e500:evmwlssiaaw %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Integer and Accumulate in Words
1766	signed32 al, ah, bl, bh;
1767	signed64 t1, t2, tl, th;
1768	int ovl, ovh;
1769	al = *rA;
1770	ah = *rAh;
1771	bl = *rB;
1772	bh = *rBh;
1773	t1 = (signed64)ah * (signed64)bh;
1774	t2 = (signed64)al * (signed64)bl;
1775	th = EV_ACCHIGH + (t1 & 0xffffffff);
1776	tl = EV_ACCLOW + (t2 & 0xffffffff);
1777	ovh = EV_SAT_P_S32(th);
1778	ovl = EV_SAT_P_S32(tl);
1779	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
1780			           EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
1781	EV_SET_SPEFSCR_OV(ovl, ovh);
1782	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1783
17840.4,6.RS,11.RA,16.RB,21.1355:EVX:e500:evmwlsmfaaw %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Fractional and Accumulate in Words
1785	signed32 al, ah, bl, bh;
1786	signed64 t1, t2;
1787	int mov;
1788	al = *rA;
1789	ah = *rAh;
1790	bl = *rB;
1791	bh = *rBh;
1792	t1 = ev_multiply32_smf(ah, bh, &mov);
1793	t2 = ev_multiply32_smf(al, bl, &mov);
1794	EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH + (t1 & 0xffffffff),
1795				 EV_ACCLOW + (t2 & 0xffffffff));
1796	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1797
17980.4,6.RS,11.RA,16.RB,21.1353:EVX:e500:evmwlsmiaaw %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Integer and Accumulate in Words
1799	signed32 al, ah, bl, bh;
1800	signed64 t1, t2;
1801	al = *rA;
1802	ah = *rAh;
1803	bl = *rB;
1804	bh = *rBh;
1805	t1 = (signed64)ah * (signed64)bh;
1806	t2 = (signed64)al * (signed64)bl;
1807	EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH + (t1 & 0xffffffff),
1808				 EV_ACCLOW + (t2 & 0xffffffff));
1809		//printf("evmwlsmiaaw: al %d ah %d bl %d bh %d t1 %qd t2 %qd\n", al, ah, bl, bh, t1, t2);
1810		//printf("evmwlsmiaaw: *rSh = %08x; *rS = %08x\n", *rSh, *rS);
1811	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1812
18130.4,6.RS,11.RA,16.RB,21.1344:EVX:e500:evmwlusiaaw %RS,%RA,%RB:Vector Multiply Word Low Unsigned Saturate Integer and Accumulate in Words
1814	unsigned32 al, ah, bl, bh;
1815	unsigned64 t1, t2, tl, th;
1816	int ovl, ovh;
1817	al = *rA;
1818	ah = *rAh;
1819	bl = *rB;
1820	bh = *rBh;
1821	t1 = (unsigned64)ah * (unsigned64)bh;
1822	t2 = (unsigned64)al * (unsigned64)bl;
1823	th = EV_ACCHIGH + (t1 & 0xffffffff);
1824	tl = EV_ACCLOW + (t2 & 0xffffffff);
1825	ovh = (th >> 32);
1826	ovl = (tl >> 32);
1827	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(ovh, 0xffffffff, th),
1828			           EV_SATURATE(ovl, 0xffffffff, tl));
1829	EV_SET_SPEFSCR_OV(ovl, ovh);
1830	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1831
18320.4,6.RS,11.RA,16.RB,21.1352:EVX:e500:evmwlumiaaw %RS,%RA,%RB:Vector Multiply Word Low Unsigned Modulo Integer and Accumulate in Words
1833	unsigned32 al, ah, bl, bh;
1834	unsigned64 t1, t2;
1835	al = *rA;
1836	ah = *rAh;
1837	bl = *rB;
1838	bh = *rBh;
1839	t1 = (unsigned64)ah * (unsigned64)bh;
1840	t2 = (unsigned64)al * (unsigned64)bl;
1841	EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH + (t1 & 0xffffffff),
1842				 EV_ACCLOW + (t2 & 0xffffffff));
1843	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1844
1845
18460.4,6.RS,11.RA,16.RB,21.1475:EVX:e500:evmwlssfanw %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Fractional and Accumulate Negative in Words
1847	signed32 al, ah, bl, bh;
1848	signed64 t1, t2, tl, th;
1849	int movl, movh, ovl, ovh;
1850	al = *rA;
1851	ah = *rAh;
1852	bl = *rB;
1853	bh = *rBh;
1854	t1 = ev_multiply32_ssf(ah, bh, &movh);
1855	t2 = ev_multiply32_ssf(al, bl, &movl);
1856	th = EV_ACCHIGH - EV_SATURATE(movh, 0xffffffff, t1);
1857	tl = EV_ACCLOW - EV_SATURATE(movl, 0xffffffff, t2);
1858	ovh = EV_SAT_P_S32(th);
1859	ovl = EV_SAT_P_S32(tl);
1860	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
1861			           EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
1862	EV_SET_SPEFSCR_OV(movl | ovl, movh | ovh);
1863	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1864
18650.4,6.RS,11.RA,16.RB,21.1473:EVX:e500:evmwlssianw %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Integer and Accumulate Negative in Words
1866	signed32 al, ah, bl, bh;
1867	signed64 t1, t2, tl, th;
1868	int ovl, ovh;
1869	al = *rA;
1870	ah = *rAh;
1871	bl = *rB;
1872	bh = *rBh;
1873	t1 = (signed64)ah * (signed64)bh;
1874	t2 = (signed64)al * (signed64)bl;
1875	th = EV_ACCHIGH - (t1 & 0xffffffff);
1876	tl = EV_ACCLOW - (t2 & 0xffffffff);
1877	ovh = EV_SAT_P_S32(th);
1878	ovl = EV_SAT_P_S32(tl);
1879	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
1880			           EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
1881	EV_SET_SPEFSCR_OV(ovl, ovh);
1882	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1883
18840.4,6.RS,11.RA,16.RB,21.1483:EVX:e500:evmwlsmfanw %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Fractional and Accumulate Negative in Words
1885	signed32 al, ah, bl, bh;
1886	signed64 t1, t2;
1887	int mov;
1888	al = *rA;
1889	ah = *rAh;
1890	bl = *rB;
1891	bh = *rBh;
1892	t1 = ev_multiply32_smf(ah, bh, &mov);
1893	t2 = ev_multiply32_smf(al, bl, &mov);
1894	EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH - (t1 & 0xffffffff),
1895				 EV_ACCLOW - (t2 & 0xffffffff));
1896	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1897
18980.4,6.RS,11.RA,16.RB,21.1481:EVX:e500:evmwlsmianw %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Integer and Accumulate Negative in Words
1899	signed32 al, ah, bl, bh;
1900	signed64 t1, t2;
1901	al = *rA;
1902	ah = *rAh;
1903	bl = *rB;
1904	bh = *rBh;
1905	t1 = (signed64)ah * (signed64)bh;
1906	t2 = (signed64)al * (signed64)bl;
1907	EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH - (t1 & 0xffffffff),
1908				 EV_ACCLOW - (t2 & 0xffffffff));
1909	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1910
19110.4,6.RS,11.RA,16.RB,21.1472:EVX:e500:evmwlusianw %RS,%RA,%RB:Vector Multiply Word Low Unsigned Saturate Integer and Accumulate Negative in Words
1912	unsigned32 al, ah, bl, bh;
1913	unsigned64 t1, t2, tl, th;
1914	int ovl, ovh;
1915	al = *rA;
1916	ah = *rAh;
1917	bl = *rB;
1918	bh = *rBh;
1919	t1 = (unsigned64)ah * (unsigned64)bh;
1920	t2 = (unsigned64)al * (unsigned64)bl;
1921	th = EV_ACCHIGH - (t1 & 0xffffffff);
1922	tl = EV_ACCLOW - (t2 & 0xffffffff);
1923	ovh = (th >> 32);
1924	ovl = (tl >> 32);
1925	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(ovh, 0xffffffff, th),
1926			           EV_SATURATE(ovl, 0xffffffff, tl));
1927		//printf("evmwlusianw: ovl %d ovh %d al %d ah %d bl %d bh %d t1 %qd t2 %qd th %qd tl %qd\n", ovl, ovh, al, ah, al, bh, t1, t2, th, tl);
1928		//printf("evmwlusianw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
1929	EV_SET_SPEFSCR_OV(ovl, ovh);
1930	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1931
19320.4,6.RS,11.RA,16.RB,21.1480:EVX:e500:evmwlumianw %RS,%RA,%RB:Vector Multiply Word Low Unsigned Modulo Integer and Accumulate Negative in Words
1933	unsigned32 al, ah, bl, bh;
1934	unsigned64 t1, t2;
1935	al = *rA;
1936	ah = *rAh;
1937	bl = *rB;
1938	bh = *rBh;
1939	t1 = (unsigned64)ah * (unsigned64)bh;
1940	t2 = (unsigned64)al * (unsigned64)bl;
1941	EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH - (t1 & 0xffffffff),
1942				   EV_ACCLOW - (t2 & 0xffffffff));
1943	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1944
1945
19460.4,6.RS,11.RA,16.RB,21.1107:EVX:e500:evmwssf %RS,%RA,%RB:Vector Multiply Word Signed Saturate Fractional
1947	signed32 a, b;
1948	signed64 t;
1949	int movl;
1950	a = *rA;
1951	b = *rB;
1952	t = ev_multiply32_ssf(a, b, &movl);
1953	EV_SET_REG1(*rSh, *rS, EV_SATURATE(movl, 0x7fffffffffffffff, t));
1954	EV_SET_SPEFSCR_OV(movl, 0);
1955	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1956
19570.4,6.RS,11.RA,16.RB,21.1139:EVX:e500:evmwssfa %RS,%RA,%RB:Vector Multiply Word Signed Saturate Fractional and Accumulate
1958	signed32 a, b;
1959	signed64 t;
1960	int movl;
1961	a = *rA;
1962	b = *rB;
1963	t = ev_multiply32_ssf(a, b, &movl);
1964	EV_SET_REG1_ACC(*rSh, *rS, EV_SATURATE(movl, 0x7fffffffffffffff, t));
1965	EV_SET_SPEFSCR_OV(movl, 0);
1966	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1967
19680.4,6.RS,11.RA,16.RB,21.1115:EVX:e500:evmwsmf %RS,%RA,%RB:Vector Multiply Word Signed Modulo Fractional
1969	signed32 a, b;
1970	signed64 t;
1971	int movl;
1972	a = *rA;
1973	b = *rB;
1974	t = ev_multiply32_smf(a, b, &movl);
1975	EV_SET_REG1(*rSh, *rS, t);
1976	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1977
19780.4,6.RS,11.RA,16.RB,21.1147:EVX:e500:evmwsmfa %RS,%RA,%RB:Vector Multiply Word Signed Modulo Fractional and Accumulate
1979	signed32 a, b;
1980	signed64 t;
1981	int movl;
1982	a = *rA;
1983	b = *rB;
1984	t = ev_multiply32_smf(a, b, &movl);
1985	EV_SET_REG1_ACC(*rSh, *rS, t);
1986	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1987
19880.4,6.RS,11.RA,16.RB,21.1113:EVX:e500:evmwsmi %RS,%RA,%RB:Vector Multiply Word Signed Modulo Integer
1989	signed32 a, b;
1990	signed64 t;
1991	int movl;
1992	a = *rA;
1993	b = *rB;
1994	t = (signed64)a * (signed64)b;
1995	EV_SET_REG1(*rSh, *rS, t);
1996	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1997
19980.4,6.RS,11.RA,16.RB,21.1145:EVX:e500:evmwsmia %RS,%RA,%RB:Vector Multiply Word Signed Modulo Integer and Accumulate
1999	signed32 a, b;
2000	signed64 t;
2001	int movl;
2002	a = *rA;
2003	b = *rB;
2004	t = (signed64)a * (signed64)b;
2005	EV_SET_REG1_ACC(*rSh, *rS, t);
2006	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
2007
20080.4,6.RS,11.RA,16.RB,21.1112:EVX:e500:evmwumi %RS,%RA,%RB:Vector Multiply Word Unigned Modulo Integer
2009	unsigned32 a, b;
2010	unsigned64 t;
2011	int movl;
2012	a = *rA;
2013	b = *rB;
2014	t = (signed64)a * (signed64)b;
2015	EV_SET_REG1(*rSh, *rS, t);
2016	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
2017
20180.4,6.RS,11.RA,16.RB,21.1144:EVX:e500:evmwumia %RS,%RA,%RB:Vector Multiply Word Unigned Modulo Integer and Accumulate
2019	unsigned32 a, b;
2020	unsigned64 t;
2021	int movl;
2022	a = *rA;
2023	b = *rB;
2024	t = (signed64)a * (signed64)b;
2025	EV_SET_REG1_ACC(*rSh, *rS, t);
2026	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
2027
2028
20290.4,6.RS,11.RA,16.RB,21.1363:EVX:e500:evmwssfaa %RS,%RA,%RB:Vector Multiply Word Signed Saturate Fractional Add and Accumulate
2030	signed64 t1, t2;
2031	signed32 a, b;
2032	int movl;
2033	a = *rA;
2034	b = *rB;
2035	t1 = ev_multiply32_ssf(a, b, &movl);
2036	t2 = ACC + EV_SATURATE(movl, 0x7fffffffffffffff, t1);
2037	EV_SET_REG1_ACC(*rSh, *rS, t2);
2038	EV_SET_SPEFSCR_OV(movl, 0);
2039	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2040
20410.4,6.RS,11.RA,16.RB,21.1371:EVX:e500:evmwsmfaa %RS,%RA,%RB:Vector Multiply Word Signed Modulo Fractional Add and Accumulate
2042	signed64 t1, t2;
2043	signed32 a, b;
2044	int movl;
2045	a = *rA;
2046	b = *rB;
2047	t1 = ev_multiply32_smf(a, b, &movl);
2048	t2 = ACC + t1;
2049	EV_SET_REG1_ACC(*rSh, *rS, t2);
2050	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
2051
20520.4,6.RS,11.RA,16.RB,21.1369:EVX:e500:evmwsmiaa %RS,%RA,%RB:Vector Multiply Word Signed Modulo Integer And and Accumulate
2053	signed64 t1, t2;
2054	signed32 a, b;
2055	a = *rA;
2056	b = *rB;
2057	t1 = (signed64)a * (signed64)b;
2058	t2 = ACC + t1;
2059	EV_SET_REG1_ACC(*rSh, *rS, t2);
2060	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
2061
20620.4,6.RS,11.RA,16.RB,21.1368:EVX:e500:evmwumiaa %RS,%RA,%RB:Vector Multiply Word Unsigned Modulo Integer Add and Accumulate
2063	unsigned64 t1, t2;
2064	unsigned32 a, b;
2065	a = *rA;
2066	b = *rB;
2067	t1 = (unsigned64)a * (unsigned64)b;
2068	t2 = ACC + t1;
2069	EV_SET_REG1_ACC(*rSh, *rS, t2);
2070	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
2071
2072
20730.4,6.RS,11.RA,16.RB,21.1491:EVX:e500:evmwssfan %RS,%RA,%RB:Vector Multiply Word Signed Saturate Fractional and Accumulate Negative
2074	signed64 t1, t2;
2075	signed32 a, b;
2076	int movl;
2077	a = *rA;
2078	b = *rB;
2079	t1 = ev_multiply32_ssf(a, b, &movl);
2080	t2 = ACC - EV_SATURATE(movl, 0x7fffffffffffffff, t1);
2081	EV_SET_REG1_ACC(*rSh, *rS, t2);
2082	EV_SET_SPEFSCR_OV(movl, 0);
2083	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2084
20850.4,6.RS,11.RA,16.RB,21.1499:EVX:e500:evmwsmfan %RS,%RA,%RB:Vector Multiply Word Signed Modulo Fractional and Accumulate Negative
2086	signed64 t1, t2;
2087	signed32 a, b;
2088	int movl;
2089	a = *rA;
2090	b = *rB;
2091	t1 = ev_multiply32_smf(a, b, &movl);
2092	t2 = ACC - t1;
2093	EV_SET_REG1_ACC(*rSh, *rS, t2);
2094	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
2095
20960.4,6.RS,11.RA,16.RB,21.1497:EVX:e500:evmwsmian %RS,%RA,%RB:Vector Multiply Word Signed Modulo Integer and Accumulate Negative
2097	signed64 t1, t2;
2098	signed32 a, b;
2099	a = *rA;
2100	b = *rB;
2101	t1 = (signed64)a * (signed64)b;
2102	t2 = ACC - t1;
2103	EV_SET_REG1_ACC(*rSh, *rS, t2);
2104	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
2105
21060.4,6.RS,11.RA,16.RB,21.1496:EVX:e500:evmwumian %RS,%RA,%RB:Vector Multiply Word Unsigned Modulo Integer and Accumulate Negative
2107	unsigned64 t1, t2;
2108	unsigned32 a, b;
2109	a = *rA;
2110	b = *rB;
2111	t1 = (unsigned64)a * (unsigned64)b;
2112	t2 = ACC - t1;
2113	EV_SET_REG1_ACC(*rSh, *rS, t2);
2114	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
2115
2116
21170.4,6.RS,11.RA,16.0,21.1217:EVX:e500:evaddssiaaw %RS,%RA:Vector Add Signed Saturate Integer to Accumulator Word
2118	signed64 t1, t2;
2119	signed32 al, ah;
2120	int ovl, ovh;
2121	al = *rA;
2122	ah = *rAh;
2123	t1 = (signed64)EV_ACCHIGH + (signed64)ah;
2124	t2 = (signed64)EV_ACCLOW + (signed64)al;
2125	ovh = EV_SAT_P_S32(t1);
2126	ovl = EV_SAT_P_S32(t2);
2127	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, t1 & ((unsigned64)1 << 32), 0x80000000, 0x7fffffff, t1),
2128			           EV_SATURATE_ACC(ovl, t2 & ((unsigned64)1 << 32), 0x80000000, 0x7fffffff, t2));
2129	EV_SET_SPEFSCR_OV(ovl, ovh);
2130	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK, spr_spefscr);
2131
21320.4,6.RS,11.RA,16.0,21.1225:EVX:e500:evaddsmiaaw %RS,%RA:Vector Add Signed Modulo Integer to Accumulator Word
2133	signed64 t1, t2;
2134	signed32 al, ah;
2135	al = *rA;
2136	ah = *rAh;
2137	t1 = (signed64)EV_ACCHIGH + (signed64)ah;
2138	t2 = (signed64)EV_ACCLOW + (signed64)al;
2139	EV_SET_REG2_ACC(*rSh, *rS, t1, t2);
2140		//printf("evaddsmiaaw: al %d ah %d t1 %qd t2 %qd\n", al, ah, t1, t2);
2141		//printf("evaddsmiaaw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
2142	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2143
21440.4,6.RS,11.RA,16.0,21.1216:EVX:e500:evaddusiaaw %RS,%RA:Vector Add Unsigned Saturate Integer to Accumulator Word
2145	signed64 t1, t2;
2146	unsigned32 al, ah;
2147	int ovl, ovh;
2148	al = *rA;
2149	ah = *rAh;
2150	t1 = (signed64)EV_ACCHIGH + (signed64)ah;
2151	t2 = (signed64)EV_ACCLOW + (signed64)al;
2152	ovh = EV_SAT_P_U32(t1);
2153	ovl = EV_SAT_P_U32(t2);
2154	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(ovh, 0xffffffff, t1),
2155				   EV_SATURATE(ovl, 0xffffffff, t2));
2156		//printf("evaddusiaaw: ovl %d ovh %d al %d ah %d t1 %qd t2 %qd\n", ovl, ovh, al, ah, t1, t2);
2157		//printf("evaddusiaaw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
2158	EV_SET_SPEFSCR_OV(ovl, ovh);
2159	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK, spr_spefscr);
2160
21610.4,6.RS,11.RA,16.0,21.1224:EVX:e500:evaddumiaaw %RS,%RA:Vector Add Unsigned Modulo Integer to Accumulator Word
2162	unsigned64 t1, t2;
2163	unsigned32 al, ah;
2164	al = *rA;
2165	ah = *rAh;
2166	t1 = (unsigned64)EV_ACCHIGH + (unsigned64)ah;
2167	t2 = EV_ACCLOW + al;
2168	EV_SET_REG2_ACC(*rSh, *rS, t1, t2);
2169	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2170
2171
21720.4,6.RS,11.RA,16.0,21.1219:EVX:e500:evsubfssiaaw %RS,%RA:Vector Subtract Signed Saturate Integer to Accumulator Word
2173	signed64 t1, t2;
2174	signed32 al, ah;
2175	int ovl, ovh;
2176	al = *rA;
2177	ah = *rAh;
2178	t1 = (signed64)EV_ACCHIGH - (signed64)ah;
2179	t2 = (signed64)EV_ACCLOW - (signed64)al;
2180	ovh = EV_SAT_P_S32(t1);
2181	ovl = EV_SAT_P_S32(t2);
2182	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, t1, 0x80000000, 0x7fffffff, t1),
2183			           EV_SATURATE_ACC(ovl, t2, 0x80000000, 0x7fffffff, t2));
2184	EV_SET_SPEFSCR_OV(ovl, ovh);
2185	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK, spr_spefscr);
2186
21870.4,6.RS,11.RA,16.0,21.1227:EVX:e500:evsubfsmiaaw %RS,%RA:Vector Subtract Signed Modulo Integer to Accumulator Word
2188	signed64 t1, t2;
2189	signed32 al, ah;
2190	al = *rA;
2191	ah = *rAh;
2192	t1 = (signed64)EV_ACCHIGH - (signed64)ah;
2193	t2 = (signed64)EV_ACCLOW - (signed64)al;
2194	EV_SET_REG2_ACC(*rSh, *rS, t1, t2);
2195	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2196
21970.4,6.RS,11.RA,16.0,21.1218:EVX:e500:evsubfusiaaw %RS,%RA:Vector Subtract Unsigned Saturate Integer to Accumulator Word
2198	signed64 t1, t2;
2199	unsigned32 al, ah;
2200	int ovl, ovh;
2201
2202	al = *rA;
2203	ah = *rAh;
2204	t1 = (signed64)EV_ACCHIGH - (signed64)ah;
2205	t2 = (signed64)EV_ACCLOW - (signed64)al;
2206	ovh = EV_SAT_P_U32(t1);
2207	ovl = EV_SAT_P_U32(t2);
2208	EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(ovh, 0, t1),
2209			           EV_SATURATE(ovl, 0, t2));
2210	EV_SET_SPEFSCR_OV(ovl, ovh);
2211	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK, spr_spefscr);
2212
22130.4,6.RS,11.RA,16.0,21.1226:EVX:e500:evsubfumiaaw %RS,%RA:Vector Subtract Unsigned Modulo Integer to Accumulator Word
2214	unsigned64 t1, t2;
2215	unsigned32 al, ah;
2216	al = *rA;
2217	ah = *rAh;
2218	t1 = (unsigned64)EV_ACCHIGH - (unsigned64)ah;
2219	t2 = (unsigned64)EV_ACCLOW - (unsigned64)al;
2220	EV_SET_REG2_ACC(*rSh, *rS, t1, t2);
2221	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2222
2223
22240.4,6.RS,11.RA,16.0,21.1220:EVX:e500:evmra %RS,%RA:Initialize Accumulator
2225	EV_SET_REG2_ACC(*rSh, *rS, *rAh, *rA);
2226	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2227
22280.4,6.RS,11.RA,16.RB,21.1222:EVX:e500:evdivws %RS,%RA,%RB:Vector Divide Word Signed
2229	signed32 dividendh, dividendl, divisorh, divisorl;
2230	signed32 w1, w2;
2231	int ovh, ovl;
2232	dividendh = *rAh;
2233	dividendl = *rA;
2234	divisorh = *rBh;
2235	divisorl = *rB;
2236	if (dividendh < 0 && divisorh == 0) {
2237	  w1 = 0x80000000;
2238	  ovh = 1;
2239	} else if (dividendh > 0 && divisorh == 0) {
2240	  w1 = 0x7fffffff;
2241	  ovh = 1;
2242	} else if (dividendh == 0x80000000 && divisorh == -1) {
2243	  w1 = 0x7fffffff;
2244	  ovh = 1;
2245	} else {
2246	  w1 = dividendh / divisorh;
2247	  ovh = 0;
2248	}
2249	if (dividendl < 0 && divisorl == 0) {
2250	  w2 = 0x80000000;
2251	  ovl = 1;
2252	} else if (dividendl > 0 && divisorl == 0) {
2253	  w2 = 0x7fffffff;
2254	  ovl = 1;
2255	} else if (dividendl == 0x80000000 && divisorl == -1) {
2256	  w2 = 0x7fffffff;
2257	  ovl = 1;
2258	} else {
2259	  w2 = dividendl / divisorl;
2260	  ovl = 0;
2261	}
2262	EV_SET_REG2(*rSh, *rS, w1, w2);
2263	EV_SET_SPEFSCR_OV(ovl, ovh);
2264	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK, spr_spefscr);
2265
2266
22670.4,6.RS,11.RA,16.RB,21.1223:EVX:e500:evdivwu %RS,%RA,%RB:Vector Divide Word Unsigned
2268	unsigned32 dividendh, dividendl, divisorh, divisorl;
2269	unsigned32 w1, w2;
2270	int ovh, ovl;
2271	dividendh = *rAh;
2272	dividendl = *rA;
2273	divisorh = *rBh;
2274	divisorl = *rB;
2275	if (divisorh == 0) {
2276	  w1 = 0xffffffff;
2277	  ovh = 1;
2278	} else {
2279	  w1 = dividendh / divisorh;
2280	  ovh = 0;
2281	}
2282	if (divisorl == 0) {
2283	  w2 = 0xffffffff;
2284	  ovl = 1;
2285	} else {
2286	  w2 = dividendl / divisorl;
2287	  ovl = 0;
2288	}
2289	EV_SET_REG2(*rSh, *rS, w1, w2);
2290	EV_SET_SPEFSCR_OV(ovl, ovh);
2291	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK, spr_spefscr);
2292
2293
2294#
2295# A.2.9 Floating Point SPE Instructions
2296#
2297
22980.4,6.RS,11.RA,16.0,21.644:EVX:e500:evfsabs %RS,%RA:Vector Floating-Point Absolute Value
2299	unsigned32 w1, w2;
2300	w1 = *rAh & 0x7fffffff;
2301	w2 = *rA & 0x7fffffff;
2302	EV_SET_REG2(*rSh, *rS, w1, w2);
2303	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2304
23050.4,6.RS,11.RA,16.0,21.645:EVX:e500:evfsnabs %RS,%RA:Vector Floating-Point Negative Absolute Value
2306	unsigned32 w1, w2;
2307	w1 = *rAh | 0x80000000;
2308	w2 = *rA | 0x80000000;
2309	EV_SET_REG2(*rSh, *rS, w1, w2);
2310	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2311
23120.4,6.RS,11.RA,16.0,21.646:EVX:e500:evfsneg %RS,%RA:Vector Floating-Point Negate
2313	unsigned32 w1, w2;
2314	w1 = *rAh;
2315	w2 = *rA;
2316	w1 = (w1 & 0x7fffffff) | ((~w1) & 0x80000000);
2317	w2 = (w2 & 0x7fffffff) | ((~w2) & 0x80000000);
2318	EV_SET_REG2(*rSh, *rS, w1, w2);
2319	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2320
23210.4,6.RS,11.RA,16.RB,21.640:EVX:e500:evfsadd %RS,%RA,%RB:Vector Floating-Point Add
2322	unsigned32 w1, w2;
2323	w1 = ev_fs_add (*rAh, *rBh, spefscr_finvh, spefscr_fovfh, spefscr_funfh, spefscr_fgh, spefscr_fxh, processor);
2324	w2 = ev_fs_add (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fgh, spefscr_fxh, processor);
2325	EV_SET_REG2(*rSh, *rS, w1, w2);
2326	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2327
23280.4,6.RS,11.RA,16.RB,21.641:EVX:e500:evfssub %RS,%RA,%RB:Vector Floating-Point Subtract
2329	unsigned32 w1, w2;
2330	w1 = ev_fs_sub (*rAh, *rBh, spefscr_finvh, spefscr_fovfh, spefscr_funfh, spefscr_fgh, spefscr_fxh, processor);
2331	w2 = ev_fs_sub (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fgh, spefscr_fxh, processor);
2332	EV_SET_REG2(*rSh, *rS, w1, w2);
2333	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2334
23350.4,6.RS,11.RA,16.RB,21.648:EVX:e500:evfsmul %RS,%RA,%RB:Vector Floating-Point Multiply
2336	unsigned32 w1, w2;
2337	w1 = ev_fs_mul (*rAh, *rBh, spefscr_finvh, spefscr_fovfh, spefscr_funfh, spefscr_fgh, spefscr_fxh, processor);
2338	w2 = ev_fs_mul (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fgh, spefscr_fxh, processor);
2339	EV_SET_REG2(*rSh, *rS, w1, w2);
2340	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2341
23420.4,6.RS,11.RA,16.RB,21.649:EVX:e500:evfsdiv %RS,%RA,%RB:Vector Floating-Point Divide
2343	signed32 w1, w2;
2344	w1 = ev_fs_div (*rAh, *rBh, spefscr_finvh, spefscr_fovfh, spefscr_funfh, spefscr_fdbzh, spefscr_fgh, spefscr_fxh, processor);
2345	w2 = ev_fs_div (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fdbz, spefscr_fg, spefscr_fx, processor);
2346	EV_SET_REG2(*rSh, *rS, w1, w2);
2347	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2348
23490.4,6.BF,9./,11.RA,16.RB,21.652:EVX:e500:evfscmpgt %BF,%RA,%RB:Vector Floating-Point Compare Greater Than
2350	sim_fpu al, ah, bl, bh;
2351	int w, ch, cl;
2352	sim_fpu_32to (&al, *rA);
2353	sim_fpu_32to (&ah, *rAh);
2354	sim_fpu_32to (&bl, *rB);
2355	sim_fpu_32to (&bh, *rBh);
2356	if (EV_IS_INFDENORMNAN(&al) || EV_IS_INFDENORMNAN(&bl))
2357	  EV_SET_SPEFSCR_BITS(spefscr_finv);
2358	if (EV_IS_INFDENORMNAN(&ah) || EV_IS_INFDENORMNAN(&bh))
2359	  EV_SET_SPEFSCR_BITS(spefscr_finvh);
2360	if (sim_fpu_is_gt(&ah, &bh))
2361	  ch = 1;
2362	else
2363	  ch = 0;
2364	if (sim_fpu_is_gt(&al, &bl))
2365	  cl = 1;
2366	else
2367	  cl = 0;
2368	w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
2369	CR_SET(BF, w);
2370	PPC_INSN_INT_SPR(0, RA_BITMASK | RB_BITMASK, spr_spefscr);
2371
23720.4,6.BF,9./,11.RA,16.RB,21.653:EVX:e500:evfscmplt %BF,%RA,%RB:Vector Floating-Point Compare Less Than
2373	sim_fpu al, ah, bl, bh;
2374	int w, ch, cl;
2375	sim_fpu_32to (&al, *rA);
2376	sim_fpu_32to (&ah, *rAh);
2377	sim_fpu_32to (&bl, *rB);
2378	sim_fpu_32to (&bh, *rBh);
2379	if (EV_IS_INFDENORMNAN(&al) || EV_IS_INFDENORMNAN(&bl))
2380	  EV_SET_SPEFSCR_BITS(spefscr_finv);
2381	if (EV_IS_INFDENORMNAN(&ah) || EV_IS_INFDENORMNAN(&bh))
2382	  EV_SET_SPEFSCR_BITS(spefscr_finvh);
2383	if (sim_fpu_is_lt(&ah, &bh))
2384	  ch = 1;
2385	else
2386	  ch = 0;
2387	if (sim_fpu_is_lt(&al, &bl))
2388	  cl = 1;
2389	else
2390	  cl = 0;
2391	w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
2392	CR_SET(BF, w);
2393	PPC_INSN_INT_SPR(0, RA_BITMASK | RB_BITMASK, spr_spefscr);
2394
23950.4,6.BF,9./,11.RA,16.RB,21.654:EVX:e500:evfscmpeq %BF,%RA,%RB:Vector Floating-Point Compare Equal
2396	sim_fpu al, ah, bl, bh;
2397	int w, ch, cl;
2398	sim_fpu_32to (&al, *rA);
2399	sim_fpu_32to (&ah, *rAh);
2400	sim_fpu_32to (&bl, *rB);
2401	sim_fpu_32to (&bh, *rBh);
2402	if (EV_IS_INFDENORMNAN(&al) || EV_IS_INFDENORMNAN(&bl))
2403	  EV_SET_SPEFSCR_BITS(spefscr_finv);
2404	if (EV_IS_INFDENORMNAN(&ah) || EV_IS_INFDENORMNAN(&bh))
2405	  EV_SET_SPEFSCR_BITS(spefscr_finvh);
2406	if (sim_fpu_is_eq(&ah, &bh))
2407	  ch = 1;
2408	else
2409	  ch = 0;
2410	if (sim_fpu_is_eq(&al, &bl))
2411	  cl = 1;
2412	else
2413	  cl = 0;
2414	w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
2415	CR_SET(BF, w);
2416	PPC_INSN_INT_SPR(0, RA_BITMASK | RB_BITMASK, spr_spefscr);
2417
24180.4,6.BF,9./,11.RA,16.RB,21.668:EVX:e500:evfststgt %BF,%RA,%RB:Vector Floating-Point Test Greater Than
2419	sim_fpu al, ah, bl, bh;
2420	int w, ch, cl;
2421	sim_fpu_32to (&al, *rA);
2422	sim_fpu_32to (&ah, *rAh);
2423	sim_fpu_32to (&bl, *rB);
2424	sim_fpu_32to (&bh, *rBh);
2425	if (sim_fpu_is_gt(&ah, &bh))
2426	  ch = 1;
2427	else
2428	  ch = 0;
2429	if (sim_fpu_is_gt(&al, &bl))
2430	  cl = 1;
2431	else
2432	  cl = 0;
2433	w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
2434	CR_SET(BF, w);
2435	PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
2436
24370.4,6.BF,9./,11.RA,16.RB,21.669:EVX:e500:evfststlt %BF,%RA,%RB:Vector Floating-Point Test Less Than
2438	sim_fpu al, ah, bl, bh;
2439	int w, ch, cl;
2440	sim_fpu_32to (&al, *rA);
2441	sim_fpu_32to (&ah, *rAh);
2442	sim_fpu_32to (&bl, *rB);
2443	sim_fpu_32to (&bh, *rBh);
2444	if (sim_fpu_is_lt(&ah, &bh))
2445	  ch = 1;
2446	else
2447	  ch = 0;
2448	if (sim_fpu_is_lt(&al, &bl))
2449	  cl = 1;
2450	else
2451	  cl = 0;
2452	w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
2453	CR_SET(BF, w);
2454	PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
2455
24560.4,6.BF,9./,11.RA,16.RB,21.670:EVX:e500:evfststeq %BF,%RA,%RB:Vector Floating-Point Test Equal
2457	sim_fpu al, ah, bl, bh;
2458	int w, ch, cl;
2459	sim_fpu_32to (&al, *rA);
2460	sim_fpu_32to (&ah, *rAh);
2461	sim_fpu_32to (&bl, *rB);
2462	sim_fpu_32to (&bh, *rBh);
2463	if (sim_fpu_is_eq(&ah, &bh))
2464	  ch = 1;
2465	else
2466	  ch = 0;
2467	if (sim_fpu_is_eq(&al, &bl))
2468	  cl = 1;
2469	else
2470	  cl = 0;
2471	w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
2472	CR_SET(BF, w);
2473	PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
2474
24750.4,6.RS,11.0,16.RB,21.656:EVX:e500:evfscfui %RS,%RB:Vector Convert Floating-Point from Unsigned Integer
2476	unsigned32 f, w1, w2;
2477	sim_fpu b;
2478
2479	sim_fpu_u32to (&b, *rBh, sim_fpu_round_default);
2480	sim_fpu_to32 (&w1, &b);
2481	sim_fpu_u32to (&b, *rB, sim_fpu_round_default);
2482	sim_fpu_to32 (&w2, &b);
2483
2484	EV_SET_REG2(*rSh, *rS, w1, w2);
2485	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2486
# evfsctuiz: vector convert each single-precision element of rB to an
# unsigned 32-bit integer, truncating (round toward zero).
24870.4,6.RS,11.0,16.RB,21.664:EVX:e500:evfsctuiz %RS,%RB:Vector Convert Floating-Point to Unsigned Integer with Round toward Zero
2488	unsigned32 w1, w2;
2489	sim_fpu b;
2490
2491	sim_fpu_32to (&b, *rBh);
2492	sim_fpu_to32u (&w1, &b, sim_fpu_round_zero);
2493	sim_fpu_32to (&b, *rB);
2494	sim_fpu_to32u (&w2, &b, sim_fpu_round_zero);
2495
2496	EV_SET_REG2(*rSh, *rS, w1, w2);
2497	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2498
# evfscfsi: vector convert each signed 32-bit integer element of rB to
# single-precision float.
24990.4,6.RS,11.0,16.RB,21.657:EVX:e500:evfscfsi %RS,%RB:Vector Convert Floating-Point from Signed Integer
2500	signed32 w1, w2;
	/* NOTE(review): 'x' and 'y' are declared but never used here.  */
2501	sim_fpu b, x, y;
2502
2503	sim_fpu_i32to (&b, *rBh, sim_fpu_round_default);
2504	sim_fpu_to32 (&w1, &b);
2505	sim_fpu_i32to (&b, *rB, sim_fpu_round_default);
2506	sim_fpu_to32 (&w2, &b);
2507
2508	EV_SET_REG2(*rSh, *rS, w1, w2);
2509	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2510
# evfscfuf: vector convert each unsigned 32-bit fraction element of rB
# to single precision by dividing by 2^32; the all-ones fraction is
# special-cased to exactly 1.0.
25110.4,6.RS,11.0,16.RB,21.658:EVX:e500:evfscfuf %RS,%RB:Vector Convert Floating-Point from Unsigned Fraction
2512	unsigned32 w1, w2, bh, bl;
2513	sim_fpu b, x, y;
2514	bh = *rBh;
2515	if (bh == 0xffffffff)
2516	  sim_fpu_to32 (&w1, &sim_fpu_one);
2517	else {
2518	  sim_fpu_u64to (&x, 0x100000000, sim_fpu_round_default);
2519	  sim_fpu_u32to (&y, bh, sim_fpu_round_default);
2520	  sim_fpu_div (&b, &y, &x);
2521	  sim_fpu_to32 (&w1, &b);
2522	}
2523	bl = *rB;
2524	if (bl == 0xffffffff)
2525	  sim_fpu_to32 (&w2, &sim_fpu_one);
2526	else {
2527	  sim_fpu_u64to (&x, 0x100000000, sim_fpu_round_default);
2528	  sim_fpu_u32to (&y, bl, sim_fpu_round_default);
2529	  sim_fpu_div (&b, &y, &x);
2530	  sim_fpu_to32 (&w2, &b);
2531	}
2532	EV_SET_REG2(*rSh, *rS, w1, w2);
2533	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2534
# evfscfsf: vector convert each signed 32-bit fraction element of rB to
# single precision by dividing by 2^31.
25350.4,6.RS,11.0,16.RB,21.659:EVX:e500:evfscfsf %RS,%RB:Vector Convert Floating-Point from Signed Fraction
2536	unsigned32 w1, w2;
2537	sim_fpu b, x, y;
2538
2539	sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
2540	sim_fpu_i32to (&y, *rBh, sim_fpu_round_default);
2541	sim_fpu_div (&b, &y, &x);
2542	sim_fpu_to32 (&w1, &b);
2543
2544	sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
2545	sim_fpu_i32to (&y, *rB, sim_fpu_round_default);
2546	sim_fpu_div (&b, &y, &x);
2547	sim_fpu_to32 (&w2, &b);
2548
2549	EV_SET_REG2(*rSh, *rS, w1, w2);
2550	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2551
# evfsctui: vector convert each single-precision element of rB to an
# unsigned 32-bit integer using the default rounding mode.
25520.4,6.RS,11.0,16.RB,21.660:EVX:e500:evfsctui %RS,%RB:Vector Convert Floating-Point to Unsigned Integer
2553	unsigned32 w1, w2;
2554	sim_fpu b;
2555
2556	sim_fpu_32to (&b, *rBh);
2557	sim_fpu_to32u (&w1, &b, sim_fpu_round_default);
2558	sim_fpu_32to (&b, *rB);
2559	sim_fpu_to32u (&w2, &b, sim_fpu_round_default);
2560
2561	EV_SET_REG2(*rSh, *rS, w1, w2);
2562	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2563
# evfsctsi: vector convert each single-precision element of rB to a
# signed 32-bit integer using the default rounding mode.
25640.4,6.RS,11.0,16.RB,21.661:EVX:e500:evfsctsi %RS,%RB:Vector Convert Floating-Point to Signed Integer
2565	signed32 w1, w2;
2566	sim_fpu b;
2567
2568	sim_fpu_32to (&b, *rBh);
2569	sim_fpu_to32i (&w1, &b, sim_fpu_round_default);
2570	sim_fpu_32to (&b, *rB);
2571	sim_fpu_to32i (&w2, &b, sim_fpu_round_default);
2572
2573	EV_SET_REG2(*rSh, *rS, w1, w2);
2574	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2575
# evfsctsiz: as evfsctsi but truncating (round toward zero).
25760.4,6.RS,11.0,16.RB,21.666:EVX:e500:evfsctsiz %RS,%RB:Vector Convert Floating-Point to Signed Integer with Round toward Zero
2577	signed32 w1, w2;
2578	sim_fpu b;
2579
2580	sim_fpu_32to (&b, *rBh);
2581	sim_fpu_to32i (&w1, &b, sim_fpu_round_zero);
2582	sim_fpu_32to (&b, *rB);
2583	sim_fpu_to32i (&w2, &b, sim_fpu_round_zero);
2584
2585	EV_SET_REG2(*rSh, *rS, w1, w2);
2586	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2587
# evfsctuf: vector convert each single-precision element of rB to an
# unsigned 32-bit fraction by multiplying by 2^32 before the unsigned
# integer conversion.
25880.4,6.RS,11.0,16.RB,21.662:EVX:e500:evfsctuf %RS,%RB:Vector Convert Floating-Point to Unsigned Fraction
2589	unsigned32 w1, w2;
2590	sim_fpu b, x, y;
2591
2592	sim_fpu_u64to (&x, 0x100000000, sim_fpu_round_default);
2593	sim_fpu_32to (&y, *rBh);
2594	sim_fpu_mul (&b, &y, &x);
2595	sim_fpu_to32u (&w1, &b, sim_fpu_round_default);
2596
2597	sim_fpu_u64to (&x, 0x100000000, sim_fpu_round_default);
2598	sim_fpu_32to (&y, *rB);
2599	sim_fpu_mul (&b, &y, &x);
2600	sim_fpu_to32u (&w2, &b, sim_fpu_round_default);
2601
2602	EV_SET_REG2(*rSh, *rS, w1, w2);
2603	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2604
# evfsctsf: vector convert each single-precision element of rB to a
# signed 32-bit fraction by multiplying by 2^31 before the signed
# integer conversion.
# NOTE(review): this variant rounds with sim_fpu_round_near while the
# scalar efsctsf below uses sim_fpu_round_default -- confirm which
# rounding mode is intended for both.
26050.4,6.RS,11.0,16.RB,21.663:EVX:e500:evfsctsf %RS,%RB:Vector Convert Floating-Point to Signed Fraction
2606	signed32 w1, w2;
2607	sim_fpu b, x, y;
2608
2609	sim_fpu_32to (&y, *rBh);
2610	sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
2611	sim_fpu_mul (&b, &y, &x);
2612	sim_fpu_to32i (&w1, &b, sim_fpu_round_near);
2613
2614	sim_fpu_32to (&y, *rB);
2615	sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
2616	sim_fpu_mul (&b, &y, &x);
2617	sim_fpu_to32i (&w2, &b, sim_fpu_round_near);
2618
2619	EV_SET_REG2(*rSh, *rS, w1, w2);
2620	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2621
2622
# efsabs: clear the sign bit of the single-precision value in the low
# word of rA; the high word of the destination keeps rS's high word.
26230.4,6.RS,11.RA,16.0,21.708:EVX:e500:efsabs %RS,%RA:Floating-Point Absolute Value
2624	unsigned32 w1, w2;
2625	w1 = *rSh;
2626	w2 = *rA & 0x7fffffff;
2627	EV_SET_REG2(*rSh, *rS, w1, w2);
2628	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2629
# efsnabs: force the sign bit of the low word of rA on (negative
# absolute value); high word of rS preserved.
26300.4,6.RS,11.RA,16.0,21.709:EVX:e500:efsnabs %RS,%RA:Floating-Point Negative Absolute Value
2631	unsigned32 w1, w2;
2632	w1 = *rSh;
2633	w2 = *rA | 0x80000000;
2634	EV_SET_REG2(*rSh, *rS, w1, w2);
2635	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2636
# efsneg: flip the sign bit of the low word of rA (the and/or pair is
# equivalent to *rA ^ 0x80000000); high word of rS preserved.
26370.4,6.RS,11.RA,16.0,21.710:EVX:e500:efsneg %RS,%RA:Floating-Point Negate
2638	unsigned32 w1, w2;
2639	w1 = *rSh;
2640	w2 = (*rA & 0x7fffffff) | ((~*rA) & 0x80000000);
2641	EV_SET_REG2(*rSh, *rS, w1, w2);
2642	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2643
# efsadd: single-precision add of the low words of rA and rB via the
# ev_fs_add helper, which also raises the given SPEFSCR status bits.
# NOTE(review): add/sub/mul pass the high-element guard/inexact flags
# (fgh/fxh) while efsdiv passes the low-element ones (fg/fx) -- confirm
# which set these scalar (low-word) ops should use.
26440.4,6.RS,11.RA,16.RB,21.704:EVX:e500:efsadd %RS,%RA,%RB:Floating-Point Add
2645	unsigned32 w;
2646	w = ev_fs_add (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fgh, spefscr_fxh, processor);
2647	EV_SET_REG(*rS, w);
2648	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2649
# efssub: single-precision subtract (low words), via ev_fs_sub.
26500.4,6.RS,11.RA,16.RB,21.705:EVX:e500:efssub %RS,%RA,%RB:Floating-Point Subtract
2651	unsigned32 w;
2652	w = ev_fs_sub (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fgh, spefscr_fxh, processor);
2653	EV_SET_REG(*rS, w);
2654	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2655
# efsmul: single-precision multiply (low words), via ev_fs_mul.
26560.4,6.RS,11.RA,16.RB,21.712:EVX:e500:efsmul %RS,%RA,%RB:Floating-Point Multiply
2657	unsigned32 w;
2658	w = ev_fs_mul (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fgh, spefscr_fxh, processor);
2659	EV_SET_REG(*rS, w);
2660	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2661
# efsdiv: single-precision divide (low words), via ev_fs_div; also
# reports divide-by-zero through spefscr_fdbz.
26620.4,6.RS,11.RA,16.RB,21.713:EVX:e500:efsdiv %RS,%RA,%RB:Floating-Point Divide
2663	unsigned32 w;
2664	w = ev_fs_div (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fdbz, spefscr_fg, spefscr_fx, processor);
2665	EV_SET_REG(*rS, w);
2666	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2667
# efscmpgt: compare low words of rA and rB as single-precision floats;
# sets SPEFSCR finv for inf/denorm/NaN operands, then writes CR field
# BF with bits 1 and 2 set when rA > rB (w = cl<<2 | cl<<1).
26680.4,6.BF,9./,11.RA,16.RB,21.716:EVX:e500:efscmpgt %BF,%RA,%RB:Floating-Point Compare Greater Than
2669	sim_fpu a, b;
2670	int w, cl;
2671	sim_fpu_32to (&a, *rA);
2672	sim_fpu_32to (&b, *rB);
2673	if (EV_IS_INFDENORMNAN(&a) || EV_IS_INFDENORMNAN(&b))
2674	  EV_SET_SPEFSCR_BITS(spefscr_finv);
2675	if (sim_fpu_is_gt(&a, &b))
2676	  cl = 1;
2677	else
2678	  cl = 0;
2679	w = cl << 2 | cl << 1;
2680	CR_SET(BF, w);
2681	PPC_INSN_INT_SPR(0, RA_BITMASK | RB_BITMASK, spr_spefscr);
2682
# efscmplt: as efscmpgt, but the condition is rA < rB.
26830.4,6.BF,9./,11.RA,16.RB,21.717:EVX:e500:efscmplt %BF,%RA,%RB:Floating-Point Compare Less Than
2684	sim_fpu al, bl;
2685	int w, cl;
2686	sim_fpu_32to (&al, *rA);
2687	sim_fpu_32to (&bl, *rB);
2688	if (EV_IS_INFDENORMNAN(&al) || EV_IS_INFDENORMNAN(&bl))
2689	  EV_SET_SPEFSCR_BITS(spefscr_finv);
2690	if (sim_fpu_is_lt(&al, &bl))
2691	  cl = 1;
2692	else
2693	  cl = 0;
2694	w = cl << 2 | cl << 1;
2695	CR_SET(BF, w);
2696	PPC_INSN_INT_SPR(0, RA_BITMASK | RB_BITMASK, spr_spefscr);
2697
# efscmpeq: as efscmpgt, but the condition is rA == rB.
26980.4,6.BF,9./,11.RA,16.RB,21.718:EVX:e500:efscmpeq %BF,%RA,%RB:Floating-Point Compare Equal
2699	sim_fpu al, bl;
2700	int w, cl;
2701	sim_fpu_32to (&al, *rA);
2702	sim_fpu_32to (&bl, *rB);
2703	if (EV_IS_INFDENORMNAN(&al) || EV_IS_INFDENORMNAN(&bl))
2704	  EV_SET_SPEFSCR_BITS(spefscr_finv);
2705	if (sim_fpu_is_eq(&al, &bl))
2706	  cl = 1;
2707	else
2708	  cl = 0;
2709	w = cl << 2 | cl << 1;
2710	CR_SET(BF, w);
2711	PPC_INSN_INT_SPR(0, RA_BITMASK | RB_BITMASK, spr_spefscr);
2712
# efststgt: "test" form of efscmpgt -- same CR result, but no SPEFSCR
# exception bits are raised for abnormal operands.
27130.4,6.BF,9./,11.RA,16.RB,21.732:EVX:e500:efststgt %BF,%RA,%RB:Floating-Point Test Greater Than
2714	sim_fpu al, bl;
2715	int w, cl;
2716	sim_fpu_32to (&al, *rA);
2717	sim_fpu_32to (&bl, *rB);
2718	if (sim_fpu_is_gt(&al, &bl))
2719	  cl = 1;
2720	else
2721	  cl = 0;
2722	w = cl << 2 | cl << 1;
2723	CR_SET(BF, w);
2724	PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
2725
# efststlt: non-excepting test for rA < rB.
27260.4,6.BF,9./,11.RA,16.RB,21.733:EVX:e500:efststlt %BF,%RA,%RB:Floating-Point Test Less Than
2727	sim_fpu al, bl;
2728	int w, cl;
2729	sim_fpu_32to (&al, *rA);
2730	sim_fpu_32to (&bl, *rB);
2731	if (sim_fpu_is_lt(&al, &bl))
2732	  cl = 1;
2733	else
2734	  cl = 0;
2735	w = cl << 2 | cl << 1;
2736	CR_SET(BF, w);
2737	PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
2738
# efststeq: non-excepting test for rA == rB.
27390.4,6.BF,9./,11.RA,16.RB,21.734:EVX:e500:efststeq %BF,%RA,%RB:Floating-Point Test Equal
2740	sim_fpu al, bl;
2741	int w, cl;
2742	sim_fpu_32to (&al, *rA);
2743	sim_fpu_32to (&bl, *rB);
2744	if (sim_fpu_is_eq(&al, &bl))
2745	  cl = 1;
2746	else
2747	  cl = 0;
2748	w = cl << 2 | cl << 1;
2749	CR_SET(BF, w);
2750	PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
2751
# efscfsi: convert the signed 32-bit integer in the low word of rB to
# single precision; high word of rS is preserved.
27520.4,6.RS,11.0,16.RB,21.721:EVX:e500:efscfsi %RS,%RB:Convert Floating-Point from Signed Integer
	/* NOTE(review): 'f' is declared but never used.  */
2753	signed32 f, w1, w2;
2754	sim_fpu b;
2755	w1 = *rSh;
2756	sim_fpu_i32to (&b, *rB, sim_fpu_round_default);
2757	sim_fpu_to32 (&w2, &b);
2758	EV_SET_REG2(*rSh, *rS, w1, w2);
2759	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2760
# efscfui: as efscfsi, but the low word of rB is unsigned.
27610.4,6.RS,11.0,16.RB,21.720:EVX:e500:efscfui %RS,%RB:Convert Floating-Point from Unsigned Integer
2762	unsigned32 w1, w2;
2763	sim_fpu b;
2764	w1 = *rSh;
2765	sim_fpu_u32to (&b, *rB, sim_fpu_round_default);
2766	sim_fpu_to32 (&w2, &b);
2767	EV_SET_REG2(*rSh, *rS, w1, w2);
2768	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2769
# efscfsf: convert the signed 32-bit fraction in the low word of rB to
# single precision by dividing by 2^31; high word of rS preserved.
27700.4,6.RS,11.0,16.RB,21.723:EVX:e500:efscfsf %RS,%RB:Convert Floating-Point from Signed Fraction
2771	unsigned32 w1, w2;
2772	sim_fpu b, x, y;
2773	w1 = *rSh;
2774	sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
2775	sim_fpu_i32to (&y, *rB, sim_fpu_round_default);
2776	sim_fpu_div (&b, &y, &x);
2777	sim_fpu_to32 (&w2, &b);
2778	EV_SET_REG2(*rSh, *rS, w1, w2);
2779	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2780
# efscfuf: convert the unsigned 32-bit fraction in the low word of rB
# to single precision by dividing by 2^32; the all-ones fraction is
# special-cased to exactly 1.0.  High word of rS preserved.
27810.4,6.RS,11.0,16.RB,21.722:EVX:e500:efscfuf %RS,%RB:Convert Floating-Point from Unsigned Fraction
2782	unsigned32 w1, w2, bl;
2783	sim_fpu b, x, y;
2784	w1 = *rSh;
2785	bl = *rB;
2786	if (bl == 0xffffffff)
2787	  sim_fpu_to32 (&w2, &sim_fpu_one);
2788	else {
2789	  sim_fpu_u64to (&x, 0x100000000, sim_fpu_round_default);
2790	  sim_fpu_u32to (&y, bl, sim_fpu_round_default);
2791	  sim_fpu_div (&b, &y, &x);
2792	  sim_fpu_to32 (&w2, &b);
2793	}
2794	EV_SET_REG2(*rSh, *rS, w1, w2);
2795	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2796
# efsctsi: convert the single-precision value in the low word of rB to
# a signed 32-bit integer (default rounding); high word of rS kept.
27970.4,6.RS,11.0,16.RB,21.725:EVX:e500:efsctsi %RS,%RB:Convert Floating-Point to Signed Integer
	/* NOTE(review): 'temp' is declared but never used.  */
2798	signed64 temp;
2799	signed32 w1, w2;
2800	sim_fpu b;
2801	w1 = *rSh;
2802	sim_fpu_32to (&b, *rB);
2803	sim_fpu_to32i (&w2, &b, sim_fpu_round_default);
2804	EV_SET_REG2(*rSh, *rS, w1, w2);
2805	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2806
# efsctsiz: as efsctsi but truncating (round toward zero).
28070.4,6.RS,11.0,16.RB,21.730:EVX:e500:efsctsiz %RS,%RB:Convert Floating-Point to Signed Integer with Round toward Zero
	/* NOTE(review): 'temp' is declared but never used.  */
2808	signed64 temp;
2809	signed32 w1, w2;
2810	sim_fpu b;
2811	w1 = *rSh;
2812	sim_fpu_32to (&b, *rB);
2813	sim_fpu_to32i (&w2, &b, sim_fpu_round_zero);
2814	EV_SET_REG2(*rSh, *rS, w1, w2);
2815	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2816
# efsctui: convert low word of rB to an unsigned 32-bit integer.
# NOTE(review): 'temp' is unused, and w1/w2 are declared signed32
# although sim_fpu_to32u produces an unsigned result -- confirm.
28170.4,6.RS,11.0,16.RB,21.724:EVX:e500:efsctui %RS,%RB:Convert Floating-Point to Unsigned Integer
2818	unsigned64 temp;
2819	signed32 w1, w2;
2820	sim_fpu b;
2821	w1 = *rSh;
2822	sim_fpu_32to (&b, *rB);
2823	sim_fpu_to32u (&w2, &b, sim_fpu_round_default);
2824	EV_SET_REG2(*rSh, *rS, w1, w2);
2825	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2826
# efsctuiz: as efsctui but truncating (round toward zero).
28270.4,6.RS,11.0,16.RB,21.728:EVX:e500:efsctuiz %RS,%RB:Convert Floating-Point to Unsigned Integer with Round toward Zero
	/* NOTE(review): 'temp' is declared but never used.  */
2828	unsigned64 temp;
2829	signed32 w1, w2;
2830	sim_fpu b;
2831	w1 = *rSh;
2832	sim_fpu_32to (&b, *rB);
2833	sim_fpu_to32u (&w2, &b, sim_fpu_round_zero);
2834	EV_SET_REG2(*rSh, *rS, w1, w2);
2835	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2836
# efsctsf: convert the single-precision value in the low word of rB to
# a signed 32-bit fraction (value * 2^31, rounded to integer); the high
# word of the destination keeps rS's high word.
28370.4,6.RS,11.0,16.RB,21.727:EVX:e500:efsctsf %RS,%RB:Convert Floating-Point to Signed Fraction
2838	unsigned32 w1, w2;
2839	sim_fpu b, x, y;
2840	w1 = *rSh;
2841	sim_fpu_32to (&y, *rB);
2842	sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
2843	sim_fpu_mul (&b, &y, &x);
	/* The fraction result is the rounded signed integer from
	   sim_fpu_to32i.  A stray sim_fpu_to32() call that followed here
	   clobbered w2 with the raw IEEE bit pattern of the product; the
	   vector form evfsctsf stores only the to32i result.  */
2844	sim_fpu_to32i (&w2, &b, sim_fpu_round_default);
2846	EV_SET_REG2(*rSh, *rS, w1, w2);
2847	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2848
# efsctuf: convert the single-precision value in the low word of rB to
# an unsigned 32-bit fraction (value * 2^32, rounded); high word of rS
# preserved.
28490.4,6.RS,11.0,16.RB,21.726:EVX:e500:efsctuf %RS,%RB:Convert Floating-Point to Unsigned Fraction
2850	unsigned32 w1, w2;
2851	sim_fpu b, x, y;
2852	w1 = *rSh;
2853	sim_fpu_u64to (&x, 0x100000000, sim_fpu_round_default);
2854	sim_fpu_32to (&y, *rB);
2855	sim_fpu_mul (&b, &y, &x);
2856	sim_fpu_to32u (&w2, &b, sim_fpu_round_default);
2857	EV_SET_REG2(*rSh, *rS, w1, w2);
2858	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2859
2860
2861#
2862# A.2.10 Vector Load/Store Instructions
2863#
2864
# evldd: load a doubleword from EA = (rA|0) + UIMM*8 into the 64-bit rS
# pair.  (RA_BITMASK & ~1) drops the r0 dependency since r0 reads as 0.
28650.4,6.RS,11.RA,16.UIMM,21.769:EVX:e500:evldd %RS,%RA,%UIMM:Vector Load Double Word into Double Word
2866	unsigned64 m;
2867	unsigned_word b;
2868	unsigned_word EA;
2869	if (RA_is_0) b = 0;
2870	else         b = *rA;
2871	EA = b + (UIMM << 3);
2872	m = MEM(unsigned, EA, 8);
2873	EV_SET_REG1(*rSh, *rS, m);
2874		//printf("evldd(%d<-%d + %u): m %08x.%08x, *rSh %x *rS %x\n", RS, RA, UIMM, (int)(m >> 32), (int)m, *rSh, *rS);
2875	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
2876
# evlddx: indexed form of evldd (EA = (rA|0) + rB).
28770.4,6.RS,11.RA,16.RB,21.768:EVX:e500:evlddx %RS,%RA,%RB:Vector Load Double Word into Double Word Indexed
2878	unsigned64 m;
2879	unsigned_word b;
2880	unsigned_word EA;
2881	if (RA_is_0) b = 0;
2882	else         b = *rA;
2883	EA = b + *rB;
2884	m = MEM(unsigned, EA, 8);
2885	EV_SET_REG1(*rSh, *rS, m);
2886	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
2887
# evldw: load two words from EA = (rA|0) + UIMM*8 into the high and low
# halves of rS.
28880.4,6.RS,11.RA,16.UIMM,21.771:EVX:e500:evldw %RS,%RA,%UIMM:Vector Load Double into Two Words
2889	unsigned_word b;
2890	unsigned_word EA;
2891	unsigned32 w1, w2;
2892	if (RA_is_0) b = 0;
2893	else         b = *rA;
2894	EA = b + (UIMM << 3);
2895	w1 = MEM(unsigned, EA, 4);
2896	w2 = MEM(unsigned, EA + 4, 4);
2897	EV_SET_REG2(*rSh, *rS, w1, w2);
2898	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
2899
# evldwx: indexed form of evldw.
29000.4,6.RS,11.RA,16.RB,21.770:EVX:e500:evldwx %RS,%RA,%RB:Vector Load Double into Two Words Indexed
2901	unsigned_word b;
2902	unsigned_word EA;
2903	unsigned32 w1, w2;
2904	if (RA_is_0) b = 0;
2905	else         b = *rA;
2906	EA = b + *rB;
2907	w1 = MEM(unsigned, EA, 4);
2908	w2 = MEM(unsigned, EA + 4, 4);
2909	EV_SET_REG2(*rSh, *rS, w1, w2);
2910	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
2911
# evldh: load four halfwords from EA = (rA|0) + UIMM*8 into the four
# halfword slots of rS.
29120.4,6.RS,11.RA,16.UIMM,21.773:EVX:e500:evldh %RS,%RA,%UIMM:Vector Load Double into 4 Half Words
2913	unsigned_word b;
2914	unsigned_word EA;
2915	unsigned16 h1, h2, h3, h4;
2916	if (RA_is_0) b = 0;
2917	else         b = *rA;
2918	EA = b + (UIMM << 3);
2919	h1 = MEM(unsigned, EA, 2);
2920	h2 = MEM(unsigned, EA + 2, 2);
2921	h3 = MEM(unsigned, EA + 4, 2);
2922	h4 = MEM(unsigned, EA + 6, 2);
2923	EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
2924	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
2925
# evldhx: indexed form of evldh.
29260.4,6.RS,11.RA,16.RB,21.772:EVX:e500:evldhx %RS,%RA,%RB:Vector Load Double into 4 Half Words Indexed
2927	unsigned_word b;
2928	unsigned_word EA;
2929	unsigned16 h1, h2, h3, h4;
2930	if (RA_is_0) b = 0;
2931	else         b = *rA;
2932	EA = b + *rB;
2933	h1 = MEM(unsigned, EA, 2);
2934	h2 = MEM(unsigned, EA + 2, 2);
2935	h3 = MEM(unsigned, EA + 4, 2);
2936	h4 = MEM(unsigned, EA + 6, 2);
2937	EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
2938	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
2939
# evlwhe: load two halfwords from EA = (rA|0) + UIMM*4 into the even
# (upper) halfword of each word of rS; odd halfwords are zeroed.
29400.4,6.RS,11.RA,16.UIMM,21.785:EVX:e500:evlwhe %RS,%RA,%UIMM:Vector Load Word into Two Half Words Even
2941	unsigned_word b;
2942	unsigned_word EA;
2943	unsigned16 h1, h2, h3, h4;
2944	if (RA_is_0) b = 0;
2945	else         b = *rA;
2946	EA = b + (UIMM << 2);
2947	h1 = MEM(unsigned, EA, 2);
2948	h2 = 0;
2949	h3 = MEM(unsigned, EA + 2, 2);
2950	h4 = 0;
2951	EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
2952	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
2953
# evlwhex: indexed form of evlwhe.
29540.4,6.RS,11.RA,16.RB,21.784:EVX:e500:evlwhex %RS,%RA,%RB:Vector Load Word into Two Half Words Even Indexed
2955	unsigned_word b;
2956	unsigned_word EA;
2957	unsigned16 h1, h2, h3, h4;
2958	if (RA_is_0) b = 0;
2959	else         b = *rA;
2960	EA = b + *rB;
2961	h1 = MEM(unsigned, EA, 2);
2962	h2 = 0;
2963	h3 = MEM(unsigned, EA + 2, 2);
2964	h4 = 0;
2965	EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
2966	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
2967
# evlwhou: load two halfwords into the odd (lower) halfword of each
# word of rS, zero-extended; even halfwords are zeroed.
29680.4,6.RS,11.RA,16.UIMM,21.789:EVX:e500:evlwhou %RS,%RA,%UIMM:Vector Load Word into Two Half Words Odd Unsigned zero-extended
2969	unsigned_word b;
2970	unsigned_word EA;
2971	unsigned16 h1, h2, h3, h4;
2972	if (RA_is_0) b = 0;
2973	else         b = *rA;
2974	EA = b + (UIMM << 2);
2975	h1 = 0;
2976	h2 = MEM(unsigned, EA, 2);
2977	h3 = 0;
2978	h4 = MEM(unsigned, EA + 2, 2);
2979	EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
2980	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
2981
# evlwhoux: indexed form of evlwhou.
29820.4,6.RS,11.RA,16.RB,21.788:EVX:e500:evlwhoux %RS,%RA,%RB:Vector Load Word into Two Half Words Odd Unsigned Indexed zero-extended
2983	unsigned_word b;
2984	unsigned_word EA;
2985	unsigned16 h1, h2, h3, h4;
2986	if (RA_is_0) b = 0;
2987	else         b = *rA;
2988	EA = b + *rB;
2989	h1 = 0;
2990	h2 = MEM(unsigned, EA, 2);
2991	h3 = 0;
2992	h4 = MEM(unsigned, EA + 2, 2);
2993	EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
2994	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
2995
# evlwhos: as evlwhou but sign-extends each loaded halfword into the
# even halfword slot (h1/h3 become 0xffff when the sign bit is set).
29960.4,6.RS,11.RA,16.UIMM,21.791:EVX:e500:evlwhos %RS,%RA,%UIMM:Vector Load Word into Half Words Odd Signed with sign extension
2997	unsigned_word b;
2998	unsigned_word EA;
2999	unsigned16 h1, h2, h3, h4;
3000	if (RA_is_0) b = 0;
3001	else         b = *rA;
3002	EA = b + (UIMM << 2);
3003	h2 = MEM(unsigned, EA, 2);
3004	if (h2 & 0x8000)
3005	  h1 = 0xffff;
3006	else
3007	  h1 = 0;
3008	h4 = MEM(unsigned, EA + 2, 2);
3009	if (h4 & 0x8000)
3010	  h3 = 0xffff;
3011	else
3012	  h3 = 0;
3013	EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
3014	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3015
# evlwhosx: indexed form of evlwhos.
30160.4,6.RS,11.RA,16.RB,21.790:EVX:e500:evlwhosx %RS,%RA,%RB:Vector Load Word into Half Words Odd Signed Indexed with sign extension
3017	unsigned_word b;
3018	unsigned_word EA;
3019	unsigned16 h1, h2, h3, h4;
3020	if (RA_is_0) b = 0;
3021	else         b = *rA;
3022	EA = b + *rB;
3023	h2 = MEM(unsigned, EA, 2);
3024	if (h2 & 0x8000)
3025	  h1 = 0xffff;
3026	else
3027	  h1 = 0;
3028	h4 = MEM(unsigned, EA + 2, 2);
3029	if (h4 & 0x8000)
3030	  h3 = 0xffff;
3031	else
3032	  h3 = 0;
3033	EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
3034	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3035
# evlwwsplat: load one word from EA = (rA|0) + UIMM*4 and replicate it
# into both words of rS.
30360.4,6.RS,11.RA,16.UIMM,21.793:EVX:e500:evlwwsplat %RS,%RA,%UIMM:Vector Load Word into Word and Splat
3037	unsigned_word b;
3038	unsigned_word EA;
3039	unsigned32 w1;
3040	if (RA_is_0) b = 0;
3041	else         b = *rA;
3042	EA = b + (UIMM << 2);
3043	w1 = MEM(unsigned, EA, 4);
3044	EV_SET_REG2(*rSh, *rS, w1, w1);
3045	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3046
# evlwwsplatx: indexed form of evlwwsplat.
30470.4,6.RS,11.RA,16.RB,21.792:EVX:e500:evlwwsplatx %RS,%RA,%RB:Vector Load Word into Word and Splat Indexed
3048	unsigned_word b;
3049	unsigned_word EA;
3050	unsigned32 w1;
3051	if (RA_is_0) b = 0;
3052	else         b = *rA;
3053	EA = b + *rB;
3054	w1 = MEM(unsigned, EA, 4);
3055	EV_SET_REG2(*rSh, *rS, w1, w1);
3056	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3057
# evlwhsplat: load two halfwords and replicate each into both halves
# of the corresponding word of rS.
30580.4,6.RS,11.RA,16.UIMM,21.797:EVX:e500:evlwhsplat %RS,%RA,%UIMM:Vector Load Word into 2 Half Words and Splat
3059	unsigned_word b;
3060	unsigned_word EA;
3061	unsigned16 h1, h2;
3062	if (RA_is_0) b = 0;
3063	else         b = *rA;
3064	EA = b + (UIMM << 2);
3065	h1 = MEM(unsigned, EA, 2);
3066	h2 = MEM(unsigned, EA + 2, 2);
3067	EV_SET_REG4(*rSh, *rS, h1, h1, h2, h2);
3068	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3069
# evlwhsplatx: indexed form of evlwhsplat.
30700.4,6.RS,11.RA,16.RB,21.796:EVX:e500:evlwhsplatx %RS,%RA,%RB:Vector Load Word into 2 Half Words and Splat Indexed
3071	unsigned_word b;
3072	unsigned_word EA;
3073	unsigned16 h1, h2;
3074	if (RA_is_0) b = 0;
3075	else         b = *rA;
3076	EA = b + *rB;
3077	h1 = MEM(unsigned, EA, 2);
3078	h2 = MEM(unsigned, EA + 2, 2);
3079	EV_SET_REG4(*rSh, *rS, h1, h1, h2, h2);
3080	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3081
# evlhhesplat: load one halfword from EA = (rA|0) + UIMM*2 into the
# even halfword of each word of rS; odd halfwords are zeroed.
30820.4,6.RS,11.RA,16.UIMM,21.777:EVX:e500:evlhhesplat %RS,%RA,%UIMM:Vector Load Half Word into Half Words Even and Splat
3083	unsigned_word b;
3084	unsigned_word EA;
3085	unsigned16 h;
3086	if (RA_is_0) b = 0;
3087	else         b = *rA;
3088	EA = b + (UIMM << 1);
3089	h = MEM(unsigned, EA, 2);
3090	EV_SET_REG4(*rSh, *rS, h, 0, h, 0);
3091	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3092
# evlhhesplatx: indexed form of evlhhesplat.
30930.4,6.RS,11.RA,16.RB,21.776:EVX:e500:evlhhesplatx %RS,%RA,%RB:Vector Load Half Word into Half Words Even and Splat Indexed
3094	unsigned_word b;
3095	unsigned_word EA;
3096	unsigned16 h;
3097	if (RA_is_0) b = 0;
3098	else         b = *rA;
3099	EA = b + *rB;
3100	h = MEM(unsigned, EA, 2);
3101	EV_SET_REG4(*rSh, *rS, h, 0, h, 0);
3102	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3103
# evlhhousplat: load one halfword into the odd halfword of each word
# of rS, zero-extended; even halfwords are zeroed.
31040.4,6.RS,11.RA,16.UIMM,21.781:EVX:e500:evlhhousplat %RS,%RA,%UIMM:Vector Load Half Word into Half Word Odd Unsigned and Splat
3105	unsigned_word b;
3106	unsigned_word EA;
3107	unsigned16 h;
3108	if (RA_is_0) b = 0;
3109	else         b = *rA;
3110	EA = b + (UIMM << 1);
3111	h = MEM(unsigned, EA, 2);
3112	EV_SET_REG4(*rSh, *rS, 0, h, 0, h);
3113	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3114
# evlhhousplatx: indexed form of evlhhousplat.
31150.4,6.RS,11.RA,16.RB,21.780:EVX:e500:evlhhousplatx %RS,%RA,%RB:Vector Load Half Word into Half Word Odd Unsigned and Splat Indexed
3116	unsigned_word b;
3117	unsigned_word EA;
3118	unsigned16 h;
3119	if (RA_is_0) b = 0;
3120	else         b = *rA;
3121	EA = b + *rB;
3122	h = MEM(unsigned, EA, 2);
3123	EV_SET_REG4(*rSh, *rS, 0, h, 0, h);
3124	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3125
# evlhhossplat: as evlhhousplat but sign-extends the loaded halfword
# into the even halfword slot of each word.
31260.4,6.RS,11.RA,16.UIMM,21.783:EVX:e500:evlhhossplat %RS,%RA,%UIMM:Vector Load Half Word into Half Word Odd Signed and Splat
3127	unsigned_word b;
3128	unsigned_word EA;
3129	unsigned16 h1, h2;
3130	if (RA_is_0) b = 0;
3131	else         b = *rA;
3132	EA = b + (UIMM << 1);
3133	h2 = MEM(unsigned, EA, 2);
3134	if (h2 & 0x8000)
3135	  h1 = 0xffff;
3136	else
3137	  h1 = 0;
3138	EV_SET_REG4(*rSh, *rS, h1, h2, h1, h2);
3139	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3140
# evlhhossplatx: indexed form of evlhhossplat.
31410.4,6.RS,11.RA,16.RB,21.782:EVX:e500:evlhhossplatx %RS,%RA,%RB:Vector Load Half Word into Half Word Odd Signed and Splat Indexed
3142	unsigned_word b;
3143	unsigned_word EA;
3144	unsigned16 h1, h2;
3145	if (RA_is_0) b = 0;
3146	else         b = *rA;
3147	EA = b + *rB;
3148	h2 = MEM(unsigned, EA, 2);
3149	if (h2 & 0x8000)
3150	  h1 = 0xffff;
3151	else
3152	  h1 = 0;
3153	EV_SET_REG4(*rSh, *rS, h1, h2, h1, h2);
3154	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3155
3156
# evstdd: store the 64-bit rS pair as two word accesses at
# EA = (rA|0) + UIMM*8 (high half first).
31570.4,6.RS,11.RA,16.UIMM,21.801:EVX:e500:evstdd %RS,%RA,%UIMM:Vector Store Double of Double
3158	unsigned_word b;
3159	unsigned_word EA;
3160	if (RA_is_0) b = 0;
3161	else         b = *rA;
3162	EA = b + (UIMM << 3);
3163	STORE(EA, 4, (*rSh));
3164	STORE(EA + 4, 4, (*rS));
3165	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3166
# evstddx: indexed form of evstdd (EA = (rA|0) + rB).
31670.4,6.RS,11.RA,16.RB,21.800:EVX:e500:evstddx %RS,%RA,%RB:Vector Store Double of Double Indexed
3168	unsigned_word b;
3169	unsigned_word EA;
3170	if (RA_is_0) b = 0;
3171	else         b = *rA;
3172	EA = b + *rB;
3173	STORE(EA, 4, (*rSh));
3174	STORE(EA + 4, 4, (*rS));
3175	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3176
# evstdw: store the two words of rS at EA = (rA|0) + UIMM*8.
31770.4,6.RS,11.RA,16.UIMM,21.803:EVX:e500:evstdw %RS,%RA,%UIMM:Vector Store Double of Two Words
3178	unsigned_word b;
3179	unsigned_word EA;
3180	unsigned32 w1, w2;
3181	if (RA_is_0) b = 0;
3182	else         b = *rA;
3183	EA = b + (UIMM << 3);
3184	w1 = *rSh;
3185	w2 = *rS;
3186	STORE(EA + 0, 4, w1);
3187	STORE(EA + 4, 4, w2);
3188	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3189
# evstdwx: indexed form of evstdw.
31900.4,6.RS,11.RA,16.RB,21.802:EVX:e500:evstdwx %RS,%RA,%RB:Vector Store Double of Two Words Indexed
3191	unsigned_word b;
3192	unsigned_word EA;
3193	unsigned32 w1, w2;
3194	if (RA_is_0) b = 0;
3195	else         b = *rA;
3196	EA = b + *rB;
3197	w1 = *rSh;
3198	w2 = *rS;
3199	STORE(EA + 0, 4, w1);
3200	STORE(EA + 4, 4, w2);
3201	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3202
# evstdh: store the four halfwords of rS at EA = (rA|0) + UIMM*8.
32030.4,6.RS,11.RA,16.UIMM,21.805:EVX:e500:evstdh %RS,%RA,%UIMM:Vector Store Double of Four Half Words
3204	unsigned_word b;
3205	unsigned_word EA;
3206	unsigned16 h1, h2, h3, h4;
3207	if (RA_is_0) b = 0;
3208	else         b = *rA;
3209	EA = b + (UIMM << 3);
3210	h1 = EV_HIHALF(*rSh);
3211	h2 = EV_LOHALF(*rSh);
3212	h3 = EV_HIHALF(*rS);
3213	h4 = EV_LOHALF(*rS);
3214	STORE(EA + 0, 2, h1);
3215	STORE(EA + 2, 2, h2);
3216	STORE(EA + 4, 2, h3);
3217	STORE(EA + 6, 2, h4);
3218	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3219
# evstdhx: indexed form of evstdh.
32200.4,6.RS,11.RA,16.RB,21.804:EVX:e500:evstdhx %RS,%RA,%RB:Vector Store Double of Four Half Words Indexed
3221	unsigned_word b;
3222	unsigned_word EA;
3223	unsigned16 h1, h2, h3, h4;
3224	if (RA_is_0) b = 0;
3225	else         b = *rA;
3226	EA = b + *rB;
3227	h1 = EV_HIHALF(*rSh);
3228	h2 = EV_LOHALF(*rSh);
3229	h3 = EV_HIHALF(*rS);
3230	h4 = EV_LOHALF(*rS);
3231	STORE(EA + 0, 2, h1);
3232	STORE(EA + 2, 2, h2);
3233	STORE(EA + 4, 2, h3);
3234	STORE(EA + 6, 2, h4);
3235	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3236
# evstwwe: store the even (high) word of rS at EA = (rA|0) + UIMM*4.
32370.4,6.RS,11.RA,16.UIMM,21.825:EVX:e500:evstwwe %RS,%RA,%UIMM:Vector Store Word of Word from Even
3238	unsigned_word b;
3239	unsigned_word EA;
3240	unsigned32 w;
3241	if (RA_is_0) b = 0;
3242	else         b = *rA;
	/* Word-sized access: the SPE PEM scales UIMM by 4 for evstwwe
	   (was << 3, a doubleword scale; the word-load counterparts
	   evlwhe/evlwwsplat already use << 2).  */
3243	EA = b + (UIMM << 2);
3244	w = *rSh;
3245	STORE(EA, 4, w);
3246	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3247
# evstwwex: indexed form of evstwwe -- store the even (high) word of rS
# at EA = (rA|0) + rB (no immediate scaling in the indexed form).
32480.4,6.RS,11.RA,16.RB,21.824:EVX:e500:evstwwex %RS,%RA,%RB:Vector Store Word of Word from Even Indexed
3249	unsigned_word b;
3250	unsigned_word EA;
3251	unsigned32 w;
3252	if (RA_is_0) b = 0;
3253	else         b = *rA;
3254	EA = b + *rB;
3255	w = *rSh;
3256	STORE(EA, 4, w);
3257	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3258
# evstwwo: store the odd (low) word of rS at EA = (rA|0) + UIMM*4.
32590.4,6.RS,11.RA,16.UIMM,21.829:EVX:e500:evstwwo %RS,%RA,%UIMM:Vector Store Word of Word from Odd
3260	unsigned_word b;
3261	unsigned_word EA;
3262	unsigned32 w;
3263	if (RA_is_0) b = 0;
3264	else         b = *rA;
	/* Word-sized access: the SPE PEM scales UIMM by 4 for evstwwo
	   (was << 3, a doubleword scale copied from the evstd* forms).  */
3265	EA = b + (UIMM << 2);
3266	w = *rS;
3267	STORE(EA, 4, w);
3268	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3269
# evstwwox: indexed form of evstwwo -- store the odd (low) word of rS
# at EA = (rA|0) + rB.
32700.4,6.RS,11.RA,16.RB,21.828:EVX:e500:evstwwox %RS,%RA,%RB:Vector Store Word of Word from Odd Indexed
3271	unsigned_word b;
3272	unsigned_word EA;
3273	unsigned32 w;
3274	if (RA_is_0) b = 0;
3275	else         b = *rA;
3276	EA = b + *rB;
3277	w = *rS;
3278	STORE(EA, 4, w);
3279	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3280
# evstwhe: store the even (upper) halfword of each word of rS at
# EA = (rA|0) + UIMM*4.
32810.4,6.RS,11.RA,16.UIMM,21.817:EVX:e500:evstwhe %RS,%RA,%UIMM:Vector Store Word of Two Half Words from Even
3282	unsigned_word b;
3283	unsigned_word EA;
3284	unsigned16 h1, h2;
3285	if (RA_is_0) b = 0;
3286	else         b = *rA;
	/* Word-sized access: the SPE PEM scales UIMM by 4 for evstwhe
	   (was << 3, a doubleword scale).  */
3287	EA = b + (UIMM << 2);
3288	h1 = EV_HIHALF(*rSh);
3289	h2 = EV_HIHALF(*rS);
3290	STORE(EA + 0, 2, h1);
3291	STORE(EA + 2, 2, h2);
3292	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3293
# evstwhex: indexed form of evstwhe -- store the even (upper) halfword
# of each word of rS at EA = (rA|0) + rB.
32940.4,6.RS,11.RA,16.RB,21.816:EVX:e500:evstwhex %RS,%RA,%RB:Vector Store Word of Two Half Words from Even Indexed
3295	unsigned_word b;
3296	unsigned_word EA;
3297	unsigned16 h1, h2;
3298	if (RA_is_0) b = 0;
3299	else         b = *rA;
3300	EA = b + *rB;
3301	h1 = EV_HIHALF(*rSh);
3302	h2 = EV_HIHALF(*rS);
3303	STORE(EA + 0, 2, h1);
3304	STORE(EA + 2, 2, h2);
3305	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3306
# evstwho: store the odd (lower) halfword of each word of rS at
# EA = (rA|0) + UIMM*4.
33070.4,6.RS,11.RA,16.UIMM,21.821:EVX:e500:evstwho %RS,%RA,%UIMM:Vector Store Word of Two Half Words from Odd
3308	unsigned_word b;
3309	unsigned_word EA;
3310	unsigned16 h1, h2;
3311	if (RA_is_0) b = 0;
3312	else         b = *rA;
	/* Word-sized access: the SPE PEM scales UIMM by 4 for evstwho
	   (was << 3, a doubleword scale).  */
3313	EA = b + (UIMM << 2);
3314	h1 = EV_LOHALF(*rSh);
3315	h2 = EV_LOHALF(*rS);
3316	STORE(EA + 0, 2, h1);
3317	STORE(EA + 2, 2, h2);
3318	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3319
# evstwhox: indexed form of evstwho -- store the odd (lower) halfword
# of each word of rS at EA = (rA|0) + rB.
33200.4,6.RS,11.RA,16.RB,21.820:EVX:e500:evstwhox %RS,%RA,%RB:Vector Store Word of Two Half Words from Odd Indexed
3321	unsigned_word b;
3322	unsigned_word EA;
3323	unsigned16 h1, h2;
3324	if (RA_is_0) b = 0;
3325	else         b = *rA;
3326	EA = b + *rB;
3327	h1 = EV_LOHALF(*rSh);
3328	h2 = EV_LOHALF(*rS);
3329	STORE(EA + 0, 2, h1);
3330	STORE(EA + 2, 2, h2);
3331	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3332
3333
3334#
3335# 4.5.1 Integer Select Instruction
3336#
3337
# isel: if CR bit CRB is set (CR bits number from the MSB, hence the
# 1 << (31 - CRB) test) select rA -- or 0 when RA is r0 -- else select
# rB.  Both the GPR and its e500 high half are copied.
33380.31,6.RS,11.RA,16.RB,21.CRB,26.30:X:e500:isel %RS,%RA,%RB,%CRB:Integer Select
3339	if (CR & (1 << (31 - (unsigned)CRB)))
3340	  if (RA_is_0)
3341	    EV_SET_REG1(*rSh, *rS, 0);
3342	  else
3343	    EV_SET_REG2(*rSh, *rS, *rAh, *rA);
3344	else
3345	  EV_SET_REG2(*rSh, *rS, *rBh, *rB);
3346	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
3347