1;; AltiVec patterns.
2;; Copyright (C) 2002-2018 Free Software Foundation, Inc.
3;; Contributed by Aldy Hernandez (aldy@quesejoda.com)
4
5;; This file is part of GCC.
6
7;; GCC is free software; you can redistribute it and/or modify it
8;; under the terms of the GNU General Public License as published
9;; by the Free Software Foundation; either version 3, or (at your
10;; option) any later version.
11
12;; GCC is distributed in the hope that it will be useful, but WITHOUT
13;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14;; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
15;; License for more details.
16
17;; You should have received a copy of the GNU General Public License
18;; along with GCC; see the file COPYING3.  If not see
19;; <http://www.gnu.org/licenses/>.
20
;; UNSPEC codes used by the AltiVec patterns in this file.  Each value
;; tags an UNSPEC rtx so that operations which cannot be expressed with
;; generic RTL (saturating/carry arithmetic, permutes, pack/unpack,
;; stream hints, VSCR/VRSAVE access, BCD, ...) remain distinguishable.
(define_c_enum "unspec"
  [UNSPEC_VCMPBFP
   UNSPEC_VMSUMU
   UNSPEC_VMSUMM
   UNSPEC_VMSUMSHM
   UNSPEC_VMSUMUHS
   UNSPEC_VMSUMSHS
   UNSPEC_VMHADDSHS
   UNSPEC_VMHRADDSHS
   UNSPEC_VADDCUW
   UNSPEC_VADDU
   UNSPEC_VADDS
   UNSPEC_VAVGU
   UNSPEC_VAVGS
   UNSPEC_VMULEUB
   UNSPEC_VMULESB
   UNSPEC_VMULEUH
   UNSPEC_VMULESH
   UNSPEC_VMULOUB
   UNSPEC_VMULOSB
   UNSPEC_VMULOUH
   UNSPEC_VMULOSH
   UNSPEC_VPKPX
   UNSPEC_VPACK_SIGN_SIGN_SAT
   UNSPEC_VPACK_SIGN_UNS_SAT
   UNSPEC_VPACK_UNS_UNS_SAT
   UNSPEC_VPACK_UNS_UNS_MOD
   UNSPEC_VPACK_UNS_UNS_MOD_DIRECT
   UNSPEC_VSLV4SI
   UNSPEC_VSLO
   UNSPEC_VSR
   UNSPEC_VSRO
   UNSPEC_VSUBCUW
   UNSPEC_VSUBU
   UNSPEC_VSUBS
   UNSPEC_VSUM4UBS
   UNSPEC_VSUM4S
   UNSPEC_VSUM2SWS
   UNSPEC_VSUMSWS
   UNSPEC_VPERM
   UNSPEC_VPERMR
   UNSPEC_VPERM_UNS
   UNSPEC_VRFIN
   UNSPEC_VCFUX
   UNSPEC_VCFSX
   UNSPEC_VCTUXS
   UNSPEC_VCTSXS
   UNSPEC_VLOGEFP
   UNSPEC_VEXPTEFP
   UNSPEC_VSLDOI
   UNSPEC_VUNPACK_HI_SIGN
   UNSPEC_VUNPACK_LO_SIGN
   UNSPEC_VUNPACK_HI_SIGN_DIRECT
   UNSPEC_VUNPACK_LO_SIGN_DIRECT
   UNSPEC_VUPKHPX
   UNSPEC_VUPKLPX
   UNSPEC_DARN
   UNSPEC_DARN_32
   UNSPEC_DARN_RAW
   UNSPEC_DST
   UNSPEC_DSTT
   UNSPEC_DSTST
   UNSPEC_DSTSTT
   UNSPEC_LVSL
   UNSPEC_LVSR
   UNSPEC_LVE
   UNSPEC_STVX
   UNSPEC_STVXL
   UNSPEC_STVE
   UNSPEC_SET_VSCR
   UNSPEC_GET_VRSAVE
   UNSPEC_LVX
   UNSPEC_REDUC_PLUS
   UNSPEC_VECSH
   UNSPEC_EXTEVEN_V4SI
   UNSPEC_EXTEVEN_V8HI
   UNSPEC_EXTEVEN_V16QI
   UNSPEC_EXTEVEN_V4SF
   UNSPEC_EXTODD_V4SI
   UNSPEC_EXTODD_V8HI
   UNSPEC_EXTODD_V16QI
   UNSPEC_EXTODD_V4SF
   UNSPEC_INTERHI_V4SI
   UNSPEC_INTERHI_V8HI
   UNSPEC_INTERHI_V16QI
   UNSPEC_INTERLO_V4SI
   UNSPEC_INTERLO_V8HI
   UNSPEC_INTERLO_V16QI
   UNSPEC_LVLX
   UNSPEC_LVLXL
   UNSPEC_LVRX
   UNSPEC_LVRXL
   UNSPEC_STVLX
   UNSPEC_STVLXL
   UNSPEC_STVRX
   UNSPEC_STVRXL
   UNSPEC_VADU
   UNSPEC_VSLV
   UNSPEC_VSRV
   UNSPEC_VMULWHUB
   UNSPEC_VMULWLUB
   UNSPEC_VMULWHSB
   UNSPEC_VMULWLSB
   UNSPEC_VMULWHUH
   UNSPEC_VMULWLUH
   UNSPEC_VMULWHSH
   UNSPEC_VMULWLSH
   UNSPEC_VUPKHUB
   UNSPEC_VUPKHUH
   UNSPEC_VUPKLUB
   UNSPEC_VUPKLUH
   UNSPEC_VPERMSI
   UNSPEC_VPERMHI
   UNSPEC_INTERHI
   UNSPEC_INTERLO
   UNSPEC_VUPKHS_V4SF
   UNSPEC_VUPKLS_V4SF
   UNSPEC_VUPKHU_V4SF
   UNSPEC_VUPKLU_V4SF
   UNSPEC_VGBBD
   UNSPEC_VMRGH_DIRECT
   UNSPEC_VMRGL_DIRECT
   UNSPEC_VSPLT_DIRECT
   UNSPEC_VMRGEW_DIRECT
   UNSPEC_VSUMSWS_DIRECT
   UNSPEC_VADDCUQ
   UNSPEC_VADDEUQM
   UNSPEC_VADDECUQ
   UNSPEC_VSUBCUQ
   UNSPEC_VSUBEUQM
   UNSPEC_VSUBECUQ
   UNSPEC_VBPERMQ
   UNSPEC_VBPERMD
   UNSPEC_BCDADD
   UNSPEC_BCDSUB
   UNSPEC_BCD_OVERFLOW
   UNSPEC_CMPRB
   UNSPEC_CMPRB2
   UNSPEC_CMPEQB
   UNSPEC_VRLMI
   UNSPEC_VRLNM
])
163
;; UNSPEC_VOLATILE codes: operations with side effects that must not be
;; deleted, reordered or CSEd (VRSAVE/VSCR writes, data-stream stops).
(define_c_enum "unspecv"
  [UNSPECV_SET_VRSAVE
   UNSPECV_MTVSCR
   UNSPECV_MFVSCR
   UNSPECV_DSSALL
   UNSPECV_DSS
  ])
171
;; Like VI, defined in vector.md, but add ISA 2.07 integer vector ops
(define_mode_iterator VI2 [V4SI V8HI V16QI V2DI])
;; Short vec int modes
(define_mode_iterator VIshort [V8HI V16QI])
;; Longer vec int modes for rotate/mask ops
(define_mode_iterator VIlong [V2DI V4SI])
;; Vec float modes
(define_mode_iterator VF [V4SF])
;; Vec modes, pity mode iterators are not composable
(define_mode_iterator V [V4SI V8HI V16QI V4SF])
;; Vec modes for move/logical/permute ops, include vector types for move not
;; otherwise handled by altivec (v2df, v2di, ti)
(define_mode_iterator VM [V4SI
			  V8HI
			  V16QI
			  V4SF
			  V2DF
			  V2DI
			  V1TI
			  TI
			  (KF "FLOAT128_VECTOR_P (KFmode)")
			  (TF "FLOAT128_VECTOR_P (TFmode)")])

;; Like VM, except don't do TImode
(define_mode_iterator VM2 [V4SI
			   V8HI
			   V16QI
			   V4SF
			   V2DF
			   V2DI
			   V1TI
			   (KF "FLOAT128_VECTOR_P (KFmode)")
			   (TF "FLOAT128_VECTOR_P (TFmode)")])

;; Specific iterator for parity which does not have a byte/half-word form, but
;; does have a quad word form
(define_mode_iterator VParity [V4SI
			       V2DI
			       V1TI
			       (TI "TARGET_VSX_TIMODE")])

;; Element-size letter used to build AltiVec mnemonics (e.g. vaddu<b/h/w/d>m).
(define_mode_attr VI_char [(V2DI "d") (V4SI "w") (V8HI "h") (V16QI "b")])
;; Scalar mode corresponding to each vector element mode.
(define_mode_attr VI_scalar [(V2DI "DI") (V4SI "SI") (V8HI "HI") (V16QI "QI")])
;; Per-mode enable condition: V2DI needs the ISA 2.07 (P8) vector unit,
;; the rest only need base AltiVec.
(define_mode_attr VI_unit [(V16QI "VECTOR_UNIT_ALTIVEC_P (V16QImode)")
			   (V8HI "VECTOR_UNIT_ALTIVEC_P (V8HImode)")
			   (V4SI "VECTOR_UNIT_ALTIVEC_P (V4SImode)")
			   (V2DI "VECTOR_UNIT_P8_VECTOR_P (V2DImode)")
			   (V1TI "VECTOR_UNIT_ALTIVEC_P (V1TImode)")])

;; Vector pack/unpack
(define_mode_iterator VP [V2DI V4SI V8HI])
;; Mode with elements half as wide (pack target / unpack source).
(define_mode_attr VP_small [(V2DI "V4SI") (V4SI "V8HI") (V8HI "V16QI")])
;; Lower-case spelling of VP_small, for pattern names.
(define_mode_attr VP_small_lc [(V2DI "v4si") (V4SI "v8hi") (V8HI "v16qi")])
;; Element letter of the narrower mode, for pack/unpack mnemonics.
(define_mode_attr VU_char [(V2DI "w") (V4SI "h") (V8HI "b")])

;; Vector negate
(define_mode_iterator VNEG [V4SI V2DI])
229
230;; Vector move instructions.
;; General vector move; one of the two operands must already be a register.
;; Alternatives: VR<->mem (stvx/lvx), VR->VR (vor), GPR/mem moves that are
;; split later ("#"), and easy constants (vxor / output_vec_const_move).
(define_insn "*altivec_mov<mode>"
  [(set (match_operand:VM2 0 "nonimmediate_operand" "=Z,v,v,?Y,?*r,?*r,v,v,?*r")
	(match_operand:VM2 1 "input_operand" "v,Z,v,*r,Y,*r,j,W,W"))]
  "VECTOR_MEM_ALTIVEC_P (<MODE>mode)
   && (register_operand (operands[0], <MODE>mode)
       || register_operand (operands[1], <MODE>mode))"
{
  switch (which_alternative)
    {
    case 0: return "stvx %1,%y0";	/* VR -> memory store.  */
    case 1: return "lvx %0,%y1";	/* memory -> VR load.  */
    case 2: return "vor %0,%1,%1";	/* VR -> VR register copy.  */
    case 3: return "#";			/* GPR -> memory: split later.  */
    case 4: return "#";			/* memory -> GPR: split later.  */
    case 5: return "#";			/* GPR -> GPR: split later.  */
    case 6: return "vxor %0,%0,%0";	/* easy zero constant.  */
    case 7: return output_vec_const_move (operands);
    case 8: return "#";			/* constant -> GPR: split later.  */
    default: gcc_unreachable ();
    }
}
  [(set_attr "type" "vecstore,vecload,veclogical,store,load,*,veclogical,*,*")
   (set_attr "length" "4,4,4,20,20,20,4,8,32")])
254
255;; Unlike other altivec moves, allow the GPRs, since a normal use of TImode
256;; is for unions.  However for plain data movement, slightly favor the vector
257;; loads
(define_insn "*altivec_movti"
  [(set (match_operand:TI 0 "nonimmediate_operand" "=Z,v,v,?Y,?r,?r,v,v")
	(match_operand:TI 1 "input_operand" "v,Z,v,r,Y,r,j,W"))]
  "VECTOR_MEM_ALTIVEC_P (TImode)
   && (register_operand (operands[0], TImode)
       || register_operand (operands[1], TImode))"
{
  switch (which_alternative)
    {
    case 0: return "stvx %1,%y0";	/* VR -> memory store.  */
    case 1: return "lvx %0,%y1";	/* memory -> VR load.  */
    case 2: return "vor %0,%1,%1";	/* VR -> VR register copy.  */
    case 3: return "#";			/* GPR -> memory: split later.  */
    case 4: return "#";			/* memory -> GPR: split later.  */
    case 5: return "#";			/* GPR -> GPR: split later.  */
    case 6: return "vxor %0,%0,%0";	/* easy zero constant.  */
    case 7: return output_vec_const_move (operands);
    default: gcc_unreachable ();
    }
}
  [(set_attr "type" "vecstore,vecload,veclogical,store,load,*,veclogical,*")])
279
280;; Load up a vector with the most significant bit set by loading up -1 and
281;; doing a shift left
(define_split
  [(set (match_operand:VM 0 "altivec_register_operand" "")
	(match_operand:VM 1 "easy_vector_constant_msb" ""))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode) && reload_completed"
  [(const_int 0)]
{
  rtx dest = operands[0];
  machine_mode mode = GET_MODE (operands[0]);
  rtvec v;
  int i, num_elements;

  /* Do the arithmetic in the integer mode; V4SF shares the VR bank.  */
  if (mode == V4SFmode)
    {
      mode = V4SImode;
      dest = gen_lowpart (V4SImode, dest);
    }

  /* Build a vector with every element set to -1 (all bits on).  */
  num_elements = GET_MODE_NUNITS (mode);
  v = rtvec_alloc (num_elements);
  for (i = 0; i < num_elements; i++)
    RTVEC_ELT (v, i) = constm1_rtx;

  /* Shift the all-ones vector left by itself; the -1 shift count is
     presumably truncated to element_bits - 1 by the hardware, leaving
     only the MSB set in each element.  */
  emit_insn (gen_vec_initv4sisi (dest, gen_rtx_PARALLEL (mode, v)));
  emit_insn (gen_rtx_SET (dest, gen_rtx_ASHIFT (mode, dest, dest)));
  DONE;
})
308
;; Materialize a constant that is not directly splattable but is twice an
;; easy constant: load half the value, then add the register to itself.
(define_split
  [(set (match_operand:VM 0 "altivec_register_operand" "")
	(match_operand:VM 1 "easy_vector_constant_add_self" ""))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode) && reload_completed"
  [(set (match_dup 0) (match_dup 3))
   (set (match_dup 0) (match_dup 4))]
{
  rtx dup = gen_easy_altivec_constant (operands[1]);
  rtx const_vec;
  machine_mode op_mode = <MODE>mode;

  /* Divide the operand of the resulting VEC_DUPLICATE, and use
     simplify_rtx to make a CONST_VECTOR.  */
  XEXP (dup, 0) = simplify_const_binary_operation (ASHIFTRT, QImode,
						   XEXP (dup, 0), const1_rtx);
  const_vec = simplify_rtx (dup);

  /* V4SF constants are materialized through V4SI arithmetic.  */
  if (op_mode == V4SFmode)
    {
      op_mode = V4SImode;
      operands[0] = gen_lowpart (op_mode, operands[0]);
    }
  if (GET_MODE (const_vec) == op_mode)
    operands[3] = const_vec;
  else
    operands[3] = gen_lowpart (op_mode, const_vec);
  /* Second insn doubles the halved constant: op0 = op0 + op0.  */
  operands[4] = gen_rtx_PLUS (op_mode, operands[0], operands[0]);
})
337
;; Materialize a constant that is a splat shifted by a whole number of
;; bytes: splat the element value, build a 0 or -1 background, then merge
;; the two with VSLDOI (byte rotate of the 32-byte concatenation).
(define_split
  [(set (match_operand:VM 0 "altivec_register_operand" "")
	(match_operand:VM 1 "easy_vector_constant_vsldoi" ""))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode) && can_create_pseudo_p ()"
  [(set (match_dup 2) (match_dup 3))
   (set (match_dup 4) (match_dup 5))
   (set (match_dup 0)
        (unspec:VM [(match_dup 2)
		    (match_dup 4)
		    (match_dup 6)]
		   UNSPEC_VSLDOI))]
{
  rtx op1 = operands[1];
  /* Element 0 in register order is the last vector element on LE.  */
  int elt = (BYTES_BIG_ENDIAN) ? 0 : GET_MODE_NUNITS (<MODE>mode) - 1;
  HOST_WIDE_INT val = const_vector_elt_as_int (op1, elt);
  rtx rtx_val = GEN_INT (val);
  int shift = vspltis_shifted (op1);

  /* vspltis_shifted returning 0 means this predicate should not have
     matched.  */
  gcc_assert (shift != 0);
  operands[2] = gen_reg_rtx (<MODE>mode);
  operands[3] = gen_const_vec_duplicate (<MODE>mode, rtx_val);
  operands[4] = gen_reg_rtx (<MODE>mode);

  /* Negative shift: background of all-ones; positive: all-zeros.  */
  if (shift < 0)
    {
      operands[5] = CONSTM1_RTX (<MODE>mode);
      operands[6] = GEN_INT (-shift);
    }
  else
    {
      operands[5] = CONST0_RTX (<MODE>mode);
      operands[6] = GEN_INT (shift);
    }
})
372
;; Read the VRSAVE special register into a GPR.  Darwin assemblers only
;; know the raw SPR number (256), not the mfvrsave mnemonic.
(define_insn "get_vrsave_internal"
  [(set (match_operand:SI 0 "register_operand" "=r")
	(unspec:SI [(reg:SI VRSAVE_REGNO)] UNSPEC_GET_VRSAVE))]
  "TARGET_ALTIVEC"
{
  if (TARGET_MACHO)
     return "mfspr %0,256";
  else
     return "mfvrsave %0";
}
  [(set_attr "type" "*")])

;; Write VRSAVE from a GPR; volatile so the write is never removed.
(define_insn "*set_vrsave_internal"
  [(match_parallel 0 "vrsave_operation"
     [(set (reg:SI VRSAVE_REGNO)
	   (unspec_volatile:SI [(match_operand:SI 1 "register_operand" "r")
				(reg:SI VRSAVE_REGNO)] UNSPECV_SET_VRSAVE))])]
  "TARGET_ALTIVEC"
{
  if (TARGET_MACHO)
    return "mtspr 256,%1";
  else
    return "mtvrsave %1";
}
  [(set_attr "type" "*")])
398
;; Darwin 32-bit "save/restore the world" out-of-line prologue/epilogue
;; helpers; the parallel's remaining sets are matched by the predicates.
(define_insn "*save_world"
 [(match_parallel 0 "save_world_operation"
                  [(clobber (reg:SI LR_REGNO))
                   (use (match_operand:SI 1 "call_operand" "s"))])]
 "TARGET_MACHO && (DEFAULT_ABI == ABI_DARWIN) && TARGET_32BIT"
 "bl %z1"
  [(set_attr "type" "branch")
   (set_attr "length" "4")])

;; The restore helper also performs the function return, hence (return).
(define_insn "*restore_world"
 [(match_parallel 0 "restore_world_operation"
                  [(return)
		   (use (reg:SI LR_REGNO))
                   (use (match_operand:SI 1 "call_operand" "s"))
                   (clobber (match_operand:SI 2 "gpc_reg_operand" "=r"))])]
 "TARGET_MACHO && (DEFAULT_ABI == ABI_DARWIN) && TARGET_32BIT"
 "b %z1")
416
417;; The save_vregs and restore_vregs patterns don't use memory_operand
418;; because (plus (reg) (const_int)) is not a valid vector address.
419;; This way is more compact than describing exactly what happens in
420;; the out-of-line functions, ie. loading the constant into r11/r12
421;; then using indexed addressing, and requires less editing of rtl
422;; to describe the operation to dwarf2out_frame_debug_expr.
;; Calls to the out-of-line vector save routines; r11 (or r12) is the
;; scratch register the routine itself clobbers.  The sample (set ...) in
;; each parallel gives dwarf2out_frame_debug_expr a model of the stores.
(define_insn "*save_vregs_<mode>_r11"
  [(match_parallel 0 "any_parallel_operand"
     [(clobber (reg:P LR_REGNO))
      (use (match_operand:P 1 "symbol_ref_operand" "s"))
      (clobber (reg:P 11))
      (use (reg:P 0))
      (set (mem:V4SI (plus:P (match_operand:P 2 "gpc_reg_operand" "b")
			     (match_operand:P 3 "short_cint_operand" "I")))
	   (match_operand:V4SI 4 "altivec_register_operand" "v"))])]
  "TARGET_ALTIVEC"
  "bl %1"
  [(set_attr "type" "branch")
   (set_attr "length" "4")])

;; Same as above but the out-of-line routine clobbers r12.
(define_insn "*save_vregs_<mode>_r12"
  [(match_parallel 0 "any_parallel_operand"
     [(clobber (reg:P LR_REGNO))
      (use (match_operand:P 1 "symbol_ref_operand" "s"))
      (clobber (reg:P 12))
      (use (reg:P 0))
      (set (mem:V4SI (plus:P (match_operand:P 2 "gpc_reg_operand" "b")
			     (match_operand:P 3 "short_cint_operand" "I")))
	   (match_operand:V4SI 4 "altivec_register_operand" "v"))])]
  "TARGET_ALTIVEC"
  "bl %1"
  [(set_attr "type" "branch")
   (set_attr "length" "4")])

;; Out-of-line vector restore routines; direction of the sample set is
;; reversed (memory -> vector register).
(define_insn "*restore_vregs_<mode>_r11"
  [(match_parallel 0 "any_parallel_operand"
     [(clobber (reg:P LR_REGNO))
      (use (match_operand:P 1 "symbol_ref_operand" "s"))
      (clobber (reg:P 11))
      (use (reg:P 0))
      (set (match_operand:V4SI 2 "altivec_register_operand" "=v")
	   (mem:V4SI (plus:P (match_operand:P 3 "gpc_reg_operand" "b")
			     (match_operand:P 4 "short_cint_operand" "I"))))])]
  "TARGET_ALTIVEC"
  "bl %1"
  [(set_attr "type" "branch")
   (set_attr "length" "4")])

;; Same as above but the out-of-line routine clobbers r12.
(define_insn "*restore_vregs_<mode>_r12"
  [(match_parallel 0 "any_parallel_operand"
     [(clobber (reg:P LR_REGNO))
      (use (match_operand:P 1 "symbol_ref_operand" "s"))
      (clobber (reg:P 12))
      (use (reg:P 0))
      (set (match_operand:V4SI 2 "altivec_register_operand" "=v")
	   (mem:V4SI (plus:P (match_operand:P 3 "gpc_reg_operand" "b")
			     (match_operand:P 4 "short_cint_operand" "I"))))])]
  "TARGET_ALTIVEC"
  "bl %1"
  [(set_attr "type" "branch")
   (set_attr "length" "4")])
478
479;; Simple binary operations.
480
481;; add
;; Modular (wrap-around) integer vector add.
(define_insn "add<mode>3"
  [(set (match_operand:VI2 0 "register_operand" "=v")
        (plus:VI2 (match_operand:VI2 1 "register_operand" "v")
		  (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vaddu<VI_char>m %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; Single-precision vector float add.
(define_insn "*altivec_addv4sf3"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (plus:V4SF (match_operand:V4SF 1 "register_operand" "v")
		   (match_operand:V4SF 2 "register_operand" "v")))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vaddfp %0,%1,%2"
  [(set_attr "type" "vecfloat")])

;; Per-element carry out of an unsigned word add (vaddcuw).
(define_insn "altivec_vaddcuw"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
                      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VADDCUW))]
  "VECTOR_UNIT_ALTIVEC_P (V4SImode)"
  "vaddcuw %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; Unsigned saturating add; may set SAT in the VSCR, hence the extra set.
(define_insn "altivec_vaddu<VI_char>s"
  [(set (match_operand:VI 0 "register_operand" "=v")
        (unspec:VI [(match_operand:VI 1 "register_operand" "v")
		    (match_operand:VI 2 "register_operand" "v")]
		   UNSPEC_VADDU))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "<VI_unit>"
  "vaddu<VI_char>s %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; Signed saturating add; may set SAT in the VSCR.
(define_insn "altivec_vadds<VI_char>s"
  [(set (match_operand:VI 0 "register_operand" "=v")
        (unspec:VI [(match_operand:VI 1 "register_operand" "v")
                    (match_operand:VI 2 "register_operand" "v")]
		   UNSPEC_VADDS))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
  "vadds<VI_char>s %0,%1,%2"
  [(set_attr "type" "vecsimple")])
526
527;; sub
;; Modular (wrap-around) integer vector subtract.
(define_insn "sub<mode>3"
  [(set (match_operand:VI2 0 "register_operand" "=v")
        (minus:VI2 (match_operand:VI2 1 "register_operand" "v")
		   (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vsubu<VI_char>m %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; Single-precision vector float subtract.
(define_insn "*altivec_subv4sf3"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (minus:V4SF (match_operand:V4SF 1 "register_operand" "v")
                    (match_operand:V4SF 2 "register_operand" "v")))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vsubfp %0,%1,%2"
  [(set_attr "type" "vecfloat")])

;; Per-element borrow indication for unsigned word subtract (vsubcuw).
(define_insn "altivec_vsubcuw"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
                      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VSUBCUW))]
  "VECTOR_UNIT_ALTIVEC_P (V4SImode)"
  "vsubcuw %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; Unsigned saturating subtract; may set SAT in the VSCR.
(define_insn "altivec_vsubu<VI_char>s"
  [(set (match_operand:VI 0 "register_operand" "=v")
        (unspec:VI [(match_operand:VI 1 "register_operand" "v")
                    (match_operand:VI 2 "register_operand" "v")]
		   UNSPEC_VSUBU))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
  "vsubu<VI_char>s %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; Signed saturating subtract; may set SAT in the VSCR.
(define_insn "altivec_vsubs<VI_char>s"
  [(set (match_operand:VI 0 "register_operand" "=v")
        (unspec:VI [(match_operand:VI 1 "register_operand" "v")
                    (match_operand:VI 2 "register_operand" "v")]
		   UNSPEC_VSUBS))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
  "vsubs<VI_char>s %0,%1,%2"
  [(set_attr "type" "vecsimple")])
572
573;;
;; Unsigned vector average (vavgu*).
(define_insn "altivec_vavgu<VI_char>"
  [(set (match_operand:VI 0 "register_operand" "=v")
        (unspec:VI [(match_operand:VI 1 "register_operand" "v")
                    (match_operand:VI 2 "register_operand" "v")]
		   UNSPEC_VAVGU))]
  "TARGET_ALTIVEC"
  "vavgu<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; Signed vector average (vavgs*).
(define_insn "altivec_vavgs<VI_char>"
  [(set (match_operand:VI 0 "register_operand" "=v")
        (unspec:VI [(match_operand:VI 1 "register_operand" "v")
                    (match_operand:VI 2 "register_operand" "v")]
		   UNSPEC_VAVGS))]
  "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
  "vavgs<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])
591
;; Float bounds compare (vcmpbfp); result is a V4SI flag vector.
(define_insn "altivec_vcmpbfp"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
                      (match_operand:V4SF 2 "register_operand" "v")]
                      UNSPEC_VCMPBFP))]
  "VECTOR_UNIT_ALTIVEC_P (V4SImode)"
  "vcmpbfp %0,%1,%2"
  [(set_attr "type" "veccmp")])

;; Integer equality compare; each element becomes all-ones or all-zeros.
(define_insn "*altivec_eq<mode>"
  [(set (match_operand:VI2 0 "altivec_register_operand" "=v")
	(eq:VI2 (match_operand:VI2 1 "altivec_register_operand" "v")
		(match_operand:VI2 2 "altivec_register_operand" "v")))]
  "<VI_unit>"
  "vcmpequ<VI_char> %0,%1,%2"
  [(set_attr "type" "veccmpfx")])

;; Signed integer greater-than compare.
(define_insn "*altivec_gt<mode>"
  [(set (match_operand:VI2 0 "altivec_register_operand" "=v")
	(gt:VI2 (match_operand:VI2 1 "altivec_register_operand" "v")
		(match_operand:VI2 2 "altivec_register_operand" "v")))]
  "<VI_unit>"
  "vcmpgts<VI_char> %0,%1,%2"
  [(set_attr "type" "veccmpfx")])

;; Unsigned integer greater-than compare.
(define_insn "*altivec_gtu<mode>"
  [(set (match_operand:VI2 0 "altivec_register_operand" "=v")
	(gtu:VI2 (match_operand:VI2 1 "altivec_register_operand" "v")
		 (match_operand:VI2 2 "altivec_register_operand" "v")))]
  "<VI_unit>"
  "vcmpgtu<VI_char> %0,%1,%2"
  [(set_attr "type" "veccmpfx")])

;; Single-precision float equality compare.
(define_insn "*altivec_eqv4sf"
  [(set (match_operand:V4SF 0 "altivec_register_operand" "=v")
	(eq:V4SF (match_operand:V4SF 1 "altivec_register_operand" "v")
		 (match_operand:V4SF 2 "altivec_register_operand" "v")))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vcmpeqfp %0,%1,%2"
  [(set_attr "type" "veccmp")])

;; Single-precision float greater-than compare.
(define_insn "*altivec_gtv4sf"
  [(set (match_operand:V4SF 0 "altivec_register_operand" "=v")
	(gt:V4SF (match_operand:V4SF 1 "altivec_register_operand" "v")
		 (match_operand:V4SF 2 "altivec_register_operand" "v")))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vcmpgtfp %0,%1,%2"
  [(set_attr "type" "veccmp")])

;; Single-precision float greater-or-equal compare.
(define_insn "*altivec_gev4sf"
  [(set (match_operand:V4SF 0 "altivec_register_operand" "=v")
	(ge:V4SF (match_operand:V4SF 1 "altivec_register_operand" "v")
		 (match_operand:V4SF 2 "altivec_register_operand" "v")))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vcmpgefp %0,%1,%2"
  [(set_attr "type" "veccmp")])
648
;; Vector select.  Note vsel's operand order: bits of %1 choose between
;; %3 (bit clear) and %2 (bit set), so the rtl's then/else arms map to
;; the assembler operands in reverse.
(define_insn "*altivec_vsel<mode>"
  [(set (match_operand:VM 0 "altivec_register_operand" "=v")
	(if_then_else:VM
	 (ne:CC (match_operand:VM 1 "altivec_register_operand" "v")
		(match_operand:VM 4 "zero_constant" ""))
	 (match_operand:VM 2 "altivec_register_operand" "v")
	 (match_operand:VM 3 "altivec_register_operand" "v")))]
  "VECTOR_MEM_ALTIVEC_P (<MODE>mode)"
  "vsel %0,%3,%2,%1"
  [(set_attr "type" "vecmove")])

;; As above with an unsigned comparison mode; emits the same vsel.
(define_insn "*altivec_vsel<mode>_uns"
  [(set (match_operand:VM 0 "altivec_register_operand" "=v")
	(if_then_else:VM
	 (ne:CCUNS (match_operand:VM 1 "altivec_register_operand" "v")
		   (match_operand:VM 4 "zero_constant" ""))
	 (match_operand:VM 2 "altivec_register_operand" "v")
	 (match_operand:VM 3 "altivec_register_operand" "v")))]
  "VECTOR_MEM_ALTIVEC_P (<MODE>mode)"
  "vsel %0,%3,%2,%1"
  [(set_attr "type" "vecmove")])
670
671;; Fused multiply add.
672
;; Fused multiply-add: op0 = op1 * op2 + op3.
(define_insn "*altivec_fmav4sf4"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
	(fma:V4SF (match_operand:V4SF 1 "register_operand" "v")
		  (match_operand:V4SF 2 "register_operand" "v")
		  (match_operand:V4SF 3 "register_operand" "v")))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vmaddfp %0,%1,%2,%3"
  [(set_attr "type" "vecfloat")])

;; We do multiply as a fused multiply-add with an add of a -0.0 vector.

(define_expand "altivec_mulv4sf3"
  [(set (match_operand:V4SF 0 "register_operand" "")
	(fma:V4SF (match_operand:V4SF 1 "register_operand" "")
		  (match_operand:V4SF 2 "register_operand" "")
		  (match_dup 3)))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
{
  rtx neg0;

  /* Generate [-0.0, -0.0, -0.0, -0.0].  */
  /* Splat -1 into each word, then shift each word left by itself
     (count truncated), leaving only the sign bit set: 0x80000000.  */
  neg0 = gen_reg_rtx (V4SImode);
  emit_insn (gen_altivec_vspltisw (neg0, constm1_rtx));
  emit_insn (gen_vashlv4si3 (neg0, neg0, neg0));

  /* Reinterpret the bit pattern as V4SF for use as the addend.  */
  operands[3] = gen_lowpart (V4SFmode, neg0);
})
700
;; 32-bit integer multiplication, synthesized from 16-bit multiplies.
;; A_high = (Operand_1 & 0xFFFF0000) >> 16
;; A_low = Operand_1 & 0xFFFF
;; B_high = (Operand_2 & 0xFFFF0000) >> 16
;; B_low = Operand_2 & 0xFFFF
;; result = A_low * B_low + ((A_high * B_low + B_high * A_low) << 16)
707
708;; (define_insn "mulv4si3"
709;;   [(set (match_operand:V4SI 0 "register_operand" "=v")
710;;         (mult:V4SI (match_operand:V4SI 1 "register_operand" "v")
711;;                    (match_operand:V4SI 2 "register_operand" "v")))]
;; ISA 2.07 (POWER8) has a single-instruction modular word multiply.
(define_insn "mulv4si3_p8"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (mult:V4SI (match_operand:V4SI 1 "register_operand" "v")
                   (match_operand:V4SI 2 "register_operand" "v")))]
  "TARGET_P8_VECTOR"
  "vmuluwm %0,%1,%2"
  [(set_attr "type" "veccomplex")])
719
;; 32-bit vector multiply.  Use vmuluwm directly on POWER8 and later;
;; otherwise synthesize the modular product from the 16x16->32 multiply
;; and multiply-sum primitives:
;;   result = A_low * B_low + ((A_high * B_low + B_high * A_low) << 16)
(define_expand "mulv4si3"
  [(use (match_operand:V4SI 0 "register_operand" ""))
   (use (match_operand:V4SI 1 "register_operand" ""))
   (use (match_operand:V4SI 2 "register_operand" ""))]
   "TARGET_ALTIVEC"
{
  rtx zero;
  rtx swap;
  rtx small_swap;
  rtx sixteen;
  rtx one;
  rtx two;
  rtx low_product;
  rtx high_product;

  if (TARGET_P8_VECTOR)
    {
      emit_insn (gen_mulv4si3_p8 (operands[0], operands[1], operands[2]));
      DONE;
    }

  zero = gen_reg_rtx (V4SImode);
  emit_insn (gen_altivec_vspltisw (zero, const0_rtx));

  /* Splat -16 into each word for use as a rotate count.  CONST_INTs are
     always VOIDmode, so use GEN_INT rather than passing a (meaningless)
     vector mode to gen_rtx_CONST_INT.  */
  sixteen = gen_reg_rtx (V4SImode);
  emit_insn (gen_altivec_vspltisw (sixteen, GEN_INT (-16)));

  /* swap = operand 2 with the halfwords of each word exchanged.  */
  swap = gen_reg_rtx (V4SImode);
  emit_insn (gen_vrotlv4si3 (swap, operands[2], sixteen));

  one = gen_reg_rtx (V8HImode);
  convert_move (one, operands[1], 0);

  two = gen_reg_rtx (V8HImode);
  convert_move (two, operands[2], 0);

  small_swap = gen_reg_rtx (V8HImode);
  convert_move (small_swap, swap, 0);

  /* low_product = A_low * B_low (full 32-bit products of the odd-index
     halfwords).  */
  low_product = gen_reg_rtx (V4SImode);
  emit_insn (gen_altivec_vmulouh (low_product, one, two));

  /* high_product = A_high * B_low + A_low * B_high, via multiply-sum of
     operand 1 against the halfword-swapped operand 2.  */
  high_product = gen_reg_rtx (V4SImode);
  emit_insn (gen_altivec_vmsumuhm (high_product, one, small_swap, zero));

  emit_insn (gen_vashlv4si3 (high_product, high_product, sixteen));

  emit_insn (gen_addv4si3 (operands[0], high_product, low_product));

  DONE;
})
771
;; 16-bit vector multiply: modular multiply is vmladduhm with a zero
;; addend.
(define_expand "mulv8hi3"
  [(use (match_operand:V8HI 0 "register_operand" ""))
   (use (match_operand:V8HI 1 "register_operand" ""))
   (use (match_operand:V8HI 2 "register_operand" ""))]
   "TARGET_ALTIVEC"
{
  rtx zero = gen_reg_rtx (V8HImode);

  emit_insn (gen_altivec_vspltish (zero, const0_rtx));
  emit_insn (gen_altivec_vmladduhm (operands[0], operands[1], operands[2],
				    zero));

  DONE;
})
785
786;; Fused multiply subtract
;; Negated fused multiply-subtract: op0 = -(op1 * op2 - op3).
(define_insn "*altivec_vnmsubfp"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
	(neg:V4SF
	 (fma:V4SF (match_operand:V4SF 1 "register_operand" "v")
		   (match_operand:V4SF 2 "register_operand" "v")
		   (neg:V4SF
		    (match_operand:V4SF 3 "register_operand" "v")))))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vnmsubfp %0,%1,%2,%3"
  [(set_attr "type" "vecfloat")])
797
;; Unsigned multiply-sum, modular: partial products of op1/op2 elements
;; are summed into each word of op3.
(define_insn "altivec_vmsumu<VI_char>m"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
		      (match_operand:VIshort 2 "register_operand" "v")
                      (match_operand:V4SI 3 "register_operand" "v")]
		     UNSPEC_VMSUMU))]
  "TARGET_ALTIVEC"
  "vmsumu<VI_char>m %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])

;; Mixed-sign multiply-sum, modular.
(define_insn "altivec_vmsumm<VI_char>m"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
		      (match_operand:VIshort 2 "register_operand" "v")
                      (match_operand:V4SI 3 "register_operand" "v")]
		     UNSPEC_VMSUMM))]
  "TARGET_ALTIVEC"
  "vmsumm<VI_char>m %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])

;; Signed halfword multiply-sum, modular.
(define_insn "altivec_vmsumshm"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
		      (match_operand:V8HI 2 "register_operand" "v")
                      (match_operand:V4SI 3 "register_operand" "v")]
		     UNSPEC_VMSUMSHM))]
  "TARGET_ALTIVEC"
  "vmsumshm %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])

;; Unsigned halfword multiply-sum, saturating; may set SAT in the VSCR.
(define_insn "altivec_vmsumuhs"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
		      (match_operand:V8HI 2 "register_operand" "v")
                      (match_operand:V4SI 3 "register_operand" "v")]
		     UNSPEC_VMSUMUHS))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "TARGET_ALTIVEC"
  "vmsumuhs %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])

;; Signed halfword multiply-sum, saturating; may set SAT in the VSCR.
(define_insn "altivec_vmsumshs"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
		      (match_operand:V8HI 2 "register_operand" "v")
                      (match_operand:V4SI 3 "register_operand" "v")]
		     UNSPEC_VMSUMSHS))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "TARGET_ALTIVEC"
  "vmsumshs %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])
849
850;; max
851
;; Unsigned integer vector maximum.
(define_insn "umax<mode>3"
  [(set (match_operand:VI2 0 "register_operand" "=v")
        (umax:VI2 (match_operand:VI2 1 "register_operand" "v")
		  (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vmaxu<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; Signed integer vector maximum.
(define_insn "smax<mode>3"
  [(set (match_operand:VI2 0 "register_operand" "=v")
        (smax:VI2 (match_operand:VI2 1 "register_operand" "v")
		  (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vmaxs<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; Single-precision float vector maximum.
(define_insn "*altivec_smaxv4sf3"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (smax:V4SF (match_operand:V4SF 1 "register_operand" "v")
                   (match_operand:V4SF 2 "register_operand" "v")))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vmaxfp %0,%1,%2"
  [(set_attr "type" "veccmp")])

;; Unsigned integer vector minimum.
(define_insn "umin<mode>3"
  [(set (match_operand:VI2 0 "register_operand" "=v")
        (umin:VI2 (match_operand:VI2 1 "register_operand" "v")
		  (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vminu<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; Signed integer vector minimum.
(define_insn "smin<mode>3"
  [(set (match_operand:VI2 0 "register_operand" "=v")
        (smin:VI2 (match_operand:VI2 1 "register_operand" "v")
		  (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vmins<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; Single-precision float vector minimum.
(define_insn "*altivec_sminv4sf3"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (smin:V4SF (match_operand:V4SF 1 "register_operand" "v")
                   (match_operand:V4SF 2 "register_operand" "v")))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vminfp %0,%1,%2"
  [(set_attr "type" "veccmp")])
899
;; Signed halfword multiply-high-and-add, saturating; may set SAT in
;; the VSCR.
(define_insn "altivec_vmhaddshs"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
		      (match_operand:V8HI 2 "register_operand" "v")
                      (match_operand:V8HI 3 "register_operand" "v")]
		     UNSPEC_VMHADDSHS))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "TARGET_ALTIVEC"
  "vmhaddshs %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])

;; Rounding variant of the above; may set SAT in the VSCR.
(define_insn "altivec_vmhraddshs"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
		      (match_operand:V8HI 2 "register_operand" "v")
                      (match_operand:V8HI 3 "register_operand" "v")]
		     UNSPEC_VMHRADDSHS))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "TARGET_ALTIVEC"
  "vmhraddshs %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])

;; Modular halfword multiply-low-and-add: op0 = op1 * op2 + op3.
(define_insn "altivec_vmladduhm"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (plus:V8HI (mult:V8HI (match_operand:V8HI 1 "register_operand" "v")
		   	      (match_operand:V8HI 2 "register_operand" "v"))
		   (match_operand:V8HI 3 "register_operand" "v")))]
  "TARGET_ALTIVEC"
  "vmladduhm %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])
930
;; Expand vmrghb as explicit RTL: a vec_select over the concatenation of
;; the two inputs, so the permutation is visible to the optimizers.  For
;; LE with -maltivec=be, BE semantics are obtained by selecting the low
;; bytes of the swapped concatenation.
(define_expand "altivec_vmrghb"
  [(use (match_operand:V16QI 0 "register_operand"))
   (use (match_operand:V16QI 1 "register_operand"))
   (use (match_operand:V16QI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  rtvec sel;
  rtx concat;

  if (BYTES_BIG_ENDIAN || !VECTOR_ELT_ORDER_BIG)
    {
      /* Native element order: interleave bytes 0-7 of the two inputs.  */
      sel = gen_rtvec (16, GEN_INT (0), GEN_INT (16), GEN_INT (1), GEN_INT (17),
		       GEN_INT (2), GEN_INT (18), GEN_INT (3), GEN_INT (19),
		       GEN_INT (4), GEN_INT (20), GEN_INT (5), GEN_INT (21),
		       GEN_INT (6), GEN_INT (22), GEN_INT (7), GEN_INT (23));
      concat = gen_rtx_VEC_CONCAT (V32QImode, operands[1], operands[2]);
    }
  else
    {
      /* LE with -maltivec=be: swap the inputs and take the high bytes.  */
      sel = gen_rtvec (16, GEN_INT (8), GEN_INT (24), GEN_INT (9), GEN_INT (25),
		       GEN_INT (10), GEN_INT (26), GEN_INT (11), GEN_INT (27),
		       GEN_INT (12), GEN_INT (28), GEN_INT (13), GEN_INT (29),
		       GEN_INT (14), GEN_INT (30), GEN_INT (15), GEN_INT (31));
      concat = gen_rtx_VEC_CONCAT (V32QImode, operands[2], operands[1]);
    }

  emit_insn (gen_rtx_SET (operands[0],
			  gen_rtx_VEC_SELECT (V16QImode, concat,
					      gen_rtx_PARALLEL (VOIDmode,
								sel))));
  DONE;
})
962
;; Merge-high byte.  The RTL encodes BE element numbering; on true LE the
;; same register result is produced by vmrglb with the inputs swapped.
(define_insn "*altivec_vmrghb_internal"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
        (vec_select:V16QI
	  (vec_concat:V32QI
	    (match_operand:V16QI 1 "register_operand" "v")
	    (match_operand:V16QI 2 "register_operand" "v"))
	  (parallel [(const_int 0) (const_int 16)
		     (const_int 1) (const_int 17)
		     (const_int 2) (const_int 18)
		     (const_int 3) (const_int 19)
		     (const_int 4) (const_int 20)
		     (const_int 5) (const_int 21)
		     (const_int 6) (const_int 22)
		     (const_int 7) (const_int 23)])))]
  "TARGET_ALTIVEC"
{
  if (BYTES_BIG_ENDIAN)
    return "vmrghb %0,%1,%2";
  else
    return "vmrglb %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

;; Raw vmrghb with no endian adjustment, for built-in/direct use.
(define_insn "altivec_vmrghb_direct"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
        (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
                       (match_operand:V16QI 2 "register_operand" "v")]
		      UNSPEC_VMRGH_DIRECT))]
  "TARGET_ALTIVEC"
  "vmrghb %0,%1,%2"
  [(set_attr "type" "vecperm")])
994
;; Expand vmrghh as explicit merge-high RTL (vec_select of a
;; concatenation), with the LE -maltivec=be case handled by swapping the
;; inputs and selecting the other half.
(define_expand "altivec_vmrghh"
  [(use (match_operand:V8HI 0 "register_operand"))
   (use (match_operand:V8HI 1 "register_operand"))
   (use (match_operand:V8HI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  rtvec sel;
  rtx concat;

  if (BYTES_BIG_ENDIAN || !VECTOR_ELT_ORDER_BIG)
    {
      /* Native element order: interleave halfwords 0-3 of the inputs.  */
      sel = gen_rtvec (8, GEN_INT (0), GEN_INT (8), GEN_INT (1), GEN_INT (9),
		       GEN_INT (2), GEN_INT (10), GEN_INT (3), GEN_INT (11));
      concat = gen_rtx_VEC_CONCAT (V16HImode, operands[1], operands[2]);
    }
  else
    {
      /* LE with -maltivec=be: swapped inputs, high halfwords.  */
      sel = gen_rtvec (8, GEN_INT (4), GEN_INT (12), GEN_INT (5), GEN_INT (13),
		       GEN_INT (6), GEN_INT (14), GEN_INT (7), GEN_INT (15));
      concat = gen_rtx_VEC_CONCAT (V16HImode, operands[2], operands[1]);
    }

  emit_insn (gen_rtx_SET (operands[0],
			  gen_rtx_VEC_SELECT (V8HImode, concat,
					      gen_rtx_PARALLEL (VOIDmode,
								sel))));
  DONE;
})
1022
;; Merge-high halfword; BE numbering in the RTL, vmrglh with swapped
;; operands on true LE.
(define_insn "*altivec_vmrghh_internal"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (vec_select:V8HI
	  (vec_concat:V16HI
	    (match_operand:V8HI 1 "register_operand" "v")
	    (match_operand:V8HI 2 "register_operand" "v"))
	  (parallel [(const_int 0) (const_int 8)
		     (const_int 1) (const_int 9)
		     (const_int 2) (const_int 10)
		     (const_int 3) (const_int 11)])))]
  "TARGET_ALTIVEC"
{
  if (BYTES_BIG_ENDIAN)
    return "vmrghh %0,%1,%2";
  else
    return "vmrglh %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

;; Raw vmrghh with no endian adjustment.
(define_insn "altivec_vmrghh_direct"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
                      (match_operand:V8HI 2 "register_operand" "v")]
                     UNSPEC_VMRGH_DIRECT))]
  "TARGET_ALTIVEC"
  "vmrghh %0,%1,%2"
  [(set_attr "type" "vecperm")])
1050
;; Expand vmrghw as explicit merge-high RTL; LE with -maltivec=be swaps
;; the inputs and selects the opposite half.
(define_expand "altivec_vmrghw"
  [(use (match_operand:V4SI 0 "register_operand"))
   (use (match_operand:V4SI 1 "register_operand"))
   (use (match_operand:V4SI 2 "register_operand"))]
  "VECTOR_MEM_ALTIVEC_P (V4SImode)"
{
  rtvec sel;
  rtx concat;

  if (BYTES_BIG_ENDIAN || !VECTOR_ELT_ORDER_BIG)
    {
      /* Native element order: interleave words 0-1 of the inputs.  */
      sel = gen_rtvec (4, GEN_INT (0), GEN_INT (4), GEN_INT (1), GEN_INT (5));
      concat = gen_rtx_VEC_CONCAT (V8SImode, operands[1], operands[2]);
    }
  else
    {
      /* LE with -maltivec=be: swapped inputs, high words.  */
      sel = gen_rtvec (4, GEN_INT (2), GEN_INT (6), GEN_INT (3), GEN_INT (7));
      concat = gen_rtx_VEC_CONCAT (V8SImode, operands[2], operands[1]);
    }

  emit_insn (gen_rtx_SET (operands[0],
			  gen_rtx_VEC_SELECT (V4SImode, concat,
					      gen_rtx_PARALLEL (VOIDmode,
								sel))));
  DONE;
})
1076
;; Merge-high word; BE numbering in the RTL, vmrglw with swapped operands
;; on true LE.
(define_insn "*altivec_vmrghw_internal"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (vec_select:V4SI
	  (vec_concat:V8SI
	    (match_operand:V4SI 1 "register_operand" "v")
	    (match_operand:V4SI 2 "register_operand" "v"))
	  (parallel [(const_int 0) (const_int 4)
		     (const_int 1) (const_int 5)])))]
  "VECTOR_MEM_ALTIVEC_P (V4SImode)"
{
  if (BYTES_BIG_ENDIAN)
    return "vmrghw %0,%1,%2";
  else
    return "vmrglw %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

;; Raw vmrghw with no endian adjustment.
(define_insn "altivec_vmrghw_direct"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
                      (match_operand:V4SI 2 "register_operand" "v")]
                     UNSPEC_VMRGH_DIRECT))]
  "TARGET_ALTIVEC"
  "vmrghw %0,%1,%2"
  [(set_attr "type" "vecperm")])

;; V4SF variant of merge-high word; same vmrghw/vmrglw instructions,
;; float mode in the RTL only.
(define_insn "*altivec_vmrghsf"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (vec_select:V4SF
	  (vec_concat:V8SF
	    (match_operand:V4SF 1 "register_operand" "v")
	    (match_operand:V4SF 2 "register_operand" "v"))
	  (parallel [(const_int 0) (const_int 4)
		     (const_int 1) (const_int 5)])))]
  "VECTOR_MEM_ALTIVEC_P (V4SFmode)"
{
  if (BYTES_BIG_ENDIAN)
    return "vmrghw %0,%1,%2";
  else
    return "vmrglw %0,%2,%1";
}
  [(set_attr "type" "vecperm")])
1119
;; Expand vmrglb as explicit merge-low RTL (vec_select of a
;; concatenation); LE with -maltivec=be swaps the inputs and selects the
;; opposite half.
(define_expand "altivec_vmrglb"
  [(use (match_operand:V16QI 0 "register_operand"))
   (use (match_operand:V16QI 1 "register_operand"))
   (use (match_operand:V16QI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  rtvec sel;
  rtx concat;

  if (BYTES_BIG_ENDIAN || !VECTOR_ELT_ORDER_BIG)
    {
      /* Native element order: interleave bytes 8-15 of the two inputs.  */
      sel = gen_rtvec (16, GEN_INT (8), GEN_INT (24), GEN_INT (9), GEN_INT (25),
		       GEN_INT (10), GEN_INT (26), GEN_INT (11), GEN_INT (27),
		       GEN_INT (12), GEN_INT (28), GEN_INT (13), GEN_INT (29),
		       GEN_INT (14), GEN_INT (30), GEN_INT (15), GEN_INT (31));
      concat = gen_rtx_VEC_CONCAT (V32QImode, operands[1], operands[2]);
    }
  else
    {
      /* LE with -maltivec=be: swap the inputs and take the low bytes.  */
      sel = gen_rtvec (16, GEN_INT (0), GEN_INT (16), GEN_INT (1), GEN_INT (17),
		       GEN_INT (2), GEN_INT (18), GEN_INT (3), GEN_INT (19),
		       GEN_INT (4), GEN_INT (20), GEN_INT (5), GEN_INT (21),
		       GEN_INT (6), GEN_INT (22), GEN_INT (7), GEN_INT (23));
      concat = gen_rtx_VEC_CONCAT (V32QImode, operands[2], operands[1]);
    }

  emit_insn (gen_rtx_SET (operands[0],
			  gen_rtx_VEC_SELECT (V16QImode, concat,
					      gen_rtx_PARALLEL (VOIDmode,
								sel))));
  DONE;
})
1151
;; Merge-low byte; BE numbering in the RTL, vmrghb with swapped operands
;; on true LE.
(define_insn "*altivec_vmrglb_internal"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
        (vec_select:V16QI
	  (vec_concat:V32QI
	    (match_operand:V16QI 1 "register_operand" "v")
	    (match_operand:V16QI 2 "register_operand" "v"))
	  (parallel [(const_int  8) (const_int 24)
		     (const_int  9) (const_int 25)
		     (const_int 10) (const_int 26)
		     (const_int 11) (const_int 27)
		     (const_int 12) (const_int 28)
		     (const_int 13) (const_int 29)
		     (const_int 14) (const_int 30)
		     (const_int 15) (const_int 31)])))]
  "TARGET_ALTIVEC"
{
  if (BYTES_BIG_ENDIAN)
    return "vmrglb %0,%1,%2";
  else
    return "vmrghb %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

;; Raw vmrglb with no endian adjustment.
(define_insn "altivec_vmrglb_direct"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
        (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
    		       (match_operand:V16QI 2 "register_operand" "v")]
                      UNSPEC_VMRGL_DIRECT))]
  "TARGET_ALTIVEC"
  "vmrglb %0,%1,%2"
  [(set_attr "type" "vecperm")])
1183
;; Expand vmrglh as explicit merge-low RTL; LE with -maltivec=be swaps
;; the inputs and selects the opposite half.
(define_expand "altivec_vmrglh"
  [(use (match_operand:V8HI 0 "register_operand"))
   (use (match_operand:V8HI 1 "register_operand"))
   (use (match_operand:V8HI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  rtvec sel;
  rtx concat;

  if (BYTES_BIG_ENDIAN || !VECTOR_ELT_ORDER_BIG)
    {
      /* Native element order: interleave halfwords 4-7 of the inputs.  */
      sel = gen_rtvec (8, GEN_INT (4), GEN_INT (12), GEN_INT (5), GEN_INT (13),
		       GEN_INT (6), GEN_INT (14), GEN_INT (7), GEN_INT (15));
      concat = gen_rtx_VEC_CONCAT (V16HImode, operands[1], operands[2]);
    }
  else
    {
      /* LE with -maltivec=be: swapped inputs, low halfwords.  */
      sel = gen_rtvec (8, GEN_INT (0), GEN_INT (8), GEN_INT (1), GEN_INT (9),
		       GEN_INT (2), GEN_INT (10), GEN_INT (3), GEN_INT (11));
      concat = gen_rtx_VEC_CONCAT (V16HImode, operands[2], operands[1]);
    }

  emit_insn (gen_rtx_SET (operands[0],
			  gen_rtx_VEC_SELECT (V8HImode, concat,
					      gen_rtx_PARALLEL (VOIDmode,
								sel))));
  DONE;
})
1211
;; Merge-low halfword; BE numbering in the RTL, vmrghh with swapped
;; operands on true LE.
(define_insn "*altivec_vmrglh_internal"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (vec_select:V8HI
	  (vec_concat:V16HI
	    (match_operand:V8HI 1 "register_operand" "v")
	    (match_operand:V8HI 2 "register_operand" "v"))
	  (parallel [(const_int 4) (const_int 12)
		     (const_int 5) (const_int 13)
		     (const_int 6) (const_int 14)
		     (const_int 7) (const_int 15)])))]
  "TARGET_ALTIVEC"
{
  if (BYTES_BIG_ENDIAN)
    return "vmrglh %0,%1,%2";
  else
    return "vmrghh %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

;; Raw vmrglh with no endian adjustment.
(define_insn "altivec_vmrglh_direct"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
		      (match_operand:V8HI 2 "register_operand" "v")]
                     UNSPEC_VMRGL_DIRECT))]
  "TARGET_ALTIVEC"
  "vmrglh %0,%1,%2"
  [(set_attr "type" "vecperm")])
1239
;; Expand vmrglw as explicit merge-low RTL; LE with -maltivec=be swaps
;; the inputs and selects the opposite half.
(define_expand "altivec_vmrglw"
  [(use (match_operand:V4SI 0 "register_operand"))
   (use (match_operand:V4SI 1 "register_operand"))
   (use (match_operand:V4SI 2 "register_operand"))]
  "VECTOR_MEM_ALTIVEC_P (V4SImode)"
{
  rtvec sel;
  rtx concat;

  if (BYTES_BIG_ENDIAN || !VECTOR_ELT_ORDER_BIG)
    {
      /* Native element order: interleave words 2-3 of the inputs.  */
      sel = gen_rtvec (4, GEN_INT (2), GEN_INT (6), GEN_INT (3), GEN_INT (7));
      concat = gen_rtx_VEC_CONCAT (V8SImode, operands[1], operands[2]);
    }
  else
    {
      /* LE with -maltivec=be: swapped inputs, low words.  */
      sel = gen_rtvec (4, GEN_INT (0), GEN_INT (4), GEN_INT (1), GEN_INT (5));
      concat = gen_rtx_VEC_CONCAT (V8SImode, operands[2], operands[1]);
    }

  emit_insn (gen_rtx_SET (operands[0],
			  gen_rtx_VEC_SELECT (V4SImode, concat,
					      gen_rtx_PARALLEL (VOIDmode,
								sel))));
  DONE;
})
1265
;; Merge-low word; BE numbering in the RTL, vmrghw with swapped operands
;; on true LE.
(define_insn "*altivec_vmrglw_internal"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (vec_select:V4SI
	  (vec_concat:V8SI
	    (match_operand:V4SI 1 "register_operand" "v")
	    (match_operand:V4SI 2 "register_operand" "v"))
	  (parallel [(const_int 2) (const_int 6)
		     (const_int 3) (const_int 7)])))]
  "VECTOR_MEM_ALTIVEC_P (V4SImode)"
{
  if (BYTES_BIG_ENDIAN)
    return "vmrglw %0,%1,%2";
  else
    return "vmrghw %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

;; Raw vmrglw with no endian adjustment.
(define_insn "altivec_vmrglw_direct"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
	              (match_operand:V4SI 2 "register_operand" "v")]
                     UNSPEC_VMRGL_DIRECT))]
  "TARGET_ALTIVEC"
  "vmrglw %0,%1,%2"
  [(set_attr "type" "vecperm")])

;; V4SF variant of merge-low word; same vmrglw/vmrghw instructions,
;; float mode in the RTL only.
(define_insn "*altivec_vmrglsf"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (vec_select:V4SF
	 (vec_concat:V8SF
	   (match_operand:V4SF 1 "register_operand" "v")
	   (match_operand:V4SF 2 "register_operand" "v"))
	 (parallel [(const_int 2) (const_int 6)
		    (const_int 3) (const_int 7)])))]
  "VECTOR_MEM_ALTIVEC_P (V4SFmode)"
{
  if (BYTES_BIG_ENDIAN)
    return "vmrglw %0,%1,%2";
  else
    return "vmrghw %0,%2,%1";
}
  [(set_attr "type" "vecperm")])
1308
1309;; Power8 vector merge even/odd
;; Merge even words (ISA 2.07).  BE numbering in the RTL; on true LE the
;; same result comes from vmrgow with the inputs swapped.
(define_insn "p8_vmrgew"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
	(vec_select:V4SI
	  (vec_concat:V8SI
	    (match_operand:V4SI 1 "register_operand" "v")
	    (match_operand:V4SI 2 "register_operand" "v"))
	  (parallel [(const_int 0) (const_int 4)
		     (const_int 2) (const_int 6)])))]
  "TARGET_P8_VECTOR"
{
  if (BYTES_BIG_ENDIAN)
    return "vmrgew %0,%1,%2";
  else
    return "vmrgow %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

;; Merge odd words; the dual of p8_vmrgew.
(define_insn "p8_vmrgow"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
	(vec_select:V4SI
	  (vec_concat:V8SI
	    (match_operand:V4SI 1 "register_operand" "v")
	    (match_operand:V4SI 2 "register_operand" "v"))
	  (parallel [(const_int 1) (const_int 5)
		     (const_int 3) (const_int 7)])))]
  "TARGET_P8_VECTOR"
{
  if (BYTES_BIG_ENDIAN)
    return "vmrgow %0,%1,%2";
  else
    return "vmrgew %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

;; Raw vmrgew on V4SF with no endian adjustment.
(define_insn "p8_vmrgew_v4sf_direct"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
	(unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")
		      (match_operand:V4SF 2 "register_operand" "v")]
		     UNSPEC_VMRGEW_DIRECT))]
  "TARGET_P8_VECTOR"
  "vmrgew %0,%1,%2"
  [(set_attr "type" "vecperm")])
1352
;; Widening even/odd multiply expanders.  "Even" and "odd" in the optab
;; names refer to GCC's element numbering; when the element order is
;; reversed (true LE without -maltivec=be) the opposite hardware
;; instruction produces the requested elements.

(define_expand "vec_widen_umult_even_v16qi"
  [(use (match_operand:V8HI 0 "register_operand"))
   (use (match_operand:V16QI 1 "register_operand"))
   (use (match_operand:V16QI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  if (!VECTOR_ELT_ORDER_BIG)
    emit_insn (gen_altivec_vmuloub (operands[0], operands[1], operands[2]));
  else
    emit_insn (gen_altivec_vmuleub (operands[0], operands[1], operands[2]));
  DONE;
})

(define_expand "vec_widen_smult_even_v16qi"
  [(use (match_operand:V8HI 0 "register_operand"))
   (use (match_operand:V16QI 1 "register_operand"))
   (use (match_operand:V16QI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  if (!VECTOR_ELT_ORDER_BIG)
    emit_insn (gen_altivec_vmulosb (operands[0], operands[1], operands[2]));
  else
    emit_insn (gen_altivec_vmulesb (operands[0], operands[1], operands[2]));
  DONE;
})

(define_expand "vec_widen_umult_even_v8hi"
  [(use (match_operand:V4SI 0 "register_operand"))
   (use (match_operand:V8HI 1 "register_operand"))
   (use (match_operand:V8HI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  if (!VECTOR_ELT_ORDER_BIG)
    emit_insn (gen_altivec_vmulouh (operands[0], operands[1], operands[2]));
  else
    emit_insn (gen_altivec_vmuleuh (operands[0], operands[1], operands[2]));
  DONE;
})

(define_expand "vec_widen_smult_even_v8hi"
  [(use (match_operand:V4SI 0 "register_operand"))
   (use (match_operand:V8HI 1 "register_operand"))
   (use (match_operand:V8HI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  if (!VECTOR_ELT_ORDER_BIG)
    emit_insn (gen_altivec_vmulosh (operands[0], operands[1], operands[2]));
  else
    emit_insn (gen_altivec_vmulesh (operands[0], operands[1], operands[2]));
  DONE;
})

(define_expand "vec_widen_umult_odd_v16qi"
  [(use (match_operand:V8HI 0 "register_operand"))
   (use (match_operand:V16QI 1 "register_operand"))
   (use (match_operand:V16QI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  if (!VECTOR_ELT_ORDER_BIG)
    emit_insn (gen_altivec_vmuleub (operands[0], operands[1], operands[2]));
  else
    emit_insn (gen_altivec_vmuloub (operands[0], operands[1], operands[2]));
  DONE;
})

(define_expand "vec_widen_smult_odd_v16qi"
  [(use (match_operand:V8HI 0 "register_operand"))
   (use (match_operand:V16QI 1 "register_operand"))
   (use (match_operand:V16QI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  if (!VECTOR_ELT_ORDER_BIG)
    emit_insn (gen_altivec_vmulesb (operands[0], operands[1], operands[2]));
  else
    emit_insn (gen_altivec_vmulosb (operands[0], operands[1], operands[2]));
  DONE;
})

(define_expand "vec_widen_umult_odd_v8hi"
  [(use (match_operand:V4SI 0 "register_operand"))
   (use (match_operand:V8HI 1 "register_operand"))
   (use (match_operand:V8HI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  if (!VECTOR_ELT_ORDER_BIG)
    emit_insn (gen_altivec_vmuleuh (operands[0], operands[1], operands[2]));
  else
    emit_insn (gen_altivec_vmulouh (operands[0], operands[1], operands[2]));
  DONE;
})

(define_expand "vec_widen_smult_odd_v8hi"
  [(use (match_operand:V4SI 0 "register_operand"))
   (use (match_operand:V8HI 1 "register_operand"))
   (use (match_operand:V8HI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  if (!VECTOR_ELT_ORDER_BIG)
    emit_insn (gen_altivec_vmulesh (operands[0], operands[1], operands[2]));
  else
    emit_insn (gen_altivec_vmulosh (operands[0], operands[1], operands[2]));
  DONE;
})
1456
;; Widening multiply insns.  vmule* multiplies the even-numbered (BE
;; numbering) narrow elements, vmulo* the odd-numbered ones; each result
;; element is twice the width of the inputs.  All are kept as unspecs.
(define_insn "altivec_vmuleub"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
                      (match_operand:V16QI 2 "register_operand" "v")]
		     UNSPEC_VMULEUB))]
  "TARGET_ALTIVEC"
  "vmuleub %0,%1,%2"
  [(set_attr "type" "veccomplex")])

;; Odd unsigned byte products.
(define_insn "altivec_vmuloub"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
                      (match_operand:V16QI 2 "register_operand" "v")]
		     UNSPEC_VMULOUB))]
  "TARGET_ALTIVEC"
  "vmuloub %0,%1,%2"
  [(set_attr "type" "veccomplex")])

;; Even signed byte products.
(define_insn "altivec_vmulesb"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
                      (match_operand:V16QI 2 "register_operand" "v")]
		     UNSPEC_VMULESB))]
  "TARGET_ALTIVEC"
  "vmulesb %0,%1,%2"
  [(set_attr "type" "veccomplex")])

;; Odd signed byte products.
(define_insn "altivec_vmulosb"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
                      (match_operand:V16QI 2 "register_operand" "v")]
		     UNSPEC_VMULOSB))]
  "TARGET_ALTIVEC"
  "vmulosb %0,%1,%2"
  [(set_attr "type" "veccomplex")])

;; Even unsigned halfword products.
(define_insn "altivec_vmuleuh"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
                      (match_operand:V8HI 2 "register_operand" "v")]
		     UNSPEC_VMULEUH))]
  "TARGET_ALTIVEC"
  "vmuleuh %0,%1,%2"
  [(set_attr "type" "veccomplex")])

;; Odd unsigned halfword products.
(define_insn "altivec_vmulouh"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
                      (match_operand:V8HI 2 "register_operand" "v")]
		     UNSPEC_VMULOUH))]
  "TARGET_ALTIVEC"
  "vmulouh %0,%1,%2"
  [(set_attr "type" "veccomplex")])

;; Even signed halfword products.
(define_insn "altivec_vmulesh"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
                      (match_operand:V8HI 2 "register_operand" "v")]
		     UNSPEC_VMULESH))]
  "TARGET_ALTIVEC"
  "vmulesh %0,%1,%2"
  [(set_attr "type" "veccomplex")])

;; Odd signed halfword products.
(define_insn "altivec_vmulosh"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
                      (match_operand:V8HI 2 "register_operand" "v")]
		     UNSPEC_VMULOSH))]
  "TARGET_ALTIVEC"
  "vmulosh %0,%1,%2"
  [(set_attr "type" "veccomplex")])
1528
1529
1530;; Vector pack/unpack
;; Pack pixel.  For little-endian element order the inputs are swapped so
;; the packed result has the requested element order.
;; Modernized from the legacy "* { ... }" escaped-string output template
;; to the brace form used by the other templates in this file.
(define_insn "altivec_vpkpx"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v")
                      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VPKPX))]
  "TARGET_ALTIVEC"
{
  if (VECTOR_ELT_ORDER_BIG)
    return "vpkpx %0,%1,%2";
  else
    return "vpkpx %0,%2,%1";
}
  [(set_attr "type" "vecperm")])
1545
;; Pack signed to signed with saturation; inputs swapped for LE element
;; order.  Converted from the legacy "* { ... }" output template to the
;; brace form used elsewhere in this file.
(define_insn "altivec_vpks<VI_char>ss"
  [(set (match_operand:<VP_small> 0 "register_operand" "=v")
	(unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
			    (match_operand:VP 2 "register_operand" "v")]
			   UNSPEC_VPACK_SIGN_SIGN_SAT))]
  "<VI_unit>"
{
  if (VECTOR_ELT_ORDER_BIG)
    return "vpks<VI_char>ss %0,%1,%2";
  else
    return "vpks<VI_char>ss %0,%2,%1";
}
  [(set_attr "type" "vecperm")])
1560
;; Pack signed to unsigned with saturation; inputs swapped for LE element
;; order.  Converted from the legacy "* { ... }" output template to the
;; brace form used elsewhere in this file.
(define_insn "altivec_vpks<VI_char>us"
  [(set (match_operand:<VP_small> 0 "register_operand" "=v")
	(unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
			    (match_operand:VP 2 "register_operand" "v")]
			   UNSPEC_VPACK_SIGN_UNS_SAT))]
  "<VI_unit>"
{
  if (VECTOR_ELT_ORDER_BIG)
    return "vpks<VI_char>us %0,%1,%2";
  else
    return "vpks<VI_char>us %0,%2,%1";
}
  [(set_attr "type" "vecperm")])
1575
;; Pack unsigned to unsigned with saturation; inputs swapped for LE
;; element order.  Converted from the legacy "* { ... }" output template
;; to the brace form used elsewhere in this file.
(define_insn "altivec_vpku<VI_char>us"
  [(set (match_operand:<VP_small> 0 "register_operand" "=v")
	(unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
			    (match_operand:VP 2 "register_operand" "v")]
			   UNSPEC_VPACK_UNS_UNS_SAT))]
  "<VI_unit>"
{
  if (VECTOR_ELT_ORDER_BIG)
    return "vpku<VI_char>us %0,%1,%2";
  else
    return "vpku<VI_char>us %0,%2,%1";
}
  [(set_attr "type" "vecperm")])
1590
;; Pack unsigned, modulo (truncating); inputs swapped for LE element
;; order.  Converted from the legacy "* { ... }" output template to the
;; brace form used elsewhere in this file.
(define_insn "altivec_vpku<VI_char>um"
  [(set (match_operand:<VP_small> 0 "register_operand" "=v")
	(unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
			    (match_operand:VP 2 "register_operand" "v")]
			   UNSPEC_VPACK_UNS_UNS_MOD))]
  "<VI_unit>"
{
  if (VECTOR_ELT_ORDER_BIG)
    return "vpku<VI_char>um %0,%1,%2";
  else
    return "vpku<VI_char>um %0,%2,%1";
}
  [(set_attr "type" "vecperm")])
1605
;; Direct-use variant of vpku<VI_char>um: keyed on BYTES_BIG_ENDIAN
;; rather than the logical element order.  Converted from the legacy
;; "* { ... }" output template to the brace form used elsewhere in this
;; file.
(define_insn "altivec_vpku<VI_char>um_direct"
  [(set (match_operand:<VP_small> 0 "register_operand" "=v")
	(unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
			    (match_operand:VP 2 "register_operand" "v")]
			   UNSPEC_VPACK_UNS_UNS_MOD_DIRECT))]
  "<VI_unit>"
{
  if (BYTES_BIG_ENDIAN)
    return "vpku<VI_char>um %0,%1,%2";
  else
    return "vpku<VI_char>um %0,%2,%1";
}
  [(set_attr "type" "vecperm")])
1620
;; Element-wise rotate left, vrl[bhwd]; shift counts come from the
;; corresponding elements of operand 2.
(define_insn "*altivec_vrl<VI_char>"
  [(set (match_operand:VI2 0 "register_operand" "=v")
        (rotate:VI2 (match_operand:VI2 1 "register_operand" "v")
		    (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vrl<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; ISA 3.0 rotate-left then mask-insert; operand 1 ("0" constraint) is
;; both an input and the destination register the result is inserted into.
(define_insn "altivec_vrl<VI_char>mi"
  [(set (match_operand:VIlong 0 "register_operand" "=v")
        (unspec:VIlong [(match_operand:VIlong 1 "register_operand" "0")
	                (match_operand:VIlong 2 "register_operand" "v")
		        (match_operand:VIlong 3 "register_operand" "v")]
		       UNSPEC_VRLMI))]
  "TARGET_P9_VECTOR"
  "vrl<VI_char>mi %0,%2,%3"
  [(set_attr "type" "veclogical")])

;; ISA 3.0 rotate-left then AND with mask.
(define_insn "altivec_vrl<VI_char>nm"
  [(set (match_operand:VIlong 0 "register_operand" "=v")
        (unspec:VIlong [(match_operand:VIlong 1 "register_operand" "v")
		        (match_operand:VIlong 2 "register_operand" "v")]
		       UNSPEC_VRLNM))]
  "TARGET_P9_VECTOR"
  "vrl<VI_char>nm %0,%1,%2"
  [(set_attr "type" "veclogical")])
1647
;; Whole-register shift left (vsl); kept as an unspec.
(define_insn "altivec_vsl"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
                      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VSLV4SI))]
  "TARGET_ALTIVEC"
  "vsl %0,%1,%2"
  [(set_attr "type" "vecperm")])

;; Shift left by octet (vslo).
(define_insn "altivec_vslo"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
                      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VSLO))]
  "TARGET_ALTIVEC"
  "vslo %0,%1,%2"
  [(set_attr "type" "vecperm")])

;; ISA 3.0 vector shift left variable (vslv).
(define_insn "vslv"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
	(unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
		       (match_operand:V16QI 2 "register_operand" "v")]
         UNSPEC_VSLV))]
  "TARGET_P9_VECTOR"
  "vslv %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; ISA 3.0 vector shift right variable (vsrv).
(define_insn "vsrv"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
	(unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
		       (match_operand:V16QI 2 "register_operand" "v")]
         UNSPEC_VSRV))]
  "TARGET_P9_VECTOR"
  "vsrv %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; Element-wise shift left, vsl[bhwd].
(define_insn "*altivec_vsl<VI_char>"
  [(set (match_operand:VI2 0 "register_operand" "=v")
        (ashift:VI2 (match_operand:VI2 1 "register_operand" "v")
		    (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vsl<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; Element-wise logical shift right, vsr[bhwd].
(define_insn "*altivec_vsr<VI_char>"
  [(set (match_operand:VI2 0 "register_operand" "=v")
        (lshiftrt:VI2 (match_operand:VI2 1 "register_operand" "v")
		      (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vsr<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; Element-wise arithmetic shift right, vsra[bhwd].
(define_insn "*altivec_vsra<VI_char>"
  [(set (match_operand:VI2 0 "register_operand" "=v")
        (ashiftrt:VI2 (match_operand:VI2 1 "register_operand" "v")
		      (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vsra<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; Whole-register shift right (vsr); kept as an unspec.
(define_insn "altivec_vsr"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
                      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VSR))]
  "TARGET_ALTIVEC"
  "vsr %0,%1,%2"
  [(set_attr "type" "vecperm")])

;; Shift right by octet (vsro).
(define_insn "altivec_vsro"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
                      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VSRO))]
  "TARGET_ALTIVEC"
  "vsro %0,%1,%2"
  [(set_attr "type" "vecperm")])
1725
;; vsum4ubs: per-word sums of unsigned bytes added to operand 2, with
;; saturation; VSCR is set to record saturation.
(define_insn "altivec_vsum4ubs"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")
                      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VSUM4UBS))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "TARGET_ALTIVEC"
  "vsum4ubs %0,%1,%2"
  [(set_attr "type" "veccomplex")])

;; vsum4sbs/vsum4shs: the signed byte/halfword variants, via the VIshort
;; iterator; also set VSCR on saturation.
(define_insn "altivec_vsum4s<VI_char>s"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
                      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VSUM4S))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "TARGET_ALTIVEC"
  "vsum4s<VI_char>s %0,%1,%2"
  [(set_attr "type" "veccomplex")])
1745
1746;; FIXME: For the following two patterns, the scratch should only be
1747;; allocated for !VECTOR_ELT_ORDER_BIG, and the instructions should
1748;; be emitted separately.
;; vsum2sws: saturated sums across word pairs; sets VSCR.  For reversed
;; element order a three-insn sequence repositions the inputs and the
;; result through the scratch register, hence the variable length below.
(define_insn "altivec_vsum2sws"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
                      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VSUM2SWS))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))
   (clobber (match_scratch:V4SI 3 "=v"))]
  "TARGET_ALTIVEC"
{
  if (VECTOR_ELT_ORDER_BIG)
    return "vsum2sws %0,%1,%2";
  else
    return "vsldoi %3,%2,%2,12\n\tvsum2sws %3,%1,%3\n\tvsldoi %0,%3,%3,4";
}
  [(set_attr "type" "veccomplex")
   (set (attr "length")
     (if_then_else
       (match_test "VECTOR_ELT_ORDER_BIG")
       (const_string "4")
       (const_string "12")))])
1769
;; vsumsws: saturated sum across all words plus a word of operand 2; sets
;; VSCR.  For reversed element order a splat/sum/shift sequence through
;; the scratch register repositions the input and result words, hence the
;; variable length.
;; The match_test string is normalized to drop the redundant parentheses
;; so it matches the identical test in altivec_vsum2sws above.
(define_insn "altivec_vsumsws"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
                      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VSUMSWS))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))
   (clobber (match_scratch:V4SI 3 "=v"))]
  "TARGET_ALTIVEC"
{
  if (VECTOR_ELT_ORDER_BIG)
    return "vsumsws %0,%1,%2";
  else
    return "vspltw %3,%2,0\n\tvsumsws %3,%1,%3\n\tvsldoi %0,%3,%3,12";
}
  [(set_attr "type" "veccomplex")
   (set (attr "length")
     (if_then_else
       (match_test "VECTOR_ELT_ORDER_BIG")
       (const_string "4")
       (const_string "12")))])
1790
;; "Direct" vsumsws: always emits the bare instruction with no
;; element-order compensation, for callers that have already accounted
;; for endianness.  Distinguished from altivec_vsumsws by its own
;; unspec so the two patterns cannot be combined.
(define_insn "altivec_vsumsws_direct"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
                      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VSUMSWS_DIRECT))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "TARGET_ALTIVEC"
  "vsumsws %0,%1,%2"
  [(set_attr "type" "veccomplex")])
1800
;; Splat byte: expand to a vec_duplicate of a vec_select of element 2
;; of operand 1.  Operand 2 is a 5-bit unsigned element number.
(define_expand "altivec_vspltb"
  [(use (match_operand:V16QI 0 "register_operand" ""))
   (use (match_operand:V16QI 1 "register_operand" ""))
   (use (match_operand:QI 2 "u5bit_cint_operand" ""))]
  "TARGET_ALTIVEC"
{
  rtvec v;
  rtx x;

  /* Special handling for LE with -maltivec=be.  We have to reflect
     the actual selected index for the splat in the RTL.  */
  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
    operands[2] = GEN_INT (15 - INTVAL (operands[2]));

  /* Build (vec_duplicate (vec_select op1 (parallel [op2]))) and emit it
     as a plain SET so the _internal insn below matches it.  */
  v = gen_rtvec (1, operands[2]);
  x = gen_rtx_VEC_SELECT (QImode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
  x = gen_rtx_VEC_DUPLICATE (V16QImode, x);
  emit_insn (gen_rtx_SET (operands[0], x));
  DONE;
})
1821
;; Matcher for the splat-byte RTL built by the expander above (and by
;; generic vec_duplicate generation); emits vspltb with the element
;; number converted to the instruction's big-endian numbering.
(define_insn "*altivec_vspltb_internal"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
        (vec_duplicate:V16QI
	 (vec_select:QI (match_operand:V16QI 1 "register_operand" "v")
			(parallel
			 [(match_operand:QI 2 "u5bit_cint_operand" "")]))))]
  "TARGET_ALTIVEC"
{
  /* For true LE, this adjusts the selected index.  For LE with
     -maltivec=be, this reverses what was done in the define_expand
     because the instruction already has big-endian bias.  */
  if (!BYTES_BIG_ENDIAN)
    operands[2] = GEN_INT (15 - INTVAL (operands[2]));

  return "vspltb %0,%1,%2";
}
  [(set_attr "type" "vecperm")])
1839
;; "Direct" splat byte: the element number in operand 2 is emitted
;; verbatim with no endian adjustment; the unspec hides the semantics
;; from the optimizers.
(define_insn "altivec_vspltb_direct"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
        (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
	               (match_operand:QI 2 "u5bit_cint_operand" "i")]
                      UNSPEC_VSPLT_DIRECT))]
  "TARGET_ALTIVEC"
  "vspltb %0,%1,%2"
  [(set_attr "type" "vecperm")])
1848
;; Splat halfword: same scheme as altivec_vspltb, with 8 elements
;; (hence the 7 - n index reflection).
(define_expand "altivec_vsplth"
  [(use (match_operand:V8HI 0 "register_operand" ""))
   (use (match_operand:V8HI 1 "register_operand" ""))
   (use (match_operand:QI 2 "u5bit_cint_operand" ""))]
  "TARGET_ALTIVEC"
{
  rtvec v;
  rtx x;

  /* Special handling for LE with -maltivec=be.  We have to reflect
     the actual selected index for the splat in the RTL.  */
  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
    operands[2] = GEN_INT (7 - INTVAL (operands[2]));

  /* Build (vec_duplicate (vec_select op1 (parallel [op2]))).  */
  v = gen_rtvec (1, operands[2]);
  x = gen_rtx_VEC_SELECT (HImode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
  x = gen_rtx_VEC_DUPLICATE (V8HImode, x);
  emit_insn (gen_rtx_SET (operands[0], x));
  DONE;
})
1869
;; Matcher for splat-halfword RTL; emits vsplth with the element number
;; converted to the instruction's big-endian numbering.
(define_insn "*altivec_vsplth_internal"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
	(vec_duplicate:V8HI
	 (vec_select:HI (match_operand:V8HI 1 "register_operand" "v")
			(parallel
			 [(match_operand:QI 2 "u5bit_cint_operand" "")]))))]
  "TARGET_ALTIVEC"
{
  /* For true LE, this adjusts the selected index.  For LE with
     -maltivec=be, this reverses what was done in the define_expand
     because the instruction already has big-endian bias.  */
  if (!BYTES_BIG_ENDIAN)
    operands[2] = GEN_INT (7 - INTVAL (operands[2]));

  return "vsplth %0,%1,%2";
}
  [(set_attr "type" "vecperm")])
1887
;; "Direct" splat halfword: element number emitted verbatim, no endian
;; adjustment.
(define_insn "altivec_vsplth_direct"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
                      (match_operand:QI 2 "u5bit_cint_operand" "i")]
                     UNSPEC_VSPLT_DIRECT))]
  "TARGET_ALTIVEC"
  "vsplth %0,%1,%2"
  [(set_attr "type" "vecperm")])
1896
;; Splat word: same scheme as altivec_vspltb, with 4 elements
;; (hence the 3 - n index reflection).
(define_expand "altivec_vspltw"
  [(use (match_operand:V4SI 0 "register_operand" ""))
   (use (match_operand:V4SI 1 "register_operand" ""))
   (use (match_operand:QI 2 "u5bit_cint_operand" ""))]
  "TARGET_ALTIVEC"
{
  rtvec v;
  rtx x;

  /* Special handling for LE with -maltivec=be.  We have to reflect
     the actual selected index for the splat in the RTL.  */
  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
    operands[2] = GEN_INT (3 - INTVAL (operands[2]));

  /* Build (vec_duplicate (vec_select op1 (parallel [op2]))).  */
  v = gen_rtvec (1, operands[2]);
  x = gen_rtx_VEC_SELECT (SImode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
  x = gen_rtx_VEC_DUPLICATE (V4SImode, x);
  emit_insn (gen_rtx_SET (operands[0], x));
  DONE;
})
1917
;; Matcher for splat-word RTL; emits vspltw with the element number
;; converted to the instruction's big-endian numbering.
(define_insn "*altivec_vspltw_internal"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
	(vec_duplicate:V4SI
	 (vec_select:SI (match_operand:V4SI 1 "register_operand" "v")
			(parallel
			 [(match_operand:QI 2 "u5bit_cint_operand" "i")]))))]
  "TARGET_ALTIVEC"
{
  /* For true LE, this adjusts the selected index.  For LE with
     -maltivec=be, this reverses what was done in the define_expand
     because the instruction already has big-endian bias.  */
  if (!BYTES_BIG_ENDIAN)
    operands[2] = GEN_INT (3 - INTVAL (operands[2]));

  return "vspltw %0,%1,%2";
}
  [(set_attr "type" "vecperm")])
1935
;; "Direct" splat word: element number emitted verbatim, no endian
;; adjustment.
(define_insn "altivec_vspltw_direct"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
                      (match_operand:QI 2 "u5bit_cint_operand" "i")]
                     UNSPEC_VSPLT_DIRECT))]
  "TARGET_ALTIVEC"
  "vspltw %0,%1,%2"
  [(set_attr "type" "vecperm")])
1944
;; Splat float word: like altivec_vspltw but in SFmode; the insn
;; ultimately emitted is still the integer vspltw (see _internal below).
(define_expand "altivec_vspltsf"
  [(use (match_operand:V4SF 0 "register_operand" ""))
   (use (match_operand:V4SF 1 "register_operand" ""))
   (use (match_operand:QI 2 "u5bit_cint_operand" ""))]
  "TARGET_ALTIVEC"
{
  rtvec v;
  rtx x;

  /* Special handling for LE with -maltivec=be.  We have to reflect
     the actual selected index for the splat in the RTL.  */
  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
    operands[2] = GEN_INT (3 - INTVAL (operands[2]));

  /* Build (vec_duplicate (vec_select op1 (parallel [op2]))).  */
  v = gen_rtvec (1, operands[2]);
  x = gen_rtx_VEC_SELECT (SFmode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
  x = gen_rtx_VEC_DUPLICATE (V4SFmode, x);
  emit_insn (gen_rtx_SET (operands[0], x));
  DONE;
})
1965
;; Matcher for splat of a V4SF element.  Note the output template is the
;; integer vspltw: a word splat moves float bits unchanged.
(define_insn "*altivec_vspltsf_internal"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
	(vec_duplicate:V4SF
	 (vec_select:SF (match_operand:V4SF 1 "register_operand" "v")
			(parallel
			 [(match_operand:QI 2 "u5bit_cint_operand" "i")]))))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
{
  /* For true LE, this adjusts the selected index.  For LE with
     -maltivec=be, this reverses what was done in the define_expand
     because the instruction already has big-endian bias.  */
  if (!BYTES_BIG_ENDIAN)
    operands[2] = GEN_INT (3 - INTVAL (operands[2]));

  return "vspltw %0,%1,%2";
}
  [(set_attr "type" "vecperm")])
1983
;; Splat a 5-bit signed immediate into every element
;; (vspltisb/vspltish/vspltisw per the VI mode).
(define_insn "altivec_vspltis<VI_char>"
  [(set (match_operand:VI 0 "register_operand" "=v")
	(vec_duplicate:VI
	 (match_operand:QI 1 "s5bit_cint_operand" "i")))]
  "TARGET_ALTIVEC"
  "vspltis<VI_char> %0,%1"
  [(set_attr "type" "vecperm")])
1991
;; vrfiz: round each V4SF element to an integral value toward zero,
;; matching RTL truncation (fix).
(define_insn "*altivec_vrfiz"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
	(fix:V4SF (match_operand:V4SF 1 "register_operand" "v")))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vrfiz %0,%1"
  [(set_attr "type" "vecfloat")])
1998
;; vperm: permute bytes of operands 1/2 under control of selector
;; operand 3.  For little-endian element order the whole operation is
;; rewritten by altivec_expand_vec_perm_le; otherwise the pattern falls
;; through to the _internal insn below.
(define_expand "altivec_vperm_<mode>"
  [(set (match_operand:VM 0 "register_operand" "")
	(unspec:VM [(match_operand:VM 1 "register_operand" "")
		    (match_operand:VM 2 "register_operand" "")
		    (match_operand:V16QI 3 "register_operand" "")]
		   UNSPEC_VPERM))]
  "TARGET_ALTIVEC"
{
  if (!VECTOR_ELT_ORDER_BIG)
    {
      altivec_expand_vec_perm_le (operands);
      DONE;
    }
})
2013
;; Slightly prefer vperm, since the target does not overlap the source.
;; Second alternative is VSX xxperm, which requires operand 2 to be tied
;; to the output ("0") because xxperm takes only two source registers.
(define_insn "*altivec_vperm_<mode>_internal"
  [(set (match_operand:VM 0 "register_operand" "=v,?wo")
	(unspec:VM [(match_operand:VM 1 "register_operand" "v,wo")
		    (match_operand:VM 2 "register_operand" "v,0")
		    (match_operand:V16QI 3 "register_operand" "v,wo")]
		   UNSPEC_VPERM))]
  "TARGET_ALTIVEC"
  "@
   vperm %0,%1,%2,%3
   xxperm %x0,%x1,%x3"
  [(set_attr "type" "vecperm")
   (set_attr "length" "4")])
2027
;; Mixed-mode vperm: V8HI sources, V16QI result; same alternatives as
;; the generic internal pattern above.
(define_insn "altivec_vperm_v8hiv16qi"
  [(set (match_operand:V16QI 0 "register_operand" "=v,?wo")
	(unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v,wo")
   	               (match_operand:V8HI 2 "register_operand" "v,0")
		       (match_operand:V16QI 3 "register_operand" "v,wo")]
		   UNSPEC_VPERM))]
  "TARGET_ALTIVEC"
  "@
   vperm %0,%1,%2,%3
   xxperm %x0,%x1,%x3"
  [(set_attr "type" "vecperm")
   (set_attr "length" "4")])
2040
;; Unsigned-flavored vperm expander; identical handling to
;; altivec_vperm_<mode> but tagged UNSPEC_VPERM_UNS.
(define_expand "altivec_vperm_<mode>_uns"
  [(set (match_operand:VM 0 "register_operand" "")
	(unspec:VM [(match_operand:VM 1 "register_operand" "")
		    (match_operand:VM 2 "register_operand" "")
		    (match_operand:V16QI 3 "register_operand" "")]
		   UNSPEC_VPERM_UNS))]
  "TARGET_ALTIVEC"
{
  if (!VECTOR_ELT_ORDER_BIG)
    {
      altivec_expand_vec_perm_le (operands);
      DONE;
    }
})
2055
;; Insn form of the unsigned vperm; alternatives as in the signed
;; internal pattern (vperm, or VSX xxperm with op2 tied to the output).
(define_insn "*altivec_vperm_<mode>_uns_internal"
  [(set (match_operand:VM 0 "register_operand" "=v,?wo")
	(unspec:VM [(match_operand:VM 1 "register_operand" "v,wo")
		    (match_operand:VM 2 "register_operand" "v,0")
		    (match_operand:V16QI 3 "register_operand" "v,wo")]
		   UNSPEC_VPERM_UNS))]
  "TARGET_ALTIVEC"
  "@
   vperm %0,%1,%2,%3
   xxperm %x0,%x1,%x3"
  [(set_attr "type" "vecperm")
   (set_attr "length" "4")])
2068
;; Generic vec_perm entry point for V16QI.  Note this tests true
;; BYTES_BIG_ENDIAN (not VECTOR_ELT_ORDER_BIG) before rewriting for LE.
(define_expand "vec_permv16qi"
  [(set (match_operand:V16QI 0 "register_operand" "")
	(unspec:V16QI [(match_operand:V16QI 1 "register_operand" "")
		       (match_operand:V16QI 2 "register_operand" "")
		       (match_operand:V16QI 3 "register_operand" "")]
		      UNSPEC_VPERM))]
  "TARGET_ALTIVEC"
{
  if (!BYTES_BIG_ENDIAN) {
    altivec_expand_vec_perm_le (operands);
    DONE;
  }
})
2082
;; ISA 3.0 vpermr (permute right-indexed).  Note the vpermr template
;; emits operand 2 before operand 1; the xxpermr alternative again ties
;; operand 2 to the output.
(define_insn "*altivec_vpermr_<mode>_internal"
  [(set (match_operand:VM 0 "register_operand" "=v,?wo")
	(unspec:VM [(match_operand:VM 1 "register_operand" "v,wo")
		    (match_operand:VM 2 "register_operand" "v,0")
		    (match_operand:V16QI 3 "register_operand" "v,wo")]
		   UNSPEC_VPERMR))]
  "TARGET_P9_VECTOR"
  "@
   vpermr %0,%2,%1,%3
   xxpermr %x0,%x1,%x3"
  [(set_attr "type" "vecperm")
   (set_attr "length" "4")])
2095
;; vrfip: round each element to integral toward +infinity (ceil).
(define_insn "altivec_vrfip"		; ceil
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
		     UNSPEC_FRIP))]
  "TARGET_ALTIVEC"
  "vrfip %0,%1"
  [(set_attr "type" "vecfloat")])
2103
;; vrfin: round each element to the nearest integral value.
(define_insn "altivec_vrfin"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
		     UNSPEC_VRFIN))]
  "TARGET_ALTIVEC"
  "vrfin %0,%1"
  [(set_attr "type" "vecfloat")])
2111
;; vrfim: round each element to integral toward -infinity (floor).
(define_insn "*altivec_vrfim"		; floor
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
		     UNSPEC_FRIM))]
  "TARGET_ALTIVEC"
  "vrfim %0,%1"
  [(set_attr "type" "vecfloat")])
2119
;; vcfux: convert unsigned fixed-point words to float, with immediate
;; scale factor in operand 2.
(define_insn "altivec_vcfux"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (unspec:V4SF [(match_operand:V4SI 1 "register_operand" "v")
	              (match_operand:QI 2 "immediate_operand" "i")]
		     UNSPEC_VCFUX))]
  "TARGET_ALTIVEC"
  "vcfux %0,%1,%2"
  [(set_attr "type" "vecfloat")])
2128
;; vcfsx: convert signed fixed-point words to float, with immediate
;; scale factor in operand 2.
(define_insn "altivec_vcfsx"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (unspec:V4SF [(match_operand:V4SI 1 "register_operand" "v")
	              (match_operand:QI 2 "immediate_operand" "i")]
		     UNSPEC_VCFSX))]
  "TARGET_ALTIVEC"
  "vcfsx %0,%1,%2"
  [(set_attr "type" "vecfloat")])
2137
;; vctuxs: convert float to unsigned fixed-point words with saturation;
;; the parallel VSCR set models the sticky saturation bit.
(define_insn "altivec_vctuxs"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
                      (match_operand:QI 2 "immediate_operand" "i")]
		     UNSPEC_VCTUXS))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "TARGET_ALTIVEC"
  "vctuxs %0,%1,%2"
  [(set_attr "type" "vecfloat")])
2147
;; vctsxs: convert float to signed fixed-point words with saturation;
;; the parallel VSCR set models the sticky saturation bit.
(define_insn "altivec_vctsxs"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
                      (match_operand:QI 2 "immediate_operand" "i")]
		     UNSPEC_VCTSXS))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "TARGET_ALTIVEC"
  "vctsxs %0,%1,%2"
  [(set_attr "type" "vecfloat")])
2157
;; vlogefp: base-2 logarithm estimate of each element.
(define_insn "altivec_vlogefp"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
		     UNSPEC_VLOGEFP))]
  "TARGET_ALTIVEC"
  "vlogefp %0,%1"
  [(set_attr "type" "vecfloat")])
2165
;; vexptefp: 2**x estimate of each element.
(define_insn "altivec_vexptefp"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
		     UNSPEC_VEXPTEFP))]
  "TARGET_ALTIVEC"
  "vexptefp %0,%1"
  [(set_attr "type" "vecfloat")])
2173
;; vrsqrtefp: reciprocal square-root estimate of each element.
(define_insn "*altivec_vrsqrtefp"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
		     UNSPEC_RSQRT))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vrsqrtefp %0,%1"
  [(set_attr "type" "vecfloat")])
2181
;; vrefp: reciprocal estimate of each element.
(define_insn "altivec_vrefp"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
		     UNSPEC_FRES))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vrefp %0,%1"
  [(set_attr "type" "vecfloat")])
2189
;; copysign for V4SF: build a vector of sign-bit masks (0x80000000 in
;; each word), then use a vector select so each result element takes
;; its sign from operand 2 and magnitude from operand 1.
(define_expand "altivec_copysign_v4sf3"
  [(use (match_operand:V4SF 0 "register_operand" ""))
   (use (match_operand:V4SF 1 "register_operand" ""))
   (use (match_operand:V4SF 2 "register_operand" ""))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "
{
  rtx mask = gen_reg_rtx (V4SImode);
  rtvec v = rtvec_alloc (4);
  /* Sign-bit mask: only bit 31 set in each 32-bit element.  */
  unsigned HOST_WIDE_INT mask_val = ((unsigned HOST_WIDE_INT)1) << 31;

  RTVEC_ELT (v, 0) = GEN_INT (mask_val);
  RTVEC_ELT (v, 1) = GEN_INT (mask_val);
  RTVEC_ELT (v, 2) = GEN_INT (mask_val);
  RTVEC_ELT (v, 3) = GEN_INT (mask_val);

  /* Materialize the mask, then select sign bits from op2 and the
     remaining bits from op1.  */
  emit_insn (gen_vec_initv4sisi (mask, gen_rtx_PARALLEL (V4SImode, v)));
  emit_insn (gen_vector_select_v4sf (operands[0], operands[1], operands[2],
				     gen_lowpart (V4SFmode, mask)));
  DONE;
}")
2211
;; vsldoi: shift the 32-byte concatenation of operands 1 and 2 left by
;; the immediate byte count in operand 3, taking the high 16 bytes.
(define_insn "altivec_vsldoi_<mode>"
  [(set (match_operand:VM 0 "register_operand" "=v")
        (unspec:VM [(match_operand:VM 1 "register_operand" "v")
		    (match_operand:VM 2 "register_operand" "v")
		    (match_operand:QI 3 "immediate_operand" "i")]
		  UNSPEC_VSLDOI))]
  "TARGET_ALTIVEC"
  "vsldoi %0,%1,%2,%3"
  [(set_attr "type" "vecperm")])
2221
;; Unpack high signed elements to the next wider mode.  For LE element
;; order "high" is the other half of the register, so the low-unpack
;; mnemonic is emitted instead.
(define_insn "altivec_vupkhs<VU_char>"
  [(set (match_operand:VP 0 "register_operand" "=v")
	(unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
		     UNSPEC_VUNPACK_HI_SIGN))]
  "<VI_unit>"
{
  if (VECTOR_ELT_ORDER_BIG)
    return "vupkhs<VU_char> %0,%1";
  else
    return "vupkls<VU_char> %0,%1";
}
  [(set_attr "type" "vecperm")])
2234
;; Direct unpack-high-signed: always emits vupkhs*, no endian swap.
(define_insn "*altivec_vupkhs<VU_char>_direct"
  [(set (match_operand:VP 0 "register_operand" "=v")
	(unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
		     UNSPEC_VUNPACK_HI_SIGN_DIRECT))]
  "<VI_unit>"
  "vupkhs<VU_char> %0,%1"
  [(set_attr "type" "vecperm")])
2242
;; Unpack low signed elements to the next wider mode; mirror image of
;; altivec_vupkhs above (emits the high-unpack mnemonic on LE).
(define_insn "altivec_vupkls<VU_char>"
  [(set (match_operand:VP 0 "register_operand" "=v")
	(unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
		     UNSPEC_VUNPACK_LO_SIGN))]
  "<VI_unit>"
{
  if (VECTOR_ELT_ORDER_BIG)
    return "vupkls<VU_char> %0,%1";
  else
    return "vupkhs<VU_char> %0,%1";
}
  [(set_attr "type" "vecperm")])
2255
;; Direct unpack-low-signed: always emits vupkls*, no endian swap.
(define_insn "*altivec_vupkls<VU_char>_direct"
  [(set (match_operand:VP 0 "register_operand" "=v")
	(unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
		     UNSPEC_VUNPACK_LO_SIGN_DIRECT))]
  "<VI_unit>"
  "vupkls<VU_char> %0,%1"
  [(set_attr "type" "vecperm")])
2263
;; Unpack high pixel (vupkhpx); emits the low-pixel form for LE element
;; order, as with the signed unpacks above.
(define_insn "altivec_vupkhpx"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
	(unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
		     UNSPEC_VUPKHPX))]
  "TARGET_ALTIVEC"
{
  if (VECTOR_ELT_ORDER_BIG)
    return "vupkhpx %0,%1";
  else
    return "vupklpx %0,%1";
}
  [(set_attr "type" "vecperm")])
2276
;; Unpack low pixel (vupklpx); emits the high-pixel form for LE element
;; order.
(define_insn "altivec_vupklpx"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
	(unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
		     UNSPEC_VUPKLPX))]
  "TARGET_ALTIVEC"
{
  if (VECTOR_ELT_ORDER_BIG)
    return "vupklpx %0,%1";
  else
    return "vupkhpx %0,%1";
}
  [(set_attr "type" "vecperm")])
2289
;; Compare vectors producing a vector result and a predicate, setting CR6 to
;; indicate a combined status.

;; Dot-form equality compare: writes the element-wise result to operand
;; 0 and the summary predicate to CR6 (UNSPEC_PREDICATE).
(define_insn "*altivec_vcmpequ<VI_char>_p"
  [(set (reg:CC CR6_REGNO)
	(unspec:CC [(eq:CC (match_operand:VI2 1 "register_operand" "v")
			   (match_operand:VI2 2 "register_operand" "v"))]
		   UNSPEC_PREDICATE))
   (set (match_operand:VI2 0 "register_operand" "=v")
	(eq:VI2 (match_dup 1)
		(match_dup 2)))]
  "<VI_unit>"
  "vcmpequ<VI_char>. %0,%1,%2"
  [(set_attr "type" "veccmpfx")])
2303
;; Dot-form signed greater-than compare; vector result plus CR6 summary.
(define_insn "*altivec_vcmpgts<VI_char>_p"
  [(set (reg:CC CR6_REGNO)
	(unspec:CC [(gt:CC (match_operand:VI2 1 "register_operand" "v")
			   (match_operand:VI2 2 "register_operand" "v"))]
		   UNSPEC_PREDICATE))
   (set (match_operand:VI2 0 "register_operand" "=v")
	(gt:VI2 (match_dup 1)
		(match_dup 2)))]
  "<VI_unit>"
  "vcmpgts<VI_char>. %0,%1,%2"
  [(set_attr "type" "veccmpfx")])
2315
;; Dot-form unsigned greater-than compare; vector result plus CR6 summary.
(define_insn "*altivec_vcmpgtu<VI_char>_p"
  [(set (reg:CC CR6_REGNO)
	(unspec:CC [(gtu:CC (match_operand:VI2 1 "register_operand" "v")
			    (match_operand:VI2 2 "register_operand" "v"))]
		   UNSPEC_PREDICATE))
   (set (match_operand:VI2 0 "register_operand" "=v")
	(gtu:VI2 (match_dup 1)
		 (match_dup 2)))]
  "<VI_unit>"
  "vcmpgtu<VI_char>. %0,%1,%2"
  [(set_attr "type" "veccmpfx")])
2327
;; Dot-form float equality compare; vector result plus CR6 summary.
(define_insn "*altivec_vcmpeqfp_p"
  [(set (reg:CC CR6_REGNO)
	(unspec:CC [(eq:CC (match_operand:V4SF 1 "register_operand" "v")
			   (match_operand:V4SF 2 "register_operand" "v"))]
		   UNSPEC_PREDICATE))
   (set (match_operand:V4SF 0 "register_operand" "=v")
	(eq:V4SF (match_dup 1)
		 (match_dup 2)))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vcmpeqfp. %0,%1,%2"
  [(set_attr "type" "veccmp")])
2339
;; Dot-form float greater-than compare; vector result plus CR6 summary.
(define_insn "*altivec_vcmpgtfp_p"
  [(set (reg:CC CR6_REGNO)
	(unspec:CC [(gt:CC (match_operand:V4SF 1 "register_operand" "v")
			   (match_operand:V4SF 2 "register_operand" "v"))]
		   UNSPEC_PREDICATE))
   (set (match_operand:V4SF 0 "register_operand" "=v")
	(gt:V4SF (match_dup 1)
		 (match_dup 2)))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vcmpgtfp. %0,%1,%2"
  [(set_attr "type" "veccmp")])
2351
;; Dot-form float greater-or-equal compare; vector result plus CR6 summary.
(define_insn "*altivec_vcmpgefp_p"
  [(set (reg:CC CR6_REGNO)
	(unspec:CC [(ge:CC (match_operand:V4SF 1 "register_operand" "v")
			   (match_operand:V4SF 2 "register_operand" "v"))]
		   UNSPEC_PREDICATE))
   (set (match_operand:V4SF 0 "register_operand" "=v")
	(ge:V4SF (match_dup 1)
		 (match_dup 2)))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vcmpgefp. %0,%1,%2"
  [(set_attr "type" "veccmp")])
2363
;; Dot-form bounds compare (vcmpbfp.); both the vector result and the
;; CR6 summary are expressed with UNSPEC_VCMPBFP since bounds-compare
;; has no RTL comparison code.
(define_insn "altivec_vcmpbfp_p"
  [(set (reg:CC CR6_REGNO)
	(unspec:CC [(match_operand:V4SF 1 "register_operand" "v")
		    (match_operand:V4SF 2 "register_operand" "v")]
		   UNSPEC_VCMPBFP))
   (set (match_operand:V4SF 0 "register_operand" "=v")
        (unspec:V4SF [(match_dup 1)
                      (match_dup 2)]
                      UNSPEC_VCMPBFP))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)"
  "vcmpbfp. %0,%1,%2"
  [(set_attr "type" "veccmp")])
2376
;; Move to VSCR; volatile so it is never deleted or reordered.
(define_insn "altivec_mtvscr"
  [(set (reg:SI VSCR_REGNO)
	(unspec_volatile:SI
	 [(match_operand:V4SI 0 "register_operand" "v")] UNSPECV_MTVSCR))]
  "TARGET_ALTIVEC"
  "mtvscr %0"
  [(set_attr "type" "vecsimple")])
2384
;; Move from VSCR; volatile so it is never deleted or reordered.
(define_insn "altivec_mfvscr"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
	(unspec_volatile:V8HI [(reg:SI VSCR_REGNO)] UNSPECV_MFVSCR))]
  "TARGET_ALTIVEC"
  "mfvscr %0"
  [(set_attr "type" "vecsimple")])
2391
;; Stop all data streams (dssall); volatile side-effect only, no outputs.
(define_insn "altivec_dssall"
  [(unspec_volatile [(const_int 0)] UNSPECV_DSSALL)]
  "TARGET_ALTIVEC"
  "dssall"
  [(set_attr "type" "vecsimple")])
2397
;; Stop the data stream whose tag is the immediate operand 0.
(define_insn "altivec_dss"
  [(unspec_volatile [(match_operand:QI 0 "immediate_operand" "i")]
		    UNSPECV_DSS)]
  "TARGET_ALTIVEC"
  "dss %0"
  [(set_attr "type" "vecsimple")])
2404
;; Data stream touch (dst): address reg, control word, stream tag.
;; Operand 0 is modeless but constrained to Pmode by the condition.
(define_insn "altivec_dst"
  [(unspec [(match_operand 0 "register_operand" "b")
	    (match_operand:SI 1 "register_operand" "r")
	    (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DST)]
  "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
  "dst %0,%1,%2"
  [(set_attr "type" "vecsimple")])
2412
;; Data stream touch, transient variant (dstt).
(define_insn "altivec_dstt"
  [(unspec [(match_operand 0 "register_operand" "b")
	    (match_operand:SI 1 "register_operand" "r")
	    (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTT)]
  "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
  "dstt %0,%1,%2"
  [(set_attr "type" "vecsimple")])
2420
;; Data stream touch for store (dstst).
(define_insn "altivec_dstst"
  [(unspec [(match_operand 0 "register_operand" "b")
	    (match_operand:SI 1 "register_operand" "r")
	    (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTST)]
  "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
  "dstst %0,%1,%2"
  [(set_attr "type" "vecsimple")])
2428
;; Data stream touch for store, transient variant (dststt).
(define_insn "altivec_dststt"
  [(unspec [(match_operand 0 "register_operand" "b")
	    (match_operand:SI 1 "register_operand" "r")
	    (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTSTT)]
  "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
  "dststt %0,%1,%2"
  [(set_attr "type" "vecsimple")])
2436
;; Load vector for shift left (lvsl).  For LE element order the mask is
;; loaded and then reversed by permuting it with the {0,1,...,15} vector
;; so it matches the element numbering the rest of the RTL expects.
(define_expand "altivec_lvsl"
  [(use (match_operand:V16QI 0 "register_operand" ""))
   (use (match_operand:V16QI 1 "memory_operand" ""))]
  "TARGET_ALTIVEC"
{
  if (VECTOR_ELT_ORDER_BIG)
    emit_insn (gen_altivec_lvsl_direct (operands[0], operands[1]));
  else
    {
      rtx mask, constv, vperm;
      mask = gen_reg_rtx (V16QImode);
      emit_insn (gen_altivec_lvsl_direct (mask, operands[1]));
      /* constv = {0, 1, 2, ..., 15}; vperm of the mask with itself
         under this selector reverses its bytes on LE.  */
      constv = gen_const_vec_series (V16QImode, const0_rtx, const1_rtx);
      constv = force_reg (V16QImode, constv);
      vperm = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, mask, mask, constv),
                              UNSPEC_VPERM);
      emit_insn (gen_rtx_SET (operands[0], vperm));
    }
  DONE;
})
2457
;; Raw lvsl, no endian adjustment.
(define_insn "altivec_lvsl_direct"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
	(unspec:V16QI [(match_operand:V16QI 1 "memory_operand" "Z")]
		      UNSPEC_LVSL))]
  "TARGET_ALTIVEC"
  "lvsl %0,%y1"
  [(set_attr "type" "vecload")])
2465
;; Load vector for shift right (lvsr); LE handling mirrors altivec_lvsl
;; above (reverse the loaded mask with a self-vperm).
(define_expand "altivec_lvsr"
  [(use (match_operand:V16QI 0 "register_operand" ""))
   (use (match_operand:V16QI 1 "memory_operand" ""))]
  "TARGET_ALTIVEC"
{
  if (VECTOR_ELT_ORDER_BIG)
    emit_insn (gen_altivec_lvsr_direct (operands[0], operands[1]));
  else
    {
      rtx mask, constv, vperm;
      mask = gen_reg_rtx (V16QImode);
      emit_insn (gen_altivec_lvsr_direct (mask, operands[1]));
      /* constv = {0, 1, 2, ..., 15} used to reverse the mask bytes.  */
      constv = gen_const_vec_series (V16QImode, const0_rtx, const1_rtx);
      constv = force_reg (V16QImode, constv);
      vperm = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, mask, mask, constv),
                              UNSPEC_VPERM);
      emit_insn (gen_rtx_SET (operands[0], vperm));
    }
  DONE;
})
2486
;; Raw lvsr, no endian adjustment.
(define_insn "altivec_lvsr_direct"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
	(unspec:V16QI [(match_operand:V16QI 1 "memory_operand" "Z")]
		      UNSPEC_LVSR))]
  "TARGET_ALTIVEC"
  "lvsr %0,%y1"
  [(set_attr "type" "vecload")])
2494
;; Build the realignment mask for a misaligned vector load: compute the
;; negated address into a temp and feed it to lvsr, whose result is the
;; permute mask the vectorizer uses for realignment.
(define_expand "build_vector_mask_for_load"
  [(set (match_operand:V16QI 0 "register_operand" "")
	(unspec:V16QI [(match_operand 1 "memory_operand" "")] UNSPEC_LVSR))]
  "TARGET_ALTIVEC"
  "
{
  rtx addr;
  rtx temp;

  gcc_assert (GET_CODE (operands[1]) == MEM);

  /* temp = -addr; lvsr on the negated address yields the mask.  */
  addr = XEXP (operands[1], 0);
  temp = gen_reg_rtx (GET_MODE (addr));
  emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (GET_MODE (addr), addr)));
  emit_insn (gen_altivec_lvsr (operands[0],
			       replace_equiv_address (operands[1], temp)));
  DONE;
}")
2513
;; Tag some of the LVE* and STV* patterns with unspecs in a parallel,
;; because several of them have identical RTL but different instructions
;; -- without the distinguishing unspec, GCC would conflate them.
2516
;; Load vector element (lvebx/lvehx/lvewx).  The UNSPEC_LVE tag in the
;; parallel distinguishes this from ordinary vector loads with identical
;; RTL.  With -maltivec=be on LE, rewrite via altivec_expand_lvx_be.
(define_expand "altivec_lve<VI_char>x"
  [(parallel
    [(set (match_operand:VI 0 "register_operand" "=v")
	  (match_operand:VI 1 "memory_operand" "Z"))
     (unspec [(const_int 0)] UNSPEC_LVE)])]
  "TARGET_ALTIVEC"
{
  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
    {
      altivec_expand_lvx_be (operands[0], operands[1], <MODE>mode, UNSPEC_LVE);
      DONE;
    }
})
2530
;; Insn form of the element load; emits lvebx/lvehx/lvewx by mode.
(define_insn "*altivec_lve<VI_char>x_internal"
  [(parallel
    [(set (match_operand:VI 0 "register_operand" "=v")
	  (match_operand:VI 1 "memory_operand" "Z"))
     (unspec [(const_int 0)] UNSPEC_LVE)])]
  "TARGET_ALTIVEC"
  "lve<VI_char>x %0,%y1"
  [(set_attr "type" "vecload")])
2539
;; Float variant of the element load: V4SF uses the word form lvewx.
(define_insn "*altivec_lvesfx"
  [(parallel
    [(set (match_operand:V4SF 0 "register_operand" "=v")
	  (match_operand:V4SF 1 "memory_operand" "Z"))
     (unspec [(const_int 0)] UNSPEC_LVE)])]
  "TARGET_ALTIVEC"
  "lvewx %0,%y1"
  [(set_attr "type" "vecload")])
2548
;; Load vector indexed LRU (lvxl).  The UNSPEC_SET_VSCR tag here merely
;; distinguishes lvxl from plain loads with identical RTL; it does not
;; set VSCR.  With -maltivec=be on LE, rewrite via altivec_expand_lvx_be.
(define_expand "altivec_lvxl_<mode>"
  [(parallel
    [(set (match_operand:VM2 0 "register_operand" "=v")
	  (match_operand:VM2 1 "memory_operand" "Z"))
     (unspec [(const_int 0)] UNSPEC_SET_VSCR)])]
  "TARGET_ALTIVEC"
{
  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
    {
      altivec_expand_lvx_be (operands[0], operands[1], <MODE>mode, UNSPEC_SET_VSCR);
      DONE;
    }
})
2562
;; Insn form of lvxl (see the expander above for the unspec tag's role).
(define_insn "*altivec_lvxl_<mode>_internal"
  [(parallel
    [(set (match_operand:VM2 0 "register_operand" "=v")
	  (match_operand:VM2 1 "memory_operand" "Z"))
     (unspec [(const_int 0)] UNSPEC_SET_VSCR)])]
  "TARGET_ALTIVEC"
  "lvxl %0,%y1"
  [(set_attr "type" "vecload")])
2571
2572; This version of lvx is used only in cases where we need to force an lvx
2573; over any other load, and we don't care about losing CSE opportunities.
2574; Its primary use is for prologue register saves.
; This version of lvx is used only in cases where we need to force an lvx
; over any other load, and we don't care about losing CSE opportunities.
; Its primary use is for prologue register saves.
(define_insn "altivec_lvx_<mode>_internal"
  [(parallel
    [(set (match_operand:VM2 0 "register_operand" "=v")
	  (match_operand:VM2 1 "memory_operand" "Z"))
     (unspec [(const_int 0)] UNSPEC_LVX)])]
  "TARGET_ALTIVEC"
  "lvx %0,%y1"
  [(set_attr "type" "vecload")])
2583
2584; The next two patterns embody what lvx should usually look like.
; The next two patterns embody what lvx should usually look like:
; the hardware's implicit 16-byte truncation of the effective address
; is modeled explicitly by the (and ... -16).
(define_insn "altivec_lvx_<mode>_2op"
  [(set (match_operand:VM2 0 "register_operand" "=v")
        (mem:VM2 (and:DI (plus:DI (match_operand:DI 1 "register_operand" "b")
                                  (match_operand:DI 2 "register_operand" "r"))
		         (const_int -16))))]
  "TARGET_ALTIVEC && TARGET_64BIT"
  "lvx %0,%1,%2"
  [(set_attr "type" "vecload")])
2593
; Single-register-address form of the above (RA field is 0).
(define_insn "altivec_lvx_<mode>_1op"
  [(set (match_operand:VM2 0 "register_operand" "=v")
        (mem:VM2 (and:DI (match_operand:DI 1 "register_operand" "r")
			 (const_int -16))))]
  "TARGET_ALTIVEC && TARGET_64BIT"
  "lvx %0,0,%1"
  [(set_attr "type" "vecload")])
2601
2602; 32-bit versions of the above.
; 32-bit (SImode address) version of altivec_lvx_<mode>_2op.
(define_insn "altivec_lvx_<mode>_2op_si"
  [(set (match_operand:VM2 0 "register_operand" "=v")
        (mem:VM2 (and:SI (plus:SI (match_operand:SI 1 "register_operand" "b")
                                  (match_operand:SI 2 "register_operand" "r"))
		         (const_int -16))))]
  "TARGET_ALTIVEC && TARGET_32BIT"
  "lvx %0,%1,%2"
  [(set_attr "type" "vecload")])
2611
; 32-bit (SImode address) version of altivec_lvx_<mode>_1op.
(define_insn "altivec_lvx_<mode>_1op_si"
  [(set (match_operand:VM2 0 "register_operand" "=v")
        (mem:VM2 (and:SI (match_operand:SI 1 "register_operand" "r")
			 (const_int -16))))]
  "TARGET_ALTIVEC && TARGET_32BIT"
  "lvx %0,0,%1"
  [(set_attr "type" "vecload")])
2619
2620; This version of stvx is used only in cases where we need to force an stvx
2621; over any other store, and we don't care about losing CSE opportunities.
2622; Its primary use is for epilogue register restores.
; This version of stvx is used only in cases where we need to force an stvx
; over any other store, and we don't care about losing CSE opportunities.
; Its primary use is for epilogue register restores.
(define_insn "altivec_stvx_<mode>_internal"
  [(parallel
    [(set (match_operand:VM2 0 "memory_operand" "=Z")
	  (match_operand:VM2 1 "register_operand" "v"))
     (unspec [(const_int 0)] UNSPEC_STVX)])]
  "TARGET_ALTIVEC"
  "stvx %1,%y0"
  [(set_attr "type" "vecstore")])
2631
2632; The next two patterns embody what stvx should usually look like.
; The next two patterns embody what stvx should usually look like:
; the address's implicit 16-byte truncation is modeled by (and ... -16).
(define_insn "altivec_stvx_<mode>_2op"
  [(set (mem:VM2 (and:DI (plus:DI (match_operand:DI 1 "register_operand" "b")
  	                          (match_operand:DI 2 "register_operand" "r"))
	                 (const_int -16)))
        (match_operand:VM2 0 "register_operand" "v"))]
  "TARGET_ALTIVEC && TARGET_64BIT"
  "stvx %0,%1,%2"
  [(set_attr "type" "vecstore")])
2641
; Single-register-address form of the above (RA field is 0).
(define_insn "altivec_stvx_<mode>_1op"
  [(set (mem:VM2 (and:DI (match_operand:DI 1 "register_operand" "r")
	                 (const_int -16)))
        (match_operand:VM2 0 "register_operand" "v"))]
  "TARGET_ALTIVEC && TARGET_64BIT"
  "stvx %0,0,%1"
  [(set_attr "type" "vecstore")])
2649
2650; 32-bit versions of the above.
2651(define_insn "altivec_stvx_<mode>_2op_si"
2652  [(set (mem:VM2 (and:SI (plus:SI (match_operand:SI 1 "register_operand" "b")
2653  	                          (match_operand:SI 2 "register_operand" "r"))
2654	                 (const_int -16)))
2655        (match_operand:VM2 0 "register_operand" "v"))]
2656  "TARGET_ALTIVEC && TARGET_32BIT"
2657  "stvx %0,%1,%2"
2658  [(set_attr "type" "vecstore")])
2659
2660(define_insn "altivec_stvx_<mode>_1op_si"
2661  [(set (mem:VM2 (and:SI (match_operand:SI 1 "register_operand" "r")
2662	                 (const_int -16)))
2663        (match_operand:VM2 0 "register_operand" "v"))]
2664  "TARGET_ALTIVEC && TARGET_32BIT"
2665  "stvx %0,0,%1"
2666  [(set_attr "type" "vecstore")])
2667
;; Store vector indexed LRU (stvxl).  On a little-endian target running with
;; big-endian element order (-maltivec=be), defer to the helper
;; altivec_expand_stvx_be; otherwise fall through to the insn below.
(define_expand "altivec_stvxl_<mode>"
  [(parallel
    [(set (match_operand:VM2 0 "memory_operand" "=Z")
	  (match_operand:VM2 1 "register_operand" "v"))
     (unspec [(const_int 0)] UNSPEC_STVXL)])]
  "TARGET_ALTIVEC"
{
  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
    {
      altivec_expand_stvx_be (operands[0], operands[1], <MODE>mode, UNSPEC_STVXL);
      DONE;
    }
})

(define_insn "*altivec_stvxl_<mode>_internal"
  [(parallel
    [(set (match_operand:VM2 0 "memory_operand" "=Z")
	  (match_operand:VM2 1 "register_operand" "v"))
     (unspec [(const_int 0)] UNSPEC_STVXL)])]
  "TARGET_ALTIVEC"
  "stvxl %1,%y0"
  [(set_attr "type" "vecstore")])

;; Store a single vector element (stvebx/stvehx/stvewx, chosen by <VI_char>).
;; Like stvxl above, the BE-element-order case goes through a helper.
(define_expand "altivec_stve<VI_char>x"
  [(set (match_operand:<VI_scalar> 0 "memory_operand" "=Z")
	(unspec:<VI_scalar> [(match_operand:VI 1 "register_operand" "v")] UNSPEC_STVE))]
  "TARGET_ALTIVEC"
{
  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
    {
      altivec_expand_stvex_be (operands[0], operands[1], <MODE>mode, UNSPEC_STVE);
      DONE;
    }
})

(define_insn "*altivec_stve<VI_char>x_internal"
  [(set (match_operand:<VI_scalar> 0 "memory_operand" "=Z")
	(unspec:<VI_scalar> [(match_operand:VI 1 "register_operand" "v")] UNSPEC_STVE))]
  "TARGET_ALTIVEC"
  "stve<VI_char>x %1,%y0"
  [(set_attr "type" "vecstore")])

;; V4SF variant of the element store; SF elements use stvewx (word size).
(define_insn "*altivec_stvesfx"
  [(set (match_operand:SF 0 "memory_operand" "=Z")
	(unspec:SF [(match_operand:V4SF 1 "register_operand" "v")] UNSPEC_STVE))]
  "TARGET_ALTIVEC"
  "stvewx %1,%y0"
  [(set_attr "type" "vecstore")])
2716
;; Integer absolute value: abs(x) = smax (x, 0 - x).  Generate
;;    xxlxor/vxor SCRATCH1,SCRATCH1,SCRATCH1
;;    vsubu?m SCRATCH2,SCRATCH1,%1
;;    vmaxs? %0,%1,SCRATCH2"
(define_expand "abs<mode>2"
  [(set (match_dup 2) (match_dup 3))
   (set (match_dup 4)
        (minus:VI2 (match_dup 2)
		   (match_operand:VI2 1 "register_operand" "v")))
   (set (match_operand:VI2 0 "register_operand" "=v")
        (smax:VI2 (match_dup 1) (match_dup 4)))]
  "<VI_unit>"
{
  /* operands[2]/[4] are scratch registers; operands[3] is the zero vector
     used as the minuend.  */
  operands[2] = gen_reg_rtx (<MODE>mode);
  operands[3] = CONST0_RTX (<MODE>mode);
  operands[4] = gen_reg_rtx (<MODE>mode);
})
2734
;; Negated absolute value: nabs(x) = smin (x, 0 - x).  Generate
;;    vspltisw SCRATCH1,0
;;    vsubu?m SCRATCH2,SCRATCH1,%1
;;    vmins? %0,%1,SCRATCH2"
(define_expand "nabs<mode>2"
  [(set (match_dup 2) (match_dup 3))
   (set (match_dup 4)
        (minus:VI2 (match_dup 2)
		   (match_operand:VI2 1 "register_operand" "v")))
   (set (match_operand:VI2 0 "register_operand" "=v")
        (smin:VI2 (match_dup 1) (match_dup 4)))]
  "<VI_unit>"
{
  /* operands[2]/[4] are scratch registers; operands[3] is the zero vector
     used as the minuend.  */
  operands[2] = gen_reg_rtx (<MODE>mode);
  operands[3] = CONST0_RTX (<MODE>mode);
  operands[4] = gen_reg_rtx (<MODE>mode);
})
2752
;; Float absolute value: clear the sign bit of each word.  The sign-bit mask
;; 0x80000000 is built by splatting -1 and shifting each word left by its
;; own (low-order bits of) value, i.e. by 31.  Generate
;;    vspltisw SCRATCH1,-1
;;    vslw SCRATCH2,SCRATCH1,SCRATCH1
;;    vandc %0,%1,SCRATCH2
(define_expand "altivec_absv4sf2"
  [(set (match_dup 2)
	(vec_duplicate:V4SI (const_int -1)))
   (set (match_dup 3)
        (ashift:V4SI (match_dup 2) (match_dup 2)))
   (set (match_operand:V4SF 0 "register_operand" "=v")
        (and:V4SF (not:V4SF (subreg:V4SF (match_dup 3) 0))
                  (match_operand:V4SF 1 "register_operand" "v")))]
  "TARGET_ALTIVEC"
{
  operands[2] = gen_reg_rtx (V4SImode);
  operands[3] = gen_reg_rtx (V4SImode);
})
2770
;; Saturating absolute value: like abs<mode>2 but uses the saturating
;; subtract, which also sets VSCR (modeled by the UNSPEC_SET_VSCR set).
;;    vspltis? SCRATCH1,0
;;    vsubs?s SCRATCH2,SCRATCH1,%1
;;    vmaxs? %0,%1,SCRATCH2"
(define_expand "altivec_abss_<mode>"
  [(set (match_dup 2) (vec_duplicate:VI (const_int 0)))
   (parallel [(set (match_dup 3)
		   (unspec:VI [(match_dup 2)
			       (match_operand:VI 1 "register_operand" "v")]
			      UNSPEC_VSUBS))
	      (set (reg:SI VSCR_REGNO)
		   (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))])
   (set (match_operand:VI 0 "register_operand" "=v")
        (smax:VI (match_dup 1) (match_dup 3)))]
  "TARGET_ALTIVEC"
{
  operands[2] = gen_reg_rtx (GET_MODE (operands[0]));
  operands[3] = gen_reg_rtx (GET_MODE (operands[0]));
})
2790
;; Sum all elements of operand 1 into a scalar: vsum4s?s partially sums
;; into words, vsumsws finishes the reduction into one word of vtmp2, and
;; the live word is then extracted (last element for BE element order,
;; element 0 otherwise).
(define_expand "reduc_plus_scal_<mode>"
  [(set (match_operand:<VI_scalar> 0 "register_operand" "=v")
        (unspec:VIshort [(match_operand:VIshort 1 "register_operand" "v")]
			UNSPEC_REDUC_PLUS))]
  "TARGET_ALTIVEC"
{
  rtx vzero = gen_reg_rtx (V4SImode);
  rtx vtmp1 = gen_reg_rtx (V4SImode);
  rtx vtmp2 = gen_reg_rtx (<MODE>mode);
  rtx dest = gen_lowpart (V4SImode, vtmp2);
  int elt = VECTOR_ELT_ORDER_BIG ? GET_MODE_NUNITS (<MODE>mode) - 1 : 0;

  emit_insn (gen_altivec_vspltisw (vzero, const0_rtx));
  emit_insn (gen_altivec_vsum4s<VI_char>s (vtmp1, operands[1], vzero));
  emit_insn (gen_altivec_vsumsws_direct (dest, vtmp1, vzero));
  rs6000_expand_vector_extract (operands[0], vtmp2, GEN_INT (elt));
  DONE;
})
2809
;; ISA 3.0 has native vector negate (vnegw/vnegd).
(define_insn "*p9_neg<mode>2"
  [(set (match_operand:VNEG 0 "altivec_register_operand" "=v")
	(neg:VNEG (match_operand:VNEG 1 "altivec_register_operand" "v")))]
  "TARGET_P9_VECTOR"
  "vneg<VI_char> %0,%1"
  [(set_attr "type" "vecsimple")])

;; Vector negate.  Without P9, or for element modes that have no vneg
;; instruction (only V4SI and V2DI do), open-code as 0 - x; otherwise fall
;; through so the insn above matches.
(define_expand "neg<mode>2"
  [(set (match_operand:VI2 0 "register_operand" "")
	(neg:VI2 (match_operand:VI2 1 "register_operand" "")))]
  "<VI_unit>"
{
  if (!TARGET_P9_VECTOR || (<MODE>mode != V4SImode && <MODE>mode != V2DImode))
    {
      rtx vzero;

      vzero = gen_reg_rtx (GET_MODE (operands[0]));
      emit_move_insn (vzero, CONST0_RTX (<MODE>mode));
      emit_insn (gen_sub<mode>3 (operands[0], vzero, operands[1]));
      DONE;
    }
})
2832
;; Unsigned dot product: multiply-sum the narrow elements of operands 1 and
;; 2 into words and accumulate into operand 3, via vmsumubm/vmsumuhm.
(define_expand "udot_prod<mode>"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (plus:V4SI (match_operand:V4SI 3 "register_operand" "v")
                   (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
                                 (match_operand:VIshort 2 "register_operand" "v")]
                                UNSPEC_VMSUMU)))]
  "TARGET_ALTIVEC"
  "
{
  emit_insn (gen_altivec_vmsumu<VI_char>m (operands[0], operands[1], operands[2], operands[3]));
  DONE;
}")

;; Signed halfword dot product via vmsumshm.
(define_expand "sdot_prodv8hi"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (plus:V4SI (match_operand:V4SI 3 "register_operand" "v")
                   (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
                                 (match_operand:V8HI 2 "register_operand" "v")]
                                UNSPEC_VMSUMSHM)))]
  "TARGET_ALTIVEC"
  "
{
  emit_insn (gen_altivec_vmsumshm (operands[0], operands[1], operands[2], operands[3]));
  DONE;
}")
2858
;; Widening sums: sum the narrow elements of operand 1 into words and add
;; operand 2.  Implemented as a multiply-sum against a splat of 1.
(define_expand "widen_usum<mode>3"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (plus:V4SI (match_operand:V4SI 2 "register_operand" "v")
                   (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")]
                                UNSPEC_VMSUMU)))]
  "TARGET_ALTIVEC"
  "
{
  rtx vones = gen_reg_rtx (GET_MODE (operands[1]));

  emit_insn (gen_altivec_vspltis<VI_char> (vones, const1_rtx));
  emit_insn (gen_altivec_vmsumu<VI_char>m (operands[0], operands[1], vones, operands[2]));
  DONE;
}")

;; Signed byte variant: vmsummbm (mixed signed x unsigned; the splat of 1
;; is the unsigned factor).
(define_expand "widen_ssumv16qi3"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (plus:V4SI (match_operand:V4SI 2 "register_operand" "v")
                   (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")]
                                UNSPEC_VMSUMM)))]
  "TARGET_ALTIVEC"
  "
{
  rtx vones = gen_reg_rtx (V16QImode);

  emit_insn (gen_altivec_vspltisb (vones, const1_rtx));
  emit_insn (gen_altivec_vmsummbm (operands[0], operands[1], vones, operands[2]));
  DONE;
}")

;; Signed halfword variant via vmsumshm.
(define_expand "widen_ssumv8hi3"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (plus:V4SI (match_operand:V4SI 2 "register_operand" "v")
                   (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
                                UNSPEC_VMSUMSHM)))]
  "TARGET_ALTIVEC"
  "
{
  rtx vones = gen_reg_rtx (V8HImode);

  emit_insn (gen_altivec_vspltish (vones, const1_rtx));
  emit_insn (gen_altivec_vmsumshm (operands[0], operands[1], vones, operands[2]));
  DONE;
}")
2903
;; Signed unpack (sign-extend) of the high/low half of a vector.  These
;; expand directly to the *_DIRECT unspec patterns (vupkhs?/vupkls?)
;; defined elsewhere in this file.
(define_expand "vec_unpacks_hi_<VP_small_lc>"
  [(set (match_operand:VP 0 "register_operand" "=v")
        (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
		   UNSPEC_VUNPACK_HI_SIGN_DIRECT))]
  "<VI_unit>"
  "")

(define_expand "vec_unpacks_lo_<VP_small_lc>"
  [(set (match_operand:VP 0 "register_operand" "=v")
        (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
		   UNSPEC_VUNPACK_LO_SIGN_DIRECT))]
  "<VI_unit>"
  "")
2917
;; Mixed-mode vperm used by the unsigned-unpack expanders below: the two
;; data sources have different modes.  The second alternative uses xxperm,
;; whose second data source must be the destination register (constraint
;; "0"), hence only %x0/%x1/%x3 appear in the template.
(define_insn "vperm_v8hiv4si"
  [(set (match_operand:V4SI 0 "register_operand" "=v,?wo")
        (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v,wo")
		      (match_operand:V4SI 2 "register_operand" "v,0")
		      (match_operand:V16QI 3 "register_operand" "v,wo")]
                  UNSPEC_VPERMSI))]
  "TARGET_ALTIVEC"
  "@
   vperm %0,%1,%2,%3
   xxperm %x0,%x1,%x3"
  [(set_attr "type" "vecperm")
   (set_attr "length" "4")])

(define_insn "vperm_v16qiv8hi"
  [(set (match_operand:V8HI 0 "register_operand" "=v,?wo")
        (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v,wo")
		      (match_operand:V8HI 2 "register_operand" "v,0")
		      (match_operand:V16QI 3 "register_operand" "v,wo")]
                  UNSPEC_VPERMHI))]
  "TARGET_ALTIVEC"
  "@
   vperm %0,%1,%2,%3
   xxperm %x0,%x1,%x3"
  [(set_attr "type" "vecperm")
   (set_attr "length" "4")])
2943
2944
;; Zero-extend the high half of a V16QI to V8HI.  Done with a vperm that
;; interleaves input bytes with zero bytes: mask indices 0-15 select from
;; operand 1, indices 16-31 from the second source (a zero vector), so each
;; result halfword is a zero byte paired with a data byte, ordered for the
;; target endianness.
(define_expand "vec_unpacku_hi_v16qi"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")]
                     UNSPEC_VUPKHUB))]
  "TARGET_ALTIVEC"
  "
{
  rtx vzero = gen_reg_rtx (V8HImode);
  rtx mask = gen_reg_rtx (V16QImode);
  rtvec v = rtvec_alloc (16);
  bool be = BYTES_BIG_ENDIAN;

  emit_insn (gen_altivec_vspltish (vzero, const0_rtx));

  /* Index 16 selects a zero byte from vzero.  */
  RTVEC_ELT (v,  0) = gen_rtx_CONST_INT (QImode, be ? 16 :  7);
  RTVEC_ELT (v,  1) = gen_rtx_CONST_INT (QImode, be ?  0 : 16);
  RTVEC_ELT (v,  2) = gen_rtx_CONST_INT (QImode, be ? 16 :  6);
  RTVEC_ELT (v,  3) = gen_rtx_CONST_INT (QImode, be ?  1 : 16);
  RTVEC_ELT (v,  4) = gen_rtx_CONST_INT (QImode, be ? 16 :  5);
  RTVEC_ELT (v,  5) = gen_rtx_CONST_INT (QImode, be ?  2 : 16);
  RTVEC_ELT (v,  6) = gen_rtx_CONST_INT (QImode, be ? 16 :  4);
  RTVEC_ELT (v,  7) = gen_rtx_CONST_INT (QImode, be ?  3 : 16);
  RTVEC_ELT (v,  8) = gen_rtx_CONST_INT (QImode, be ? 16 :  3);
  RTVEC_ELT (v,  9) = gen_rtx_CONST_INT (QImode, be ?  4 : 16);
  RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, be ? 16 :  2);
  RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, be ?  5 : 16);
  RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, be ? 16 :  1);
  RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, be ?  6 : 16);
  RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, be ? 16 :  0);
  RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, be ?  7 : 16);

  emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v)));
  emit_insn (gen_vperm_v16qiv8hi (operands[0], operands[1], vzero, mask));
  DONE;
}")
2980
;; Zero-extend the high half of a V8HI to V4SI.  As for the byte case, a
;; vperm interleaves each data halfword with a zero halfword; indices 16/17
;; select the two bytes of a zero element from the second (zero) source.
(define_expand "vec_unpacku_hi_v8hi"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
                     UNSPEC_VUPKHUH))]
  "TARGET_ALTIVEC"
  "
{
  rtx vzero = gen_reg_rtx (V4SImode);
  rtx mask = gen_reg_rtx (V16QImode);
  rtvec v = rtvec_alloc (16);
  bool be = BYTES_BIG_ENDIAN;

  emit_insn (gen_altivec_vspltisw (vzero, const0_rtx));

  RTVEC_ELT (v,  0) = gen_rtx_CONST_INT (QImode, be ? 16 :  7);
  RTVEC_ELT (v,  1) = gen_rtx_CONST_INT (QImode, be ? 17 :  6);
  RTVEC_ELT (v,  2) = gen_rtx_CONST_INT (QImode, be ?  0 : 17);
  RTVEC_ELT (v,  3) = gen_rtx_CONST_INT (QImode, be ?  1 : 16);
  RTVEC_ELT (v,  4) = gen_rtx_CONST_INT (QImode, be ? 16 :  5);
  RTVEC_ELT (v,  5) = gen_rtx_CONST_INT (QImode, be ? 17 :  4);
  RTVEC_ELT (v,  6) = gen_rtx_CONST_INT (QImode, be ?  2 : 17);
  RTVEC_ELT (v,  7) = gen_rtx_CONST_INT (QImode, be ?  3 : 16);
  RTVEC_ELT (v,  8) = gen_rtx_CONST_INT (QImode, be ? 16 :  3);
  RTVEC_ELT (v,  9) = gen_rtx_CONST_INT (QImode, be ? 17 :  2);
  RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, be ?  4 : 17);
  RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, be ?  5 : 16);
  RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, be ? 16 :  1);
  RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, be ? 17 :  0);
  RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, be ?  6 : 17);
  RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, be ?  7 : 16);

  emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v)));
  emit_insn (gen_vperm_v8hiv4si (operands[0], operands[1], vzero, mask));
  DONE;
}")
3016
;; Zero-extend the low half of a V16QI to V8HI; same scheme as the hi
;; variant but selecting data bytes 8-15.
(define_expand "vec_unpacku_lo_v16qi"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")]
                     UNSPEC_VUPKLUB))]
  "TARGET_ALTIVEC"
  "
{
  rtx vzero = gen_reg_rtx (V8HImode);
  rtx mask = gen_reg_rtx (V16QImode);
  rtvec v = rtvec_alloc (16);
  bool be = BYTES_BIG_ENDIAN;

  emit_insn (gen_altivec_vspltish (vzero, const0_rtx));

  RTVEC_ELT (v,  0) = gen_rtx_CONST_INT (QImode, be ? 16 : 15);
  RTVEC_ELT (v,  1) = gen_rtx_CONST_INT (QImode, be ?  8 : 16);
  RTVEC_ELT (v,  2) = gen_rtx_CONST_INT (QImode, be ? 16 : 14);
  RTVEC_ELT (v,  3) = gen_rtx_CONST_INT (QImode, be ?  9 : 16);
  RTVEC_ELT (v,  4) = gen_rtx_CONST_INT (QImode, be ? 16 : 13);
  RTVEC_ELT (v,  5) = gen_rtx_CONST_INT (QImode, be ? 10 : 16);
  RTVEC_ELT (v,  6) = gen_rtx_CONST_INT (QImode, be ? 16 : 12);
  RTVEC_ELT (v,  7) = gen_rtx_CONST_INT (QImode, be ? 11 : 16);
  RTVEC_ELT (v,  8) = gen_rtx_CONST_INT (QImode, be ? 16 : 11);
  RTVEC_ELT (v,  9) = gen_rtx_CONST_INT (QImode, be ? 12 : 16);
  RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, be ? 16 : 10);
  RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, be ? 13 : 16);
  RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, be ? 16 :  9);
  RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, be ? 14 : 16);
  RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, be ? 16 :  8);
  RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, be ? 15 : 16);

  emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v)));
  emit_insn (gen_vperm_v16qiv8hi (operands[0], operands[1], vzero, mask));
  DONE;
}")
3052
;; Zero-extend the low half of a V8HI to V4SI; same scheme as the hi
;; variant but selecting data halfwords 4-7 (bytes 8-15).
(define_expand "vec_unpacku_lo_v8hi"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
                     UNSPEC_VUPKLUH))]
  "TARGET_ALTIVEC"
  "
{
  rtx vzero = gen_reg_rtx (V4SImode);
  rtx mask = gen_reg_rtx (V16QImode);
  rtvec v = rtvec_alloc (16);
  bool be = BYTES_BIG_ENDIAN;

  emit_insn (gen_altivec_vspltisw (vzero, const0_rtx));

  RTVEC_ELT (v,  0) = gen_rtx_CONST_INT (QImode, be ? 16 : 15);
  RTVEC_ELT (v,  1) = gen_rtx_CONST_INT (QImode, be ? 17 : 14);
  RTVEC_ELT (v,  2) = gen_rtx_CONST_INT (QImode, be ?  8 : 17);
  RTVEC_ELT (v,  3) = gen_rtx_CONST_INT (QImode, be ?  9 : 16);
  RTVEC_ELT (v,  4) = gen_rtx_CONST_INT (QImode, be ? 16 : 13);
  RTVEC_ELT (v,  5) = gen_rtx_CONST_INT (QImode, be ? 17 : 12);
  RTVEC_ELT (v,  6) = gen_rtx_CONST_INT (QImode, be ? 10 : 17);
  RTVEC_ELT (v,  7) = gen_rtx_CONST_INT (QImode, be ? 11 : 16);
  RTVEC_ELT (v,  8) = gen_rtx_CONST_INT (QImode, be ? 16 : 11);
  RTVEC_ELT (v,  9) = gen_rtx_CONST_INT (QImode, be ? 17 : 10);
  RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, be ? 12 : 17);
  RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, be ? 13 : 16);
  RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, be ? 16 :  9);
  RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, be ? 17 :  8);
  RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, be ? 14 : 17);
  RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, be ? 15 : 16);

  emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v)));
  emit_insn (gen_vperm_v8hiv4si (operands[0], operands[1], vzero, mask));
  DONE;
}")
3088
;; Widening unsigned byte multiply, high half: compute even and odd
;; products (vmuleub/vmuloub) and merge them.  vmule/vmulo number elements
;; in big-endian order, so on little-endian targets their roles are swapped
;; and the merge operands reversed.
(define_expand "vec_widen_umult_hi_v16qi"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
                      (match_operand:V16QI 2 "register_operand" "v")]
                     UNSPEC_VMULWHUB))]
  "TARGET_ALTIVEC"
  "
{
  rtx ve = gen_reg_rtx (V8HImode);
  rtx vo = gen_reg_rtx (V8HImode);

  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_altivec_vmuleub (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmuloub (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrghh_direct (operands[0], ve, vo));
    }
  else
    {
      emit_insn (gen_altivec_vmuloub (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmuleub (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrghh_direct (operands[0], vo, ve));
    }
  DONE;
}")

;; As above, low half: merge-low instead of merge-high.
(define_expand "vec_widen_umult_lo_v16qi"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
                      (match_operand:V16QI 2 "register_operand" "v")]
                     UNSPEC_VMULWLUB))]
  "TARGET_ALTIVEC"
  "
{
  rtx ve = gen_reg_rtx (V8HImode);
  rtx vo = gen_reg_rtx (V8HImode);

  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_altivec_vmuleub (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmuloub (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrglh_direct (operands[0], ve, vo));
    }
  else
    {
      emit_insn (gen_altivec_vmuloub (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmuleub (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrglh_direct (operands[0], vo, ve));
    }
  DONE;
}")
3140
;; Widening signed byte multiply, high half: even/odd products via
;; vmulesb/vmulosb, merged high; see the unsigned variant above for the
;; endianness swap rationale.
(define_expand "vec_widen_smult_hi_v16qi"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
                      (match_operand:V16QI 2 "register_operand" "v")]
                     UNSPEC_VMULWHSB))]
  "TARGET_ALTIVEC"
  "
{
  rtx ve = gen_reg_rtx (V8HImode);
  rtx vo = gen_reg_rtx (V8HImode);

  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_altivec_vmulesb (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmulosb (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrghh_direct (operands[0], ve, vo));
    }
  else
    {
      emit_insn (gen_altivec_vmulosb (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmulesb (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrghh_direct (operands[0], vo, ve));
    }
  DONE;
}")

;; As above, low half.
(define_expand "vec_widen_smult_lo_v16qi"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
                      (match_operand:V16QI 2 "register_operand" "v")]
                     UNSPEC_VMULWLSB))]
  "TARGET_ALTIVEC"
  "
{
  rtx ve = gen_reg_rtx (V8HImode);
  rtx vo = gen_reg_rtx (V8HImode);

  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_altivec_vmulesb (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmulosb (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrglh_direct (operands[0], ve, vo));
    }
  else
    {
      emit_insn (gen_altivec_vmulosb (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmulesb (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrglh_direct (operands[0], vo, ve));
    }
  DONE;
}")
3192
;; Widening unsigned halfword multiply, high half: even/odd products via
;; vmuleuh/vmulouh, merged with vmrghw; endianness handled as in the byte
;; variants above.
(define_expand "vec_widen_umult_hi_v8hi"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
                      (match_operand:V8HI 2 "register_operand" "v")]
                     UNSPEC_VMULWHUH))]
  "TARGET_ALTIVEC"
  "
{
  rtx ve = gen_reg_rtx (V4SImode);
  rtx vo = gen_reg_rtx (V4SImode);

  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_altivec_vmuleuh (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmulouh (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrghw_direct (operands[0], ve, vo));
    }
  else
    {
      emit_insn (gen_altivec_vmulouh (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmuleuh (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrghw_direct (operands[0], vo, ve));
    }
  DONE;
}")

;; As above, low half.
(define_expand "vec_widen_umult_lo_v8hi"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
                      (match_operand:V8HI 2 "register_operand" "v")]
                     UNSPEC_VMULWLUH))]
  "TARGET_ALTIVEC"
  "
{
  rtx ve = gen_reg_rtx (V4SImode);
  rtx vo = gen_reg_rtx (V4SImode);

  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_altivec_vmuleuh (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmulouh (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrglw_direct (operands[0], ve, vo));
    }
  else
    {
      emit_insn (gen_altivec_vmulouh (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmuleuh (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrglw_direct (operands[0], vo, ve));
    }
  DONE;
}")
3244
;; Widening signed halfword multiply, high half: even/odd products via
;; vmulesh/vmulosh, merged with vmrghw; endianness handled as above.
(define_expand "vec_widen_smult_hi_v8hi"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
                      (match_operand:V8HI 2 "register_operand" "v")]
                     UNSPEC_VMULWHSH))]
  "TARGET_ALTIVEC"
  "
{
  rtx ve = gen_reg_rtx (V4SImode);
  rtx vo = gen_reg_rtx (V4SImode);

  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_altivec_vmulesh (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmulosh (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrghw_direct (operands[0], ve, vo));
    }
  else
    {
      emit_insn (gen_altivec_vmulosh (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmulesh (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrghw_direct (operands[0], vo, ve));
    }
  DONE;
}")

;; As above, low half.
(define_expand "vec_widen_smult_lo_v8hi"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
                      (match_operand:V8HI 2 "register_operand" "v")]
                     UNSPEC_VMULWLSH))]
  "TARGET_ALTIVEC"
  "
{
  rtx ve = gen_reg_rtx (V4SImode);
  rtx vo = gen_reg_rtx (V4SImode);

  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_altivec_vmulesh (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmulosh (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrglw_direct (operands[0], ve, vo));
    }
  else
    {
      emit_insn (gen_altivec_vmulosh (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmulesh (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrglw_direct (operands[0], vo, ve));
    }
  DONE;
}")
3296
;; Pack with modulo (truncating) semantics; expands straight to the
;; UNSPEC_VPACK_UNS_UNS_MOD pattern defined elsewhere in this file.
(define_expand "vec_pack_trunc_<mode>"
  [(set (match_operand:<VP_small> 0 "register_operand" "=v")
        (unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
			    (match_operand:VP 2 "register_operand" "v")]
                      UNSPEC_VPACK_UNS_UNS_MOD))]
  "<VI_unit>"
  "")
3304
;; Byte multiply, low-part result: form the even and odd halfword products
;; with vmulesb/vmulosb, then use vperm to pick the low byte of each
;; product, interleaving even and odd back into element order.
(define_expand "mulv16qi3"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
        (mult:V16QI (match_operand:V16QI 1 "register_operand" "v")
                    (match_operand:V16QI 2 "register_operand" "v")))]
  "TARGET_ALTIVEC"
  "
{
  rtx even = gen_reg_rtx (V8HImode);
  rtx odd = gen_reg_rtx (V8HImode);
  rtx mask = gen_reg_rtx (V16QImode);
  rtvec v = rtvec_alloc (16);
  int i;

  /* Mask indices 0-15 select from the even products, 16-31 from the odd;
     each picks the low-order byte of a halfword product.  */
  for (i = 0; i < 8; ++i) {
    RTVEC_ELT (v, 2 * i)
     = gen_rtx_CONST_INT (QImode, BYTES_BIG_ENDIAN ? 2 * i + 1 : 31 - 2 * i);
    RTVEC_ELT (v, 2 * i + 1)
     = gen_rtx_CONST_INT (QImode, BYTES_BIG_ENDIAN ? 2 * i + 17 : 15 - 2 * i);
  }

  emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v)));
  emit_insn (gen_altivec_vmulesb (even, operands[1], operands[2]));
  emit_insn (gen_altivec_vmulosb (odd, operands[1], operands[2]));
  emit_insn (gen_altivec_vperm_v8hiv16qi (operands[0], even, odd, mask));
  DONE;
}")
3331
;; Float negate: flip the sign bit of each element by XORing with a vector
;; of -0.0 (0x80000000), built by splatting -1 and shifting each word left
;; by its own value modulo 32, i.e. by 31.
(define_expand "altivec_negv4sf2"
  [(use (match_operand:V4SF 0 "register_operand" ""))
   (use (match_operand:V4SF 1 "register_operand" ""))]
  "TARGET_ALTIVEC"
  "
{
  rtx neg0;

  /* Generate [-0.0, -0.0, -0.0, -0.0].  */
  neg0 = gen_reg_rtx (V4SImode);
  emit_insn (gen_altivec_vspltisw (neg0, constm1_rtx));
  emit_insn (gen_vashlv4si3 (neg0, neg0, neg0));

  /* XOR */
  emit_insn (gen_xorv4sf3 (operands[0],
			   gen_lowpart (V4SFmode, neg0), operands[1]));

  DONE;
}")
3351
;; The Vector SIMD PEM v2.06c defines LVLX, LVLXL, LVRX, LVRXL,
;; STVLX, STVLXL, STVRX and STVRXL; these are available only on Cell.
;; Unaligned left/right load fragments (Cell only).  The BLK memory operand
;; reflects that these read a CPU-determined portion of a quadword rather
;; than a fixed-mode object.
(define_insn "altivec_lvlx"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
        (unspec:V16QI [(match_operand:BLK 1 "memory_operand" "Z")]
		      UNSPEC_LVLX))]
  "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
  "lvlx %0,%y1"
  [(set_attr "type" "vecload")])

;; LRU-hinted variant of lvlx.
(define_insn "altivec_lvlxl"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
        (unspec:V16QI [(match_operand:BLK 1 "memory_operand" "Z")]
		      UNSPEC_LVLXL))]
  "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
  "lvlxl %0,%y1"
  [(set_attr "type" "vecload")])

(define_insn "altivec_lvrx"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
        (unspec:V16QI [(match_operand:BLK 1 "memory_operand" "Z")]
		      UNSPEC_LVRX))]
  "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
  "lvrx %0,%y1"
  [(set_attr "type" "vecload")])

;; LRU-hinted variant of lvrx.
(define_insn "altivec_lvrxl"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
        (unspec:V16QI [(match_operand:BLK 1 "memory_operand" "Z")]
		      UNSPEC_LVRXL))]
  "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
  "lvrxl %0,%y1"
  [(set_attr "type" "vecload")])

;; Unaligned left/right store fragments (Cell only); the unspec in the
;; parallel distinguishes each from a plain vector store.
(define_insn "altivec_stvlx"
  [(parallel
    [(set (match_operand:V16QI 0 "memory_operand" "=Z")
	  (match_operand:V16QI 1 "register_operand" "v"))
     (unspec [(const_int 0)] UNSPEC_STVLX)])]
  "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
  "stvlx %1,%y0"
  [(set_attr "type" "vecstore")])

(define_insn "altivec_stvlxl"
  [(parallel
    [(set (match_operand:V16QI 0 "memory_operand" "=Z")
	  (match_operand:V16QI 1 "register_operand" "v"))
     (unspec [(const_int 0)] UNSPEC_STVLXL)])]
  "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
  "stvlxl %1,%y0"
  [(set_attr "type" "vecstore")])

(define_insn "altivec_stvrx"
  [(parallel
    [(set (match_operand:V16QI 0 "memory_operand" "=Z")
	  (match_operand:V16QI 1 "register_operand" "v"))
     (unspec [(const_int 0)] UNSPEC_STVRX)])]
  "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
  "stvrx %1,%y0"
  [(set_attr "type" "vecstore")])

(define_insn "altivec_stvrxl"
  [(parallel
    [(set (match_operand:V16QI 0 "memory_operand" "=Z")
	  (match_operand:V16QI 1 "register_operand" "v"))
     (unspec [(const_int 0)] UNSPEC_STVRXL)])]
  "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
  "stvrxl %1,%y0"
  [(set_attr "type" "vecstore")])
3421
;; Halfword-to-float conversions: unpack (sign- or zero-extending) the
;; requested half to V4SI, then convert with vcfsx/vcfux (scale factor 0).
(define_expand "vec_unpacks_float_hi_v8hi"
 [(set (match_operand:V4SF 0 "register_operand" "")
        (unspec:V4SF [(match_operand:V8HI 1 "register_operand" "")]
                     UNSPEC_VUPKHS_V4SF))]
  "TARGET_ALTIVEC"
  "
{
  rtx tmp = gen_reg_rtx (V4SImode);

  emit_insn (gen_vec_unpacks_hi_v8hi (tmp, operands[1]));
  emit_insn (gen_altivec_vcfsx (operands[0], tmp, const0_rtx));
  DONE;
}")

(define_expand "vec_unpacks_float_lo_v8hi"
 [(set (match_operand:V4SF 0 "register_operand" "")
        (unspec:V4SF [(match_operand:V8HI 1 "register_operand" "")]
                     UNSPEC_VUPKLS_V4SF))]
  "TARGET_ALTIVEC"
  "
{
  rtx tmp = gen_reg_rtx (V4SImode);

  emit_insn (gen_vec_unpacks_lo_v8hi (tmp, operands[1]));
  emit_insn (gen_altivec_vcfsx (operands[0], tmp, const0_rtx));
  DONE;
}")

(define_expand "vec_unpacku_float_hi_v8hi"
 [(set (match_operand:V4SF 0 "register_operand" "")
        (unspec:V4SF [(match_operand:V8HI 1 "register_operand" "")]
                     UNSPEC_VUPKHU_V4SF))]
  "TARGET_ALTIVEC"
  "
{
  rtx tmp = gen_reg_rtx (V4SImode);

  emit_insn (gen_vec_unpacku_hi_v8hi (tmp, operands[1]));
  emit_insn (gen_altivec_vcfux (operands[0], tmp, const0_rtx));
  DONE;
}")

(define_expand "vec_unpacku_float_lo_v8hi"
 [(set (match_operand:V4SF 0 "register_operand" "")
        (unspec:V4SF [(match_operand:V8HI 1 "register_operand" "")]
                     UNSPEC_VUPKLU_V4SF))]
  "TARGET_ALTIVEC"
  "
{
  rtx tmp = gen_reg_rtx (V4SImode);

  emit_insn (gen_vec_unpacku_lo_v8hi (tmp, operands[1]));
  emit_insn (gen_altivec_vcfux (operands[0], tmp, const0_rtx));
  DONE;
}")
3477
3478
3479;; Power8/power9 vector instructions encoded as Altivec instructions
3480
3481;; Vector count leading zeros
;; Anonymous matcher for the standard clz<mode>2 operation on the VI2
;; integer-vector modes; <wd> selects the element-width letter for the
;; vclz[bhwd] mnemonic.  ISA 2.07 (power8) vector.
(define_insn "*p8v_clz<mode>2"
  [(set (match_operand:VI2 0 "register_operand" "=v")
	(clz:VI2 (match_operand:VI2 1 "register_operand" "v")))]
  "TARGET_P8_VECTOR"
  "vclz<wd> %0,%1"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])
3489
3490;; Vector absolute difference unsigned
;; Named expander for the builtins; the RTL template matches the
;; *p9_vadu<mode>3 insn below directly, so no preparation code is needed.
(define_expand "vadu<mode>3"
  [(set (match_operand:VI 0 "register_operand")
        (unspec:VI [(match_operand:VI 1 "register_operand")
		    (match_operand:VI 2 "register_operand")]
         UNSPEC_VADU))]
  "TARGET_P9_VECTOR")
3497
3498;; Vector absolute difference unsigned
;; ISA 3.0 (power9) vabsdu[bhw]: element-wise absolute difference of
;; unsigned vectors; <wd> supplies the element-width mnemonic letter.
(define_insn "*p9_vadu<mode>3"
  [(set (match_operand:VI 0 "register_operand" "=v")
        (unspec:VI [(match_operand:VI 1 "register_operand" "v")
		    (match_operand:VI 2 "register_operand" "v")]
         UNSPEC_VADU))]
  "TARGET_P9_VECTOR"
  "vabsdu<wd> %0,%1,%2"
  [(set_attr "type" "vecsimple")])
3507
3508;; Vector count trailing zeros
;; Anonymous matcher for the standard ctz<mode>2 operation on the VI2
;; modes; emits vctz[bhwd].  ISA 3.0 (power9) vector.
(define_insn "*p9v_ctz<mode>2"
  [(set (match_operand:VI2 0 "register_operand" "=v")
	(ctz:VI2 (match_operand:VI2 1 "register_operand" "v")))]
  "TARGET_P9_VECTOR"
  "vctz<wd> %0,%1"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])
3516
3517;; Vector population count
;; Anonymous matcher for the standard popcount<mode>2 operation on the VI2
;; modes; emits vpopcnt[bhwd].  ISA 2.07 (power8) vector.
(define_insn "*p8v_popcount<mode>2"
  [(set (match_operand:VI2 0 "register_operand" "=v")
        (popcount:VI2 (match_operand:VI2 1 "register_operand" "v")))]
  "TARGET_P8_VECTOR"
  "vpopcnt<wd> %0,%1"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])
3525
3526;; Vector parity
;; Anonymous matcher for the standard parity<mode>2 operation on the
;; VParity modes; emits vprtyb[wd] (note the fixed "b" in the mnemonic
;; stem, with <wd> appended).  ISA 3.0 (power9) vector.
(define_insn "*p9v_parity<mode>2"
  [(set (match_operand:VParity 0 "register_operand" "=v")
        (parity:VParity (match_operand:VParity 1 "register_operand" "v")))]
  "TARGET_P9_VECTOR"
  "vprtyb<wd> %0,%1"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])
3534
3535;; Vector Gather Bits by Bytes by Doubleword
;; vgbbd (Vector Gather Bits by Bytes by Doubleword); kept as an opaque
;; unspec since the bit permutation has no RTL equivalent.
(define_insn "p8v_vgbbd"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
	(unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")]
		      UNSPEC_VGBBD))]
  "TARGET_P8_VECTOR"
  "vgbbd %0,%1"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])
3544
3545
3546;; 128-bit binary integer arithmetic
3547;; We have a special container type (V1TImode) to allow operations using the
3548;; ISA 2.07 128-bit binary support to target the VMX/altivec registers without
3549;; having to worry about the register allocator deciding GPRs are better.
3550
;; 128-bit add (modulo 2**128) expressed as a genuine plus on V1TImode so
;; the optimizers can reason about it, while the V1TI container keeps the
;; value in the VMX registers (see comment above).
(define_insn "altivec_vadduqm"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(plus:V1TI (match_operand:V1TI 1 "register_operand" "v")
		   (match_operand:V1TI 2 "register_operand" "v")))]
  "TARGET_VADDUQM"
  "vadduqm %0,%1,%2"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])
3559
;; vaddcuq: produce the carry out of a 128-bit unsigned add; opaque unspec
;; since there is no RTL carry-extraction operation.
(define_insn "altivec_vaddcuq"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
		      (match_operand:V1TI 2 "register_operand" "v")]
		     UNSPEC_VADDCUQ))]
  "TARGET_VADDUQM"
  "vaddcuq %0,%1,%2"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])
3569
;; 128-bit subtract (modulo 2**128) expressed as a genuine minus on
;; V1TImode, mirroring altivec_vadduqm.
(define_insn "altivec_vsubuqm"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(minus:V1TI (match_operand:V1TI 1 "register_operand" "v")
		    (match_operand:V1TI 2 "register_operand" "v")))]
  "TARGET_VADDUQM"
  "vsubuqm %0,%1,%2"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])
3578
;; vsubcuq: produce the carry (borrow indication) out of a 128-bit
;; unsigned subtract; opaque unspec like vaddcuq.
(define_insn "altivec_vsubcuq"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
		      (match_operand:V1TI 2 "register_operand" "v")]
		     UNSPEC_VSUBCUQ))]
  "TARGET_VADDUQM"
  "vsubcuq %0,%1,%2"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])
3588
;; vaddeuqm: 128-bit add extended by a third operand (presumably the
;; carry-in, as in the vaddcuq/vaddecuq carry chain); opaque unspec.
(define_insn "altivec_vaddeuqm"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
		      (match_operand:V1TI 2 "register_operand" "v")
		      (match_operand:V1TI 3 "register_operand" "v")]
		     UNSPEC_VADDEUQM))]
  "TARGET_VADDUQM"
  "vaddeuqm %0,%1,%2,%3"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])
3599
;; vaddecuq: carry out of the extended (three-operand) 128-bit add;
;; opaque unspec.
(define_insn "altivec_vaddecuq"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
		      (match_operand:V1TI 2 "register_operand" "v")
		      (match_operand:V1TI 3 "register_operand" "v")]
		     UNSPEC_VADDECUQ))]
  "TARGET_VADDUQM"
  "vaddecuq %0,%1,%2,%3"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])
3610
;; vsubeuqm: 128-bit subtract extended by a third operand (presumably the
;; carry-in, matching the vaddeuqm form); opaque unspec.
(define_insn "altivec_vsubeuqm"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
		      (match_operand:V1TI 2 "register_operand" "v")
		      (match_operand:V1TI 3 "register_operand" "v")]
		     UNSPEC_VSUBEUQM))]
  "TARGET_VADDUQM"
  "vsubeuqm %0,%1,%2,%3"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])
3621
;; vsubecuq: carry out of the extended (three-operand) 128-bit subtract;
;; opaque unspec.
(define_insn "altivec_vsubecuq"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
		      (match_operand:V1TI 2 "register_operand" "v")
		      (match_operand:V1TI 3 "register_operand" "v")]
		     UNSPEC_VSUBECUQ))]
  "TARGET_VADDUQM"
  "vsubecuq %0,%1,%2,%3"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])
3632
3633;; We use V2DI as the output type to simplify converting the permute
3634;; bits into an integer
;; vbpermq with a V2DI result type (see comment above: simplifies reading
;; the permuted bits back as an integer).
(define_insn "altivec_vbpermq"
  [(set (match_operand:V2DI 0 "register_operand" "=v")
	(unspec:V2DI [(match_operand:V16QI 1 "register_operand" "v")
		      (match_operand:V16QI 2 "register_operand" "v")]
		     UNSPEC_VBPERMQ))]
  "TARGET_P8_VECTOR"
  "vbpermq %0,%1,%2"
  [(set_attr "type" "vecperm")])
3643
3644; One of the vector API interfaces requires returning vector unsigned char.
;; Same vbpermq instruction, but typed V16QI throughout for the API that
;; returns vector unsigned char (see comment above).  Distinguished from
;; altivec_vbpermq only by the result mode.
(define_insn "altivec_vbpermq2"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
	(unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
		       (match_operand:V16QI 2 "register_operand" "v")]
		      UNSPEC_VBPERMQ))]
  "TARGET_P8_VECTOR"
  "vbpermq %0,%1,%2"
  [(set_attr "type" "vecperm")])
3653
;; ISA 3.0 vbpermd: doubleword variant of vbpermq (V2DI source, V16QI
;; bit-index selector); opaque unspec.
(define_insn "altivec_vbpermd"
  [(set (match_operand:V2DI 0 "register_operand" "=v")
	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "v")
		      (match_operand:V16QI 2 "register_operand" "v")]
		     UNSPEC_VBPERMD))]
  "TARGET_P9_VECTOR"
  "vbpermd %0,%1,%2"
  [(set_attr "type" "vecsimple")])
3662
3663;; Decimal Integer operations
;; Iterator over the two BCD arithmetic unspecs so one pattern serves
;; both bcdadd and bcdsub.
(define_int_iterator UNSPEC_BCD_ADD_SUB [UNSPEC_BCDADD UNSPEC_BCDSUB])

;; Maps each BCD unspec to the "add"/"sub" fragment used in pattern names
;; and in the bcd<bcd_add_sub>. mnemonic.
(define_int_attr bcd_add_sub [(UNSPEC_BCDADD "add")
			      (UNSPEC_BCDSUB "sub")])

;; Comparison codes for which bcd<bcd_add_sub>_<code> expanders are
;; generated (unordered covers the BCD-invalid/overflow case; see the
;; V2DF comment further down).
(define_code_iterator BCD_TEST [eq lt gt unordered])
3670
;; bcdadd./bcdsub. computing the value only.  Operand 3 is a const 0/1
;; emitted as the instruction's fourth field (presumably the PS bit --
;; confirm against the ISA).  The dot-form instruction always writes CR6,
;; so CR6 is clobbered here even though the result is unused.
(define_insn "bcd<bcd_add_sub>"
  [(set (match_operand:V1TI 0 "gpc_reg_operand" "=v")
	(unspec:V1TI [(match_operand:V1TI 1 "gpc_reg_operand" "v")
		      (match_operand:V1TI 2 "gpc_reg_operand" "v")
		      (match_operand:QI 3 "const_0_to_1_operand" "n")]
		     UNSPEC_BCD_ADD_SUB))
   (clobber (reg:CCFP CR6_REGNO))]
  "TARGET_P8_VECTOR"
  "bcd<bcd_add_sub>. %0,%1,%2,%3"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])
3682
3683;; Use a floating point type (V2DFmode) for the compare to set CR6 so that we
3684;; can use the unordered test for BCD nans and add/subtracts that overflow.  An
3685;; UNORDERED test on an integer type (like V1TImode) is not defined.  The type
3686;; probably should be one that can go in the VMX (Altivec) registers, so we
3687;; can't use DDmode or DFmode.
;; bcdadd./bcdsub. used only for its CR6 result: the compare against a
;; V2DF zero (see the comment above for why V2DF) models the CR6 setting,
;; and the numeric result register is just a scratch.
(define_insn "*bcd<bcd_add_sub>_test"
  [(set (reg:CCFP CR6_REGNO)
	(compare:CCFP
	 (unspec:V2DF [(match_operand:V1TI 1 "register_operand" "v")
		       (match_operand:V1TI 2 "register_operand" "v")
		       (match_operand:QI 3 "const_0_to_1_operand" "i")]
		      UNSPEC_BCD_ADD_SUB)
	 (match_operand:V2DF 4 "zero_constant" "j")))
   (clobber (match_scratch:V1TI 0 "=v"))]
  "TARGET_P8_VECTOR"
  "bcd<bcd_add_sub>. %0,%1,%2,%3"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])
3701
;; Combined form: one bcdadd./bcdsub. producing both the numeric result
;; (operand 0) and the CR6 test.  This is the pattern the peephole2 at
;; the end of the file creates.
(define_insn "*bcd<bcd_add_sub>_test2"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
		      (match_operand:V1TI 2 "register_operand" "v")
		      (match_operand:QI 3 "const_0_to_1_operand" "i")]
		     UNSPEC_BCD_ADD_SUB))
   (set (reg:CCFP CR6_REGNO)
	(compare:CCFP
	 (unspec:V2DF [(match_dup 1)
		       (match_dup 2)
		       (match_dup 3)]
		      UNSPEC_BCD_ADD_SUB)
	 (match_operand:V2DF 4 "zero_constant" "j")))]
  "TARGET_P8_VECTOR"
  "bcd<bcd_add_sub>. %0,%1,%2,%3"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])
3719
;; ISA 3.0 darn (random number) with L field 0: 32-bit result, so this
;; form needs no TARGET_64BIT guard.
(define_insn "darn_32"
  [(set (match_operand:SI 0 "register_operand" "=r")
        (unspec:SI [(const_int 0)] UNSPEC_DARN_32))]
  "TARGET_P9_MISC"
  "darn %0,0"
  [(set_attr "type" "integer")])
3726
;; darn with L field 2 ("raw" form per the pattern name): 64-bit result,
;; hence the TARGET_64BIT requirement.
(define_insn "darn_raw"
  [(set (match_operand:DI 0 "register_operand" "=r")
        (unspec:DI [(const_int 0)] UNSPEC_DARN_RAW))]
  "TARGET_P9_MISC && TARGET_64BIT"
  "darn %0,2"
  [(set_attr "type" "integer")])
3733
;; darn with L field 1: 64-bit conditioned random number.
(define_insn "darn"
  [(set (match_operand:DI 0 "register_operand" "=r")
        (unspec:DI [(const_int 0)] UNSPEC_DARN))]
  "TARGET_P9_MISC && TARGET_64BIT"
  "darn %0,1"
  [(set_attr "type" "integer")])
3740
3741;; Test byte within range.
3742;;
3743;; The bytes of operand 1 are organized as xx:xx:xx:vv, where xx
3744;; represents a byte whose value is ignored in this context and
3745;; vv, the least significant byte, holds the byte value that is to
3746;; be tested for membership within the range specified by operand 2.
3747;; The bytes of operand 2 are organized as xx:xx:hi:lo.
3748;;
3749;; Return in target register operand 0 a value of 1 if lo <= vv and
3750;; vv <= hi.  Otherwise, set register operand 0 to 0.
3751;;
3752;; Though the instructions to which this expansion maps operate on
3753;; 64-bit registers, the current implementation only operates on
3754;; SI-mode operands as the high-order bits provide no information
3755;; that is not already available in the low-order bits.  To avoid the
3756;; costs of data widening operations, future enhancements might allow
3757;; DI mode for operand 0 and/or might allow operand 1 to be QI mode.
(define_expand "cmprb"
  ;; Step 1: cmprb sets the CC pseudo (operand 3, created below).
  [(set (match_dup 3)
	(unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
		    (match_operand:SI 2 "gpc_reg_operand" "r")]
	 UNSPEC_CMPRB))
   ;; Step 2: setb-style extraction of the CC result into a GPR
   ;; (-1 if LT, 1 if GT, else 0).  cmprb only sets the GT bit (see
   ;; *cmprb_internal below), so the observable result is 1 or 0.
   (set (match_operand:SI 0 "gpc_reg_operand" "=r")
	(if_then_else:SI (lt (match_dup 3)
			     (const_int 0))
			 (const_int -1)
			 (if_then_else (gt (match_dup 3)
					   (const_int 0))
				       (const_int 1)
				       (const_int 0))))]
  "TARGET_P9_MISC"
{
  ;; Fresh CC pseudo to carry the cmprb result between the two insns.
  operands[3] = gen_reg_rtx (CCmode);
})
3775
3776;; The bytes of operand 1 are organized as xx:xx:xx:vv, where xx
3777;; represents a byte whose value is ignored in this context and
3778;; vv, the least significant byte, holds the byte value that is to
3779;; be tested for membership within the range specified by operand 2.
3780;; The bytes of operand 2 are organized as xx:xx:hi:lo.
3781;;
3782;; Set bit 1 (the GT bit, 0x4) of CR register operand 0 to 1 if
3783;; lo <= vv and vv <= hi.  Otherwise, set the GT bit to 0.  The other
3784;; 3 bits of the target CR register are all set to 0.
;; The cmprb instruction itself, L field 0 (single-range form); see the
;; comment above for the CR-bit semantics.
(define_insn "*cmprb_internal"
  [(set (match_operand:CC 0 "cc_reg_operand" "=y")
	(unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
		    (match_operand:SI 2 "gpc_reg_operand" "r")]
	 UNSPEC_CMPRB))]
  "TARGET_P9_MISC"
  "cmprb %0,0,%1,%2"
  [(set_attr "type" "logical")])
3793
3794;; Set operand 0 register to -1 if the LT bit (0x8) of condition
3795;; register operand 1 is on.  Otherwise, set operand 0 register to 1
3796;; if the GT bit (0x4) of condition register operand 1 is on.
3797;; Otherwise, set operand 0 to 0.  Note that the result stored into
3798;; register operand 0 is non-zero iff either the LT or GT bits are on
3799;; within condition register operand 1.
;; ISA 3.0 setb on a signed (CCmode) comparison result; the nested
;; if_then_else encodes -1 for LT, 1 for GT, 0 otherwise (see comment
;; above).
(define_insn "setb_signed"
   [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
	 (if_then_else:SI (lt (match_operand:CC 1 "cc_reg_operand" "y")
			      (const_int 0))
			  (const_int -1)
			  (if_then_else (gt (match_dup 1)
					    (const_int 0))
					(const_int 1)
					(const_int 0))))]
  "TARGET_P9_MISC"
  "setb %0,%1"
  [(set_attr "type" "logical")])
3812
;; Like setb_signed, but the condition register holds an unsigned
;; (CCUNSmode) comparison, so the codes are ltu/gtu.  Same -1/1/0
;; encoding of the LT/GT bits.
(define_insn "setb_unsigned"
   [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
	 (if_then_else:SI (ltu (match_operand:CCUNS 1 "cc_reg_operand" "y")
			       (const_int 0))
			  (const_int -1)
			  (if_then_else (gtu (match_dup 1)
					     (const_int 0))
					(const_int 1)
					(const_int 0))))]
  "TARGET_P9_MISC"
  "setb %0,%1"
  [(set_attr "type" "logical")])
3825
3826;; Test byte within two ranges.
3827;;
3828;; The bytes of operand 1 are organized as xx:xx:xx:vv, where xx
3829;; represents a byte whose value is ignored in this context and
3830;; vv, the least significant byte, holds the byte value that is to
3831;; be tested for membership within the range specified by operand 2.
3832;; The bytes of operand 2 are organized as hi_1:lo_1:hi_2:lo_2.
3833;;
3834;; Return in target register operand 0 a value of 1 if (lo_1 <= vv and
3835;; vv <= hi_1) or if (lo_2 <= vv and vv <= hi_2).  Otherwise, set register
3836;; operand 0 to 0.
3837;;
3838;; Though the instructions to which this expansion maps operate on
3839;; 64-bit registers, the current implementation only operates on
3840;; SI-mode operands as the high-order bits provide no information
3841;; that is not already available in the low-order bits.  To avoid the
3842;; costs of data widening operations, future enhancements might allow
3843;; DI mode for operand 0 and/or might allow operand 1 to be QI mode.
(define_expand "cmprb2"
  ;; Step 1: cmprb (two-range form, UNSPEC_CMPRB2) sets the CC pseudo.
  [(set (match_dup 3)
	(unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
		    (match_operand:SI 2 "gpc_reg_operand" "r")]
	 UNSPEC_CMPRB2))
   ;; Step 2: setb-style extraction into a GPR, as in the cmprb expander.
   (set (match_operand:SI 0 "gpc_reg_operand" "=r")
	(if_then_else:SI (lt (match_dup 3)
			     (const_int 0))
			 (const_int -1)
			 (if_then_else (gt (match_dup 3)
					   (const_int 0))
				       (const_int 1)
				       (const_int 0))))]
  "TARGET_P9_MISC"
{
  ;; Fresh CC pseudo to carry the comparison result between the insns.
  operands[3] = gen_reg_rtx (CCmode);
})
3861
3862;; The bytes of operand 1 are organized as xx:xx:xx:vv, where xx
3863;; represents a byte whose value is ignored in this context and
3864;; vv, the least significant byte, holds the byte value that is to
3865;; be tested for membership within the ranges specified by operand 2.
3866;; The bytes of operand 2 are organized as hi_1:lo_1:hi_2:lo_2.
3867;;
3868;; Set bit 1 (the GT bit, 0x4) of CR register operand 0 to 1 if
3869;; (lo_1 <= vv and vv <= hi_1) or if (lo_2 <= vv and vv <= hi_2).
3870;; Otherwise, set the GT bit to 0.  The other 3 bits of the target
3871;; CR register are all set to 0.
;; The cmprb instruction with L field 1 (two-range form); see the
;; comment above for the CR-bit semantics.
(define_insn "*cmprb2_internal"
  [(set (match_operand:CC 0 "cc_reg_operand" "=y")
	(unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
		    (match_operand:SI 2 "gpc_reg_operand" "r")]
	 UNSPEC_CMPRB2))]
  "TARGET_P9_MISC"
  "cmprb %0,1,%1,%2"
  [(set_attr "type" "logical")])
3880
3881;; Test byte membership within set of 8 bytes.
3882;;
3883;; The bytes of operand 1 are organized as xx:xx:xx:vv, where xx
3884;; represents a byte whose value is ignored in this context and
3885;; vv, the least significant byte, holds the byte value that is to
3886;; be tested for membership within the set specified by operand 2.
3887;; The bytes of operand 2 are organized as e0:e1:e2:e3:e4:e5:e6:e7.
3888;;
3889;; Return in target register operand 0 a value of 1 if vv equals one
3890;; of the values e0, e1, e2, e3, e4, e5, e6, or e7.  Otherwise, set
3891;; register operand 0 to 0.  Note that the 8 byte values held within
3892;; operand 2 need not be unique.
3893;;
3894;; Though the instructions to which this expansion maps operate on
3895;; 64-bit registers, the current implementation requires that operands
3896;; 0 and 1 have mode SI as the high-order bits provide no information
3897;; that is not already available in the low-order bits.  To avoid the
3898;; costs of data widening operations, future enhancements might allow
3899;; DI mode for operand 0 and/or might allow operand 1 to be QI mode.
(define_expand "cmpeqb"
  ;; Step 1: cmpeqb compares the byte in operand 1 against the eight
  ;; bytes of the DImode operand 2, setting the CC pseudo.
  [(set (match_dup 3)
	(unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
		    (match_operand:DI 2 "gpc_reg_operand" "r")]
	 UNSPEC_CMPEQB))
   ;; Step 2: setb-style extraction into a GPR; only the GT bit can be
   ;; set (see *cmpeqb_internal), so the result is 1 or 0.
   (set (match_operand:SI 0 "gpc_reg_operand" "=r")
	(if_then_else:SI (lt (match_dup 3)
			     (const_int 0))
			 (const_int -1)
			 (if_then_else (gt (match_dup 3)
					   (const_int 0))
				       (const_int 1)
				       (const_int 0))))]
  "TARGET_P9_MISC && TARGET_64BIT"
{
  ;; Fresh CC pseudo to carry the comparison result between the insns.
  operands[3] = gen_reg_rtx (CCmode);
})
3917
3918;; The bytes of operand 1 are organized as xx:xx:xx:vv, where xx
3919;; represents a byte whose value is ignored in this context and
3920;; vv, the least significant byte, holds the byte value that is to
3921;; be tested for membership within the set specified by operand 2.
3922;; The bytes of operand 2 are organized as e0:e1:e2:e3:e4:e5:e6:e7.
3923;;
3924;; Set bit 1 (the GT bit, 0x4) of CR register operand 0 to 1 if vv
3925;; equals one of the values e0, e1, e2, e3, e4, e5, e6, or e7.  Otherwise,
3926;; set the GT bit to zero.  The other 3 bits of the target CR register
3927;; are all set to 0.
;; The cmpeqb instruction itself; see the comment above for the CR-bit
;; semantics.  Needs TARGET_64BIT for the DImode byte-set operand.
(define_insn "*cmpeqb_internal"
  [(set (match_operand:CC 0 "cc_reg_operand" "=y")
	 (unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
		     (match_operand:DI 2 "gpc_reg_operand" "r")]
	  UNSPEC_CMPEQB))]
  "TARGET_P9_MISC && TARGET_64BIT"
  "cmpeqb %0,%1,%2"
  [(set_attr "type" "logical")])
3936
;; Builtin expander: perform a BCD add/sub for its CR6 result only, then
;; reduce CR6 to an SImode flag with one of the BCD_TEST comparison codes
;; (eq/lt/gt/unordered).  Generates bcdadd_eq, bcdsub_gt, etc.
(define_expand "bcd<bcd_add_sub>_<code>"
  [(parallel [(set (reg:CCFP CR6_REGNO)
		   (compare:CCFP
		    (unspec:V2DF [(match_operand:V1TI 1 "register_operand" "")
				  (match_operand:V1TI 2 "register_operand" "")
				  (match_operand:QI 3 "const_0_to_1_operand" "")]
				 UNSPEC_BCD_ADD_SUB)
		    (match_dup 4)))
	      (clobber (match_scratch:V1TI 5 ""))])
   (set (match_operand:SI 0 "register_operand" "")
	(BCD_TEST:SI (reg:CCFP CR6_REGNO)
		     (const_int 0)))]
  "TARGET_P8_VECTOR"
{
  ;; The V2DF zero the CR6 compare is written against (see the V2DF
  ;; comment before *bcd<bcd_add_sub>_test).
  operands[4] = CONST0_RTX (V2DFmode);
})
3953
3954;; Peephole2 pattern to combine a bcdadd/bcdsub that calculates the value and
3955;; the bcdadd/bcdsub that tests the value.  The combiner won't work since
3956;; CR6 is a hard coded register.  Unfortunately, all of the Altivec predicate
3957;; support is hard coded to use the fixed register CR6 instead of creating
3958;; a register class for CR6.
3959
(define_peephole2
  ;; Match insn 1: the value-computing bcdadd./bcdsub. (CR6 clobbered).
  [(parallel [(set (match_operand:V1TI 0 "register_operand" "")
		   (unspec:V1TI [(match_operand:V1TI 1 "register_operand" "")
				 (match_operand:V1TI 2 "register_operand" "")
				 (match_operand:QI 3 "const_0_to_1_operand" "")]
				UNSPEC_BCD_ADD_SUB))
	      (clobber (reg:CCFP CR6_REGNO))])
   ;; Match insn 2: the same operation done again purely for the CR6 test
   ;; (result register is a scratch).
   (parallel [(set (reg:CCFP CR6_REGNO)
		   (compare:CCFP
		    (unspec:V2DF [(match_dup 1)
				  (match_dup 2)
				  (match_dup 3)]
				 UNSPEC_BCD_ADD_SUB)
		    (match_operand:V2DF 4 "zero_constant" "")))
	      (clobber (match_operand:V1TI 5 "register_operand" ""))])]
  "TARGET_P8_VECTOR"
  ;; Replace both with a single combined insn (*bcd<bcd_add_sub>_test2)
  ;; that produces the value and sets CR6 at once.
  [(parallel [(set (match_dup 0)
		   (unspec:V1TI [(match_dup 1)
				 (match_dup 2)
				 (match_dup 3)]
				UNSPEC_BCD_ADD_SUB))
	      (set (reg:CCFP CR6_REGNO)
		   (compare:CCFP
		    (unspec:V2DF [(match_dup 1)
				  (match_dup 2)
				  (match_dup 3)]
				 UNSPEC_BCD_ADD_SUB)
		    (match_dup 4)))])])
3988