;; AltiVec patterns.
;; Copyright (C) 2002-2018 Free Software Foundation, Inc.
;; Contributed by Aldy Hernandez (aldy@quesejoda.com)

;; This file is part of GCC.

;; GCC is free software; you can redistribute it and/or modify it
;; under the terms of the GNU General Public License as published
;; by the Free Software Foundation; either version 3, or (at your
;; option) any later version.

;; GCC is distributed in the hope that it will be useful, but WITHOUT
;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
;; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
;; License for more details.

;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.

(define_c_enum "unspec"
  [UNSPEC_VCMPBFP
   UNSPEC_VMSUMU
   UNSPEC_VMSUMM
   UNSPEC_VMSUMSHM
   UNSPEC_VMSUMUHS
   UNSPEC_VMSUMSHS
   UNSPEC_VMHADDSHS
   UNSPEC_VMHRADDSHS
   UNSPEC_VADDCUW
   UNSPEC_VADDU
   UNSPEC_VADDS
   UNSPEC_VAVGU
   UNSPEC_VAVGS
   UNSPEC_VMULEUB
   UNSPEC_VMULESB
   UNSPEC_VMULEUH
   UNSPEC_VMULESH
   UNSPEC_VMULEUW
   UNSPEC_VMULESW
   UNSPEC_VMULOUB
   UNSPEC_VMULOSB
   UNSPEC_VMULOUH
   UNSPEC_VMULOSH
   UNSPEC_VMULOUW
   UNSPEC_VMULOSW
   UNSPEC_VPKPX
   UNSPEC_VPACK_SIGN_SIGN_SAT
   UNSPEC_VPACK_SIGN_UNS_SAT
   UNSPEC_VPACK_UNS_UNS_SAT
   UNSPEC_VPACK_UNS_UNS_MOD
   UNSPEC_VPACK_UNS_UNS_MOD_DIRECT
   UNSPEC_VREVEV
   UNSPEC_VSLV4SI
   UNSPEC_VSLO
   UNSPEC_VSR
   UNSPEC_VSRO
   UNSPEC_VSUBCUW
   UNSPEC_VSUBU
   UNSPEC_VSUBS
   UNSPEC_VSUM4UBS
   UNSPEC_VSUM4S
   UNSPEC_VSUM2SWS
   UNSPEC_VSUMSWS
   UNSPEC_VPERM
   UNSPEC_VPERMR
   UNSPEC_VPERM_UNS
   UNSPEC_VRFIN
   UNSPEC_VCFUX
   UNSPEC_VCFSX
   UNSPEC_VCTUXS
   UNSPEC_VCTSXS
   UNSPEC_VLOGEFP
   UNSPEC_VEXPTEFP
   UNSPEC_VSLDOI
   UNSPEC_VUNPACK_HI_SIGN
   UNSPEC_VUNPACK_LO_SIGN
   UNSPEC_VUNPACK_HI_SIGN_DIRECT
   UNSPEC_VUNPACK_LO_SIGN_DIRECT
   UNSPEC_VUPKHPX
   UNSPEC_VUPKLPX
   UNSPEC_CONVERT_4F32_8I16
   UNSPEC_DST
   UNSPEC_DSTT
   UNSPEC_DSTST
   UNSPEC_DSTSTT
   UNSPEC_LVSL
   UNSPEC_LVSR
   UNSPEC_LVE
   UNSPEC_STVX
   UNSPEC_STVXL
   UNSPEC_STVE
   UNSPEC_SET_VSCR
   UNSPEC_GET_VRSAVE
   UNSPEC_LVX
   UNSPEC_REDUC_PLUS
   UNSPEC_VECSH
   UNSPEC_EXTEVEN_V4SI
   UNSPEC_EXTEVEN_V8HI
   UNSPEC_EXTEVEN_V16QI
   UNSPEC_EXTEVEN_V4SF
   UNSPEC_EXTODD_V4SI
   UNSPEC_EXTODD_V8HI
   UNSPEC_EXTODD_V16QI
   UNSPEC_EXTODD_V4SF
   UNSPEC_INTERHI_V4SI
   UNSPEC_INTERHI_V8HI
   UNSPEC_INTERHI_V16QI
   UNSPEC_INTERLO_V4SI
   UNSPEC_INTERLO_V8HI
   UNSPEC_INTERLO_V16QI
   UNSPEC_LVLX
   UNSPEC_LVLXL
   UNSPEC_LVRX
   UNSPEC_LVRXL
   UNSPEC_STVLX
   UNSPEC_STVLXL
   UNSPEC_STVRX
   UNSPEC_STVRXL
   UNSPEC_VADU
   UNSPEC_VSLV
   UNSPEC_VSRV
   UNSPEC_VMULWHUB
   UNSPEC_VMULWLUB
   UNSPEC_VMULWHSB
   UNSPEC_VMULWLSB
   UNSPEC_VMULWHUH
   UNSPEC_VMULWLUH
   UNSPEC_VMULWHSH
   UNSPEC_VMULWLSH
   UNSPEC_VUPKHUB
   UNSPEC_VUPKHUH
   UNSPEC_VUPKLUB
   UNSPEC_VUPKLUH
   UNSPEC_VPERMSI
   UNSPEC_VPERMHI
   UNSPEC_INTERHI
   UNSPEC_INTERLO
   UNSPEC_VUPKHS_V4SF
   UNSPEC_VUPKLS_V4SF
   UNSPEC_VUPKHU_V4SF
   UNSPEC_VUPKLU_V4SF
   UNSPEC_VGBBD
   UNSPEC_VMRGH_DIRECT
   UNSPEC_VMRGL_DIRECT
   UNSPEC_VSPLT_DIRECT
   UNSPEC_VMRGEW_DIRECT
   UNSPEC_VMRGOW_DIRECT
   UNSPEC_VSUMSWS_DIRECT
   UNSPEC_VADDCUQ
   UNSPEC_VADDEUQM
   UNSPEC_VADDECUQ
   UNSPEC_VSUBCUQ
   UNSPEC_VSUBEUQM
   UNSPEC_VSUBECUQ
   UNSPEC_VBPERMQ
   UNSPEC_VBPERMD
   UNSPEC_BCDADD
   UNSPEC_BCDSUB
   UNSPEC_BCD_OVERFLOW
   UNSPEC_VRLMI
   UNSPEC_VRLNM
])

(define_c_enum "unspecv"
  [UNSPECV_SET_VRSAVE
   UNSPECV_MTVSCR
   UNSPECV_MFVSCR
   UNSPECV_DSSALL
   UNSPECV_DSS
  ])

;; Like VI, defined in vector.md, but add ISA 2.07 integer vector ops
(define_mode_iterator VI2 [V4SI V8HI V16QI V2DI])
;; Short vec int modes
(define_mode_iterator VIshort [V8HI V16QI])
;; Longer vec int modes for rotate/mask ops
(define_mode_iterator VIlong [V2DI V4SI])
;; Vec float modes
(define_mode_iterator VF [V4SF])
;; Vec modes, pity mode iterators are not composable
(define_mode_iterator V [V4SI V8HI V16QI V4SF])
;; Vec modes for move/logical/permute ops; include vector types for moves
;; not otherwise handled by AltiVec (V2DF, V2DI, TI)
(define_mode_iterator VM [V4SI
			  V8HI
			  V16QI
			  V4SF
			  V2DF
			  V2DI
			  V1TI
			  TI
			  (KF "FLOAT128_VECTOR_P (KFmode)")
			  (TF "FLOAT128_VECTOR_P (TFmode)")])

;; Like VM, except don't do TImode
(define_mode_iterator VM2 [V4SI
			   V8HI
			   V16QI
			   V4SF
			   V2DF
			   V2DI
			   V1TI
			   (KF "FLOAT128_VECTOR_P (KFmode)")
			   (TF "FLOAT128_VECTOR_P (TFmode)")])

;; Map the vector single-precision to double-precision conversions to the
;; instruction suffix: signed word (sxw) for the integer source versus
;; single precision (sp) for the floating-point source.
(define_mode_attr VS_sxwsp [(V4SI "sxw") (V4SF "sp")])

;; Specific iterator for parity which does not have a byte/half-word form, but
;; does have a quad word form
(define_mode_iterator VParity [V4SI
			       V2DI
			       V1TI
			       TI])

(define_mode_attr VI_char [(V2DI "d") (V4SI "w") (V8HI "h") (V16QI "b")])
(define_mode_attr VI_scalar [(V2DI "DI") (V4SI "SI") (V8HI "HI") (V16QI "QI")])
(define_mode_attr VI_unit [(V16QI "VECTOR_UNIT_ALTIVEC_P (V16QImode)")
			   (V8HI "VECTOR_UNIT_ALTIVEC_P (V8HImode)")
			   (V4SI "VECTOR_UNIT_ALTIVEC_P (V4SImode)")
			   (V2DI "VECTOR_UNIT_P8_VECTOR_P (V2DImode)")
			   (V1TI "VECTOR_UNIT_ALTIVEC_P (V1TImode)")])

;; Vector pack/unpack
(define_mode_iterator VP [V2DI V4SI V8HI])
(define_mode_attr VP_small [(V2DI "V4SI") (V4SI "V8HI") (V8HI "V16QI")])
(define_mode_attr VP_small_lc [(V2DI "v4si") (V4SI "v8hi") (V8HI "v16qi")])
(define_mode_attr VU_char [(V2DI "w") (V4SI "h") (V8HI "b")])

;; Vector negate
(define_mode_iterator VNEG [V4SI V2DI])

;; Vector move instructions.
(define_insn "*altivec_mov<mode>"
  [(set (match_operand:VM2 0 "nonimmediate_operand" "=Z,v,v,?Y,?*r,?*r,v,v,?*r")
	(match_operand:VM2 1 "input_operand" "v,Z,v,*r,Y,*r,j,W,W"))]
  "VECTOR_MEM_ALTIVEC_P (<MODE>mode)
   && (register_operand (operands[0], <MODE>mode)
       || register_operand (operands[1], <MODE>mode))"
{
  switch (which_alternative)
    {
    case 0: return "stvx %1,%y0";
    case 1: return "lvx %0,%y1";
    case 2: return "vor %0,%1,%1";
    case 3: return "#";
    case 4: return "#";
    case 5: return "#";
    case 6: return "vxor %0,%0,%0";
    case 7: return output_vec_const_move (operands);
    case 8: return "#";
    default: gcc_unreachable ();
    }
}
  [(set_attr "type" "vecstore,vecload,veclogical,store,load,*,veclogical,*,*")
   (set_attr "length" "4,4,4,20,20,20,4,8,32")])

;; Unlike other altivec moves, allow the GPRs, since a normal use of TImode
;; is for unions.  However, for plain data movement, slightly favor the
;; vector loads.
(define_insn "*altivec_movti"
  [(set (match_operand:TI 0 "nonimmediate_operand" "=Z,v,v,?Y,?r,?r,v,v")
	(match_operand:TI 1 "input_operand" "v,Z,v,r,Y,r,j,W"))]
  "VECTOR_MEM_ALTIVEC_P (TImode)
   && (register_operand (operands[0], TImode)
       || register_operand (operands[1], TImode))"
{
  switch (which_alternative)
    {
    case 0: return "stvx %1,%y0";
    case 1: return "lvx %0,%y1";
    case 2: return "vor %0,%1,%1";
    case 3: return "#";
    case 4: return "#";
    case 5: return "#";
    case 6: return "vxor %0,%0,%0";
    case 7: return output_vec_const_move (operands);
    default: gcc_unreachable ();
    }
}
  [(set_attr "type" "vecstore,vecload,veclogical,store,load,*,veclogical,*")])

;; Load up a vector with the most significant bit set by loading up -1 and
;; doing a shift left
(define_split
  [(set (match_operand:VM 0 "altivec_register_operand")
	(match_operand:VM 1 "easy_vector_constant_msb"))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode) && reload_completed"
  [(const_int 0)]
{
  rtx dest = operands[0];
  machine_mode mode = GET_MODE (operands[0]);
  rtvec v;
  int i, num_elements;

  if (mode == V4SFmode)
    {
      mode = V4SImode;
      dest = gen_lowpart (V4SImode, dest);
    }

  num_elements = GET_MODE_NUNITS (mode);
  v = rtvec_alloc (num_elements);
  for (i = 0; i < num_elements; i++)
    RTVEC_ELT (v, i) = constm1_rtx;

  emit_insn (gen_vec_initv4sisi (dest, gen_rtx_PARALLEL (mode, v)));
  emit_insn (gen_rtx_SET (dest, gen_rtx_ASHIFT (mode, dest, dest)));
  DONE;
})
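
;; A minimal C sketch of the trick above (an illustration, not part of the
;; generated code): the vector shift instructions take the per-element
;; shift count modulo the element width, so an all-ones vector shifted
;; left by itself shifts each 32-bit lane by 31 bits, leaving only the
;; sign bit set.
;;
;;   unsigned int lane = -1u;         /* splat: 0xFFFFFFFF per lane */
;;   lane = lane << (lane & 31);      /* shift by 31: 0x80000000    */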

(define_split
  [(set (match_operand:VM 0 "altivec_register_operand")
	(match_operand:VM 1 "easy_vector_constant_add_self"))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode) && reload_completed"
  [(set (match_dup 0) (match_dup 3))
   (set (match_dup 0) (match_dup 4))]
{
  rtx dup = gen_easy_altivec_constant (operands[1]);
  rtx const_vec;
  machine_mode op_mode = <MODE>mode;

  /* Divide the operand of the resulting VEC_DUPLICATE, and use
     simplify_rtx to make a CONST_VECTOR.  */
  XEXP (dup, 0) = simplify_const_binary_operation (ASHIFTRT, QImode,
						   XEXP (dup, 0), const1_rtx);
  const_vec = simplify_rtx (dup);

  if (op_mode == V4SFmode)
    {
      op_mode = V4SImode;
      operands[0] = gen_lowpart (op_mode, operands[0]);
    }
  if (GET_MODE (const_vec) == op_mode)
    operands[3] = const_vec;
  else
    operands[3] = gen_lowpart (op_mode, const_vec);
  operands[4] = gen_rtx_PLUS (op_mode, operands[0], operands[0]);
})
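
;; A worked example (illustrative values, not from the source): a splat of
;; 24 in every byte is out of range for vspltisb (-16..15), but half of it
;; is not, so the split above generates the constant as a splat of 12
;; followed by an element-wise add of the register to itself:
;;
;;   signed char c = 12;              /* vspltisb 12      */
;;   signed char x = c + c;           /* vaddubm: x == 24 */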

(define_split
  [(set (match_operand:VM 0 "altivec_register_operand")
	(match_operand:VM 1 "easy_vector_constant_vsldoi"))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode) && can_create_pseudo_p ()"
  [(set (match_dup 2) (match_dup 3))
   (set (match_dup 4) (match_dup 5))
   (set (match_dup 0)
        (unspec:VM [(match_dup 2)
		    (match_dup 4)
		    (match_dup 6)]
		   UNSPEC_VSLDOI))]
{
  rtx op1 = operands[1];
  int elt = (BYTES_BIG_ENDIAN) ? 0 : GET_MODE_NUNITS (<MODE>mode) - 1;
  HOST_WIDE_INT val = const_vector_elt_as_int (op1, elt);
  rtx rtx_val = GEN_INT (val);
  int shift = vspltis_shifted (op1);

  gcc_assert (shift != 0);
  operands[2] = gen_reg_rtx (<MODE>mode);
  operands[3] = gen_const_vec_duplicate (<MODE>mode, rtx_val);
  operands[4] = gen_reg_rtx (<MODE>mode);

  if (shift < 0)
    {
      operands[5] = CONSTM1_RTX (<MODE>mode);
      operands[6] = GEN_INT (-shift);
    }
  else
    {
      operands[5] = CONST0_RTX (<MODE>mode);
      operands[6] = GEN_INT (shift);
    }
})
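
;; Byte-level picture of the rebuild above (an illustration): vsldoi
;; concatenates its two inputs and extracts 16 consecutive bytes starting
;; at the given offset, so with a splat S and a zero vector,
;;
;;   vsldoi ({S,...,S}, {0,...,0}, N) = { S, ..., S, 0, ..., 0 }
;;
;; i.e. the splat with N zero bytes shifted in from the right; the
;; negative-shift case uses the all-ones vector so ones are shifted in
;; instead.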

(define_insn "get_vrsave_internal"
  [(set (match_operand:SI 0 "register_operand" "=r")
	(unspec:SI [(reg:SI VRSAVE_REGNO)] UNSPEC_GET_VRSAVE))]
  "TARGET_ALTIVEC"
{
  if (TARGET_MACHO)
     return "mfspr %0,256";
  else
     return "mfvrsave %0";
}
  [(set_attr "type" "*")])

(define_insn "*set_vrsave_internal"
  [(match_parallel 0 "vrsave_operation"
     [(set (reg:SI VRSAVE_REGNO)
	   (unspec_volatile:SI [(match_operand:SI 1 "register_operand" "r")
				(reg:SI VRSAVE_REGNO)] UNSPECV_SET_VRSAVE))])]
  "TARGET_ALTIVEC"
{
  if (TARGET_MACHO)
    return "mtspr 256,%1";
  else
    return "mtvrsave %1";
}
  [(set_attr "type" "*")])

(define_insn "*save_world"
 [(match_parallel 0 "save_world_operation"
                  [(clobber (reg:SI LR_REGNO))
                   (use (match_operand:SI 1 "call_operand" "s"))])]
 "TARGET_MACHO && (DEFAULT_ABI == ABI_DARWIN) && TARGET_32BIT"
 "bl %z1"
  [(set_attr "type" "branch")
   (set_attr "length" "4")])

(define_insn "*restore_world"
 [(match_parallel 0 "restore_world_operation"
                  [(return)
                   (use (match_operand:SI 1 "call_operand" "s"))
                   (clobber (match_operand:SI 2 "gpc_reg_operand" "=r"))])]
 "TARGET_MACHO && (DEFAULT_ABI == ABI_DARWIN) && TARGET_32BIT"
 "b %z1")

;; The save_vregs and restore_vregs patterns don't use memory_operand
;; because (plus (reg) (const_int)) is not a valid vector address.
;; This way is more compact than describing exactly what happens in
;; the out-of-line functions, i.e. loading the constant into r11/r12
;; and then using indexed addressing, and requires less editing of RTL
;; to describe the operation to dwarf2out_frame_debug_expr.
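;; (For instance, a frame slot such as (plus (reg r1) (const_int -64)) --
;; the register and offset here are illustrative -- cannot appear directly
;; as an AltiVec memory operand, since lvx/stvx only support the reg+reg
;; indexed form.)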
(define_insn "*save_vregs_<mode>_r11"
  [(match_parallel 0 "any_parallel_operand"
     [(clobber (reg:P LR_REGNO))
      (use (match_operand:P 1 "symbol_ref_operand" "s"))
      (clobber (reg:P 11))
      (use (reg:P 0))
      (set (mem:V4SI (plus:P (match_operand:P 2 "gpc_reg_operand" "b")
			     (match_operand:P 3 "short_cint_operand" "I")))
	   (match_operand:V4SI 4 "altivec_register_operand" "v"))])]
  "TARGET_ALTIVEC"
  "bl %1"
  [(set_attr "type" "branch")
   (set_attr "length" "4")])

(define_insn "*save_vregs_<mode>_r12"
  [(match_parallel 0 "any_parallel_operand"
     [(clobber (reg:P LR_REGNO))
      (use (match_operand:P 1 "symbol_ref_operand" "s"))
      (clobber (reg:P 12))
      (use (reg:P 0))
      (set (mem:V4SI (plus:P (match_operand:P 2 "gpc_reg_operand" "b")
			     (match_operand:P 3 "short_cint_operand" "I")))
	   (match_operand:V4SI 4 "altivec_register_operand" "v"))])]
  "TARGET_ALTIVEC"
  "bl %1"
  [(set_attr "type" "branch")
   (set_attr "length" "4")])

(define_insn "*restore_vregs_<mode>_r11"
  [(match_parallel 0 "any_parallel_operand"
     [(clobber (reg:P LR_REGNO))
      (use (match_operand:P 1 "symbol_ref_operand" "s"))
      (clobber (reg:P 11))
      (use (reg:P 0))
      (set (match_operand:V4SI 2 "altivec_register_operand" "=v")
	   (mem:V4SI (plus:P (match_operand:P 3 "gpc_reg_operand" "b")
			     (match_operand:P 4 "short_cint_operand" "I"))))])]
  "TARGET_ALTIVEC"
  "bl %1"
  [(set_attr "type" "branch")
   (set_attr "length" "4")])

(define_insn "*restore_vregs_<mode>_r12"
  [(match_parallel 0 "any_parallel_operand"
     [(clobber (reg:P LR_REGNO))
      (use (match_operand:P 1 "symbol_ref_operand" "s"))
      (clobber (reg:P 12))
      (use (reg:P 0))
      (set (match_operand:V4SI 2 "altivec_register_operand" "=v")
	   (mem:V4SI (plus:P (match_operand:P 3 "gpc_reg_operand" "b")
			     (match_operand:P 4 "short_cint_operand" "I"))))])]
  "TARGET_ALTIVEC"
  "bl %1"
  [(set_attr "type" "branch")
   (set_attr "length" "4")])


;; Simple binary operations.

;; add
(define_insn "add<mode>3"
  [(set (match_operand:VI2 0 "register_operand" "=v")
        (plus:VI2 (match_operand:VI2 1 "register_operand" "v")
		  (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vaddu<VI_char>m %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "*altivec_addv4sf3"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (plus:V4SF (match_operand:V4SF 1 "register_operand" "v")
		   (match_operand:V4SF 2 "register_operand" "v")))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vaddfp %0,%1,%2"
  [(set_attr "type" "vecfloat")])

(define_insn "altivec_vaddcuw"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
                      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VADDCUW))]
  "VECTOR_UNIT_ALTIVEC_P (V4SImode)"
  "vaddcuw %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "altivec_vaddu<VI_char>s"
  [(set (match_operand:VI 0 "register_operand" "=v")
        (unspec:VI [(match_operand:VI 1 "register_operand" "v")
		    (match_operand:VI 2 "register_operand" "v")]
		   UNSPEC_VADDU))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "<VI_unit>"
  "vaddu<VI_char>s %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "altivec_vadds<VI_char>s"
  [(set (match_operand:VI 0 "register_operand" "=v")
        (unspec:VI [(match_operand:VI 1 "register_operand" "v")
                    (match_operand:VI 2 "register_operand" "v")]
		   UNSPEC_VADDS))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
  "vadds<VI_char>s %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; sub
(define_insn "sub<mode>3"
  [(set (match_operand:VI2 0 "register_operand" "=v")
        (minus:VI2 (match_operand:VI2 1 "register_operand" "v")
		   (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vsubu<VI_char>m %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "*altivec_subv4sf3"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (minus:V4SF (match_operand:V4SF 1 "register_operand" "v")
                    (match_operand:V4SF 2 "register_operand" "v")))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vsubfp %0,%1,%2"
  [(set_attr "type" "vecfloat")])

(define_insn "altivec_vsubcuw"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
                      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VSUBCUW))]
  "VECTOR_UNIT_ALTIVEC_P (V4SImode)"
  "vsubcuw %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "altivec_vsubu<VI_char>s"
  [(set (match_operand:VI 0 "register_operand" "=v")
        (unspec:VI [(match_operand:VI 1 "register_operand" "v")
                    (match_operand:VI 2 "register_operand" "v")]
		   UNSPEC_VSUBU))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
  "vsubu<VI_char>s %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "altivec_vsubs<VI_char>s"
  [(set (match_operand:VI 0 "register_operand" "=v")
        (unspec:VI [(match_operand:VI 1 "register_operand" "v")
                    (match_operand:VI 2 "register_operand" "v")]
		   UNSPEC_VSUBS))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
  "vsubs<VI_char>s %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; Vector average, rounding up.
(define_insn "uavg<mode>3_ceil"
  [(set (match_operand:VI 0 "register_operand" "=v")
        (unspec:VI [(match_operand:VI 1 "register_operand" "v")
                    (match_operand:VI 2 "register_operand" "v")]
		   UNSPEC_VAVGU))]
  "TARGET_ALTIVEC"
  "vavgu<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "avg<mode>3_ceil"
  [(set (match_operand:VI 0 "register_operand" "=v")
        (unspec:VI [(match_operand:VI 1 "register_operand" "v")
                    (match_operand:VI 2 "register_operand" "v")]
		   UNSPEC_VAVGS))]
  "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
  "vavgs<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "altivec_vcmpbfp"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
                      (match_operand:V4SF 2 "register_operand" "v")]
                      UNSPEC_VCMPBFP))]
  "VECTOR_UNIT_ALTIVEC_P (V4SImode)"
  "vcmpbfp %0,%1,%2"
  [(set_attr "type" "veccmp")])

(define_insn "*altivec_eq<mode>"
  [(set (match_operand:VI2 0 "altivec_register_operand" "=v")
	(eq:VI2 (match_operand:VI2 1 "altivec_register_operand" "v")
		(match_operand:VI2 2 "altivec_register_operand" "v")))]
  "<VI_unit>"
  "vcmpequ<VI_char> %0,%1,%2"
  [(set_attr "type" "veccmpfx")])

(define_insn "*altivec_gt<mode>"
  [(set (match_operand:VI2 0 "altivec_register_operand" "=v")
	(gt:VI2 (match_operand:VI2 1 "altivec_register_operand" "v")
		(match_operand:VI2 2 "altivec_register_operand" "v")))]
  "<VI_unit>"
  "vcmpgts<VI_char> %0,%1,%2"
  [(set_attr "type" "veccmpfx")])

(define_insn "*altivec_gtu<mode>"
  [(set (match_operand:VI2 0 "altivec_register_operand" "=v")
	(gtu:VI2 (match_operand:VI2 1 "altivec_register_operand" "v")
		 (match_operand:VI2 2 "altivec_register_operand" "v")))]
  "<VI_unit>"
  "vcmpgtu<VI_char> %0,%1,%2"
  [(set_attr "type" "veccmpfx")])

(define_insn "*altivec_eqv4sf"
  [(set (match_operand:V4SF 0 "altivec_register_operand" "=v")
	(eq:V4SF (match_operand:V4SF 1 "altivec_register_operand" "v")
		 (match_operand:V4SF 2 "altivec_register_operand" "v")))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vcmpeqfp %0,%1,%2"
  [(set_attr "type" "veccmp")])

(define_insn "*altivec_gtv4sf"
  [(set (match_operand:V4SF 0 "altivec_register_operand" "=v")
	(gt:V4SF (match_operand:V4SF 1 "altivec_register_operand" "v")
		 (match_operand:V4SF 2 "altivec_register_operand" "v")))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vcmpgtfp %0,%1,%2"
  [(set_attr "type" "veccmp")])

(define_insn "*altivec_gev4sf"
  [(set (match_operand:V4SF 0 "altivec_register_operand" "=v")
	(ge:V4SF (match_operand:V4SF 1 "altivec_register_operand" "v")
		 (match_operand:V4SF 2 "altivec_register_operand" "v")))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vcmpgefp %0,%1,%2"
  [(set_attr "type" "veccmp")])

(define_insn "*altivec_vsel<mode>"
  [(set (match_operand:VM 0 "altivec_register_operand" "=v")
	(if_then_else:VM
	 (ne:CC (match_operand:VM 1 "altivec_register_operand" "v")
		(match_operand:VM 4 "zero_constant" ""))
	 (match_operand:VM 2 "altivec_register_operand" "v")
	 (match_operand:VM 3 "altivec_register_operand" "v")))]
  "VECTOR_MEM_ALTIVEC_P (<MODE>mode)"
  "vsel %0,%3,%2,%1"
  [(set_attr "type" "vecmove")])

(define_insn "*altivec_vsel<mode>_uns"
  [(set (match_operand:VM 0 "altivec_register_operand" "=v")
	(if_then_else:VM
	 (ne:CCUNS (match_operand:VM 1 "altivec_register_operand" "v")
		   (match_operand:VM 4 "zero_constant" ""))
	 (match_operand:VM 2 "altivec_register_operand" "v")
	 (match_operand:VM 3 "altivec_register_operand" "v")))]
  "VECTOR_MEM_ALTIVEC_P (<MODE>mode)"
  "vsel %0,%3,%2,%1"
  [(set_attr "type" "vecmove")])
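
;; A rough C model of the vsel operand order used above (per-bit select;
;; bits set in the mask pick the "then" value, which is why the patterns
;; emit the "else" operand first):
;;
;;   r = (t & mask) | (e & ~mask);    /* vsel %0,%e,%t,%mask */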

;; Fused multiply add.

(define_insn "*altivec_fmav4sf4"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
	(fma:V4SF (match_operand:V4SF 1 "register_operand" "v")
		  (match_operand:V4SF 2 "register_operand" "v")
		  (match_operand:V4SF 3 "register_operand" "v")))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vmaddfp %0,%1,%2,%3"
  [(set_attr "type" "vecfloat")])

;; We do multiply as a fused multiply-add with an add of a -0.0 vector.

(define_expand "altivec_mulv4sf3"
  [(set (match_operand:V4SF 0 "register_operand")
	(fma:V4SF (match_operand:V4SF 1 "register_operand")
		  (match_operand:V4SF 2 "register_operand")
		  (match_dup 3)))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
{
  rtx neg0;

  /* Generate [-0.0, -0.0, -0.0, -0.0].  */
  neg0 = gen_reg_rtx (V4SImode);
  emit_insn (gen_altivec_vspltisw (neg0, constm1_rtx));
  emit_insn (gen_vashlv4si3 (neg0, neg0, neg0));

  operands[3] = gen_lowpart (V4SFmode, neg0);
})
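
;; Why -0.0 and not +0.0: vmaddfp computes a*b+c, and x + (-0.0) == x for
;; every x including +0.0, whereas adding +0.0 would rewrite a -0.0
;; product to +0.0.  A one-line C sketch of the identity relied on:
;;
;;   float mul (float a, float b) { return a * b + -0.0f; }  /* == a*b */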

;; 32-bit integer multiplication
;; A_high = Operand_1 & 0xFFFF0000 >> 16
;; A_low = Operand_1 & 0xFFFF
;; B_high = Operand_2 & 0xFFFF0000 >> 16
;; B_low = Operand_2 & 0xFFFF
;; result = A_low * B_low + (A_high * B_low + B_high * A_low) << 16

;; (define_insn "mulv4si3"
;;   [(set (match_operand:V4SI 0 "register_operand" "=v")
;;         (mult:V4SI (match_operand:V4SI 1 "register_operand" "v")
;;                    (match_operand:V4SI 2 "register_operand" "v")))]
(define_insn "mulv4si3_p8"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (mult:V4SI (match_operand:V4SI 1 "register_operand" "v")
                   (match_operand:V4SI 2 "register_operand" "v")))]
  "TARGET_P8_VECTOR"
  "vmuluwm %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_expand "mulv4si3"
  [(use (match_operand:V4SI 0 "register_operand"))
   (use (match_operand:V4SI 1 "register_operand"))
   (use (match_operand:V4SI 2 "register_operand"))]
   "TARGET_ALTIVEC"
{
  rtx zero;
  rtx swap;
  rtx small_swap;
  rtx sixteen;
  rtx one;
  rtx two;
  rtx low_product;
  rtx high_product;

  if (TARGET_P8_VECTOR)
    {
      emit_insn (gen_mulv4si3_p8 (operands[0], operands[1], operands[2]));
      DONE;
    }

  zero = gen_reg_rtx (V4SImode);
  emit_insn (gen_altivec_vspltisw (zero, const0_rtx));

  sixteen = gen_reg_rtx (V4SImode);
  emit_insn (gen_altivec_vspltisw (sixteen, GEN_INT (-16)));

  swap = gen_reg_rtx (V4SImode);
  emit_insn (gen_vrotlv4si3 (swap, operands[2], sixteen));

  one = gen_reg_rtx (V8HImode);
  convert_move (one, operands[1], 0);

  two = gen_reg_rtx (V8HImode);
  convert_move (two, operands[2], 0);

  small_swap = gen_reg_rtx (V8HImode);
  convert_move (small_swap, swap, 0);

  low_product = gen_reg_rtx (V4SImode);
  emit_insn (gen_altivec_vmulouh (low_product, one, two));

  high_product = gen_reg_rtx (V4SImode);
  emit_insn (gen_altivec_vmsumuhm (high_product, one, small_swap, zero));

  emit_insn (gen_vashlv4si3 (high_product, high_product, sixteen));

  emit_insn (gen_addv4si3 (operands[0], high_product, low_product));

  DONE;
})
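
;; A C model of the AltiVec path above, assuming 32-bit unsigned lanes
;; (the wrap-around at 2^32 makes the same bit pattern correct for signed
;; multiplication too):
;;
;;   unsigned int mul32 (unsigned int a, unsigned int b)
;;   {
;;     unsigned int a_lo = a & 0xFFFF, a_hi = a >> 16;
;;     unsigned int b_lo = b & 0xFFFF, b_hi = b >> 16;
;;     return a_lo * b_lo + ((a_hi * b_lo + b_hi * a_lo) << 16);
;;   }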

(define_expand "mulv8hi3"
  [(use (match_operand:V8HI 0 "register_operand"))
   (use (match_operand:V8HI 1 "register_operand"))
   (use (match_operand:V8HI 2 "register_operand"))]
   "TARGET_ALTIVEC"
{
  rtx zero = gen_reg_rtx (V8HImode);

  emit_insn (gen_altivec_vspltish (zero, const0_rtx));
  emit_insn (gen_fmav8hi4 (operands[0], operands[1], operands[2], zero));

  DONE;
})
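
;; Halfword multiplication needs no such decomposition: vmladduhm already
;; computes a*b+c per element modulo 2^16, so a plain multiply is just the
;; fused form with a zero addend.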


;; Fused multiply subtract
(define_insn "*altivec_vnmsubfp"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
	(neg:V4SF
	 (fma:V4SF (match_operand:V4SF 1 "register_operand" "v")
		   (match_operand:V4SF 2 "register_operand" "v")
		   (neg:V4SF
		    (match_operand:V4SF 3 "register_operand" "v")))))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vnmsubfp %0,%1,%2,%3"
  [(set_attr "type" "vecfloat")])

(define_insn "altivec_vmsumu<VI_char>m"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
		      (match_operand:VIshort 2 "register_operand" "v")
                      (match_operand:V4SI 3 "register_operand" "v")]
		     UNSPEC_VMSUMU))]
  "TARGET_ALTIVEC"
  "vmsumu<VI_char>m %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmsumm<VI_char>m"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
		      (match_operand:VIshort 2 "register_operand" "v")
                      (match_operand:V4SI 3 "register_operand" "v")]
		     UNSPEC_VMSUMM))]
  "TARGET_ALTIVEC"
  "vmsumm<VI_char>m %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmsumshm"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
		      (match_operand:V8HI 2 "register_operand" "v")
                      (match_operand:V4SI 3 "register_operand" "v")]
		     UNSPEC_VMSUMSHM))]
  "TARGET_ALTIVEC"
  "vmsumshm %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmsumuhs"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
		      (match_operand:V8HI 2 "register_operand" "v")
                      (match_operand:V4SI 3 "register_operand" "v")]
		     UNSPEC_VMSUMUHS))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "TARGET_ALTIVEC"
  "vmsumuhs %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmsumshs"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
		      (match_operand:V8HI 2 "register_operand" "v")
                      (match_operand:V4SI 3 "register_operand" "v")]
		     UNSPEC_VMSUMSHS))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "TARGET_ALTIVEC"
  "vmsumshs %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])

;; max

(define_insn "umax<mode>3"
  [(set (match_operand:VI2 0 "register_operand" "=v")
        (umax:VI2 (match_operand:VI2 1 "register_operand" "v")
		  (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vmaxu<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "smax<mode>3"
  [(set (match_operand:VI2 0 "register_operand" "=v")
        (smax:VI2 (match_operand:VI2 1 "register_operand" "v")
		  (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vmaxs<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "*altivec_smaxv4sf3"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (smax:V4SF (match_operand:V4SF 1 "register_operand" "v")
                   (match_operand:V4SF 2 "register_operand" "v")))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vmaxfp %0,%1,%2"
  [(set_attr "type" "veccmp")])

(define_insn "umin<mode>3"
  [(set (match_operand:VI2 0 "register_operand" "=v")
        (umin:VI2 (match_operand:VI2 1 "register_operand" "v")
		  (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vminu<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "smin<mode>3"
  [(set (match_operand:VI2 0 "register_operand" "=v")
        (smin:VI2 (match_operand:VI2 1 "register_operand" "v")
		  (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vmins<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "*altivec_sminv4sf3"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (smin:V4SF (match_operand:V4SF 1 "register_operand" "v")
                   (match_operand:V4SF 2 "register_operand" "v")))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vminfp %0,%1,%2"
  [(set_attr "type" "veccmp")])

(define_insn "altivec_vmhaddshs"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
		      (match_operand:V8HI 2 "register_operand" "v")
                      (match_operand:V8HI 3 "register_operand" "v")]
		     UNSPEC_VMHADDSHS))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "TARGET_ALTIVEC"
  "vmhaddshs %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmhraddshs"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
		      (match_operand:V8HI 2 "register_operand" "v")
                      (match_operand:V8HI 3 "register_operand" "v")]
		     UNSPEC_VMHRADDSHS))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "TARGET_ALTIVEC"
  "vmhraddshs %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])

(define_insn "fmav8hi4"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (plus:V8HI (mult:V8HI (match_operand:V8HI 1 "register_operand" "v")
			      (match_operand:V8HI 2 "register_operand" "v"))
		   (match_operand:V8HI 3 "register_operand" "v")))]
  "TARGET_ALTIVEC"
  "vmladduhm %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])

(define_expand "altivec_vmrghb"
  [(use (match_operand:V16QI 0 "register_operand"))
   (use (match_operand:V16QI 1 "register_operand"))
   (use (match_operand:V16QI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  rtvec v;
  rtx x;

  /* Special handling for LE with -maltivec=be.  */
  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
    {
      v = gen_rtvec (16, GEN_INT (8), GEN_INT (24), GEN_INT (9), GEN_INT (25),
                     GEN_INT (10), GEN_INT (26), GEN_INT (11), GEN_INT (27),
		     GEN_INT (12), GEN_INT (28), GEN_INT (13), GEN_INT (29),
		     GEN_INT (14), GEN_INT (30), GEN_INT (15), GEN_INT (31));
      x = gen_rtx_VEC_CONCAT (V32QImode, operands[2], operands[1]);
    }
  else
    {
      v = gen_rtvec (16, GEN_INT (0), GEN_INT (16), GEN_INT (1), GEN_INT (17),
                     GEN_INT (2), GEN_INT (18), GEN_INT (3), GEN_INT (19),
		     GEN_INT (4), GEN_INT (20), GEN_INT (5), GEN_INT (21),
		     GEN_INT (6), GEN_INT (22), GEN_INT (7), GEN_INT (23));
      x = gen_rtx_VEC_CONCAT (V32QImode, operands[1], operands[2]);
    }

  x = gen_rtx_VEC_SELECT (V16QImode, x, gen_rtx_PARALLEL (VOIDmode, v));
  emit_insn (gen_rtx_SET (operands[0], x));
  DONE;
})
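
;; Element mapping of the merge-high selection above, shown for the
;; big-endian branch (A = operand 1, B = operand 2, i = 0..7):
;;
;;   result[2*i]     = A[i]      -- bytes from the high half of A
;;   result[2*i + 1] = B[i]      -- interleaved with the high half of B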

(define_insn "*altivec_vmrghb_internal"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
        (vec_select:V16QI
	  (vec_concat:V32QI
	    (match_operand:V16QI 1 "register_operand" "v")
	    (match_operand:V16QI 2 "register_operand" "v"))
	  (parallel [(const_int 0) (const_int 16)
		     (const_int 1) (const_int 17)
		     (const_int 2) (const_int 18)
		     (const_int 3) (const_int 19)
		     (const_int 4) (const_int 20)
		     (const_int 5) (const_int 21)
		     (const_int 6) (const_int 22)
		     (const_int 7) (const_int 23)])))]
  "TARGET_ALTIVEC"
{
  if (BYTES_BIG_ENDIAN)
    return "vmrghb %0,%1,%2";
  else
    return "vmrglb %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

(define_insn "altivec_vmrghb_direct"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
	(unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
		       (match_operand:V16QI 2 "register_operand" "v")]
		      UNSPEC_VMRGH_DIRECT))]
  "TARGET_ALTIVEC"
  "vmrghb %0,%1,%2"
  [(set_attr "type" "vecperm")])

(define_expand "altivec_vmrghh"
  [(use (match_operand:V8HI 0 "register_operand"))
   (use (match_operand:V8HI 1 "register_operand"))
   (use (match_operand:V8HI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  rtvec v;
  rtx x;

  /* Special handling for LE with -maltivec=be.  */
  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
    {
      v = gen_rtvec (8, GEN_INT (4), GEN_INT (12), GEN_INT (5), GEN_INT (13),
                     GEN_INT (6), GEN_INT (14), GEN_INT (7), GEN_INT (15));
      x = gen_rtx_VEC_CONCAT (V16HImode, operands[2], operands[1]);
    }
  else
    {
      v = gen_rtvec (8, GEN_INT (0), GEN_INT (8), GEN_INT (1), GEN_INT (9),
                     GEN_INT (2), GEN_INT (10), GEN_INT (3), GEN_INT (11));
      x = gen_rtx_VEC_CONCAT (V16HImode, operands[1], operands[2]);
    }

  x = gen_rtx_VEC_SELECT (V8HImode, x, gen_rtx_PARALLEL (VOIDmode, v));
  emit_insn (gen_rtx_SET (operands[0], x));
  DONE;
})

(define_insn "*altivec_vmrghh_internal"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (vec_select:V8HI
	  (vec_concat:V16HI
	    (match_operand:V8HI 1 "register_operand" "v")
	    (match_operand:V8HI 2 "register_operand" "v"))
	  (parallel [(const_int 0) (const_int 8)
		     (const_int 1) (const_int 9)
		     (const_int 2) (const_int 10)
		     (const_int 3) (const_int 11)])))]
  "TARGET_ALTIVEC"
{
  if (BYTES_BIG_ENDIAN)
    return "vmrghh %0,%1,%2";
  else
    return "vmrglh %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

(define_insn "altivec_vmrghh_direct"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
                      (match_operand:V8HI 2 "register_operand" "v")]
                     UNSPEC_VMRGH_DIRECT))]
  "TARGET_ALTIVEC"
  "vmrghh %0,%1,%2"
  [(set_attr "type" "vecperm")])

(define_expand "altivec_vmrghw"
  [(use (match_operand:V4SI 0 "register_operand"))
   (use (match_operand:V4SI 1 "register_operand"))
   (use (match_operand:V4SI 2 "register_operand"))]
  "VECTOR_MEM_ALTIVEC_P (V4SImode)"
{
  rtvec v;
  rtx x;

  /* Special handling for LE with -maltivec=be.  */
  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
    {
      v = gen_rtvec (4, GEN_INT (2), GEN_INT (6), GEN_INT (3), GEN_INT (7));
      x = gen_rtx_VEC_CONCAT (V8SImode, operands[2], operands[1]);
    }
  else
    {
      v = gen_rtvec (4, GEN_INT (0), GEN_INT (4), GEN_INT (1), GEN_INT (5));
      x = gen_rtx_VEC_CONCAT (V8SImode, operands[1], operands[2]);
    }

  x = gen_rtx_VEC_SELECT (V4SImode, x, gen_rtx_PARALLEL (VOIDmode, v));
  emit_insn (gen_rtx_SET (operands[0], x));
  DONE;
})

(define_insn "*altivec_vmrghw_internal"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (vec_select:V4SI
	  (vec_concat:V8SI
	    (match_operand:V4SI 1 "register_operand" "v")
	    (match_operand:V4SI 2 "register_operand" "v"))
	  (parallel [(const_int 0) (const_int 4)
		     (const_int 1) (const_int 5)])))]
  "VECTOR_MEM_ALTIVEC_P (V4SImode)"
{
  if (BYTES_BIG_ENDIAN)
    return "vmrghw %0,%1,%2";
  else
    return "vmrglw %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

(define_insn "altivec_vmrghw_direct"
  [(set (match_operand:V4SI 0 "register_operand" "=v,wa")
	(unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v,wa")
		      (match_operand:V4SI 2 "register_operand" "v,wa")]
		     UNSPEC_VMRGH_DIRECT))]
  "TARGET_ALTIVEC"
  "@
   vmrghw %0,%1,%2
   xxmrghw %x0,%x1,%x2"
  [(set_attr "type" "vecperm")])

(define_insn "*altivec_vmrghsf"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (vec_select:V4SF
	  (vec_concat:V8SF
	    (match_operand:V4SF 1 "register_operand" "v")
	    (match_operand:V4SF 2 "register_operand" "v"))
	  (parallel [(const_int 0) (const_int 4)
		     (const_int 1) (const_int 5)])))]
  "VECTOR_MEM_ALTIVEC_P (V4SFmode)"
{
  if (BYTES_BIG_ENDIAN)
    return "vmrghw %0,%1,%2";
  else
    return "vmrglw %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

(define_expand "altivec_vmrglb"
  [(use (match_operand:V16QI 0 "register_operand"))
   (use (match_operand:V16QI 1 "register_operand"))
   (use (match_operand:V16QI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  rtvec v;
  rtx x;

  /* Special handling for LE with -maltivec=be.  */
  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
    {
      v = gen_rtvec (16, GEN_INT (0), GEN_INT (16), GEN_INT (1), GEN_INT (17),
                     GEN_INT (2), GEN_INT (18), GEN_INT (3), GEN_INT (19),
		     GEN_INT (4), GEN_INT (20), GEN_INT (5), GEN_INT (21),
		     GEN_INT (6), GEN_INT (22), GEN_INT (7), GEN_INT (23));
      x = gen_rtx_VEC_CONCAT (V32QImode, operands[2], operands[1]);
    }
  else
    {
      v = gen_rtvec (16, GEN_INT (8), GEN_INT (24), GEN_INT (9), GEN_INT (25),
                     GEN_INT (10), GEN_INT (26), GEN_INT (11), GEN_INT (27),
		     GEN_INT (12), GEN_INT (28), GEN_INT (13), GEN_INT (29),
		     GEN_INT (14), GEN_INT (30), GEN_INT (15), GEN_INT (31));
      x = gen_rtx_VEC_CONCAT (V32QImode, operands[1], operands[2]);
    }

  x = gen_rtx_VEC_SELECT (V16QImode, x, gen_rtx_PARALLEL (VOIDmode, v));
  emit_insn (gen_rtx_SET (operands[0], x));
  DONE;
})

(define_insn "*altivec_vmrglb_internal"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
        (vec_select:V16QI
	  (vec_concat:V32QI
	    (match_operand:V16QI 1 "register_operand" "v")
	    (match_operand:V16QI 2 "register_operand" "v"))
	  (parallel [(const_int  8) (const_int 24)
		     (const_int  9) (const_int 25)
		     (const_int 10) (const_int 26)
		     (const_int 11) (const_int 27)
		     (const_int 12) (const_int 28)
		     (const_int 13) (const_int 29)
		     (const_int 14) (const_int 30)
		     (const_int 15) (const_int 31)])))]
  "TARGET_ALTIVEC"
{
  if (BYTES_BIG_ENDIAN)
    return "vmrglb %0,%1,%2";
  else
    return "vmrghb %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

(define_insn "altivec_vmrglb_direct"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
	(unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
		       (match_operand:V16QI 2 "register_operand" "v")]
		      UNSPEC_VMRGL_DIRECT))]
  "TARGET_ALTIVEC"
  "vmrglb %0,%1,%2"
  [(set_attr "type" "vecperm")])

(define_expand "altivec_vmrglh"
  [(use (match_operand:V8HI 0 "register_operand"))
   (use (match_operand:V8HI 1 "register_operand"))
   (use (match_operand:V8HI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  rtvec v;
  rtx x;

  /* Special handling for LE with -maltivec=be.  */
  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
    {
      v = gen_rtvec (8, GEN_INT (0), GEN_INT (8), GEN_INT (1), GEN_INT (9),
                     GEN_INT (2), GEN_INT (10), GEN_INT (3), GEN_INT (11));
      x = gen_rtx_VEC_CONCAT (V16HImode, operands[2], operands[1]);
    }
  else
    {
      v = gen_rtvec (8, GEN_INT (4), GEN_INT (12), GEN_INT (5), GEN_INT (13),
                     GEN_INT (6), GEN_INT (14), GEN_INT (7), GEN_INT (15));
      x = gen_rtx_VEC_CONCAT (V16HImode, operands[1], operands[2]);
    }

  x = gen_rtx_VEC_SELECT (V8HImode, x, gen_rtx_PARALLEL (VOIDmode, v));
  emit_insn (gen_rtx_SET (operands[0], x));
  DONE;
})

(define_insn "*altivec_vmrglh_internal"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (vec_select:V8HI
	  (vec_concat:V16HI
	    (match_operand:V8HI 1 "register_operand" "v")
	    (match_operand:V8HI 2 "register_operand" "v"))
	  (parallel [(const_int 4) (const_int 12)
		     (const_int 5) (const_int 13)
		     (const_int 6) (const_int 14)
		     (const_int 7) (const_int 15)])))]
  "TARGET_ALTIVEC"
{
  if (BYTES_BIG_ENDIAN)
    return "vmrglh %0,%1,%2";
  else
    return "vmrghh %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

(define_insn "altivec_vmrglh_direct"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
		      (match_operand:V8HI 2 "register_operand" "v")]
		     UNSPEC_VMRGL_DIRECT))]
  "TARGET_ALTIVEC"
  "vmrglh %0,%1,%2"
  [(set_attr "type" "vecperm")])

(define_expand "altivec_vmrglw"
  [(use (match_operand:V4SI 0 "register_operand"))
   (use (match_operand:V4SI 1 "register_operand"))
   (use (match_operand:V4SI 2 "register_operand"))]
  "VECTOR_MEM_ALTIVEC_P (V4SImode)"
{
  rtvec v;
  rtx x;

  /* Special handling for LE with -maltivec=be.  */
  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
    {
      v = gen_rtvec (4, GEN_INT (0), GEN_INT (4), GEN_INT (1), GEN_INT (5));
      x = gen_rtx_VEC_CONCAT (V8SImode, operands[2], operands[1]);
    }
  else
    {
      v = gen_rtvec (4, GEN_INT (2), GEN_INT (6), GEN_INT (3), GEN_INT (7));
      x = gen_rtx_VEC_CONCAT (V8SImode, operands[1], operands[2]);
    }

  x = gen_rtx_VEC_SELECT (V4SImode, x, gen_rtx_PARALLEL (VOIDmode, v));
  emit_insn (gen_rtx_SET (operands[0], x));
  DONE;
})

(define_insn "*altivec_vmrglw_internal"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (vec_select:V4SI
	  (vec_concat:V8SI
	    (match_operand:V4SI 1 "register_operand" "v")
	    (match_operand:V4SI 2 "register_operand" "v"))
	  (parallel [(const_int 2) (const_int 6)
		     (const_int 3) (const_int 7)])))]
  "VECTOR_MEM_ALTIVEC_P (V4SImode)"
{
  if (BYTES_BIG_ENDIAN)
    return "vmrglw %0,%1,%2";
  else
    return "vmrghw %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

(define_insn "altivec_vmrglw_direct"
  [(set (match_operand:V4SI 0 "register_operand" "=v,wa")
	(unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v,wa")
		      (match_operand:V4SI 2 "register_operand" "v,wa")]
		     UNSPEC_VMRGL_DIRECT))]
  "TARGET_ALTIVEC"
  "@
   vmrglw %0,%1,%2
   xxmrglw %x0,%x1,%x2"
  [(set_attr "type" "vecperm")])

(define_insn "*altivec_vmrglsf"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (vec_select:V4SF
	 (vec_concat:V8SF
	   (match_operand:V4SF 1 "register_operand" "v")
	   (match_operand:V4SF 2 "register_operand" "v"))
	 (parallel [(const_int 2) (const_int 6)
		    (const_int 3) (const_int 7)])))]
  "VECTOR_MEM_ALTIVEC_P (V4SFmode)"
{
  if (BYTES_BIG_ENDIAN)
    return "vmrglw %0,%1,%2";
  else
    return "vmrghw %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

;; Power8 vector merge: select the even-numbered doublewords of two
;; V2DF/V2DI vectors
(define_expand "p8_vmrgew_<mode>"
  [(use (match_operand:VSX_D 0 "vsx_register_operand"))
   (use (match_operand:VSX_D 1 "vsx_register_operand"))
   (use (match_operand:VSX_D 2 "vsx_register_operand"))]
  "VECTOR_MEM_VSX_P (<MODE>mode)"
{
  rtvec v;
  rtx x;

  v = gen_rtvec (2, GEN_INT (0), GEN_INT (2));
  x = gen_rtx_VEC_CONCAT (<VS_double>mode, operands[1], operands[2]);

  x = gen_rtx_VEC_SELECT (<MODE>mode, x, gen_rtx_PARALLEL (VOIDmode, v));
  emit_insn (gen_rtx_SET (operands[0], x));
  DONE;
})

;; Power8 vector merge: select the even-numbered words of two
;; V4SF/V4SI vectors
(define_insn "p8_vmrgew_<mode>"
  [(set (match_operand:VSX_W 0 "register_operand" "=v")
	(vec_select:VSX_W
	  (vec_concat:<VS_double>
	    (match_operand:VSX_W 1 "register_operand" "v")
	    (match_operand:VSX_W 2 "register_operand" "v"))
	  (parallel [(const_int 0) (const_int 4)
		     (const_int 2) (const_int 6)])))]
  "TARGET_P8_VECTOR"
{
  if (BYTES_BIG_ENDIAN)
    return "vmrgew %0,%1,%2";
  else
    return "vmrgow %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

(define_insn "p8_vmrgow_<mode>"
  [(set (match_operand:VSX_W 0 "register_operand" "=v")
	(vec_select:VSX_W
	  (vec_concat:<VS_double>
	    (match_operand:VSX_W 1 "register_operand" "v")
	    (match_operand:VSX_W 2 "register_operand" "v"))
	  (parallel [(const_int 1) (const_int 5)
		     (const_int 3) (const_int 7)])))]
  "TARGET_P8_VECTOR"
{
  if (BYTES_BIG_ENDIAN)
    return "vmrgow %0,%1,%2";
  else
    return "vmrgew %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

(define_expand "p8_vmrgow_<mode>"
  [(use (match_operand:VSX_D 0 "vsx_register_operand"))
   (use (match_operand:VSX_D 1 "vsx_register_operand"))
   (use (match_operand:VSX_D 2 "vsx_register_operand"))]
  "VECTOR_MEM_VSX_P (<MODE>mode)"
{
  rtvec v;
  rtx x;

  v = gen_rtvec (2, GEN_INT (1), GEN_INT (3));
  x = gen_rtx_VEC_CONCAT (<VS_double>mode, operands[1], operands[2]);

  x = gen_rtx_VEC_SELECT (<MODE>mode, x, gen_rtx_PARALLEL (VOIDmode, v));
  emit_insn (gen_rtx_SET (operands[0], x));
  DONE;
})

(define_insn "p8_vmrgew_<mode>_direct"
  [(set (match_operand:VSX_W 0 "register_operand" "=v")
	(unspec:VSX_W [(match_operand:VSX_W 1 "register_operand" "v")
		       (match_operand:VSX_W 2 "register_operand" "v")]
		     UNSPEC_VMRGEW_DIRECT))]
  "TARGET_P8_VECTOR"
  "vmrgew %0,%1,%2"
  [(set_attr "type" "vecperm")])

(define_insn "p8_vmrgow_<mode>_direct"
  [(set (match_operand:VSX_W 0 "register_operand" "=v")
	(unspec:VSX_W [(match_operand:VSX_W 1 "register_operand" "v")
		       (match_operand:VSX_W 2 "register_operand" "v")]
		     UNSPEC_VMRGOW_DIRECT))]
  "TARGET_P8_VECTOR"
  "vmrgow %0,%1,%2"
  [(set_attr "type" "vecperm")])
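
;; In the merge-even/odd patterns above, "even" and "odd" follow
;; big-endian element numbering; e.g. for V4SI the even merge yields
;; { A[0], B[0], A[2], B[2] }, which is why the little-endian paths
;; emit the opposite instruction with swapped inputs.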
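
;; The widening-multiply expanders below pick between vmule* and vmulo*
;; at expand time for the same reason: the hardware numbers elements in
;; big-endian order, so under little-endian element ordering GCC's
;; "even" elements are the machine's "odd" ones.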
1405
1406(define_expand "vec_widen_umult_even_v16qi"
1407  [(use (match_operand:V8HI 0 "register_operand"))
1408   (use (match_operand:V16QI 1 "register_operand"))
1409   (use (match_operand:V16QI 2 "register_operand"))]
1410  "TARGET_ALTIVEC"
1411{
1412  if (VECTOR_ELT_ORDER_BIG)
1413    emit_insn (gen_altivec_vmuleub (operands[0], operands[1], operands[2]));
1414  else
1415    emit_insn (gen_altivec_vmuloub (operands[0], operands[1], operands[2]));
1416  DONE;
1417})
1418
1419(define_expand "vec_widen_smult_even_v16qi"
1420  [(use (match_operand:V8HI 0 "register_operand"))
1421   (use (match_operand:V16QI 1 "register_operand"))
1422   (use (match_operand:V16QI 2 "register_operand"))]
1423  "TARGET_ALTIVEC"
1424{
1425  if (VECTOR_ELT_ORDER_BIG)
1426    emit_insn (gen_altivec_vmulesb (operands[0], operands[1], operands[2]));
1427  else
1428    emit_insn (gen_altivec_vmulosb (operands[0], operands[1], operands[2]));
1429  DONE;
1430})
1431
1432(define_expand "vec_widen_umult_even_v8hi"
1433  [(use (match_operand:V4SI 0 "register_operand"))
1434   (use (match_operand:V8HI 1 "register_operand"))
1435   (use (match_operand:V8HI 2 "register_operand"))]
1436  "TARGET_ALTIVEC"
1437{
1438  if (VECTOR_ELT_ORDER_BIG)
1439    emit_insn (gen_altivec_vmuleuh (operands[0], operands[1], operands[2]));
1440  else
1441    emit_insn (gen_altivec_vmulouh (operands[0], operands[1], operands[2]));
1442  DONE;
1443})
1444
1445(define_expand "vec_widen_smult_even_v8hi"
1446  [(use (match_operand:V4SI 0 "register_operand"))
1447   (use (match_operand:V8HI 1 "register_operand"))
1448   (use (match_operand:V8HI 2 "register_operand"))]
1449  "TARGET_ALTIVEC"
1450{
1451  if (VECTOR_ELT_ORDER_BIG)
1452    emit_insn (gen_altivec_vmulesh (operands[0], operands[1], operands[2]));
1453  else
1454    emit_insn (gen_altivec_vmulosh (operands[0], operands[1], operands[2]));
1455  DONE;
1456})
1457
1458(define_expand "vec_widen_umult_even_v4si"
1459  [(use (match_operand:V2DI 0 "register_operand"))
1460   (use (match_operand:V4SI 1 "register_operand"))
1461   (use (match_operand:V4SI 2 "register_operand"))]
1462  "TARGET_P8_VECTOR"
1463{
1464 if (VECTOR_ELT_ORDER_BIG)
1465    emit_insn (gen_altivec_vmuleuw (operands[0], operands[1], operands[2]));
1466  else
1467    emit_insn (gen_altivec_vmulouw (operands[0], operands[1], operands[2]));
1468 DONE;
1469})
1470
1471(define_expand "vec_widen_smult_even_v4si"
1472  [(use (match_operand:V2DI 0 "register_operand"))
1473   (use (match_operand:V4SI 1 "register_operand"))
1474   (use (match_operand:V4SI 2 "register_operand"))]
1475  "TARGET_P8_VECTOR"
1476{
1477  if (VECTOR_ELT_ORDER_BIG)
1478    emit_insn (gen_altivec_vmulesw (operands[0], operands[1], operands[2]));
1479 else
1480    emit_insn (gen_altivec_vmulosw (operands[0], operands[1], operands[2]));
1481  DONE;
1482})
1483
1484(define_expand "vec_widen_umult_odd_v16qi"
1485  [(use (match_operand:V8HI 0 "register_operand"))
1486   (use (match_operand:V16QI 1 "register_operand"))
1487   (use (match_operand:V16QI 2 "register_operand"))]
1488  "TARGET_ALTIVEC"
1489{
1490  if (VECTOR_ELT_ORDER_BIG)
1491    emit_insn (gen_altivec_vmuloub (operands[0], operands[1], operands[2]));
1492  else
1493    emit_insn (gen_altivec_vmuleub (operands[0], operands[1], operands[2]));
1494  DONE;
1495})
1496
1497(define_expand "vec_widen_smult_odd_v16qi"
1498  [(use (match_operand:V8HI 0 "register_operand"))
1499   (use (match_operand:V16QI 1 "register_operand"))
1500   (use (match_operand:V16QI 2 "register_operand"))]
1501  "TARGET_ALTIVEC"
1502{
1503  if (VECTOR_ELT_ORDER_BIG)
1504    emit_insn (gen_altivec_vmulosb (operands[0], operands[1], operands[2]));
1505  else
1506    emit_insn (gen_altivec_vmulesb (operands[0], operands[1], operands[2]));
1507  DONE;
1508})
1509
1510(define_expand "vec_widen_umult_odd_v8hi"
1511  [(use (match_operand:V4SI 0 "register_operand"))
1512   (use (match_operand:V8HI 1 "register_operand"))
1513   (use (match_operand:V8HI 2 "register_operand"))]
1514  "TARGET_ALTIVEC"
1515{
1516  if (VECTOR_ELT_ORDER_BIG)
1517    emit_insn (gen_altivec_vmulouh (operands[0], operands[1], operands[2]));
1518  else
1519    emit_insn (gen_altivec_vmuleuh (operands[0], operands[1], operands[2]));
1520  DONE;
1521})
1522
1523(define_expand "vec_widen_smult_odd_v8hi"
1524  [(use (match_operand:V4SI 0 "register_operand"))
1525   (use (match_operand:V8HI 1 "register_operand"))
1526   (use (match_operand:V8HI 2 "register_operand"))]
1527  "TARGET_ALTIVEC"
1528{
1529  if (VECTOR_ELT_ORDER_BIG)
1530    emit_insn (gen_altivec_vmulosh (operands[0], operands[1], operands[2]));
1531  else
1532    emit_insn (gen_altivec_vmulesh (operands[0], operands[1], operands[2]));
1533  DONE;
1534})
1535
1536(define_expand "vec_widen_umult_odd_v4si"
1537  [(use (match_operand:V2DI 0 "register_operand"))
1538   (use (match_operand:V4SI 1 "register_operand"))
1539   (use (match_operand:V4SI 2 "register_operand"))]
1540  "TARGET_P8_VECTOR"
1541{
1542  if (VECTOR_ELT_ORDER_BIG)
1543    emit_insn (gen_altivec_vmulouw (operands[0], operands[1], operands[2]));
1544  else
1545    emit_insn (gen_altivec_vmuleuw (operands[0], operands[1], operands[2]));
1546  DONE;
1547})
1548
1549(define_expand "vec_widen_smult_odd_v4si"
1550  [(use (match_operand:V2DI 0 "register_operand"))
1551   (use (match_operand:V4SI 1 "register_operand"))
1552   (use (match_operand:V4SI 2 "register_operand"))]
1553  "TARGET_P8_VECTOR"
1554{
1555  if (VECTOR_ELT_ORDER_BIG)
1556    emit_insn (gen_altivec_vmulosw (operands[0], operands[1], operands[2]));
1557  else
1558    emit_insn (gen_altivec_vmulesw (operands[0], operands[1], operands[2]));
1559  DONE;
1560})
1561
(define_insn "altivec_vmuleub"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
                      (match_operand:V16QI 2 "register_operand" "v")]
		     UNSPEC_VMULEUB))]
  "TARGET_ALTIVEC"
  "vmuleub %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmuloub"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
                      (match_operand:V16QI 2 "register_operand" "v")]
		     UNSPEC_VMULOUB))]
  "TARGET_ALTIVEC"
  "vmuloub %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmulesb"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
                      (match_operand:V16QI 2 "register_operand" "v")]
		     UNSPEC_VMULESB))]
  "TARGET_ALTIVEC"
  "vmulesb %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmulosb"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
                      (match_operand:V16QI 2 "register_operand" "v")]
		     UNSPEC_VMULOSB))]
  "TARGET_ALTIVEC"
  "vmulosb %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmuleuh"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
                      (match_operand:V8HI 2 "register_operand" "v")]
		     UNSPEC_VMULEUH))]
  "TARGET_ALTIVEC"
  "vmuleuh %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmulouh"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
                      (match_operand:V8HI 2 "register_operand" "v")]
		     UNSPEC_VMULOUH))]
  "TARGET_ALTIVEC"
  "vmulouh %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmulesh"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
                      (match_operand:V8HI 2 "register_operand" "v")]
		     UNSPEC_VMULESH))]
  "TARGET_ALTIVEC"
  "vmulesh %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmulosh"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
                      (match_operand:V8HI 2 "register_operand" "v")]
		     UNSPEC_VMULOSH))]
  "TARGET_ALTIVEC"
  "vmulosh %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmuleuw"
  [(set (match_operand:V2DI 0 "register_operand" "=v")
        (unspec:V2DI [(match_operand:V4SI 1 "register_operand" "v")
                      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VMULEUW))]
  "TARGET_P8_VECTOR"
  "vmuleuw %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmulouw"
  [(set (match_operand:V2DI 0 "register_operand" "=v")
        (unspec:V2DI [(match_operand:V4SI 1 "register_operand" "v")
                      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VMULOUW))]
  "TARGET_P8_VECTOR"
  "vmulouw %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmulesw"
  [(set (match_operand:V2DI 0 "register_operand" "=v")
        (unspec:V2DI [(match_operand:V4SI 1 "register_operand" "v")
                      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VMULESW))]
  "TARGET_P8_VECTOR"
  "vmulesw %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmulosw"
  [(set (match_operand:V2DI 0 "register_operand" "=v")
        (unspec:V2DI [(match_operand:V4SI 1 "register_operand" "v")
                      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VMULOSW))]
  "TARGET_P8_VECTOR"
  "vmulosw %0,%1,%2"
  [(set_attr "type" "veccomplex")])

;; Vector pack/unpack
(define_insn "altivec_vpkpx"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v")
                      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VPKPX))]
  "TARGET_ALTIVEC"
{
  if (VECTOR_ELT_ORDER_BIG)
    return "vpkpx %0,%1,%2";
  else
    return "vpkpx %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

(define_insn "altivec_vpks<VI_char>ss"
  [(set (match_operand:<VP_small> 0 "register_operand" "=v")
	(unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
			    (match_operand:VP 2 "register_operand" "v")]
			   UNSPEC_VPACK_SIGN_SIGN_SAT))]
  "<VI_unit>"
{
  if (VECTOR_ELT_ORDER_BIG)
    return "vpks<VI_char>ss %0,%1,%2";
  else
    return "vpks<VI_char>ss %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

(define_insn "altivec_vpks<VI_char>us"
  [(set (match_operand:<VP_small> 0 "register_operand" "=v")
	(unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
			    (match_operand:VP 2 "register_operand" "v")]
			   UNSPEC_VPACK_SIGN_UNS_SAT))]
  "<VI_unit>"
{
  if (VECTOR_ELT_ORDER_BIG)
    return "vpks<VI_char>us %0,%1,%2";
  else
    return "vpks<VI_char>us %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

(define_insn "altivec_vpku<VI_char>us"
  [(set (match_operand:<VP_small> 0 "register_operand" "=v")
	(unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
			    (match_operand:VP 2 "register_operand" "v")]
			   UNSPEC_VPACK_UNS_UNS_SAT))]
  "<VI_unit>"
{
  if (VECTOR_ELT_ORDER_BIG)
    return "vpku<VI_char>us %0,%1,%2";
  else
    return "vpku<VI_char>us %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

(define_insn "altivec_vpku<VI_char>um"
  [(set (match_operand:<VP_small> 0 "register_operand" "=v")
	(unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
			    (match_operand:VP 2 "register_operand" "v")]
			   UNSPEC_VPACK_UNS_UNS_MOD))]
  "<VI_unit>"
{
  if (VECTOR_ELT_ORDER_BIG)
    return "vpku<VI_char>um %0,%1,%2";
  else
    return "vpku<VI_char>um %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

(define_insn "altivec_vpku<VI_char>um_direct"
  [(set (match_operand:<VP_small> 0 "register_operand" "=v")
	(unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
			    (match_operand:VP 2 "register_operand" "v")]
			   UNSPEC_VPACK_UNS_UNS_MOD_DIRECT))]
  "<VI_unit>"
{
  if (BYTES_BIG_ENDIAN)
    return "vpku<VI_char>um %0,%1,%2";
  else
    return "vpku<VI_char>um %0,%2,%1";
}
  [(set_attr "type" "vecperm")])
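
;; All of the vpk* patterns above swap their input operands for little
;; endian because the machine instruction packs its first source into
;; the left-hand (BE low-index) half of the result.  Illustrative
;; semantics only (vpkuhum, BE element numbering; not taken from this
;; file):
;;
;;   for (i = 0; i < 8; i++)
;;     {
;;       result[i]     = (unsigned char) a[i];
;;       result[i + 8] = (unsigned char) b[i];
;;     }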

(define_insn "*altivec_vrl<VI_char>"
  [(set (match_operand:VI2 0 "register_operand" "=v")
        (rotate:VI2 (match_operand:VI2 1 "register_operand" "v")
		    (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vrl<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "altivec_vrl<VI_char>mi"
  [(set (match_operand:VIlong 0 "register_operand" "=v")
        (unspec:VIlong [(match_operand:VIlong 1 "register_operand" "0")
	                (match_operand:VIlong 2 "register_operand" "v")
		        (match_operand:VIlong 3 "register_operand" "v")]
		       UNSPEC_VRLMI))]
  "TARGET_P9_VECTOR"
  "vrl<VI_char>mi %0,%2,%3"
  [(set_attr "type" "veclogical")])

(define_insn "altivec_vrl<VI_char>nm"
  [(set (match_operand:VIlong 0 "register_operand" "=v")
        (unspec:VIlong [(match_operand:VIlong 1 "register_operand" "v")
		        (match_operand:VIlong 2 "register_operand" "v")]
		       UNSPEC_VRLNM))]
  "TARGET_P9_VECTOR"
  "vrl<VI_char>nm %0,%1,%2"
  [(set_attr "type" "veclogical")])

(define_insn "altivec_vsl"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
                      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VSLV4SI))]
  "TARGET_ALTIVEC"
  "vsl %0,%1,%2"
  [(set_attr "type" "vecperm")])

(define_insn "altivec_vslo"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
                      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VSLO))]
  "TARGET_ALTIVEC"
  "vslo %0,%1,%2"
  [(set_attr "type" "vecperm")])

(define_insn "vslv"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
	(unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
		       (match_operand:V16QI 2 "register_operand" "v")]
		      UNSPEC_VSLV))]
  "TARGET_P9_VECTOR"
  "vslv %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "vsrv"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
	(unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
		       (match_operand:V16QI 2 "register_operand" "v")]
		      UNSPEC_VSRV))]
  "TARGET_P9_VECTOR"
  "vsrv %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "*altivec_vsl<VI_char>"
  [(set (match_operand:VI2 0 "register_operand" "=v")
        (ashift:VI2 (match_operand:VI2 1 "register_operand" "v")
		    (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vsl<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "*altivec_vsr<VI_char>"
  [(set (match_operand:VI2 0 "register_operand" "=v")
        (lshiftrt:VI2 (match_operand:VI2 1 "register_operand" "v")
		      (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vsr<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "*altivec_vsra<VI_char>"
  [(set (match_operand:VI2 0 "register_operand" "=v")
        (ashiftrt:VI2 (match_operand:VI2 1 "register_operand" "v")
		      (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vsra<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])
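
;; The vsl/vsr/vsra patterns directly above use the generic RTL shift
;; codes rather than unspecs; each element is shifted by the value in
;; the corresponding element of operand 2, taken modulo the element
;; width.  Roughly, per element (illustrative; ELT_BITS is shorthand
;; here, not a real macro):
;;
;;   r[i] = a[i] << (b[i] & (ELT_BITS - 1));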

(define_insn "altivec_vsr"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
                      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VSR))]
  "TARGET_ALTIVEC"
  "vsr %0,%1,%2"
  [(set_attr "type" "vecperm")])

(define_insn "altivec_vsro"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
                      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VSRO))]
  "TARGET_ALTIVEC"
  "vsro %0,%1,%2"
  [(set_attr "type" "vecperm")])

(define_insn "altivec_vsum4ubs"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")
                      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VSUM4UBS))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "TARGET_ALTIVEC"
  "vsum4ubs %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vsum4s<VI_char>s"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
                      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VSUM4S))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "TARGET_ALTIVEC"
  "vsum4s<VI_char>s %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_expand "altivec_vsum2sws"
  [(use (match_operand:V4SI 0 "register_operand"))
   (use (match_operand:V4SI 1 "register_operand"))
   (use (match_operand:V4SI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  if (VECTOR_ELT_ORDER_BIG)
    emit_insn (gen_altivec_vsum2sws_direct (operands[0], operands[1],
                                            operands[2]));
  else
    {
      rtx tmp1 = gen_reg_rtx (V4SImode);
      rtx tmp2 = gen_reg_rtx (V4SImode);
      emit_insn (gen_altivec_vsldoi_v4si (tmp1, operands[2],
                                          operands[2], GEN_INT (12)));
      emit_insn (gen_altivec_vsum2sws_direct (tmp2, operands[1], tmp1));
      emit_insn (gen_altivec_vsldoi_v4si (operands[0], tmp2, tmp2,
                                          GEN_INT (4)));
    }
  DONE;
})
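
;; In the little-endian path above, the first vsldoi rotates operand 2
;; so its two accumulator words line up with the doubleword-swapped
;; input, and the second rotates the sums back to the expected element
;; positions.  A rough sketch, reading vsldoi with identical inputs as
;; a left byte rotation (rotl_bytes is a hypothetical helper, for
;; illustration only):
;;
;;   tmp1 = rotl_bytes (op2, 12);
;;   tmp2 = vsum2sws (op1, tmp1);
;;   dst  = rotl_bytes (tmp2, 4);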

; FIXME: This can probably be expressed without an UNSPEC.
(define_insn "altivec_vsum2sws_direct"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
	              (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VSUM2SWS))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "TARGET_ALTIVEC"
  "vsum2sws %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_expand "altivec_vsumsws"
  [(use (match_operand:V4SI 0 "register_operand"))
   (use (match_operand:V4SI 1 "register_operand"))
   (use (match_operand:V4SI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  if (VECTOR_ELT_ORDER_BIG)
    emit_insn (gen_altivec_vsumsws_direct (operands[0], operands[1],
                                           operands[2]));
  else
    {
      rtx tmp1 = gen_reg_rtx (V4SImode);
      rtx tmp2 = gen_reg_rtx (V4SImode);
      emit_insn (gen_altivec_vspltw_direct (tmp1, operands[2], const0_rtx));
      emit_insn (gen_altivec_vsumsws_direct (tmp2, operands[1], tmp1));
      emit_insn (gen_altivec_vsldoi_v4si (operands[0], tmp2, tmp2,
                                          GEN_INT (12)));
    }
  DONE;
})

; FIXME: This can probably be expressed without an UNSPEC.
(define_insn "altivec_vsumsws_direct"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
                      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VSUMSWS_DIRECT))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "TARGET_ALTIVEC"
  "vsumsws %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_expand "altivec_vspltb"
  [(use (match_operand:V16QI 0 "register_operand"))
   (use (match_operand:V16QI 1 "register_operand"))
   (use (match_operand:QI 2 "u5bit_cint_operand"))]
  "TARGET_ALTIVEC"
{
  rtvec v;
  rtx x;

  /* Special handling for LE with -maltivec=be.  We have to reflect
     the actual selected index for the splat in the RTL.  */
  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
    operands[2] = GEN_INT (15 - INTVAL (operands[2]));

  v = gen_rtvec (1, operands[2]);
  x = gen_rtx_VEC_SELECT (QImode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
  x = gen_rtx_VEC_DUPLICATE (V16QImode, x);
  emit_insn (gen_rtx_SET (operands[0], x));
  DONE;
})
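
;; The index arithmetic above: with -maltivec=be on a little-endian
;; target, the user's element i (big-endian numbering) is RTL element
;; 15 - i (little-endian numbering), so the expander stores 15 - i in
;; the RTL.  The *_internal output below applies 15 - i again for any
;; little-endian target, so the immediate that reaches the vspltb
;; instruction is the big-endian index the hardware expects.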

(define_insn "*altivec_vspltb_internal"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
        (vec_duplicate:V16QI
	 (vec_select:QI (match_operand:V16QI 1 "register_operand" "v")
			(parallel
			 [(match_operand:QI 2 "u5bit_cint_operand" "")]))))]
  "TARGET_ALTIVEC"
{
  /* For true LE, this adjusts the selected index.  For LE with
     -maltivec=be, this reverses what was done in the define_expand
     because the instruction already has big-endian bias.  */
  if (!BYTES_BIG_ENDIAN)
    operands[2] = GEN_INT (15 - INTVAL (operands[2]));

  return "vspltb %0,%1,%2";
}
  [(set_attr "type" "vecperm")])

(define_insn "altivec_vspltb_direct"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
        (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
	               (match_operand:QI 2 "u5bit_cint_operand" "i")]
                      UNSPEC_VSPLT_DIRECT))]
  "TARGET_ALTIVEC"
  "vspltb %0,%1,%2"
  [(set_attr "type" "vecperm")])

(define_expand "altivec_vsplth"
  [(use (match_operand:V8HI 0 "register_operand"))
   (use (match_operand:V8HI 1 "register_operand"))
   (use (match_operand:QI 2 "u5bit_cint_operand"))]
  "TARGET_ALTIVEC"
{
  rtvec v;
  rtx x;

  /* Special handling for LE with -maltivec=be.  We have to reflect
     the actual selected index for the splat in the RTL.  */
  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
    operands[2] = GEN_INT (7 - INTVAL (operands[2]));

  v = gen_rtvec (1, operands[2]);
  x = gen_rtx_VEC_SELECT (HImode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
  x = gen_rtx_VEC_DUPLICATE (V8HImode, x);
  emit_insn (gen_rtx_SET (operands[0], x));
  DONE;
})

(define_insn "*altivec_vsplth_internal"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
	(vec_duplicate:V8HI
	 (vec_select:HI (match_operand:V8HI 1 "register_operand" "v")
			(parallel
			 [(match_operand:QI 2 "u5bit_cint_operand" "")]))))]
  "TARGET_ALTIVEC"
{
  /* For true LE, this adjusts the selected index.  For LE with
     -maltivec=be, this reverses what was done in the define_expand
     because the instruction already has big-endian bias.  */
  if (!BYTES_BIG_ENDIAN)
    operands[2] = GEN_INT (7 - INTVAL (operands[2]));

  return "vsplth %0,%1,%2";
}
  [(set_attr "type" "vecperm")])

(define_insn "altivec_vsplth_direct"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
                      (match_operand:QI 2 "u5bit_cint_operand" "i")]
                     UNSPEC_VSPLT_DIRECT))]
  "TARGET_ALTIVEC"
  "vsplth %0,%1,%2"
  [(set_attr "type" "vecperm")])

(define_expand "altivec_vspltw"
  [(use (match_operand:V4SI 0 "register_operand"))
   (use (match_operand:V4SI 1 "register_operand"))
   (use (match_operand:QI 2 "u5bit_cint_operand"))]
  "TARGET_ALTIVEC"
{
  rtvec v;
  rtx x;

  /* Special handling for LE with -maltivec=be.  We have to reflect
     the actual selected index for the splat in the RTL.  */
  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
    operands[2] = GEN_INT (3 - INTVAL (operands[2]));

  v = gen_rtvec (1, operands[2]);
  x = gen_rtx_VEC_SELECT (SImode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
  x = gen_rtx_VEC_DUPLICATE (V4SImode, x);
  emit_insn (gen_rtx_SET (operands[0], x));
  DONE;
})

(define_insn "*altivec_vspltw_internal"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
	(vec_duplicate:V4SI
	 (vec_select:SI (match_operand:V4SI 1 "register_operand" "v")
			(parallel
			 [(match_operand:QI 2 "u5bit_cint_operand" "i")]))))]
  "TARGET_ALTIVEC"
{
  /* For true LE, this adjusts the selected index.  For LE with
     -maltivec=be, this reverses what was done in the define_expand
     because the instruction already has big-endian bias.  */
  if (!BYTES_BIG_ENDIAN)
    operands[2] = GEN_INT (3 - INTVAL (operands[2]));

  return "vspltw %0,%1,%2";
}
  [(set_attr "type" "vecperm")])

(define_insn "altivec_vspltw_direct"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
                      (match_operand:QI 2 "u5bit_cint_operand" "i")]
                     UNSPEC_VSPLT_DIRECT))]
  "TARGET_ALTIVEC"
  "vspltw %0,%1,%2"
  [(set_attr "type" "vecperm")])

(define_expand "altivec_vspltsf"
  [(use (match_operand:V4SF 0 "register_operand"))
   (use (match_operand:V4SF 1 "register_operand"))
   (use (match_operand:QI 2 "u5bit_cint_operand"))]
  "TARGET_ALTIVEC"
{
  rtvec v;
  rtx x;

  /* Special handling for LE with -maltivec=be.  We have to reflect
     the actual selected index for the splat in the RTL.  */
  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
    operands[2] = GEN_INT (3 - INTVAL (operands[2]));

  v = gen_rtvec (1, operands[2]);
  x = gen_rtx_VEC_SELECT (SFmode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
  x = gen_rtx_VEC_DUPLICATE (V4SFmode, x);
  emit_insn (gen_rtx_SET (operands[0], x));
  DONE;
})

(define_insn "*altivec_vspltsf_internal"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
	(vec_duplicate:V4SF
	 (vec_select:SF (match_operand:V4SF 1 "register_operand" "v")
			(parallel
			 [(match_operand:QI 2 "u5bit_cint_operand" "i")]))))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
{
  /* For true LE, this adjusts the selected index.  For LE with
     -maltivec=be, this reverses what was done in the define_expand
     because the instruction already has big-endian bias.  */
  if (!BYTES_BIG_ENDIAN)
    operands[2] = GEN_INT (3 - INTVAL (operands[2]));

  return "vspltw %0,%1,%2";
}
  [(set_attr "type" "vecperm")])

(define_insn "altivec_vspltis<VI_char>"
  [(set (match_operand:VI 0 "register_operand" "=v")
	(vec_duplicate:VI
	 (match_operand:QI 1 "s5bit_cint_operand" "i")))]
  "TARGET_ALTIVEC"
  "vspltis<VI_char> %0,%1"
  [(set_attr "type" "vecperm")])

(define_insn "*altivec_vrfiz"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
	(fix:V4SF (match_operand:V4SF 1 "register_operand" "v")))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vrfiz %0,%1"
  [(set_attr "type" "vecfloat")])

(define_expand "altivec_vperm_<mode>"
  [(set (match_operand:VM 0 "register_operand")
	(unspec:VM [(match_operand:VM 1 "register_operand")
		    (match_operand:VM 2 "register_operand")
		    (match_operand:V16QI 3 "register_operand")]
		   UNSPEC_VPERM))]
  "TARGET_ALTIVEC"
{
  if (!VECTOR_ELT_ORDER_BIG)
    {
      altivec_expand_vec_perm_le (operands);
      DONE;
    }
})
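
;; altivec_expand_vec_perm_le (in rs6000.c) rewrites the permute for
;; true little endian.  Roughly, it swaps the two data inputs and
;; complements the selector bytes, since a 5-bit index i and its
;; complement 31 - i name mirror-image bytes of the 32-byte
;; concatenation.  A sketch, not the exact RTL it emits:
;;
;;   vperm (a, b, c)  ==>  vperm (b, a, ~c)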

;; Slightly prefer vperm, since the target does not overlap the source
(define_insn "altivec_vperm_<mode>_direct"
  [(set (match_operand:VM 0 "register_operand" "=v,?wo")
	(unspec:VM [(match_operand:VM 1 "register_operand" "v,wo")
		    (match_operand:VM 2 "register_operand" "v,0")
		    (match_operand:V16QI 3 "register_operand" "v,wo")]
		   UNSPEC_VPERM))]
  "TARGET_ALTIVEC"
  "@
   vperm %0,%1,%2,%3
   xxperm %x0,%x1,%x3"
  [(set_attr "type" "vecperm")
   (set_attr "length" "4")])

(define_insn "altivec_vperm_v8hiv16qi"
  [(set (match_operand:V16QI 0 "register_operand" "=v,?wo")
	(unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v,wo")
		       (match_operand:V8HI 2 "register_operand" "v,0")
		       (match_operand:V16QI 3 "register_operand" "v,wo")]
		      UNSPEC_VPERM))]
  "TARGET_ALTIVEC"
  "@
   vperm %0,%1,%2,%3
   xxperm %x0,%x1,%x3"
  [(set_attr "type" "vecperm")
   (set_attr "length" "4")])

(define_expand "altivec_vperm_<mode>_uns"
  [(set (match_operand:VM 0 "register_operand")
	(unspec:VM [(match_operand:VM 1 "register_operand")
		    (match_operand:VM 2 "register_operand")
		    (match_operand:V16QI 3 "register_operand")]
		   UNSPEC_VPERM_UNS))]
  "TARGET_ALTIVEC"
{
  if (!VECTOR_ELT_ORDER_BIG)
    {
      altivec_expand_vec_perm_le (operands);
      DONE;
    }
})

(define_insn "*altivec_vperm_<mode>_uns_internal"
  [(set (match_operand:VM 0 "register_operand" "=v,?wo")
	(unspec:VM [(match_operand:VM 1 "register_operand" "v,wo")
		    (match_operand:VM 2 "register_operand" "v,0")
		    (match_operand:V16QI 3 "register_operand" "v,wo")]
		   UNSPEC_VPERM_UNS))]
  "TARGET_ALTIVEC"
  "@
   vperm %0,%1,%2,%3
   xxperm %x0,%x1,%x3"
  [(set_attr "type" "vecperm")
   (set_attr "length" "4")])

(define_expand "vec_permv16qi"
  [(set (match_operand:V16QI 0 "register_operand")
	(unspec:V16QI [(match_operand:V16QI 1 "register_operand")
		       (match_operand:V16QI 2 "register_operand")
		       (match_operand:V16QI 3 "register_operand")]
		      UNSPEC_VPERM))]
  "TARGET_ALTIVEC"
{
  if (!BYTES_BIG_ENDIAN)
    {
      altivec_expand_vec_perm_le (operands);
      DONE;
    }
})

(define_insn "*altivec_vpermr_<mode>_internal"
  [(set (match_operand:VM 0 "register_operand" "=v,?wo")
	(unspec:VM [(match_operand:VM 1 "register_operand" "v,wo")
		    (match_operand:VM 2 "register_operand" "v,0")
		    (match_operand:V16QI 3 "register_operand" "v,wo")]
		   UNSPEC_VPERMR))]
  "TARGET_P9_VECTOR"
  "@
   vpermr %0,%1,%2,%3
   xxpermr %x0,%x1,%x3"
  [(set_attr "type" "vecperm")
   (set_attr "length" "4")])

(define_insn "altivec_vrfip"		; ceil
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
		     UNSPEC_FRIP))]
  "TARGET_ALTIVEC"
  "vrfip %0,%1"
  [(set_attr "type" "vecfloat")])

(define_insn "altivec_vrfin"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
		     UNSPEC_VRFIN))]
  "TARGET_ALTIVEC"
  "vrfin %0,%1"
  [(set_attr "type" "vecfloat")])

(define_insn "*altivec_vrfim"		; floor
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
		     UNSPEC_FRIM))]
  "TARGET_ALTIVEC"
  "vrfim %0,%1"
  [(set_attr "type" "vecfloat")])

(define_insn "altivec_vcfux"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (unspec:V4SF [(match_operand:V4SI 1 "register_operand" "v")
	              (match_operand:QI 2 "immediate_operand" "i")]
		     UNSPEC_VCFUX))]
  "TARGET_ALTIVEC"
  "vcfux %0,%1,%2"
  [(set_attr "type" "vecfloat")])

(define_insn "altivec_vcfsx"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (unspec:V4SF [(match_operand:V4SI 1 "register_operand" "v")
	              (match_operand:QI 2 "immediate_operand" "i")]
		     UNSPEC_VCFSX))]
  "TARGET_ALTIVEC"
  "vcfsx %0,%1,%2"
  [(set_attr "type" "vecfloat")])

(define_insn "altivec_vctuxs"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
                      (match_operand:QI 2 "immediate_operand" "i")]
		     UNSPEC_VCTUXS))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "TARGET_ALTIVEC"
  "vctuxs %0,%1,%2"
  [(set_attr "type" "vecfloat")])

(define_insn "altivec_vctsxs"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
                      (match_operand:QI 2 "immediate_operand" "i")]
		     UNSPEC_VCTSXS))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "TARGET_ALTIVEC"
  "vctsxs %0,%1,%2"
  [(set_attr "type" "vecfloat")])

(define_insn "altivec_vlogefp"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
		     UNSPEC_VLOGEFP))]
  "TARGET_ALTIVEC"
  "vlogefp %0,%1"
  [(set_attr "type" "vecfloat")])

(define_insn "altivec_vexptefp"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
		     UNSPEC_VEXPTEFP))]
  "TARGET_ALTIVEC"
  "vexptefp %0,%1"
  [(set_attr "type" "vecfloat")])

(define_insn "*altivec_vrsqrtefp"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
		     UNSPEC_RSQRT))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vrsqrtefp %0,%1"
  [(set_attr "type" "vecfloat")])

(define_insn "altivec_vrefp"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
		     UNSPEC_FRES))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vrefp %0,%1"
  [(set_attr "type" "vecfloat")])

(define_expand "altivec_copysign_v4sf3"
  [(use (match_operand:V4SF 0 "register_operand"))
   (use (match_operand:V4SF 1 "register_operand"))
   (use (match_operand:V4SF 2 "register_operand"))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
{
  rtx mask = gen_reg_rtx (V4SImode);
  rtvec v = rtvec_alloc (4);
  unsigned HOST_WIDE_INT mask_val = ((unsigned HOST_WIDE_INT)1) << 31;

  RTVEC_ELT (v, 0) = GEN_INT (mask_val);
  RTVEC_ELT (v, 1) = GEN_INT (mask_val);
  RTVEC_ELT (v, 2) = GEN_INT (mask_val);
  RTVEC_ELT (v, 3) = GEN_INT (mask_val);

  emit_insn (gen_vec_initv4sisi (mask, gen_rtx_PARALLEL (V4SImode, v)));
  emit_insn (gen_vector_select_v4sf (operands[0], operands[1], operands[2],
				     gen_lowpart (V4SFmode, mask)));
  DONE;
})
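
;; The mask built above has only the sign bit set in each word, so the
;; select takes the sign from operand 2 and everything else from
;; operand 1.  Per 32-bit element, viewed bitwise (illustrative only):
;;
;;   r[i] = (a[i] & 0x7fffffff) | (b[i] & 0x80000000);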

(define_insn "altivec_vsldoi_<mode>"
  [(set (match_operand:VM 0 "register_operand" "=v")
        (unspec:VM [(match_operand:VM 1 "register_operand" "v")
		    (match_operand:VM 2 "register_operand" "v")
		    (match_operand:QI 3 "immediate_operand" "i")]
		   UNSPEC_VSLDOI))]
  "TARGET_ALTIVEC"
  "vsldoi %0,%1,%2,%3"
  [(set_attr "type" "vecperm")])

(define_insn "altivec_vupkhs<VU_char>"
  [(set (match_operand:VP 0 "register_operand" "=v")
	(unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
		     UNSPEC_VUNPACK_HI_SIGN))]
  "<VI_unit>"
{
  if (VECTOR_ELT_ORDER_BIG)
    return "vupkhs<VU_char> %0,%1";
  else
    return "vupkls<VU_char> %0,%1";
}
  [(set_attr "type" "vecperm")])
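
;; As with the packs, the "high" and "low" unpacks trade places for
;; little endian: the half the user calls high lives in the hardware's
;; low-numbered elements.  Illustrative semantics only (vupkhsh, BE
;; element numbering; not taken from this file):
;;
;;   for (i = 0; i < 4; i++)
;;     result[i] = (int) a[i];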

(define_insn "*altivec_vupkhs<VU_char>_direct"
  [(set (match_operand:VP 0 "register_operand" "=v")
	(unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
		     UNSPEC_VUNPACK_HI_SIGN_DIRECT))]
  "<VI_unit>"
  "vupkhs<VU_char> %0,%1"
  [(set_attr "type" "vecperm")])

(define_insn "altivec_vupkls<VU_char>"
  [(set (match_operand:VP 0 "register_operand" "=v")
	(unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
		     UNSPEC_VUNPACK_LO_SIGN))]
  "<VI_unit>"
{
  if (VECTOR_ELT_ORDER_BIG)
    return "vupkls<VU_char> %0,%1";
  else
    return "vupkhs<VU_char> %0,%1";
}
  [(set_attr "type" "vecperm")])

(define_insn "*altivec_vupkls<VU_char>_direct"
  [(set (match_operand:VP 0 "register_operand" "=v")
	(unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
		     UNSPEC_VUNPACK_LO_SIGN_DIRECT))]
  "<VI_unit>"
  "vupkls<VU_char> %0,%1"
  [(set_attr "type" "vecperm")])

(define_insn "altivec_vupkhpx"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
	(unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
		     UNSPEC_VUPKHPX))]
  "TARGET_ALTIVEC"
{
  if (VECTOR_ELT_ORDER_BIG)
    return "vupkhpx %0,%1";
  else
    return "vupklpx %0,%1";
}
  [(set_attr "type" "vecperm")])

(define_insn "altivec_vupklpx"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
	(unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
		     UNSPEC_VUPKLPX))]
  "TARGET_ALTIVEC"
{
  if (VECTOR_ELT_ORDER_BIG)
    return "vupklpx %0,%1";
  else
    return "vupkhpx %0,%1";
}
  [(set_attr "type" "vecperm")])

;; Compare vectors producing a vector result and a predicate, setting CR6 to
;; indicate a combined status
(define_insn "*altivec_vcmpequ<VI_char>_p"
  [(set (reg:CC CR6_REGNO)
	(unspec:CC [(eq:CC (match_operand:VI2 1 "register_operand" "v")
			   (match_operand:VI2 2 "register_operand" "v"))]
		   UNSPEC_PREDICATE))
   (set (match_operand:VI2 0 "register_operand" "=v")
	(eq:VI2 (match_dup 1)
		(match_dup 2)))]
  "<VI_unit>"
  "vcmpequ<VI_char>. %0,%1,%2"
  [(set_attr "type" "veccmpfx")])
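
;; For these predicate forms, CR6 summarizes the vector result: one
;; bit is set when every element compared true and another when no
;; element did, and the vec_all_* / vec_any_* builtins in altivec.h
;; test those bits (general AltiVec behavior, noted here for
;; reference).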

(define_insn "*altivec_vcmpgts<VI_char>_p"
  [(set (reg:CC CR6_REGNO)
	(unspec:CC [(gt:CC (match_operand:VI2 1 "register_operand" "v")
			   (match_operand:VI2 2 "register_operand" "v"))]
		   UNSPEC_PREDICATE))
   (set (match_operand:VI2 0 "register_operand" "=v")
	(gt:VI2 (match_dup 1)
		(match_dup 2)))]
  "<VI_unit>"
  "vcmpgts<VI_char>. %0,%1,%2"
  [(set_attr "type" "veccmpfx")])

(define_insn "*altivec_vcmpgtu<VI_char>_p"
  [(set (reg:CC CR6_REGNO)
	(unspec:CC [(gtu:CC (match_operand:VI2 1 "register_operand" "v")
			    (match_operand:VI2 2 "register_operand" "v"))]
		   UNSPEC_PREDICATE))
   (set (match_operand:VI2 0 "register_operand" "=v")
	(gtu:VI2 (match_dup 1)
		 (match_dup 2)))]
  "<VI_unit>"
  "vcmpgtu<VI_char>. %0,%1,%2"
  [(set_attr "type" "veccmpfx")])

(define_insn "*altivec_vcmpeqfp_p"
  [(set (reg:CC CR6_REGNO)
	(unspec:CC [(eq:CC (match_operand:V4SF 1 "register_operand" "v")
			   (match_operand:V4SF 2 "register_operand" "v"))]
		   UNSPEC_PREDICATE))
   (set (match_operand:V4SF 0 "register_operand" "=v")
	(eq:V4SF (match_dup 1)
		 (match_dup 2)))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vcmpeqfp. %0,%1,%2"
  [(set_attr "type" "veccmp")])

(define_insn "*altivec_vcmpgtfp_p"
  [(set (reg:CC CR6_REGNO)
	(unspec:CC [(gt:CC (match_operand:V4SF 1 "register_operand" "v")
			   (match_operand:V4SF 2 "register_operand" "v"))]
		   UNSPEC_PREDICATE))
   (set (match_operand:V4SF 0 "register_operand" "=v")
	(gt:V4SF (match_dup 1)
		 (match_dup 2)))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vcmpgtfp. %0,%1,%2"
  [(set_attr "type" "veccmp")])

(define_insn "*altivec_vcmpgefp_p"
  [(set (reg:CC CR6_REGNO)
	(unspec:CC [(ge:CC (match_operand:V4SF 1 "register_operand" "v")
			   (match_operand:V4SF 2 "register_operand" "v"))]
		   UNSPEC_PREDICATE))
   (set (match_operand:V4SF 0 "register_operand" "=v")
	(ge:V4SF (match_dup 1)
		 (match_dup 2)))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vcmpgefp. %0,%1,%2"
  [(set_attr "type" "veccmp")])

(define_insn "altivec_vcmpbfp_p"
  [(set (reg:CC CR6_REGNO)
	(unspec:CC [(match_operand:V4SF 1 "register_operand" "v")
		    (match_operand:V4SF 2 "register_operand" "v")]
		   UNSPEC_VCMPBFP))
   (set (match_operand:V4SF 0 "register_operand" "=v")
        (unspec:V4SF [(match_dup 1)
                      (match_dup 2)]
                      UNSPEC_VCMPBFP))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)"
  "vcmpbfp. %0,%1,%2"
  [(set_attr "type" "veccmp")])

(define_insn "altivec_mtvscr"
  [(set (reg:SI VSCR_REGNO)
	(unspec_volatile:SI
	 [(match_operand:V4SI 0 "register_operand" "v")] UNSPECV_MTVSCR))]
  "TARGET_ALTIVEC"
  "mtvscr %0"
  [(set_attr "type" "vecsimple")])

(define_insn "altivec_mfvscr"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
	(unspec_volatile:V8HI [(reg:SI VSCR_REGNO)] UNSPECV_MFVSCR))]
  "TARGET_ALTIVEC"
  "mfvscr %0"
  [(set_attr "type" "vecsimple")])

(define_insn "altivec_dssall"
  [(unspec_volatile [(const_int 0)] UNSPECV_DSSALL)]
  "TARGET_ALTIVEC"
  "dssall"
  [(set_attr "type" "vecsimple")])

(define_insn "altivec_dss"
  [(unspec_volatile [(match_operand:QI 0 "immediate_operand" "i")]
		    UNSPECV_DSS)]
  "TARGET_ALTIVEC"
  "dss %0"
  [(set_attr "type" "vecsimple")])

(define_insn "altivec_dst"
  [(unspec [(match_operand 0 "register_operand" "b")
	    (match_operand:SI 1 "register_operand" "r")
	    (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DST)]
  "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
  "dst %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "altivec_dstt"
  [(unspec [(match_operand 0 "register_operand" "b")
	    (match_operand:SI 1 "register_operand" "r")
	    (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTT)]
  "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
  "dstt %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "altivec_dstst"
  [(unspec [(match_operand 0 "register_operand" "b")
	    (match_operand:SI 1 "register_operand" "r")
	    (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTST)]
  "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
  "dstst %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "altivec_dststt"
  [(unspec [(match_operand 0 "register_operand" "b")
	    (match_operand:SI 1 "register_operand" "r")
	    (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTSTT)]
  "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
  "dststt %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_expand "altivec_lvsl"
  [(use (match_operand:V16QI 0 "register_operand"))
   (use (match_operand:V16QI 1 "memory_operand"))]
  "TARGET_ALTIVEC"
{
  if (VECTOR_ELT_ORDER_BIG)
    emit_insn (gen_altivec_lvsl_direct (operands[0], operands[1]));
  else
    {
      rtx mask, constv, vperm;
      mask = gen_reg_rtx (V16QImode);
      emit_insn (gen_altivec_lvsl_direct (mask, operands[1]));
      constv = gen_const_vec_series (V16QImode, const0_rtx, const1_rtx);
      constv = force_reg (V16QImode, constv);
      vperm = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, mask, mask, constv),
                              UNSPEC_VPERM);
      emit_insn (gen_rtx_SET (operands[0], vperm));
    }
  DONE;
})
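
;; lvsl produces the shift-permute mask {sh, sh+1, ..., sh+15}, where
;; sh is the low four bits of the effective address.  In the LE path
;; above, permuting that mask with the {0, 1, ..., 15} series (which,
;; under the LE reading of vperm, amounts to a byte reversal) puts the
;; mask into the element order the rest of the LE sequences expect.
;; This describes the intent; it is not additional RTL.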

(define_insn "altivec_lvsl_reg"
  [(set (match_operand:V16QI 0 "altivec_register_operand" "=v")
	(unspec:V16QI
	[(match_operand:DI 1 "gpc_reg_operand" "b")]
	UNSPEC_LVSL_REG))]
  "TARGET_ALTIVEC"
  "lvsl %0,0,%1"
  [(set_attr "type" "vecload")])

(define_insn "altivec_lvsl_direct"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
	(unspec:V16QI [(match_operand:V16QI 1 "memory_operand" "Z")]
		      UNSPEC_LVSL))]
  "TARGET_ALTIVEC"
  "lvsl %0,%y1"
  [(set_attr "type" "vecload")])

(define_expand "altivec_lvsr"
  [(use (match_operand:V16QI 0 "altivec_register_operand"))
   (use (match_operand:V16QI 1 "memory_operand"))]
  "TARGET_ALTIVEC"
{
  if (VECTOR_ELT_ORDER_BIG)
    emit_insn (gen_altivec_lvsr_direct (operands[0], operands[1]));
  else
    {
      rtx mask, constv, vperm;
      mask = gen_reg_rtx (V16QImode);
      emit_insn (gen_altivec_lvsr_direct (mask, operands[1]));
      constv = gen_const_vec_series (V16QImode, const0_rtx, const1_rtx);
      constv = force_reg (V16QImode, constv);
      vperm = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, mask, mask, constv),
                              UNSPEC_VPERM);
      emit_insn (gen_rtx_SET (operands[0], vperm));
    }
  DONE;
})

(define_insn "altivec_lvsr_reg"
  [(set (match_operand:V16QI 0 "altivec_register_operand" "=v")
	(unspec:V16QI
	[(match_operand:DI 1 "gpc_reg_operand" "b")]
	UNSPEC_LVSR_REG))]
  "TARGET_ALTIVEC"
  "lvsr %0,0,%1"
  [(set_attr "type" "vecload")])

(define_insn "altivec_lvsr_direct"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
	(unspec:V16QI [(match_operand:V16QI 1 "memory_operand" "Z")]
		      UNSPEC_LVSR))]
  "TARGET_ALTIVEC"
  "lvsr %0,%y1"
  [(set_attr "type" "vecload")])

(define_expand "build_vector_mask_for_load"
  [(set (match_operand:V16QI 0 "register_operand")
	(unspec:V16QI [(match_operand 1 "memory_operand")] UNSPEC_LVSR))]
  "TARGET_ALTIVEC"
{
  rtx addr;
  rtx temp;

  gcc_assert (GET_CODE (operands[1]) == MEM);

  addr = XEXP (operands[1], 0);
  temp = gen_reg_rtx (GET_MODE (addr));
  emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (GET_MODE (addr), addr)));
  emit_insn (gen_altivec_lvsr (operands[0],
			       replace_equiv_address (operands[1], temp)));
  DONE;
})
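
;; Negating the address flips the alignment residue: lvsr on -addr
;; returns bytes {16 - sh', ..., 31 - sh'} with sh' = (-addr) & 15,
;; which for a misaligned addr works out to {(addr & 15) + i} for
;; i = 0..15, exactly the select vector the realignment scheme wants
;; (general lvsr behavior, noted for reference).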

;; Parallel some of the LVE* and STV*'s with unspecs because some have
;; identical rtl but different instructions, and gcc gets confused.

(define_expand "altivec_lve<VI_char>x"
  [(parallel
    [(set (match_operand:VI 0 "register_operand" "=v")
	  (match_operand:VI 1 "memory_operand" "Z"))
     (unspec [(const_int 0)] UNSPEC_LVE)])]
  "TARGET_ALTIVEC"
{
  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
    {
      altivec_expand_lvx_be (operands[0], operands[1], <MODE>mode, UNSPEC_LVE);
      DONE;
    }
})

(define_insn "*altivec_lve<VI_char>x_internal"
  [(parallel
    [(set (match_operand:VI 0 "register_operand" "=v")
	  (match_operand:VI 1 "memory_operand" "Z"))
     (unspec [(const_int 0)] UNSPEC_LVE)])]
  "TARGET_ALTIVEC"
  "lve<VI_char>x %0,%y1"
  [(set_attr "type" "vecload")])

(define_insn "*altivec_lvesfx"
  [(parallel
    [(set (match_operand:V4SF 0 "register_operand" "=v")
	  (match_operand:V4SF 1 "memory_operand" "Z"))
     (unspec [(const_int 0)] UNSPEC_LVE)])]
  "TARGET_ALTIVEC"
  "lvewx %0,%y1"
  [(set_attr "type" "vecload")])

(define_expand "altivec_lvxl_<mode>"
  [(parallel
    [(set (match_operand:VM2 0 "register_operand" "=v")
	  (match_operand:VM2 1 "memory_operand" "Z"))
     (unspec [(const_int 0)] UNSPEC_SET_VSCR)])]
  "TARGET_ALTIVEC"
{
  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
    {
      altivec_expand_lvx_be (operands[0], operands[1], <MODE>mode, UNSPEC_SET_VSCR);
      DONE;
    }
})

(define_insn "*altivec_lvxl_<mode>_internal"
  [(parallel
    [(set (match_operand:VM2 0 "register_operand" "=v")
	  (match_operand:VM2 1 "memory_operand" "Z"))
     (unspec [(const_int 0)] UNSPEC_SET_VSCR)])]
  "TARGET_ALTIVEC"
  "lvxl %0,%y1"
  [(set_attr "type" "vecload")])

; This version of lvx is used only in cases where we need to force an lvx
; over any other load, and we don't care about losing CSE opportunities.
; Its primary use is for prologue register saves.
(define_insn "altivec_lvx_<mode>_internal"
  [(parallel
    [(set (match_operand:VM2 0 "register_operand" "=v")
	  (match_operand:VM2 1 "memory_operand" "Z"))
     (unspec [(const_int 0)] UNSPEC_LVX)])]
  "TARGET_ALTIVEC"
  "lvx %0,%y1"
  [(set_attr "type" "vecload")])

; The following patterns embody what lvx should usually look like.
(define_expand "altivec_lvx_<VM2:mode>"
  [(set (match_operand:VM2 0 "register_operand")
	(match_operand:VM2 1 "altivec_indexed_or_indirect_operand"))]
  "TARGET_ALTIVEC"
{
  rtx addr = XEXP (operand1, 0);
  if (rs6000_sum_of_two_registers_p (addr))
    {
      rtx op1 = XEXP (addr, 0);
      rtx op2 = XEXP (addr, 1);
      if (TARGET_64BIT)
	emit_insn (gen_altivec_lvx_<VM2:mode>_2op_di (operand0, op1, op2));
      else
	emit_insn (gen_altivec_lvx_<VM2:mode>_2op_si (operand0, op1, op2));
    }
  else
    {
      if (TARGET_64BIT)
	emit_insn (gen_altivec_lvx_<VM2:mode>_1op_di (operand0, addr));
      else
	emit_insn (gen_altivec_lvx_<VM2:mode>_1op_si (operand0, addr));
    }
  DONE;
})
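
;; The (and ... (const_int -16)) in the 1op/2op patterns below encodes
;; what the hardware does anyway: lvx ignores the low four bits of the
;; effective address, i.e. it loads from (rb + rx) & ~15 (illustrative
;; arithmetic).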

; The next two patterns embody what lvx should usually look like.
(define_insn "altivec_lvx_<VM2:mode>_2op_<P:mptrsize>"
  [(set (match_operand:VM2 0 "register_operand" "=v")
	(mem:VM2 (and:P (plus:P (match_operand:P 1 "register_operand" "b")
				(match_operand:P 2 "register_operand" "r"))
			(const_int -16))))]
  "TARGET_ALTIVEC"
  "lvx %0,%1,%2"
  [(set_attr "type" "vecload")])

(define_insn "altivec_lvx_<VM2:mode>_1op_<P:mptrsize>"
  [(set (match_operand:VM2 0 "register_operand" "=v")
	(mem:VM2 (and:P (match_operand:P 1 "register_operand" "r")
			(const_int -16))))]
  "TARGET_ALTIVEC"
  "lvx %0,0,%1"
  [(set_attr "type" "vecload")])

; This version of stvx is used only in cases where we need to force an stvx
; over any other store, and we don't care about losing CSE opportunities.
; Its primary use is for epilogue register restores.
(define_insn "altivec_stvx_<mode>_internal"
  [(parallel
    [(set (match_operand:VM2 0 "memory_operand" "=Z")
	  (match_operand:VM2 1 "register_operand" "v"))
     (unspec [(const_int 0)] UNSPEC_STVX)])]
  "TARGET_ALTIVEC"
  "stvx %1,%y0"
  [(set_attr "type" "vecstore")])

; The following patterns embody what stvx should usually look like.
(define_expand "altivec_stvx_<VM2:mode>"
  [(set (match_operand:VM2 1 "altivec_indexed_or_indirect_operand")
	(match_operand:VM2 0 "register_operand"))]
  "TARGET_ALTIVEC"
{
  rtx addr = XEXP (operand1, 0);
  if (rs6000_sum_of_two_registers_p (addr))
    {
      rtx op1 = XEXP (addr, 0);
      rtx op2 = XEXP (addr, 1);
      if (TARGET_64BIT)
	emit_insn (gen_altivec_stvx_<VM2:mode>_2op_di (operand0, op1, op2));
      else
	emit_insn (gen_altivec_stvx_<VM2:mode>_2op_si (operand0, op1, op2));
    }
  else
    {
      if (TARGET_64BIT)
	emit_insn (gen_altivec_stvx_<VM2:mode>_1op_di (operand0, addr));
      else
	emit_insn (gen_altivec_stvx_<VM2:mode>_1op_si (operand0, addr));
    }
  DONE;
})

; The next two patterns embody what stvx should usually look like.
(define_insn "altivec_stvx_<VM2:mode>_2op_<P:mptrsize>"
  [(set (mem:VM2 (and:P (plus:P (match_operand:P 1 "register_operand" "b")
				(match_operand:P 2 "register_operand" "r"))
			(const_int -16)))
	(match_operand:VM2 0 "register_operand" "v"))]
  "TARGET_ALTIVEC"
  "stvx %0,%1,%2"
  [(set_attr "type" "vecstore")])

(define_insn "altivec_stvx_<VM2:mode>_1op_<P:mptrsize>"
  [(set (mem:VM2 (and:P (match_operand:P 1 "register_operand" "r")
			(const_int -16)))
	(match_operand:VM2 0 "register_operand" "v"))]
  "TARGET_ALTIVEC"
  "stvx %0,0,%1"
  [(set_attr "type" "vecstore")])

(define_expand "altivec_stvxl_<mode>"
  [(parallel
    [(set (match_operand:VM2 0 "memory_operand" "=Z")
	  (match_operand:VM2 1 "register_operand" "v"))
     (unspec [(const_int 0)] UNSPEC_STVXL)])]
  "TARGET_ALTIVEC"
{
  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
    {
      altivec_expand_stvx_be (operands[0], operands[1], <MODE>mode, UNSPEC_STVXL);
      DONE;
    }
})

(define_insn "*altivec_stvxl_<mode>_internal"
  [(parallel
    [(set (match_operand:VM2 0 "memory_operand" "=Z")
	  (match_operand:VM2 1 "register_operand" "v"))
     (unspec [(const_int 0)] UNSPEC_STVXL)])]
  "TARGET_ALTIVEC"
  "stvxl %1,%y0"
  [(set_attr "type" "vecstore")])

(define_expand "altivec_stve<VI_char>x"
  [(set (match_operand:<VI_scalar> 0 "memory_operand" "=Z")
	(unspec:<VI_scalar> [(match_operand:VI 1 "register_operand" "v")] UNSPEC_STVE))]
  "TARGET_ALTIVEC"
{
  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
    {
      altivec_expand_stvex_be (operands[0], operands[1], <MODE>mode, UNSPEC_STVE);
      DONE;
    }
})

(define_insn "*altivec_stve<VI_char>x_internal"
  [(set (match_operand:<VI_scalar> 0 "memory_operand" "=Z")
	(unspec:<VI_scalar> [(match_operand:VI 1 "register_operand" "v")] UNSPEC_STVE))]
  "TARGET_ALTIVEC"
  "stve<VI_char>x %1,%y0"
  [(set_attr "type" "vecstore")])

(define_insn "*altivec_stvesfx"
  [(set (match_operand:SF 0 "memory_operand" "=Z")
	(unspec:SF [(match_operand:V4SF 1 "register_operand" "v")] UNSPEC_STVE))]
  "TARGET_ALTIVEC"
  "stvewx %1,%y0"
  [(set_attr "type" "vecstore")])

;; Generate doublee
;; signed int/float to double convert words 0 and 2
(define_expand "doublee<mode>2"
  [(set (match_operand:V2DF 0 "register_operand" "=v")
	(match_operand:VSX_W 1 "register_operand" "v"))]
  "TARGET_VSX"
{
  machine_mode op_mode = GET_MODE (operands[1]);

  if (VECTOR_ELT_ORDER_BIG)
    {
      /* Big endian word numbering for words in operand is 0 1 2 3.
	 Input words 0 and 2 are where they need to be.  */
      emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], operands[1]));
    }
  else
    {
      /* Little endian word numbering for operand is 3 2 1 0.
	 take (operand[1] operand[1]) and shift left one word
	 3 2 1 0    3 2 1 0  =>  2 1 0 3
	 Input words 2 and 0 are now where they need to be for the
	 conversion.  */
      rtx rtx_tmp;
      rtx rtx_val = GEN_INT (1);

      rtx_tmp = gen_reg_rtx (op_mode);
      emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
					 operands[1], rtx_val));
      emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
    }
  DONE;
}
  [(set_attr "type" "veccomplex")])
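
;; Illustrative mapping for this family of expanders, assuming
;; v = {v0, v1, v2, v3} in the user's element order:
;;
;;   doublee: {(double) v0, (double) v2}
;;   doubleo: {(double) v1, (double) v3}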

;; Generate unsdoublee
;; unsigned int to double convert words 0 and 2
(define_expand "unsdoubleev4si2"
  [(set (match_operand:V2DF 0 "register_operand" "=v")
	(match_operand:V4SI 1 "register_operand" "v"))]
  "TARGET_VSX"
{
  if (VECTOR_ELT_ORDER_BIG)
    {
      /* Big endian word numbering for words in operand is 0 1 2 3.
	 Input words 0 and 2 are where they need to be.  */
      emit_insn (gen_vsx_xvcvuxwdp (operands[0], operands[1]));
    }
  else
    {
      /* Little endian word numbering for operand is 3 2 1 0.
	 take (operand[1] operand[1]) and shift left one word
	 3 2 1 0    3 2 1 0  =>   2 1 0 3
	 Input words 2 and 0 are now where they need to be for the
	 conversion.  */
      rtx rtx_tmp;
      rtx rtx_val = GEN_INT (1);

      rtx_tmp = gen_reg_rtx (V4SImode);
      emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
				       operands[1], rtx_val));
      emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
    }
  DONE;
}
  [(set_attr "type" "veccomplex")])

;; Generate doubleov
;; signed int/float to double convert words 1 and 3
(define_expand "doubleo<mode>2"
  [(set (match_operand:V2DF 0 "register_operand" "=v")
	(match_operand:VSX_W 1 "register_operand" "v"))]
  "TARGET_VSX"
{
  machine_mode op_mode = GET_MODE (operands[1]);

  if (VECTOR_ELT_ORDER_BIG)
    {
      /* Big endian word numbering for words in operand is 0 1 2 3.
	 take (operand[1] operand[1]) and shift left one word
	 0 1 2 3    0 1 2 3  =>  1 2 3 0
	 Input words 1 and 3 are now where they need to be for the
	 conversion.  */
      rtx rtx_tmp;
      rtx rtx_val = GEN_INT (1);

      rtx_tmp = gen_reg_rtx (op_mode);
      emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
					 operands[1], rtx_val));
      emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
    }
  else
    {
      /* Little endian word numbering for operand is 3 2 1 0.
	 Input words 3 and 1 are where they need to be.  */
      emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], operands[1]));
    }
  DONE;
}
  [(set_attr "type" "veccomplex")])

;; Generate unsdoubleov
;; unsigned int to double convert words 1 and 3
(define_expand "unsdoubleov4si2"
  [(set (match_operand:V2DF 0 "register_operand" "=v")
	(match_operand:V4SI 1 "register_operand" "v"))]
  "TARGET_VSX"
{
  if (VECTOR_ELT_ORDER_BIG)
    {
      /* Big endian word numbering for words in operand is 0 1 2 3.
	 take (operand[1] operand[1]) and shift left one word
	 0 1 2 3    0 1 2 3  =>  1 2 3 0
	 Input words 1 and 3 are now where they need to be for the
	 conversion.  */
      rtx rtx_tmp;
      rtx rtx_val = GEN_INT (1);

      rtx_tmp = gen_reg_rtx (V4SImode);
      emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
				       operands[1], rtx_val));
      emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
    }
  else
    {
      /* Want to convert the words 1 and 3.
	 Little endian word numbering for operand is 3 2 1 0.
	 Input words 3 and 1 are where they need to be.  */
      emit_insn (gen_vsx_xvcvuxwdp (operands[0], operands[1]));
    }
  DONE;
}
  [(set_attr "type" "veccomplex")])

;; Generate doublehv
;; signed int/float to double convert words 0 and 1
(define_expand "doubleh<mode>2"
  [(set (match_operand:V2DF 0 "register_operand" "=v")
	(match_operand:VSX_W 1 "register_operand" "v"))]
  "TARGET_VSX"
{
  rtx rtx_tmp;
  rtx rtx_val;

  machine_mode op_mode = GET_MODE (operands[1]);
  rtx_tmp = gen_reg_rtx (op_mode);

  if (VECTOR_ELT_ORDER_BIG)
    {
      /* Big endian word numbering for words in operand is 0 1 2 3.
	 Shift operand left one word, rtx_tmp word order is now 1 2 3 0.
	 take (rtx_tmp operand[1]) and shift left three words
	 1 2 3 0  0 1 2 3 => 0 0 1 2
	 Input words 0 and 1 are now where they need to be for the
	 conversion.  */
      rtx_val = GEN_INT (1);
      emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
					 operands[1], rtx_val));

      rtx_val = GEN_INT (3);
      emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, rtx_tmp,
					 operands[1], rtx_val));
      emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
    }
  else
    {
      /* Little endian word numbering for operand is 3 2 1 0.
	 Shift operand left three words, rtx_tmp word order is now 0 3 2 1.
	 take (operand[1] rtx_tmp) and shift left two words
	 3 2 1 0  0 3 2 1   =>  1 0 0 3
	 Input words 0 and 1 are now where they need to be for the
	 conversion.  */
      rtx_val = GEN_INT (3);
      emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
					 operands[1], rtx_val));

      rtx_val = GEN_INT (2);
      emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
					 rtx_tmp, rtx_val));
      emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
    }
  DONE;
}
  [(set_attr "type" "veccomplex")])
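
;; Continuing the illustrative mapping, with v = {v0, v1, v2, v3} in
;; the user's element order:
;;
;;   doubleh: {(double) v0, (double) v1}
;;   doublel: {(double) v2, (double) v3}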

;; Generate unsdoublehv
;; unsigned int to double convert words 0 and 1
(define_expand "unsdoublehv4si2"
  [(set (match_operand:V2DF 0 "register_operand" "=v")
	(match_operand:V4SI 1 "register_operand" "v"))]
  "TARGET_VSX"
{
  rtx rtx_tmp = gen_reg_rtx (V4SImode);
  rtx rtx_val;

  if (VECTOR_ELT_ORDER_BIG)
    {
      /* Big endian word numbering for words in operand is 0 1 2 3.
	 Shift operand left one word, rtx_tmp word order is now 1 2 3 0.
	 take (rtx_tmp operand[1]) and shift left three words
	 1 2 3 0  0 1 2 3 => 0 0 1 2
	 Input words 0 and 1 are now where they need to be for the
	 conversion.  */
      rtx_val = GEN_INT (1);
      emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
				       operands[1], rtx_val));

      rtx_val = GEN_INT (3);
      emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, rtx_tmp,
				       operands[1], rtx_val));
      emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
    }
  else
    {
      /* Little endian word numbering for operand is 3 2 1 0.
	 Shift operand left three words, rtx_tmp word order is now 0 3 2 1.
	 take (operand[1] rtx_tmp) and shift left two words
	 3 2 1 0   0 3 2 1  =>   1 0 0 3
	 Input words 1 and 0 are now where they need to be for the
	 conversion.  */
      rtx_val = GEN_INT (3);
      emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
				       operands[1], rtx_val));

      rtx_val = GEN_INT (2);
      emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
				       rtx_tmp, rtx_val));
      emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
    }
  DONE;
}
  [(set_attr "type" "veccomplex")])

;; Generate doublelv
;; signed int/float to double convert words 2 and 3
(define_expand "doublel<mode>2"
  [(set (match_operand:V2DF 0 "register_operand" "=v")
	(match_operand:VSX_W 1 "register_operand" "v"))]
  "TARGET_VSX"
{
  rtx rtx_tmp;
  rtx rtx_val;
3136
3137  machine_mode op_mode = GET_MODE (operands[1]);
3138  rtx_tmp = gen_reg_rtx (op_mode);
3139
3140  if (VECTOR_ELT_ORDER_BIG)
3141    {
3142      /* Big endian word numbering for operand is 0 1 2 3.
3143	 Shift operand left three words, rtx_tmp word order is now 3 0 1 2.
3144	 take (operand[1] rtx_tmp) and shift left two words
3145	 0 1 2 3   3 0 1 2  =>  2 3 3 0
3146	 now use convert instruction to convert word 2 and 3 in the
3147	 input vector.  */
      rtx_val = GEN_INT (3);
      emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
					 operands[1], rtx_val));

      rtx_val = GEN_INT (2);
      emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
					 rtx_tmp, rtx_val));
      emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
    }
  else
    {
      /* Little endian word numbering for operand is 3 2 1 0.
	 Shift operand left one word, rtx_tmp word order is now 2 1 0 3.
	 Take (rtx_tmp operand[1]) and shift left three words
	 2 1 0 3  3 2 1 0  =>  3 3 2 1
	 Now use the convert instruction to convert words 3 and 2 of the
	 input vector.  */
      rtx_val = GEN_INT (1);
      emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
					 operands[1], rtx_val));

      rtx_val = GEN_INT (3);
      emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, rtx_tmp,
					 operands[1], rtx_val));
      emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
    }
  DONE;
}
  [(set_attr "type" "veccomplex")])

;; Generate unsdoublel
;; unsigned int to double, converting words 2 and 3
(define_expand "unsdoublelv4si2"
  [(set (match_operand:V2DF 0 "register_operand" "=v")
	(match_operand:V4SI 1 "register_operand" "v"))]
  "TARGET_VSX"
{
  rtx rtx_tmp = gen_reg_rtx (V4SImode);
  rtx rtx_val = GEN_INT (12);

  if (VECTOR_ELT_ORDER_BIG)
    {
      /* Big endian word numbering for operand is 0 1 2 3.
	 Shift operand left three words, rtx_tmp word order is now 3 0 1 2.
	 Take (operand[1] rtx_tmp) and shift left two words
	 0 1 2 3  3 0 1 2  =>  2 3 3 0
	 Now use the convert instruction to convert words 2 and 3 of the
	 input vector.  */
      rtx_val = GEN_INT (3);
      emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
				       operands[1], rtx_val));

      rtx_val = GEN_INT (2);
      emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
				       rtx_tmp, rtx_val));
      emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
    }
  else
    {
      /* Little endian word numbering for operand is 3 2 1 0.
	 Shift operand left one word, rtx_tmp word order is now 2 1 0 3.
	 Take (rtx_tmp operand[1]) and shift left three words
	 2 1 0 3  3 2 1 0  =>  3 3 2 1
	 Now use the convert instruction to convert words 3 and 2 of the
	 input vector.  */
      rtx_val = GEN_INT (1);
      emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
				       operands[1], rtx_val));

      rtx_val = GEN_INT (3);
      emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, rtx_tmp,
				       operands[1], rtx_val));
      emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
    }
  DONE;
}
  [(set_attr "type" "veccomplex")])
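
;; On big endian, for instance, the expander above emits (illustrative;
;; register names arbitrary):
;;    xxsldwi tmp,src,src,3	; tmp word order 3 0 1 2
;;    xxsldwi tmp,src,tmp,2	; tmp word order 2 3 3 0
;;    xvcvuxwdp dst,tmp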

;; Convert two vectors of F32 values into one packed vector of I16 values
(define_expand "convert_4f32_8i16"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
	(unspec:V8HI [(match_operand:V4SF 1 "register_operand" "v")
		      (match_operand:V4SF 2 "register_operand" "v")]
		     UNSPEC_CONVERT_4F32_8I16))]
  "TARGET_P9_VECTOR"
{
  rtx rtx_tmp_hi = gen_reg_rtx (V4SImode);
  rtx rtx_tmp_lo = gen_reg_rtx (V4SImode);

  emit_insn (gen_altivec_vctuxs (rtx_tmp_hi, operands[1], const0_rtx));
  emit_insn (gen_altivec_vctuxs (rtx_tmp_lo, operands[2], const0_rtx));
  emit_insn (gen_altivec_vpkswss (operands[0], rtx_tmp_hi, rtx_tmp_lo));
  DONE;
})
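
;; The expansion above amounts to (illustrative):
;;    vctuxs tmp_hi,%1,0
;;    vctuxs tmp_lo,%2,0
;;    vpkswss %0,tmp_hi,tmp_lo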

;; Generate
;;    xxlxor/vxor SCRATCH1,SCRATCH1,SCRATCH1
;;    vsubu?m SCRATCH2,SCRATCH1,%1
;;    vmaxs? %0,%1,SCRATCH2
(define_expand "abs<mode>2"
  [(set (match_dup 2) (match_dup 3))
   (set (match_dup 4)
	(minus:VI2 (match_dup 2)
		   (match_operand:VI2 1 "register_operand" "v")))
   (set (match_operand:VI2 0 "register_operand" "=v")
	(smax:VI2 (match_dup 1) (match_dup 4)))]
  "<VI_unit>"
{
  operands[2] = gen_reg_rtx (<MODE>mode);
  operands[3] = CONST0_RTX (<MODE>mode);
  operands[4] = gen_reg_rtx (<MODE>mode);
})

;; Generate
;;    vspltisw SCRATCH1,0
;;    vsubu?m SCRATCH2,SCRATCH1,%1
;;    vmins? %0,%1,SCRATCH2
(define_expand "nabs<mode>2"
  [(set (match_dup 2) (match_dup 3))
   (set (match_dup 4)
	(minus:VI2 (match_dup 2)
		   (match_operand:VI2 1 "register_operand" "v")))
   (set (match_operand:VI2 0 "register_operand" "=v")
	(smin:VI2 (match_dup 1) (match_dup 4)))]
  "<VI_unit>"
{
  operands[2] = gen_reg_rtx (<MODE>mode);
  operands[3] = CONST0_RTX (<MODE>mode);
  operands[4] = gen_reg_rtx (<MODE>mode);
})

;; Generate
;;    vspltisw SCRATCH1,-1
;;    vslw SCRATCH2,SCRATCH1,SCRATCH1
;;    vandc %0,%1,SCRATCH2
(define_expand "altivec_absv4sf2"
  [(set (match_dup 2)
	(vec_duplicate:V4SI (const_int -1)))
   (set (match_dup 3)
	(ashift:V4SI (match_dup 2) (match_dup 2)))
   (set (match_operand:V4SF 0 "register_operand" "=v")
	(and:V4SF (not:V4SF (subreg:V4SF (match_dup 3) 0))
		  (match_operand:V4SF 1 "register_operand" "v")))]
  "TARGET_ALTIVEC"
{
  operands[2] = gen_reg_rtx (V4SImode);
  operands[3] = gen_reg_rtx (V4SImode);
})

;; Generate
;;    vspltis? SCRATCH1,0
;;    vsubs?s SCRATCH2,SCRATCH1,%1
;;    vmaxs? %0,%1,SCRATCH2
(define_expand "altivec_abss_<mode>"
  [(set (match_dup 2) (vec_duplicate:VI (const_int 0)))
   (parallel [(set (match_dup 3)
		   (unspec:VI [(match_dup 2)
			       (match_operand:VI 1 "register_operand" "v")]
			      UNSPEC_VSUBS))
	      (set (reg:SI VSCR_REGNO)
		   (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))])
   (set (match_operand:VI 0 "register_operand" "=v")
	(smax:VI (match_dup 1) (match_dup 3)))]
  "TARGET_ALTIVEC"
{
  operands[2] = gen_reg_rtx (GET_MODE (operands[0]));
  operands[3] = gen_reg_rtx (GET_MODE (operands[0]));
})

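;; Sum-across reduction: zero a vector, form per-word partial sums with
;; vsum4s?s, reduce across words with vsumsws, and extract the scalar
;; result (element N-1 on big endian, element 0 on little endian).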
(define_expand "reduc_plus_scal_<mode>"
  [(set (match_operand:<VI_scalar> 0 "register_operand" "=v")
	(unspec:VIshort [(match_operand:VIshort 1 "register_operand" "v")]
			UNSPEC_REDUC_PLUS))]
  "TARGET_ALTIVEC"
{
  rtx vzero = gen_reg_rtx (V4SImode);
  rtx vtmp1 = gen_reg_rtx (V4SImode);
  rtx vtmp2 = gen_reg_rtx (<MODE>mode);
  rtx dest = gen_lowpart (V4SImode, vtmp2);
  int elt = VECTOR_ELT_ORDER_BIG ? GET_MODE_NUNITS (<MODE>mode) - 1 : 0;

  emit_insn (gen_altivec_vspltisw (vzero, const0_rtx));
  emit_insn (gen_altivec_vsum4s<VI_char>s (vtmp1, operands[1], vzero));
  emit_insn (gen_altivec_vsumsws_direct (dest, vtmp1, vzero));
  rs6000_expand_vector_extract (operands[0], vtmp2, GEN_INT (elt));
  DONE;
})

(define_insn "*p9_neg<mode>2"
  [(set (match_operand:VNEG 0 "altivec_register_operand" "=v")
	(neg:VNEG (match_operand:VNEG 1 "altivec_register_operand" "v")))]
  "TARGET_P9_VECTOR"
  "vneg<VI_char> %0,%1"
  [(set_attr "type" "vecsimple")])

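;; Without a vneg[wd] instruction, negate by subtracting from zero; a
;; minimal sketch of the fallback sequence below (scratch name arbitrary):
;;    vspltisw SCRATCH,0  (or an equivalent xxlxor/vxor zeroing)
;;    vsubu?m %0,SCRATCH,%1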
(define_expand "neg<mode>2"
  [(set (match_operand:VI2 0 "register_operand")
	(neg:VI2 (match_operand:VI2 1 "register_operand")))]
  "<VI_unit>"
{
  if (!TARGET_P9_VECTOR || (<MODE>mode != V4SImode && <MODE>mode != V2DImode))
    {
      rtx vzero;

      vzero = gen_reg_rtx (GET_MODE (operands[0]));
      emit_move_insn (vzero, CONST0_RTX (<MODE>mode));
      emit_insn (gen_sub<mode>3 (operands[0], vzero, operands[1]));
      DONE;
    }
})

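;; Dot products map directly onto vmsum: vmsumu?m/vmsumshm multiply the
;; narrow element pairs, sum the products within each word, and add the
;; corresponding accumulator word from operand 3.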
(define_expand "udot_prod<mode>"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
	(plus:V4SI (match_operand:V4SI 3 "register_operand" "v")
		   (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
				 (match_operand:VIshort 2 "register_operand" "v")]
				UNSPEC_VMSUMU)))]
  "TARGET_ALTIVEC"
{
  emit_insn (gen_altivec_vmsumu<VI_char>m (operands[0], operands[1],
					   operands[2], operands[3]));
  DONE;
})

(define_expand "sdot_prodv8hi"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
	(plus:V4SI (match_operand:V4SI 3 "register_operand" "v")
		   (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
				 (match_operand:V8HI 2 "register_operand" "v")]
				UNSPEC_VMSUMSHM)))]
  "TARGET_ALTIVEC"
{
  emit_insn (gen_altivec_vmsumshm (operands[0], operands[1],
				   operands[2], operands[3]));
  DONE;
})

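;; Widening sums reuse vmsum: multiplying by a splat of 1s reduces the
;; multiply-sum to a plain sum of the narrow elements, accumulated into
;; operand 2; e.g. for V16QI (illustrative):
;;    vspltisb ones,1
;;    vmsummbm %0,%1,ones,%2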
(define_expand "widen_usum<mode>3"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
	(plus:V4SI (match_operand:V4SI 2 "register_operand" "v")
		   (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")]
				UNSPEC_VMSUMU)))]
  "TARGET_ALTIVEC"
{
  rtx vones = gen_reg_rtx (GET_MODE (operands[1]));

  emit_insn (gen_altivec_vspltis<VI_char> (vones, const1_rtx));
  emit_insn (gen_altivec_vmsumu<VI_char>m (operands[0], operands[1],
					   vones, operands[2]));
  DONE;
})

(define_expand "widen_ssumv16qi3"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
	(plus:V4SI (match_operand:V4SI 2 "register_operand" "v")
		   (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")]
				UNSPEC_VMSUMM)))]
  "TARGET_ALTIVEC"
{
  rtx vones = gen_reg_rtx (V16QImode);

  emit_insn (gen_altivec_vspltisb (vones, const1_rtx));
  emit_insn (gen_altivec_vmsummbm (operands[0], operands[1],
				   vones, operands[2]));
  DONE;
})

(define_expand "widen_ssumv8hi3"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
	(plus:V4SI (match_operand:V4SI 2 "register_operand" "v")
		   (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
				UNSPEC_VMSUMSHM)))]
  "TARGET_ALTIVEC"
{
  rtx vones = gen_reg_rtx (V8HImode);

  emit_insn (gen_altivec_vspltish (vones, const1_rtx));
  emit_insn (gen_altivec_vmsumshm (operands[0], operands[1],
				   vones, operands[2]));
  DONE;
})

(define_expand "vec_unpacks_hi_<VP_small_lc>"
  [(set (match_operand:VP 0 "register_operand" "=v")
	(unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
		   UNSPEC_VUNPACK_HI_SIGN_DIRECT))]
  "<VI_unit>"
  "")

(define_expand "vec_unpacks_lo_<VP_small_lc>"
  [(set (match_operand:VP 0 "register_operand" "=v")
	(unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
		   UNSPEC_VUNPACK_LO_SIGN_DIRECT))]
  "<VI_unit>"
  "")

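;; In the second alternative of the two vperm insns below, operand 2 is
;; tied to the output (constraint "0") because xxperm uses its target
;; register as the second permute source.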
(define_insn "vperm_v8hiv4si"
  [(set (match_operand:V4SI 0 "register_operand" "=v,?wo")
	(unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v,wo")
		      (match_operand:V4SI 2 "register_operand" "v,0")
		      (match_operand:V16QI 3 "register_operand" "v,wo")]
		     UNSPEC_VPERMSI))]
  "TARGET_ALTIVEC"
  "@
   vperm %0,%1,%2,%3
   xxperm %x0,%x1,%x3"
  [(set_attr "type" "vecperm")
   (set_attr "length" "4")])

(define_insn "vperm_v16qiv8hi"
  [(set (match_operand:V8HI 0 "register_operand" "=v,?wo")
	(unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v,wo")
		      (match_operand:V8HI 2 "register_operand" "v,0")
		      (match_operand:V16QI 3 "register_operand" "v,wo")]
		     UNSPEC_VPERMHI))]
  "TARGET_ALTIVEC"
  "@
   vperm %0,%1,%2,%3
   xxperm %x0,%x1,%x3"
  [(set_attr "type" "vecperm")
   (set_attr "length" "4")])


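;; The unsigned unpacks below are open-coded as a vperm with a zero
;; vector: mask entries 0-15 select bytes of operand 1 and entries 16-31
;; select bytes of the zero vector, so each narrow element is
;; zero-extended into its wider slot.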
(define_expand "vec_unpacku_hi_v16qi"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
	(unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")]
		     UNSPEC_VUPKHUB))]
  "TARGET_ALTIVEC"
{
  rtx vzero = gen_reg_rtx (V8HImode);
  rtx mask = gen_reg_rtx (V16QImode);
  rtvec v = rtvec_alloc (16);
  bool be = BYTES_BIG_ENDIAN;

  emit_insn (gen_altivec_vspltish (vzero, const0_rtx));

  RTVEC_ELT (v,  0) = gen_rtx_CONST_INT (QImode, be ? 16 :  7);
  RTVEC_ELT (v,  1) = gen_rtx_CONST_INT (QImode, be ?  0 : 16);
  RTVEC_ELT (v,  2) = gen_rtx_CONST_INT (QImode, be ? 16 :  6);
  RTVEC_ELT (v,  3) = gen_rtx_CONST_INT (QImode, be ?  1 : 16);
  RTVEC_ELT (v,  4) = gen_rtx_CONST_INT (QImode, be ? 16 :  5);
  RTVEC_ELT (v,  5) = gen_rtx_CONST_INT (QImode, be ?  2 : 16);
  RTVEC_ELT (v,  6) = gen_rtx_CONST_INT (QImode, be ? 16 :  4);
  RTVEC_ELT (v,  7) = gen_rtx_CONST_INT (QImode, be ?  3 : 16);
  RTVEC_ELT (v,  8) = gen_rtx_CONST_INT (QImode, be ? 16 :  3);
  RTVEC_ELT (v,  9) = gen_rtx_CONST_INT (QImode, be ?  4 : 16);
  RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, be ? 16 :  2);
  RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, be ?  5 : 16);
  RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, be ? 16 :  1);
  RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, be ?  6 : 16);
  RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, be ? 16 :  0);
  RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, be ?  7 : 16);

  emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v)));
  emit_insn (gen_vperm_v16qiv8hi (operands[0], operands[1], vzero, mask));
  DONE;
})

(define_expand "vec_unpacku_hi_v8hi"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
	(unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
		     UNSPEC_VUPKHUH))]
  "TARGET_ALTIVEC"
{
  rtx vzero = gen_reg_rtx (V4SImode);
  rtx mask = gen_reg_rtx (V16QImode);
  rtvec v = rtvec_alloc (16);
  bool be = BYTES_BIG_ENDIAN;

  emit_insn (gen_altivec_vspltisw (vzero, const0_rtx));

  RTVEC_ELT (v,  0) = gen_rtx_CONST_INT (QImode, be ? 16 :  7);
  RTVEC_ELT (v,  1) = gen_rtx_CONST_INT (QImode, be ? 17 :  6);
  RTVEC_ELT (v,  2) = gen_rtx_CONST_INT (QImode, be ?  0 : 17);
  RTVEC_ELT (v,  3) = gen_rtx_CONST_INT (QImode, be ?  1 : 16);
  RTVEC_ELT (v,  4) = gen_rtx_CONST_INT (QImode, be ? 16 :  5);
  RTVEC_ELT (v,  5) = gen_rtx_CONST_INT (QImode, be ? 17 :  4);
  RTVEC_ELT (v,  6) = gen_rtx_CONST_INT (QImode, be ?  2 : 17);
  RTVEC_ELT (v,  7) = gen_rtx_CONST_INT (QImode, be ?  3 : 16);
  RTVEC_ELT (v,  8) = gen_rtx_CONST_INT (QImode, be ? 16 :  3);
  RTVEC_ELT (v,  9) = gen_rtx_CONST_INT (QImode, be ? 17 :  2);
  RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, be ?  4 : 17);
  RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, be ?  5 : 16);
  RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, be ? 16 :  1);
  RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, be ? 17 :  0);
  RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, be ?  6 : 17);
  RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, be ?  7 : 16);

  emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v)));
  emit_insn (gen_vperm_v8hiv4si (operands[0], operands[1], vzero, mask));
  DONE;
})

(define_expand "vec_unpacku_lo_v16qi"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
	(unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")]
		     UNSPEC_VUPKLUB))]
  "TARGET_ALTIVEC"
{
  rtx vzero = gen_reg_rtx (V8HImode);
  rtx mask = gen_reg_rtx (V16QImode);
  rtvec v = rtvec_alloc (16);
  bool be = BYTES_BIG_ENDIAN;

  emit_insn (gen_altivec_vspltish (vzero, const0_rtx));

  RTVEC_ELT (v,  0) = gen_rtx_CONST_INT (QImode, be ? 16 : 15);
  RTVEC_ELT (v,  1) = gen_rtx_CONST_INT (QImode, be ?  8 : 16);
  RTVEC_ELT (v,  2) = gen_rtx_CONST_INT (QImode, be ? 16 : 14);
  RTVEC_ELT (v,  3) = gen_rtx_CONST_INT (QImode, be ?  9 : 16);
  RTVEC_ELT (v,  4) = gen_rtx_CONST_INT (QImode, be ? 16 : 13);
  RTVEC_ELT (v,  5) = gen_rtx_CONST_INT (QImode, be ? 10 : 16);
  RTVEC_ELT (v,  6) = gen_rtx_CONST_INT (QImode, be ? 16 : 12);
  RTVEC_ELT (v,  7) = gen_rtx_CONST_INT (QImode, be ? 11 : 16);
  RTVEC_ELT (v,  8) = gen_rtx_CONST_INT (QImode, be ? 16 : 11);
  RTVEC_ELT (v,  9) = gen_rtx_CONST_INT (QImode, be ? 12 : 16);
  RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, be ? 16 : 10);
  RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, be ? 13 : 16);
  RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, be ? 16 :  9);
  RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, be ? 14 : 16);
  RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, be ? 16 :  8);
  RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, be ? 15 : 16);

  emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v)));
  emit_insn (gen_vperm_v16qiv8hi (operands[0], operands[1], vzero, mask));
  DONE;
})

(define_expand "vec_unpacku_lo_v8hi"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
	(unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
		     UNSPEC_VUPKLUH))]
  "TARGET_ALTIVEC"
{
  rtx vzero = gen_reg_rtx (V4SImode);
  rtx mask = gen_reg_rtx (V16QImode);
  rtvec v = rtvec_alloc (16);
  bool be = BYTES_BIG_ENDIAN;

  emit_insn (gen_altivec_vspltisw (vzero, const0_rtx));

  RTVEC_ELT (v,  0) = gen_rtx_CONST_INT (QImode, be ? 16 : 15);
  RTVEC_ELT (v,  1) = gen_rtx_CONST_INT (QImode, be ? 17 : 14);
  RTVEC_ELT (v,  2) = gen_rtx_CONST_INT (QImode, be ?  8 : 17);
  RTVEC_ELT (v,  3) = gen_rtx_CONST_INT (QImode, be ?  9 : 16);
  RTVEC_ELT (v,  4) = gen_rtx_CONST_INT (QImode, be ? 16 : 13);
  RTVEC_ELT (v,  5) = gen_rtx_CONST_INT (QImode, be ? 17 : 12);
  RTVEC_ELT (v,  6) = gen_rtx_CONST_INT (QImode, be ? 10 : 17);
  RTVEC_ELT (v,  7) = gen_rtx_CONST_INT (QImode, be ? 11 : 16);
  RTVEC_ELT (v,  8) = gen_rtx_CONST_INT (QImode, be ? 16 : 11);
  RTVEC_ELT (v,  9) = gen_rtx_CONST_INT (QImode, be ? 17 : 10);
  RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, be ? 12 : 17);
  RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, be ? 13 : 16);
  RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, be ? 16 :  9);
  RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, be ? 17 :  8);
  RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, be ? 14 : 17);
  RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, be ? 15 : 16);

  emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v)));
  emit_insn (gen_vperm_v8hiv4si (operands[0], operands[1], vzero, mask));
  DONE;
})

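;; Widening multiplies compute the even and odd products separately and
;; merge the halves.  On little endian the even/odd instructions operate
;; on the opposite elements, so their roles (and the merge operands) are
;; swapped.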
(define_expand "vec_widen_umult_hi_v16qi"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
	(unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
		      (match_operand:V16QI 2 "register_operand" "v")]
		     UNSPEC_VMULWHUB))]
  "TARGET_ALTIVEC"
{
  rtx ve = gen_reg_rtx (V8HImode);
  rtx vo = gen_reg_rtx (V8HImode);

  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_altivec_vmuleub (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmuloub (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrghh_direct (operands[0], ve, vo));
    }
  else
    {
      emit_insn (gen_altivec_vmuloub (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmuleub (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrghh_direct (operands[0], vo, ve));
    }
  DONE;
})

(define_expand "vec_widen_umult_lo_v16qi"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
	(unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
		      (match_operand:V16QI 2 "register_operand" "v")]
		     UNSPEC_VMULWLUB))]
  "TARGET_ALTIVEC"
{
  rtx ve = gen_reg_rtx (V8HImode);
  rtx vo = gen_reg_rtx (V8HImode);

  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_altivec_vmuleub (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmuloub (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrglh_direct (operands[0], ve, vo));
    }
  else
    {
      emit_insn (gen_altivec_vmuloub (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmuleub (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrglh_direct (operands[0], vo, ve));
    }
  DONE;
})

(define_expand "vec_widen_smult_hi_v16qi"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
	(unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
		      (match_operand:V16QI 2 "register_operand" "v")]
		     UNSPEC_VMULWHSB))]
  "TARGET_ALTIVEC"
{
  rtx ve = gen_reg_rtx (V8HImode);
  rtx vo = gen_reg_rtx (V8HImode);

  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_altivec_vmulesb (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmulosb (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrghh_direct (operands[0], ve, vo));
    }
  else
    {
      emit_insn (gen_altivec_vmulosb (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmulesb (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrghh_direct (operands[0], vo, ve));
    }
  DONE;
})

(define_expand "vec_widen_smult_lo_v16qi"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
	(unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
		      (match_operand:V16QI 2 "register_operand" "v")]
		     UNSPEC_VMULWLSB))]
  "TARGET_ALTIVEC"
{
  rtx ve = gen_reg_rtx (V8HImode);
  rtx vo = gen_reg_rtx (V8HImode);

  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_altivec_vmulesb (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmulosb (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrglh_direct (operands[0], ve, vo));
    }
  else
    {
      emit_insn (gen_altivec_vmulosb (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmulesb (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrglh_direct (operands[0], vo, ve));
    }
  DONE;
})

(define_expand "vec_widen_umult_hi_v8hi"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
	(unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
		      (match_operand:V8HI 2 "register_operand" "v")]
		     UNSPEC_VMULWHUH))]
  "TARGET_ALTIVEC"
{
  rtx ve = gen_reg_rtx (V4SImode);
  rtx vo = gen_reg_rtx (V4SImode);

  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_altivec_vmuleuh (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmulouh (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrghw_direct (operands[0], ve, vo));
    }
  else
    {
      emit_insn (gen_altivec_vmulouh (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmuleuh (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrghw_direct (operands[0], vo, ve));
    }
  DONE;
})

(define_expand "vec_widen_umult_lo_v8hi"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
	(unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
		      (match_operand:V8HI 2 "register_operand" "v")]
		     UNSPEC_VMULWLUH))]
  "TARGET_ALTIVEC"
{
  rtx ve = gen_reg_rtx (V4SImode);
  rtx vo = gen_reg_rtx (V4SImode);

  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_altivec_vmuleuh (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmulouh (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrglw_direct (operands[0], ve, vo));
    }
  else
    {
      emit_insn (gen_altivec_vmulouh (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmuleuh (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrglw_direct (operands[0], vo, ve));
    }
  DONE;
})

(define_expand "vec_widen_smult_hi_v8hi"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
	(unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
		      (match_operand:V8HI 2 "register_operand" "v")]
		     UNSPEC_VMULWHSH))]
  "TARGET_ALTIVEC"
{
  rtx ve = gen_reg_rtx (V4SImode);
  rtx vo = gen_reg_rtx (V4SImode);

  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_altivec_vmulesh (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmulosh (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrghw_direct (operands[0], ve, vo));
    }
  else
    {
      emit_insn (gen_altivec_vmulosh (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmulesh (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrghw_direct (operands[0], vo, ve));
    }
  DONE;
})

(define_expand "vec_widen_smult_lo_v8hi"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
	(unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
		      (match_operand:V8HI 2 "register_operand" "v")]
		     UNSPEC_VMULWLSH))]
  "TARGET_ALTIVEC"
{
  rtx ve = gen_reg_rtx (V4SImode);
  rtx vo = gen_reg_rtx (V4SImode);

  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_altivec_vmulesh (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmulosh (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrglw_direct (operands[0], ve, vo));
    }
  else
    {
      emit_insn (gen_altivec_vmulosh (ve, operands[1], operands[2]));
      emit_insn (gen_altivec_vmulesh (vo, operands[1], operands[2]));
      emit_insn (gen_altivec_vmrglw_direct (operands[0], vo, ve));
    }
  DONE;
})

(define_expand "vec_pack_trunc_<mode>"
  [(set (match_operand:<VP_small> 0 "register_operand" "=v")
	(unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
			    (match_operand:VP 2 "register_operand" "v")]
			   UNSPEC_VPACK_UNS_UNS_MOD))]
  "<VI_unit>"
  "")

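;; Full V16QI multiply: vmulesb/vmulosb produce 16-bit products of the
;; even/odd bytes, and the vperm mask built below selects the low-order
;; byte of each product to form the truncated result.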
(define_expand "mulv16qi3"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
	(mult:V16QI (match_operand:V16QI 1 "register_operand" "v")
		    (match_operand:V16QI 2 "register_operand" "v")))]
  "TARGET_ALTIVEC"
{
  rtx even = gen_reg_rtx (V8HImode);
  rtx odd = gen_reg_rtx (V8HImode);
  rtx mask = gen_reg_rtx (V16QImode);
  rtvec v = rtvec_alloc (16);
  int i;

  for (i = 0; i < 8; ++i)
    {
      RTVEC_ELT (v, 2 * i)
	= gen_rtx_CONST_INT (QImode,
			     BYTES_BIG_ENDIAN ? 2 * i + 1 : 31 - 2 * i);
      RTVEC_ELT (v, 2 * i + 1)
	= gen_rtx_CONST_INT (QImode,
			     BYTES_BIG_ENDIAN ? 2 * i + 17 : 15 - 2 * i);
    }

  emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v)));
  emit_insn (gen_altivec_vmulesb (even, operands[1], operands[2]));
  emit_insn (gen_altivec_vmulosb (odd, operands[1], operands[2]));
  emit_insn (gen_altivec_vperm_v8hiv16qi (operands[0], even, odd, mask));
  DONE;
})

(define_expand "altivec_vpermxor"
  [(use (match_operand:V16QI 0 "register_operand"))
   (use (match_operand:V16QI 1 "register_operand"))
   (use (match_operand:V16QI 2 "register_operand"))
   (use (match_operand:V16QI 3 "register_operand"))]
  "TARGET_P8_VECTOR"
{
  if (!BYTES_BIG_ENDIAN)
    {
      /* vpermxor indexes the bytes using big endian numbering.  If LE,
	 change the indexing in operands[3] to BE indexing.  */
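      /* Complementing the selector flips both 4-bit indices in each
	 byte: a 4-bit index i becomes 15 - i, which renumbers an LE
	 byte index as the corresponding BE byte index.  */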
      rtx be_index = gen_reg_rtx (V16QImode);

      emit_insn (gen_one_cmplv16qi2 (be_index, operands[3]));
      emit_insn (gen_crypto_vpermxor_v16qi (operands[0], operands[1],
					    operands[2], be_index));
    }
  else
    emit_insn (gen_crypto_vpermxor_v16qi (operands[0], operands[1],
					  operands[2], operands[3]));
  DONE;
})

(define_expand "altivec_negv4sf2"
  [(use (match_operand:V4SF 0 "register_operand"))
   (use (match_operand:V4SF 1 "register_operand"))]
  "TARGET_ALTIVEC"
{
  rtx neg0;

  /* Generate [-0.0, -0.0, -0.0, -0.0]: splat -1 into each word, then
     shift each word left by 31 (vslw shifts modulo 32), leaving just
     the sign bit set.  */
  neg0 = gen_reg_rtx (V4SImode);
  emit_insn (gen_altivec_vspltisw (neg0, constm1_rtx));
  emit_insn (gen_vashlv4si3 (neg0, neg0, neg0));

  /* XOR with the sign-bit mask flips the sign of each element.  */
  emit_insn (gen_xorv4sf3 (operands[0],
			   gen_lowpart (V4SFmode, neg0), operands[1]));

  DONE;
})
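
;; An illustrative expansion (register names arbitrary):
;;    vspltisw tmp,-1
;;    vslw tmp,tmp,tmp	; tmp = 0x80000000 in each word
;;    xxlxor/vxor %0,%1,tmp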

;; Vector reverse elements
(define_expand "altivec_vreve<mode>2"
  [(set (match_operand:VEC_A 0 "register_operand" "=v")
	(unspec:VEC_A [(match_operand:VEC_A 1 "register_operand" "v")]
		      UNSPEC_VREVEV))]
  "TARGET_ALTIVEC"
{
  int i, j, size, num_elements;
  rtvec v = rtvec_alloc (16);
  rtx mask = gen_reg_rtx (V16QImode);

  size = GET_MODE_UNIT_SIZE (<MODE>mode);
  num_elements = GET_MODE_NUNITS (<MODE>mode);

  for (j = 0; j < num_elements; j++)
    for (i = 0; i < size; i++)
      RTVEC_ELT (v, i + j * size)
	= GEN_INT (i + (num_elements - 1 - j) * size);

  emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v)));
  emit_insn (gen_altivec_vperm_<mode> (operands[0], operands[1],
				       operands[1], mask));
  DONE;
})
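
;; For V4SI, for example, the mask built above is
;; 12 13 14 15  8 9 10 11  4 5 6 7  0 1 2 3, which reverses the four
;; words while preserving the byte order within each word.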

;; The Vector SIMD PEM v2.06c defines LVLX, LVLXL, LVRX, LVRXL,
;; STVLX, STVLXL, STVRX, and STVRXL; they are available only on Cell.
(define_insn "altivec_lvlx"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
	(unspec:V16QI [(match_operand:BLK 1 "memory_operand" "Z")]
		      UNSPEC_LVLX))]
  "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
  "lvlx %0,%y1"
  [(set_attr "type" "vecload")])

(define_insn "altivec_lvlxl"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
	(unspec:V16QI [(match_operand:BLK 1 "memory_operand" "Z")]
		      UNSPEC_LVLXL))]
  "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
  "lvlxl %0,%y1"
  [(set_attr "type" "vecload")])

(define_insn "altivec_lvrx"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
	(unspec:V16QI [(match_operand:BLK 1 "memory_operand" "Z")]
		      UNSPEC_LVRX))]
  "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
  "lvrx %0,%y1"
  [(set_attr "type" "vecload")])

(define_insn "altivec_lvrxl"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
	(unspec:V16QI [(match_operand:BLK 1 "memory_operand" "Z")]
		      UNSPEC_LVRXL))]
  "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
  "lvrxl %0,%y1"
  [(set_attr "type" "vecload")])

(define_insn "altivec_stvlx"
  [(parallel
    [(set (match_operand:V16QI 0 "memory_operand" "=Z")
	  (match_operand:V16QI 1 "register_operand" "v"))
     (unspec [(const_int 0)] UNSPEC_STVLX)])]
  "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
  "stvlx %1,%y0"
  [(set_attr "type" "vecstore")])

(define_insn "altivec_stvlxl"
  [(parallel
    [(set (match_operand:V16QI 0 "memory_operand" "=Z")
	  (match_operand:V16QI 1 "register_operand" "v"))
     (unspec [(const_int 0)] UNSPEC_STVLXL)])]
  "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
  "stvlxl %1,%y0"
  [(set_attr "type" "vecstore")])

(define_insn "altivec_stvrx"
  [(parallel
    [(set (match_operand:V16QI 0 "memory_operand" "=Z")
	  (match_operand:V16QI 1 "register_operand" "v"))
     (unspec [(const_int 0)] UNSPEC_STVRX)])]
  "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
  "stvrx %1,%y0"
  [(set_attr "type" "vecstore")])

(define_insn "altivec_stvrxl"
  [(parallel
    [(set (match_operand:V16QI 0 "memory_operand" "=Z")
	  (match_operand:V16QI 1 "register_operand" "v"))
     (unspec [(const_int 0)] UNSPEC_STVRXL)])]
  "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
  "stvrxl %1,%y0"
  [(set_attr "type" "vecstore")])

(define_expand "vec_unpacks_float_hi_v8hi"
  [(set (match_operand:V4SF 0 "register_operand")
	(unspec:V4SF [(match_operand:V8HI 1 "register_operand")]
		     UNSPEC_VUPKHS_V4SF))]
  "TARGET_ALTIVEC"
{
  rtx tmp = gen_reg_rtx (V4SImode);

  emit_insn (gen_vec_unpacks_hi_v8hi (tmp, operands[1]));
  emit_insn (gen_altivec_vcfsx (operands[0], tmp, const0_rtx));
  DONE;
})

(define_expand "vec_unpacks_float_lo_v8hi"
  [(set (match_operand:V4SF 0 "register_operand")
	(unspec:V4SF [(match_operand:V8HI 1 "register_operand")]
		     UNSPEC_VUPKLS_V4SF))]
  "TARGET_ALTIVEC"
{
  rtx tmp = gen_reg_rtx (V4SImode);

  emit_insn (gen_vec_unpacks_lo_v8hi (tmp, operands[1]));
  emit_insn (gen_altivec_vcfsx (operands[0], tmp, const0_rtx));
  DONE;
})

(define_expand "vec_unpacku_float_hi_v8hi"
  [(set (match_operand:V4SF 0 "register_operand")
	(unspec:V4SF [(match_operand:V8HI 1 "register_operand")]
		     UNSPEC_VUPKHU_V4SF))]
  "TARGET_ALTIVEC"
{
  rtx tmp = gen_reg_rtx (V4SImode);

  emit_insn (gen_vec_unpacku_hi_v8hi (tmp, operands[1]));
  emit_insn (gen_altivec_vcfux (operands[0], tmp, const0_rtx));
  DONE;
})

(define_expand "vec_unpacku_float_lo_v8hi"
  [(set (match_operand:V4SF 0 "register_operand")
	(unspec:V4SF [(match_operand:V8HI 1 "register_operand")]
		     UNSPEC_VUPKLU_V4SF))]
  "TARGET_ALTIVEC"
{
  rtx tmp = gen_reg_rtx (V4SImode);

  emit_insn (gen_vec_unpacku_lo_v8hi (tmp, operands[1]));
  emit_insn (gen_altivec_vcfux (operands[0], tmp, const0_rtx));
  DONE;
})


;; Power8/Power9 vector instructions encoded as AltiVec instructions

;; Vector count leading zeros
(define_insn "*p8v_clz<mode>2"
  [(set (match_operand:VI2 0 "register_operand" "=v")
	(clz:VI2 (match_operand:VI2 1 "register_operand" "v")))]
  "TARGET_P8_VECTOR"
  "vclz<wd> %0,%1"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])

;; Vector absolute difference unsigned
(define_expand "vadu<mode>3"
  [(set (match_operand:VI 0 "register_operand")
	(unspec:VI [(match_operand:VI 1 "register_operand")
		    (match_operand:VI 2 "register_operand")]
		   UNSPEC_VADU))]
  "TARGET_P9_VECTOR")

;; Vector absolute difference unsigned
(define_insn "p9_vadu<mode>3"
  [(set (match_operand:VI 0 "register_operand" "=v")
	(unspec:VI [(match_operand:VI 1 "register_operand" "v")
		    (match_operand:VI 2 "register_operand" "v")]
		   UNSPEC_VADU))]
  "TARGET_P9_VECTOR"
  "vabsdu<wd> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; Vector count trailing zeros
(define_insn "*p9v_ctz<mode>2"
  [(set (match_operand:VI2 0 "register_operand" "=v")
	(ctz:VI2 (match_operand:VI2 1 "register_operand" "v")))]
  "TARGET_P9_VECTOR"
  "vctz<wd> %0,%1"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])

;; Vector population count
(define_insn "*p8v_popcount<mode>2"
  [(set (match_operand:VI2 0 "register_operand" "=v")
	(popcount:VI2 (match_operand:VI2 1 "register_operand" "v")))]
  "TARGET_P8_VECTOR"
  "vpopcnt<wd> %0,%1"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])

;; Vector parity
(define_insn "*p9v_parity<mode>2"
  [(set (match_operand:VParity 0 "register_operand" "=v")
	(parity:VParity (match_operand:VParity 1 "register_operand" "v")))]
  "TARGET_P9_VECTOR"
  "vprtyb<wd> %0,%1"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])

;; Vector Gather Bits by Bytes by Doubleword
(define_insn "p8v_vgbbd"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
	(unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")]
		      UNSPEC_VGBBD))]
  "TARGET_P8_VECTOR"
  "vgbbd %0,%1"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])


;; 128-bit binary integer arithmetic
;; We have a special container type (V1TImode) to allow operations using the
;; ISA 2.07 128-bit binary support to target the VMX/altivec registers without
;; having to worry about the register allocator deciding GPRs are better.

(define_insn "altivec_vadduqm"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(plus:V1TI (match_operand:V1TI 1 "register_operand" "v")
		   (match_operand:V1TI 2 "register_operand" "v")))]
  "TARGET_VADDUQM"
  "vadduqm %0,%1,%2"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])

(define_insn "altivec_vaddcuq"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
		      (match_operand:V1TI 2 "register_operand" "v")]
		     UNSPEC_VADDCUQ))]
  "TARGET_VADDUQM"
  "vaddcuq %0,%1,%2"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])

(define_insn "altivec_vsubuqm"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(minus:V1TI (match_operand:V1TI 1 "register_operand" "v")
		    (match_operand:V1TI 2 "register_operand" "v")))]
  "TARGET_VADDUQM"
  "vsubuqm %0,%1,%2"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])

(define_insn "altivec_vsubcuq"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
		      (match_operand:V1TI 2 "register_operand" "v")]
		     UNSPEC_VSUBCUQ))]
  "TARGET_VADDUQM"
  "vsubcuq %0,%1,%2"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])

(define_insn "altivec_vaddeuqm"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
		      (match_operand:V1TI 2 "register_operand" "v")
		      (match_operand:V1TI 3 "register_operand" "v")]
		     UNSPEC_VADDEUQM))]
  "TARGET_VADDUQM"
  "vaddeuqm %0,%1,%2,%3"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])

(define_insn "altivec_vaddecuq"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
		      (match_operand:V1TI 2 "register_operand" "v")
		      (match_operand:V1TI 3 "register_operand" "v")]
		     UNSPEC_VADDECUQ))]
  "TARGET_VADDUQM"
  "vaddecuq %0,%1,%2,%3"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])

(define_insn "altivec_vsubeuqm"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
		      (match_operand:V1TI 2 "register_operand" "v")
		      (match_operand:V1TI 3 "register_operand" "v")]
		     UNSPEC_VSUBEUQM))]
  "TARGET_VADDUQM"
  "vsubeuqm %0,%1,%2,%3"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])

(define_insn "altivec_vsubecuq"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
		      (match_operand:V1TI 2 "register_operand" "v")
		      (match_operand:V1TI 3 "register_operand" "v")]
		     UNSPEC_VSUBECUQ))]
  "TARGET_VADDUQM"
  "vsubecuq %0,%1,%2,%3"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])

;; We use V2DI as the output type to simplify converting the permute
;; bits into an integer.
(define_insn "altivec_vbpermq"
  [(set (match_operand:V2DI 0 "register_operand" "=v")
	(unspec:V2DI [(match_operand:V16QI 1 "register_operand" "v")
		      (match_operand:V16QI 2 "register_operand" "v")]
		     UNSPEC_VBPERMQ))]
  "TARGET_P8_VECTOR"
  "vbpermq %0,%1,%2"
  [(set_attr "type" "vecperm")])

; One of the vector API interfaces requires returning vector unsigned char.
(define_insn "altivec_vbpermq2"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
	(unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
		       (match_operand:V16QI 2 "register_operand" "v")]
		      UNSPEC_VBPERMQ))]
  "TARGET_P8_VECTOR"
  "vbpermq %0,%1,%2"
  [(set_attr "type" "vecperm")])

(define_insn "altivec_vbpermd"
  [(set (match_operand:V2DI 0 "register_operand" "=v")
	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "v")
		      (match_operand:V16QI 2 "register_operand" "v")]
		     UNSPEC_VBPERMD))]
  "TARGET_P9_VECTOR"
  "vbpermd %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; Support for SAD (sum of absolute differences).

;; Due to saturating semantics, we can't combine the sum-across
;; with the vector accumulate in vsum4ubs.  A vadduwm is needed.
(define_expand "usadv16qi"
  [(use (match_operand:V4SI 0 "register_operand"))
   (use (match_operand:V16QI 1 "register_operand"))
   (use (match_operand:V16QI 2 "register_operand"))
   (use (match_operand:V4SI 3 "register_operand"))]
  "TARGET_P9_VECTOR"
{
  rtx absd = gen_reg_rtx (V16QImode);
  rtx zero = gen_reg_rtx (V4SImode);
  rtx psum = gen_reg_rtx (V4SImode);

  emit_insn (gen_p9_vaduv16qi3 (absd, operands[1], operands[2]));
  emit_insn (gen_altivec_vspltisw (zero, const0_rtx));
  emit_insn (gen_altivec_vsum4ubs (psum, absd, zero));
  emit_insn (gen_addv4si3 (operands[0], psum, operands[3]));
  DONE;
})
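
;; An illustrative expansion of usadv16qi (register names arbitrary):
;;    vabsdub tmp,%1,%2
;;    vspltisw zero,0
;;    vsum4ubs psum,tmp,zero
;;    vadduwm %0,psum,%3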

;; Since vsum4shs is saturating and further performs signed
;; arithmetic, we can't combine the sum-across with the vector
;; accumulate in vsum4shs.  A vadduwm is needed.
(define_expand "usadv8hi"
  [(use (match_operand:V4SI 0 "register_operand"))
   (use (match_operand:V8HI 1 "register_operand"))
   (use (match_operand:V8HI 2 "register_operand"))
   (use (match_operand:V4SI 3 "register_operand"))]
  "TARGET_P9_VECTOR"
{
  rtx absd = gen_reg_rtx (V8HImode);
  rtx zero = gen_reg_rtx (V4SImode);
  rtx psum = gen_reg_rtx (V4SImode);

  emit_insn (gen_p9_vaduv8hi3 (absd, operands[1], operands[2]));
  emit_insn (gen_altivec_vspltisw (zero, const0_rtx));
  emit_insn (gen_altivec_vsum4shs (psum, absd, zero));
  emit_insn (gen_addv4si3 (operands[0], psum, operands[3]));
  DONE;
})

;; Decimal Integer operations
(define_int_iterator UNSPEC_BCD_ADD_SUB [UNSPEC_BCDADD UNSPEC_BCDSUB])

(define_int_attr bcd_add_sub [(UNSPEC_BCDADD "add")
			      (UNSPEC_BCDSUB "sub")])

(define_code_iterator BCD_TEST [eq lt gt unordered])

(define_insn "bcd<bcd_add_sub>"
  [(set (match_operand:V1TI 0 "gpc_reg_operand" "=v")
	(unspec:V1TI [(match_operand:V1TI 1 "gpc_reg_operand" "v")
		      (match_operand:V1TI 2 "gpc_reg_operand" "v")
		      (match_operand:QI 3 "const_0_to_1_operand" "n")]
		     UNSPEC_BCD_ADD_SUB))
   (clobber (reg:CCFP CR6_REGNO))]
  "TARGET_P8_VECTOR"
  "bcd<bcd_add_sub>. %0,%1,%2,%3"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])

;; Use a floating point type (V2DFmode) for the compare to set CR6 so that we
;; can use the unordered test for BCD NaNs and add/subtracts that overflow.  An
;; UNORDERED test on an integer type (like V1TImode) is not defined.  The type
;; probably should be one that can go in the VMX (Altivec) registers, so we
;; can't use DDmode or DFmode.
(define_insn "*bcd<bcd_add_sub>_test"
  [(set (reg:CCFP CR6_REGNO)
	(compare:CCFP
	 (unspec:V2DF [(match_operand:V1TI 1 "register_operand" "v")
		       (match_operand:V1TI 2 "register_operand" "v")
		       (match_operand:QI 3 "const_0_to_1_operand" "i")]
		      UNSPEC_BCD_ADD_SUB)
	 (match_operand:V2DF 4 "zero_constant" "j")))
   (clobber (match_scratch:V1TI 0 "=v"))]
  "TARGET_P8_VECTOR"
  "bcd<bcd_add_sub>. %0,%1,%2,%3"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])

(define_insn "*bcd<bcd_add_sub>_test2"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
		      (match_operand:V1TI 2 "register_operand" "v")
		      (match_operand:QI 3 "const_0_to_1_operand" "i")]
		     UNSPEC_BCD_ADD_SUB))
   (set (reg:CCFP CR6_REGNO)
	(compare:CCFP
	 (unspec:V2DF [(match_dup 1)
		       (match_dup 2)
		       (match_dup 3)]
		      UNSPEC_BCD_ADD_SUB)
	 (match_operand:V2DF 4 "zero_constant" "j")))]
  "TARGET_P8_VECTOR"
  "bcd<bcd_add_sub>. %0,%1,%2,%3"
  [(set_attr "length" "4")
   (set_attr "type" "vecsimple")])

(define_expand "bcd<bcd_add_sub>_<code>"
  [(parallel [(set (reg:CCFP CR6_REGNO)
		   (compare:CCFP
		    (unspec:V2DF [(match_operand:V1TI 1 "register_operand")
				  (match_operand:V1TI 2 "register_operand")
				  (match_operand:QI 3 "const_0_to_1_operand")]
				 UNSPEC_BCD_ADD_SUB)
		    (match_dup 4)))
	      (clobber (match_scratch:V1TI 5))])
   (set (match_operand:SI 0 "register_operand")
	(BCD_TEST:SI (reg:CCFP CR6_REGNO)
		     (const_int 0)))]
  "TARGET_P8_VECTOR"
{
  operands[4] = CONST0_RTX (V2DFmode);
})

;; Peephole2 pattern to combine a bcdadd/bcdsub that calculates the value and
;; the bcdadd/bcdsub that tests the value.  The combiner won't work since
;; CR6 is a hard-coded register.  Unfortunately, all of the AltiVec predicate
;; support is hard-coded to use the fixed register CR6 instead of creating
;; a register class for CR6.

(define_peephole2
  [(parallel [(set (match_operand:V1TI 0 "register_operand")
		   (unspec:V1TI [(match_operand:V1TI 1 "register_operand")
				 (match_operand:V1TI 2 "register_operand")
				 (match_operand:QI 3 "const_0_to_1_operand")]
				UNSPEC_BCD_ADD_SUB))
	      (clobber (reg:CCFP CR6_REGNO))])
   (parallel [(set (reg:CCFP CR6_REGNO)
		   (compare:CCFP
		    (unspec:V2DF [(match_dup 1)
				  (match_dup 2)
				  (match_dup 3)]
				 UNSPEC_BCD_ADD_SUB)
		    (match_operand:V2DF 4 "zero_constant")))
	      (clobber (match_operand:V1TI 5 "register_operand"))])]
  "TARGET_P8_VECTOR"
  [(parallel [(set (match_dup 0)
		   (unspec:V1TI [(match_dup 1)
				 (match_dup 2)
				 (match_dup 3)]
				UNSPEC_BCD_ADD_SUB))
	      (set (reg:CCFP CR6_REGNO)
		   (compare:CCFP
		    (unspec:V2DF [(match_dup 1)
				  (match_dup 2)
				  (match_dup 3)]
				 UNSPEC_BCD_ADD_SUB)
		    (match_dup 4)))])])

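;; Illustratively, the peephole turns the two-instruction sequence
;;    bcdadd. %0,%1,%2,%3	; compute the value; CR6 clobbered
;;    bcdadd. SCRATCH,%1,%2,%3	; recompute only to set CR6
;; into a single
;;    bcdadd. %0,%1,%2,%3
;; that produces both the value and the CR6 test.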