1 /* Subroutines for the C front end on the PowerPC architecture.
2    Copyright (C) 2002-2021 Free Software Foundation, Inc.
3 
4    Contributed by Zack Weinberg <zack@codesourcery.com>
5    and Paolo Bonzini <bonzini@gnu.org>
6 
7    This file is part of GCC.
8 
9    GCC is free software; you can redistribute it and/or modify it
10    under the terms of the GNU General Public License as published
11    by the Free Software Foundation; either version 3, or (at your
12    option) any later version.
13 
14    GCC is distributed in the hope that it will be useful, but WITHOUT
15    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
16    or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
17    License for more details.
18 
19    You should have received a copy of the GNU General Public License
20    along with GCC; see the file COPYING3.  If not see
21    <http://www.gnu.org/licenses/>.  */
22 
23 #define IN_TARGET_CODE 1
24 
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "target.h"
29 #include "c-family/c-common.h"
30 #include "memmodel.h"
31 #include "tm_p.h"
32 #include "stringpool.h"
33 #include "stor-layout.h"
34 #include "c-family/c-pragma.h"
35 #include "langhooks.h"
36 #include "c/c-tree.h"
37 
38 #include "rs6000-internal.h"
39 
40 static tree altivec_resolve_new_overloaded_builtin (location_t, tree, void *);
41 
42 
43 /* Handle the machine specific pragma longcall.  Its syntax is
44 
45    # pragma longcall ( TOGGLE )
46 
47    where TOGGLE is either 0 or 1.
48 
49    rs6000_default_long_calls is set to the value of TOGGLE, changing
50    whether or not new function declarations receive a longcall
51    attribute by default.  */
52 
53 void
rs6000_pragma_longcall(cpp_reader * pfile ATTRIBUTE_UNUSED)54 rs6000_pragma_longcall (cpp_reader *pfile ATTRIBUTE_UNUSED)
55 {
56 #define SYNTAX_ERROR(gmsgid) do {					\
57   warning (OPT_Wpragmas, gmsgid);					\
58   warning (OPT_Wpragmas, "ignoring malformed %<#pragma longcall%>");	\
59   return;								\
60 } while (0)
61 
62 
63 
64   tree x, n;
65 
66   /* If we get here, generic code has already scanned the directive
67      leader and the word "longcall".  */
68 
69   if (pragma_lex (&x) != CPP_OPEN_PAREN)
70     SYNTAX_ERROR ("missing open paren");
71   if (pragma_lex (&n) != CPP_NUMBER)
72     SYNTAX_ERROR ("missing number");
73   if (pragma_lex (&x) != CPP_CLOSE_PAREN)
74     SYNTAX_ERROR ("missing close paren");
75 
76   if (n != integer_zero_node && n != integer_one_node)
77     SYNTAX_ERROR ("number must be 0 or 1");
78 
79   if (pragma_lex (&x) != CPP_EOF)
80     warning (OPT_Wpragmas, "junk at end of %<#pragma longcall%>");
81 
82   rs6000_default_long_calls = (n == integer_one_node);
83 }
84 
85 /* Handle defining many CPP flags based on TARGET_xxx.  As a general
86    policy, rather than trying to guess what flags a user might want a
87    #define for, it's better to define a flag for everything.  */
88 
89 #define builtin_define(TXT) cpp_define (pfile, TXT)
90 #define builtin_assert(TXT) cpp_assert (pfile, TXT)
91 
92 /* Keep the AltiVec keywords handy for fast comparisons.  */
93 static GTY(()) tree __vector_keyword;
94 static GTY(()) tree vector_keyword;
95 static GTY(()) tree __pixel_keyword;
96 static GTY(()) tree pixel_keyword;
97 static GTY(()) tree __bool_keyword;
98 static GTY(()) tree bool_keyword;
99 static GTY(()) tree _Bool_keyword;
100 static GTY(()) tree __int128_type;
101 static GTY(()) tree __uint128_type;
102 
103 /* Preserved across calls.  */
104 static tree expand_bool_pixel;
105 
106 static cpp_hashnode *
altivec_categorize_keyword(const cpp_token * tok)107 altivec_categorize_keyword (const cpp_token *tok)
108 {
109   if (tok->type == CPP_NAME)
110     {
111       cpp_hashnode *ident = tok->val.node.node;
112 
113       if (ident == C_CPP_HASHNODE (vector_keyword))
114 	return C_CPP_HASHNODE (__vector_keyword);
115 
116       if (ident == C_CPP_HASHNODE (pixel_keyword))
117 	return C_CPP_HASHNODE (__pixel_keyword);
118 
119       if (ident == C_CPP_HASHNODE (bool_keyword))
120 	return C_CPP_HASHNODE (__bool_keyword);
121 
122       if (ident == C_CPP_HASHNODE (_Bool_keyword))
123 	return C_CPP_HASHNODE (__bool_keyword);
124 
125       return ident;
126     }
127 
128   return 0;
129 }
130 
131 static void
init_vector_keywords(void)132 init_vector_keywords (void)
133 {
134   /* Keywords without two leading underscores are context-sensitive, and hence
135      implemented as conditional macros, controlled by the
136      rs6000_macro_to_expand() function below.  If we have ISA 2.07 64-bit
137      support, record the __int128_t and __uint128_t types.  */
138 
139   __vector_keyword = get_identifier ("__vector");
140   C_CPP_HASHNODE (__vector_keyword)->flags |= NODE_CONDITIONAL;
141 
142   __pixel_keyword = get_identifier ("__pixel");
143   C_CPP_HASHNODE (__pixel_keyword)->flags |= NODE_CONDITIONAL;
144 
145   __bool_keyword = get_identifier ("__bool");
146   C_CPP_HASHNODE (__bool_keyword)->flags |= NODE_CONDITIONAL;
147 
148   vector_keyword = get_identifier ("vector");
149   C_CPP_HASHNODE (vector_keyword)->flags |= NODE_CONDITIONAL;
150 
151   pixel_keyword = get_identifier ("pixel");
152   C_CPP_HASHNODE (pixel_keyword)->flags |= NODE_CONDITIONAL;
153 
154   bool_keyword = get_identifier ("bool");
155   C_CPP_HASHNODE (bool_keyword)->flags |= NODE_CONDITIONAL;
156 
157   _Bool_keyword = get_identifier ("_Bool");
158   C_CPP_HASHNODE (_Bool_keyword)->flags |= NODE_CONDITIONAL;
159 
160   if (TARGET_VADDUQM)
161     {
162       __int128_type = get_identifier ("__int128_t");
163       __uint128_type = get_identifier ("__uint128_t");
164     }
165 }
166 
167 /* Helper function to find out which RID_INT_N_* code is the one for
168    __int128, if any.  Returns RID_MAX+1 if none apply, which is safe
169    (for our purposes, since we always expect to have __int128) to
170    compare against.  */
171 static int
rid_int128(void)172 rid_int128(void)
173 {
174   int i;
175 
176   for (i = 0; i < NUM_INT_N_ENTS; i ++)
177     if (int_n_enabled_p[i]
178 	&& int_n_data[i].bitsize == 128)
179       return RID_INT_N_0 + i;
180 
181   return RID_MAX + 1;
182 }
183 
184 /* Called to decide whether a conditional macro should be expanded.
185    Since we have exactly one such macro (i.e, 'vector'), we do not
186    need to examine the 'tok' parameter.  */
187 
188 static cpp_hashnode *
rs6000_macro_to_expand(cpp_reader * pfile,const cpp_token * tok)189 rs6000_macro_to_expand (cpp_reader *pfile, const cpp_token *tok)
190 {
191   cpp_hashnode *expand_this = tok->val.node.node;
192   cpp_hashnode *ident;
193 
194   /* If the current machine does not have altivec, don't look for the
195      keywords.  */
196   if (!TARGET_ALTIVEC)
197     return NULL;
198 
199   ident = altivec_categorize_keyword (tok);
200 
201   if (ident != expand_this)
202     expand_this = NULL;
203 
204   if (ident == C_CPP_HASHNODE (__vector_keyword))
205     {
206       int idx = 0;
207       do
208 	tok = cpp_peek_token (pfile, idx++);
209       while (tok->type == CPP_PADDING);
210       ident = altivec_categorize_keyword (tok);
211 
212       if (ident == C_CPP_HASHNODE (__pixel_keyword))
213 	{
214 	  expand_this = C_CPP_HASHNODE (__vector_keyword);
215 	  expand_bool_pixel = __pixel_keyword;
216 	}
217       else if (ident == C_CPP_HASHNODE (__bool_keyword))
218 	{
219 	  expand_this = C_CPP_HASHNODE (__vector_keyword);
220 	  expand_bool_pixel = __bool_keyword;
221 	}
222       /* The boost libraries have code with Iterator::vector vector in it.  If
223 	 we allow the normal handling, this module will be called recursively,
224 	 and the vector will be skipped.; */
225       else if (ident && (ident != C_CPP_HASHNODE (__vector_keyword)))
226 	{
227 	  enum rid rid_code = (enum rid)(ident->rid_code);
228 	  bool is_macro = cpp_macro_p (ident);
229 
230 	  /* If there is a function-like macro, check if it is going to be
231 	     invoked with or without arguments.  Without following ( treat
232 	     it like non-macro, otherwise the following cpp_get_token eats
233 	     what should be preserved.  */
234 	  if (is_macro && cpp_fun_like_macro_p (ident))
235 	    {
236 	      int idx2 = idx;
237 	      do
238 		tok = cpp_peek_token (pfile, idx2++);
239 	      while (tok->type == CPP_PADDING);
240 	      if (tok->type != CPP_OPEN_PAREN)
241 		is_macro = false;
242 	    }
243 
244 	  if (is_macro)
245 	    {
246 	      do
247 		(void) cpp_get_token (pfile);
248 	      while (--idx > 0);
249 	      do
250 		tok = cpp_peek_token (pfile, idx++);
251 	      while (tok->type == CPP_PADDING);
252 	      ident = altivec_categorize_keyword (tok);
253 	      if (ident == C_CPP_HASHNODE (__pixel_keyword))
254 		{
255 		  expand_this = C_CPP_HASHNODE (__vector_keyword);
256 		  expand_bool_pixel = __pixel_keyword;
257 		  rid_code = RID_MAX;
258 		}
259 	      else if (ident == C_CPP_HASHNODE (__bool_keyword))
260 		{
261 		  expand_this = C_CPP_HASHNODE (__vector_keyword);
262 		  expand_bool_pixel = __bool_keyword;
263 		  rid_code = RID_MAX;
264 		}
265 	      else if (ident)
266 		rid_code = (enum rid)(ident->rid_code);
267 	    }
268 
269 	  if (rid_code == RID_UNSIGNED || rid_code == RID_LONG
270 	      || rid_code == RID_SHORT || rid_code == RID_SIGNED
271 	      || rid_code == RID_INT || rid_code == RID_CHAR
272 	      || rid_code == RID_FLOAT
273 	      || (rid_code == RID_DOUBLE && TARGET_VSX)
274 	      || (rid_code == rid_int128 () && TARGET_VADDUQM))
275 	    {
276 	      expand_this = C_CPP_HASHNODE (__vector_keyword);
277 	      /* If the next keyword is bool or pixel, it
278 		 will need to be expanded as well.  */
279 	      do
280 		tok = cpp_peek_token (pfile, idx++);
281 	      while (tok->type == CPP_PADDING);
282 	      ident = altivec_categorize_keyword (tok);
283 
284 	      if (ident == C_CPP_HASHNODE (__pixel_keyword))
285 		expand_bool_pixel = __pixel_keyword;
286 	      else if (ident == C_CPP_HASHNODE (__bool_keyword))
287 		expand_bool_pixel = __bool_keyword;
288 	      else
289 		{
290 		  /* Try two tokens down, too.  */
291 		  do
292 		    tok = cpp_peek_token (pfile, idx++);
293 		  while (tok->type == CPP_PADDING);
294 		  ident = altivec_categorize_keyword (tok);
295 		  if (ident == C_CPP_HASHNODE (__pixel_keyword))
296 		    expand_bool_pixel = __pixel_keyword;
297 		  else if (ident == C_CPP_HASHNODE (__bool_keyword))
298 		    expand_bool_pixel = __bool_keyword;
299 		}
300 	    }
301 
302 	  /* Support vector __int128_t, but we don't need to worry about bool
303 	     or pixel on this type.  */
304 	  else if (TARGET_VADDUQM
305 		   && (ident == C_CPP_HASHNODE (__int128_type)
306 		       || ident == C_CPP_HASHNODE (__uint128_type)))
307 	    expand_this = C_CPP_HASHNODE (__vector_keyword);
308 	}
309     }
310   else if (expand_bool_pixel && ident == C_CPP_HASHNODE (__pixel_keyword))
311     {
312       expand_this = C_CPP_HASHNODE (__pixel_keyword);
313       expand_bool_pixel = 0;
314     }
315   else if (expand_bool_pixel && ident == C_CPP_HASHNODE (__bool_keyword))
316     {
317       expand_this = C_CPP_HASHNODE (__bool_keyword);
318       expand_bool_pixel = 0;
319     }
320 
321   return expand_this;
322 }
323 
324 
325 /* Define or undefine a single macro.  */
326 
327 static void
rs6000_define_or_undefine_macro(bool define_p,const char * name)328 rs6000_define_or_undefine_macro (bool define_p, const char *name)
329 {
330   if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
331     fprintf (stderr, "#%s %s\n", (define_p) ? "define" : "undef", name);
332 
333   if (define_p)
334     cpp_define (parse_in, name);
335   else
336     cpp_undef (parse_in, name);
337 }
338 
339 /* Define or undefine macros based on the current target.  If the user does
340    #pragma GCC target, we need to adjust the macros dynamically.  Note, some of
341    the options needed for builtins have been moved to separate variables, so
342    have both the target flags and the builtin flags as arguments.  */
343 
344 void
rs6000_target_modify_macros(bool define_p,HOST_WIDE_INT flags,HOST_WIDE_INT bu_mask)345 rs6000_target_modify_macros (bool define_p, HOST_WIDE_INT flags,
346 			     HOST_WIDE_INT bu_mask)
347 {
348   if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
349     fprintf (stderr,
350 	     "rs6000_target_modify_macros (%s, " HOST_WIDE_INT_PRINT_HEX
351 	     ", " HOST_WIDE_INT_PRINT_HEX ")\n",
352 	     (define_p) ? "define" : "undef",
353 	     flags, bu_mask);
354 
355   /* Each of the flags mentioned below controls whether certain
356      preprocessor macros will be automatically defined when
357      preprocessing source files for compilation by this compiler.
358      While most of these flags can be enabled or disabled
359      explicitly by specifying certain command-line options when
360      invoking the compiler, there are also many ways in which these
361      flags are enabled or disabled implicitly, based on compiler
362      defaults, configuration choices, and on the presence of certain
363      related command-line options.  Many, but not all, of these
364      implicit behaviors can be found in file "rs6000.c", the
365      rs6000_option_override_internal() function.
366 
367      In general, each of the flags may be automatically enabled in
368      any of the following conditions:
369 
370      1. If no -mcpu target is specified on the command line and no
371 	--with-cpu target is specified to the configure command line
372 	and the TARGET_DEFAULT macro for this default cpu host
373 	includes the flag, and the flag has not been explicitly disabled
374 	by command-line options.
375 
376      2. If the target specified with -mcpu=target on the command line, or
377 	in the absence of a -mcpu=target command-line option, if the
378 	target specified using --with-cpu=target on the configure
379 	command line, is disqualified because the associated binary
380 	tools (e.g. the assembler) lack support for the requested cpu,
381 	and the TARGET_DEFAULT macro for this default cpu host
382 	includes the flag, and the flag has not been explicitly disabled
383 	by command-line options.
384 
385      3. If either of the above two conditions apply except that the
386 	TARGET_DEFAULT macro is defined to equal zero, and
387 	TARGET_POWERPC64 and
388 	a) BYTES_BIG_ENDIAN and the flag to be enabled is either
389 	   MASK_PPC_GFXOPT or MASK_POWERPC64 (flags for "powerpc64"
390 	   target), or
391 	b) !BYTES_BIG_ENDIAN and the flag to be enabled is either
392 	   MASK_POWERPC64 or it is one of the flags included in
393 	   ISA_2_7_MASKS_SERVER (flags for "powerpc64le" target).
394 
395      4. If a cpu has been requested with a -mcpu=target command-line option
396 	and this cpu has not been disqualified due to shortcomings of the
397 	binary tools, and the set of flags associated with the requested cpu
398 	include the flag to be enabled.  See rs6000-cpus.def for macro
399 	definitions that represent various ABI standards
400 	(e.g. ISA_2_1_MASKS, ISA_3_0_MASKS_SERVER) and for a list of
401 	the specific flags that are associated with each of the cpu
402 	choices that can be specified as the target of a -mcpu=target
403 	compile option, or as the target of a --with-cpu=target
404 	configure option.  Target flags that are specified in either
405 	of these two ways are considered "implicit" since the flags
406 	are not mentioned specifically by name.
407 
408 	Additional documentation describing behavior specific to
409 	particular flags is provided below, immediately preceding the
410 	use of each relevant flag.
411 
412      5. If there is no -mcpu=target command-line option, and the cpu
413 	requested by a --with-cpu=target command-line option has not
414 	been disqualified due to shortcomings of the binary tools, and
415 	the set of flags associated with the specified target include
416 	the flag to be enabled.  See the notes immediately above for a
417 	summary of the flags associated with particular cpu
418 	definitions.  */
419 
420   /* rs6000_isa_flags based options.  */
421   rs6000_define_or_undefine_macro (define_p, "_ARCH_PPC");
422   if ((flags & OPTION_MASK_PPC_GPOPT) != 0)
423     rs6000_define_or_undefine_macro (define_p, "_ARCH_PPCSQ");
424   if ((flags & OPTION_MASK_PPC_GFXOPT) != 0)
425     rs6000_define_or_undefine_macro (define_p, "_ARCH_PPCGR");
426   if ((flags & OPTION_MASK_POWERPC64) != 0)
427     rs6000_define_or_undefine_macro (define_p, "_ARCH_PPC64");
428   if ((flags & OPTION_MASK_MFCRF) != 0)
429     rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR4");
430   if ((flags & OPTION_MASK_POPCNTB) != 0)
431     rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR5");
432   if ((flags & OPTION_MASK_FPRND) != 0)
433     rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR5X");
434   if ((flags & OPTION_MASK_CMPB) != 0)
435     rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR6");
436   if ((flags & OPTION_MASK_POPCNTD) != 0)
437     rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR7");
438   /* Note that the OPTION_MASK_DIRECT_MOVE flag is automatically
439      turned on in the following condition:
440      1. TARGET_P8_VECTOR is enabled and OPTION_MASK_DIRECT_MOVE is not
441         explicitly disabled.
442         Hereafter, the OPTION_MASK_DIRECT_MOVE flag is considered to
443         have been turned on explicitly.
444      Note that the OPTION_MASK_DIRECT_MOVE flag is automatically
445      turned off in any of the following conditions:
446      1. TARGET_HARD_FLOAT, TARGET_ALTIVEC, or TARGET_VSX is explicitly
447 	disabled and OPTION_MASK_DIRECT_MOVE was not explicitly
448 	enabled.
449      2. TARGET_VSX is off.  */
450   if ((flags & OPTION_MASK_DIRECT_MOVE) != 0)
451     rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR8");
452   if ((flags & OPTION_MASK_MODULO) != 0)
453     rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR9");
454   if ((flags & OPTION_MASK_POWER10) != 0)
455     rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR10");
456   if ((flags & OPTION_MASK_SOFT_FLOAT) != 0)
457     rs6000_define_or_undefine_macro (define_p, "_SOFT_FLOAT");
458   if ((flags & OPTION_MASK_RECIP_PRECISION) != 0)
459     rs6000_define_or_undefine_macro (define_p, "__RECIP_PRECISION__");
460   /* Note that the OPTION_MASK_ALTIVEC flag is automatically turned on
461      in any of the following conditions:
462      1. The operating system is Darwin and it is configured for 64
463 	bit.  (See darwin_rs6000_override_options.)
464      2. The operating system is Darwin and the operating system
465 	version is 10.5 or higher and the user has not explicitly
466 	disabled ALTIVEC by specifying -mcpu=G3 or -mno-altivec and
467 	the compiler is not producing code for integration within the
468 	kernel.  (See darwin_rs6000_override_options.)
469      Note that the OPTION_MASK_ALTIVEC flag is automatically turned
470      off in any of the following conditions:
471      1. The operating system does not support saving of AltiVec
472 	registers (OS_MISSING_ALTIVEC).
473      2. If an inner context (as introduced by
474 	__attribute__((__target__())) or #pragma GCC target()
475 	requests a target that normally enables the
476 	OPTION_MASK_ALTIVEC flag but the outer-most "main target"
477 	does not support the rs6000_altivec_abi, this flag is
478 	turned off for the inner context unless OPTION_MASK_ALTIVEC
479 	was explicitly enabled for the inner context.  */
480   if ((flags & OPTION_MASK_ALTIVEC) != 0)
481     {
482       const char *vec_str = (define_p) ? "__VEC__=10206" : "__VEC__";
483       rs6000_define_or_undefine_macro (define_p, "__ALTIVEC__");
484       rs6000_define_or_undefine_macro (define_p, vec_str);
485 
486 	  /* Define this when supporting context-sensitive keywords.  */
487       if (!flag_iso)
488 	rs6000_define_or_undefine_macro (define_p, "__APPLE_ALTIVEC__");
489       if (rs6000_aix_extabi)
490 	rs6000_define_or_undefine_macro (define_p, "__EXTABI__");
491     }
492   /* Note that the OPTION_MASK_VSX flag is automatically turned on in
493      the following conditions:
494      1. TARGET_P8_VECTOR is explicitly turned on and the OPTION_MASK_VSX
495         was not explicitly turned off.  Hereafter, the OPTION_MASK_VSX
496         flag is considered to have been explicitly turned on.
497      Note that the OPTION_MASK_VSX flag is automatically turned off in
498      the following conditions:
499      1. The operating system does not support saving of AltiVec
500 	registers (OS_MISSING_ALTIVEC).
501      2. If the option TARGET_HARD_FLOAT is turned off.  Hereafter, the
502 	OPTION_MASK_VSX flag is considered to have been turned off
503 	explicitly.
504      3. If TARGET_AVOID_XFORM is turned on explicitly at the outermost
505 	compilation context, or if it is turned on by any means in an
506 	inner compilation context.  Hereafter, the OPTION_MASK_VSX
507 	flag is considered to have been turned off explicitly.
508      4. If TARGET_ALTIVEC was explicitly disabled.  Hereafter, the
509 	OPTION_MASK_VSX flag is considered to have been turned off
510 	explicitly.
511      5. If an inner context (as introduced by
512 	__attribute__((__target__())) or #pragma GCC target()
513 	requests a target that normally enables the
514 	OPTION_MASK_VSX flag but the outer-most "main target"
515 	does not support the rs6000_altivec_abi, this flag is
516 	turned off for the inner context unless OPTION_MASK_VSX
517 	was explicitly enabled for the inner context.  */
518   if ((flags & OPTION_MASK_VSX) != 0)
519     rs6000_define_or_undefine_macro (define_p, "__VSX__");
520   if ((flags & OPTION_MASK_HTM) != 0)
521     {
522       rs6000_define_or_undefine_macro (define_p, "__HTM__");
523       /* Tell the user that our HTM insn patterns act as memory barriers.  */
524       rs6000_define_or_undefine_macro (define_p, "__TM_FENCE__");
525     }
526   /* Note that the OPTION_MASK_P8_VECTOR flag is automatically turned
527      on in the following conditions:
528      1. TARGET_P9_VECTOR is explicitly turned on and
529         OPTION_MASK_P8_VECTOR is not explicitly turned off.
530         Hereafter, the OPTION_MASK_P8_VECTOR flag is considered to
531         have been turned off explicitly.
532      Note that the OPTION_MASK_P8_VECTOR flag is automatically turned
533      off in the following conditions:
534      1. If any of TARGET_HARD_FLOAT, TARGET_ALTIVEC, or TARGET_VSX
535 	were turned off explicitly and OPTION_MASK_P8_VECTOR flag was
536 	not turned on explicitly.
537      2. If TARGET_ALTIVEC is turned off.  Hereafter, the
538 	OPTION_MASK_P8_VECTOR flag is considered to have been turned off
539 	explicitly.
540      3. If TARGET_VSX is turned off and OPTION_MASK_P8_VECTOR was not
541         explicitly enabled.  If TARGET_VSX is explicitly enabled, the
542         OPTION_MASK_P8_VECTOR flag is hereafter also considered to
543 	have been turned off explicitly.  */
544   if ((flags & OPTION_MASK_P8_VECTOR) != 0)
545     rs6000_define_or_undefine_macro (define_p, "__POWER8_VECTOR__");
546   /* Note that the OPTION_MASK_P9_VECTOR flag is automatically turned
547      off in the following conditions:
548      1. If TARGET_P8_VECTOR is turned off and OPTION_MASK_P9_VECTOR is
549         not turned on explicitly. Hereafter, if OPTION_MASK_P8_VECTOR
550         was turned on explicitly, the OPTION_MASK_P9_VECTOR flag is
551         also considered to have been turned off explicitly.
552      Note that the OPTION_MASK_P9_VECTOR is automatically turned on
553      in the following conditions:
554      1. If TARGET_P9_MINMAX was turned on explicitly.
555         Hereafter, THE OPTION_MASK_P9_VECTOR flag is considered to
556         have been turned on explicitly.  */
557   if ((flags & OPTION_MASK_P9_VECTOR) != 0)
558     rs6000_define_or_undefine_macro (define_p, "__POWER9_VECTOR__");
559   /* Note that the OPTION_MASK_QUAD_MEMORY flag is automatically
560      turned off in the following conditions:
561      1. If TARGET_POWERPC64 is turned off.
562      2. If WORDS_BIG_ENDIAN is false (non-atomic quad memory
563 	load/store are disabled on little endian).  */
564   if ((flags & OPTION_MASK_QUAD_MEMORY) != 0)
565     rs6000_define_or_undefine_macro (define_p, "__QUAD_MEMORY__");
566   /* Note that the OPTION_MASK_QUAD_MEMORY_ATOMIC flag is automatically
567      turned off in the following conditions:
568      1. If TARGET_POWERPC64 is turned off.
569      Note that the OPTION_MASK_QUAD_MEMORY_ATOMIC flag is
570      automatically turned on in the following conditions:
571      1. If TARGET_QUAD_MEMORY and this flag was not explicitly
572 	disabled.  */
573   if ((flags & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
574     rs6000_define_or_undefine_macro (define_p, "__QUAD_MEMORY_ATOMIC__");
575   /* Note that the OPTION_MASK_CRYPTO flag is automatically turned off
576      in the following conditions:
577      1. If any of TARGET_HARD_FLOAT or TARGET_ALTIVEC or TARGET_VSX
578 	are turned off explicitly and OPTION_MASK_CRYPTO is not turned
579 	on explicitly.
580      2. If TARGET_ALTIVEC is turned off.  */
581   if ((flags & OPTION_MASK_CRYPTO) != 0)
582     rs6000_define_or_undefine_macro (define_p, "__CRYPTO__");
583   if ((flags & OPTION_MASK_FLOAT128_KEYWORD) != 0)
584     {
585       rs6000_define_or_undefine_macro (define_p, "__FLOAT128__");
586       if (define_p)
587 	rs6000_define_or_undefine_macro (true, "__float128=__ieee128");
588       else
589 	rs6000_define_or_undefine_macro (false, "__float128");
590     }
591   /* OPTION_MASK_FLOAT128_HARDWARE can be turned on if -mcpu=power9 is used or
592      via the target attribute/pragma.  */
593   if ((flags & OPTION_MASK_FLOAT128_HW) != 0)
594     rs6000_define_or_undefine_macro (define_p, "__FLOAT128_HARDWARE__");
595 
596   /* options from the builtin masks.  */
597   /* Note that RS6000_BTM_CELL is enabled only if (rs6000_cpu ==
598      PROCESSOR_CELL) (e.g. -mcpu=cell).  */
599   if ((bu_mask & RS6000_BTM_CELL) != 0)
600     rs6000_define_or_undefine_macro (define_p, "__PPU__");
601 
602   /* Tell the user if we support the MMA instructions.  */
603   if ((flags & OPTION_MASK_MMA) != 0)
604     rs6000_define_or_undefine_macro (define_p, "__MMA__");
605   /* Whether pc-relative code is being generated.  */
606   if ((flags & OPTION_MASK_PCREL) != 0)
607     rs6000_define_or_undefine_macro (define_p, "__PCREL__");
608   /* Tell the user -mrop-protect is in play.  */
609   if (rs6000_rop_protect)
610     rs6000_define_or_undefine_macro (define_p, "__ROP_PROTECT__");
611 }
612 
613 void
rs6000_cpu_cpp_builtins(cpp_reader * pfile)614 rs6000_cpu_cpp_builtins (cpp_reader *pfile)
615 {
616   /* Define all of the common macros.  */
617   rs6000_target_modify_macros (true, rs6000_isa_flags,
618 			       rs6000_builtin_mask_calculate ());
619 
620   if (TARGET_FRE)
621     builtin_define ("__RECIP__");
622   if (TARGET_FRES)
623     builtin_define ("__RECIPF__");
624   if (TARGET_FRSQRTE)
625     builtin_define ("__RSQRTE__");
626   if (TARGET_FRSQRTES)
627     builtin_define ("__RSQRTEF__");
628   if (TARGET_FLOAT128_TYPE)
629     builtin_define ("__FLOAT128_TYPE__");
630 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
631   builtin_define ("__BUILTIN_CPU_SUPPORTS__");
632 #endif
633 
634   if (TARGET_EXTRA_BUILTINS && cpp_get_options (pfile)->lang != CLK_ASM)
635     {
636       /* Define the AltiVec syntactic elements.  */
637       builtin_define ("__vector=__attribute__((altivec(vector__)))");
638       builtin_define ("__pixel=__attribute__((altivec(pixel__))) unsigned short");
639       builtin_define ("__bool=__attribute__((altivec(bool__))) unsigned");
640 
641       if (!flag_iso)
642 	{
643 	  builtin_define ("vector=vector");
644 	  builtin_define ("pixel=pixel");
645 	  builtin_define ("bool=bool");
646 	  builtin_define ("_Bool=_Bool");
647 	  init_vector_keywords ();
648 
649 	  /* Enable context-sensitive macros.  */
650 	  cpp_get_callbacks (pfile)->macro_to_expand = rs6000_macro_to_expand;
651 	}
652     }
653   if (!TARGET_HARD_FLOAT)
654     builtin_define ("_SOFT_DOUBLE");
655   /* Used by lwarx/stwcx. errata work-around.  */
656   if (rs6000_cpu == PROCESSOR_PPC405)
657     builtin_define ("__PPC405__");
658   /* Used by libstdc++.  */
659   if (TARGET_NO_LWSYNC)
660     builtin_define ("__NO_LWSYNC__");
661 
662   if (TARGET_EXTRA_BUILTINS)
663     {
664       /* For the VSX builtin functions identical to Altivec functions, just map
665 	 the altivec builtin into the vsx version (the altivec functions
666 	 generate VSX code if -mvsx).  */
667       builtin_define ("__builtin_vsx_xxland=__builtin_vec_and");
668       builtin_define ("__builtin_vsx_xxlandc=__builtin_vec_andc");
669       builtin_define ("__builtin_vsx_xxlnor=__builtin_vec_nor");
670       builtin_define ("__builtin_vsx_xxlor=__builtin_vec_or");
671       builtin_define ("__builtin_vsx_xxlxor=__builtin_vec_xor");
672       builtin_define ("__builtin_vsx_xxsel=__builtin_vec_sel");
673       builtin_define ("__builtin_vsx_vperm=__builtin_vec_perm");
674 
675       /* Also map the a and m versions of the multiply/add instructions to the
676 	 builtin for people blindly going off the instruction manual.  */
677       builtin_define ("__builtin_vsx_xvmaddadp=__builtin_vsx_xvmadddp");
678       builtin_define ("__builtin_vsx_xvmaddmdp=__builtin_vsx_xvmadddp");
679       builtin_define ("__builtin_vsx_xvmaddasp=__builtin_vsx_xvmaddsp");
680       builtin_define ("__builtin_vsx_xvmaddmsp=__builtin_vsx_xvmaddsp");
681       builtin_define ("__builtin_vsx_xvmsubadp=__builtin_vsx_xvmsubdp");
682       builtin_define ("__builtin_vsx_xvmsubmdp=__builtin_vsx_xvmsubdp");
683       builtin_define ("__builtin_vsx_xvmsubasp=__builtin_vsx_xvmsubsp");
684       builtin_define ("__builtin_vsx_xvmsubmsp=__builtin_vsx_xvmsubsp");
685       builtin_define ("__builtin_vsx_xvnmaddadp=__builtin_vsx_xvnmadddp");
686       builtin_define ("__builtin_vsx_xvnmaddmdp=__builtin_vsx_xvnmadddp");
687       builtin_define ("__builtin_vsx_xvnmaddasp=__builtin_vsx_xvnmaddsp");
688       builtin_define ("__builtin_vsx_xvnmaddmsp=__builtin_vsx_xvnmaddsp");
689       builtin_define ("__builtin_vsx_xvnmsubadp=__builtin_vsx_xvnmsubdp");
690       builtin_define ("__builtin_vsx_xvnmsubmdp=__builtin_vsx_xvnmsubdp");
691       builtin_define ("__builtin_vsx_xvnmsubasp=__builtin_vsx_xvnmsubsp");
692       builtin_define ("__builtin_vsx_xvnmsubmsp=__builtin_vsx_xvnmsubsp");
693     }
694 
695   /* Map the old _Float128 'q' builtins into the new 'f128' builtins.  */
696   if (TARGET_FLOAT128_TYPE)
697     {
698       builtin_define ("__builtin_fabsq=__builtin_fabsf128");
699       builtin_define ("__builtin_copysignq=__builtin_copysignf128");
700       builtin_define ("__builtin_nanq=__builtin_nanf128");
701       builtin_define ("__builtin_nansq=__builtin_nansf128");
702       builtin_define ("__builtin_infq=__builtin_inff128");
703       builtin_define ("__builtin_huge_valq=__builtin_huge_valf128");
704     }
705 
706   /* Tell users they can use __builtin_bswap{16,64}.  */
707   builtin_define ("__HAVE_BSWAP__");
708 
709   /* May be overridden by target configuration.  */
710   RS6000_CPU_CPP_ENDIAN_BUILTINS();
711 
712   if (TARGET_LONG_DOUBLE_128)
713     {
714       builtin_define ("__LONG_DOUBLE_128__");
715       builtin_define ("__LONGDOUBLE128");
716 
717       if (TARGET_IEEEQUAD)
718 	{
719 	  /* Older versions of GLIBC used __attribute__((__KC__)) to create the
720 	     IEEE 128-bit floating point complex type for C++ (which does not
721 	     support _Float128 _Complex).  If the default for long double is
722 	     IEEE 128-bit mode, the library would need to use
723 	     __attribute__((__TC__)) instead.  Defining __KF__ and __KC__
724 	     is a stop-gap to build with the older libraries, until we
725 	     get an updated library.  */
726 	  builtin_define ("__LONG_DOUBLE_IEEE128__");
727 	  builtin_define ("__KF__=__TF__");
728 	  builtin_define ("__KC__=__TC__");
729 	}
730       else
731 	builtin_define ("__LONG_DOUBLE_IBM128__");
732     }
733 
734   switch (TARGET_CMODEL)
735     {
736       /* Deliberately omit __CMODEL_SMALL__ since that was the default
737 	 before --mcmodel support was added.  */
738     case CMODEL_MEDIUM:
739       builtin_define ("__CMODEL_MEDIUM__");
740       break;
741     case CMODEL_LARGE:
742       builtin_define ("__CMODEL_LARGE__");
743       break;
744     default:
745       break;
746     }
747 
748   switch (rs6000_current_abi)
749     {
750     case ABI_V4:
751       builtin_define ("_CALL_SYSV");
752       break;
753     case ABI_AIX:
754       builtin_define ("_CALL_AIXDESC");
755       builtin_define ("_CALL_AIX");
756       builtin_define ("_CALL_ELF=1");
757       break;
758     case ABI_ELFv2:
759       builtin_define ("_CALL_ELF=2");
760       break;
761     case ABI_DARWIN:
762       builtin_define ("_CALL_DARWIN");
763       break;
764     default:
765       break;
766     }
767 
768   /* Vector element order.  */
769   if (BYTES_BIG_ENDIAN)
770     builtin_define ("__VEC_ELEMENT_REG_ORDER__=__ORDER_BIG_ENDIAN__");
771   else
772     builtin_define ("__VEC_ELEMENT_REG_ORDER__=__ORDER_LITTLE_ENDIAN__");
773 
774   /* Let the compiled code know if 'f' class registers will not be available.  */
775   if (TARGET_SOFT_FLOAT)
776     builtin_define ("__NO_FPRS__");
777 
778   /* Whether aggregates passed by value are aligned to a 16 byte boundary
779      if their alignment is 16 bytes or larger.  */
780   if ((TARGET_MACHO && rs6000_darwin64_abi)
781       || DEFAULT_ABI == ABI_ELFv2
782       || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
783     builtin_define ("__STRUCT_PARM_ALIGN__=16");
784 }
785 
786 
787 
788 /* Convert a type stored into a struct altivec_builtin_types as ID,
789    into a tree.  The types are in rs6000_builtin_types: negative values
790    create a pointer type for the type associated to ~ID.  Note it is
791    a logical NOT, rather than a negation, otherwise you cannot represent
792    a pointer type for ID 0.  */
793 
794 static inline tree
rs6000_builtin_type(int id)795 rs6000_builtin_type (int id)
796 {
797   tree t;
798   t = rs6000_builtin_types[id < 0 ? ~id : id];
799   return id < 0 ? build_pointer_type (t) : t;
800 }
801 
802 /* Check whether the type of an argument, T, is compatible with a type ID
803    stored into a struct altivec_builtin_types.  Integer types are considered
804    compatible; otherwise, the language hook lang_hooks.types_compatible_p makes
805    the decision.  Also allow long double and _Float128 to be compatible if
806    -mabi=ieeelongdouble.  */
807 
808 static inline bool
is_float128_p(tree t)809 is_float128_p (tree t)
810 {
811   return (t == float128_type_node
812 	  || (TARGET_IEEEQUAD
813 	      && TARGET_LONG_DOUBLE_128
814 	      && t == long_double_type_node));
815 }
816 
817 
818 /* Return true iff ARGTYPE can be compatibly passed as PARMTYPE.  */
819 static bool
rs6000_new_builtin_type_compatible(tree parmtype,tree argtype)820 rs6000_new_builtin_type_compatible (tree parmtype, tree argtype)
821 {
822   if (parmtype == error_mark_node)
823     return false;
824 
825   if (INTEGRAL_TYPE_P (parmtype) && INTEGRAL_TYPE_P (argtype))
826     return true;
827 
828   if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
829       && is_float128_p (parmtype) && is_float128_p (argtype))
830     return true;
831 
832   if (POINTER_TYPE_P (parmtype) && POINTER_TYPE_P (argtype))
833     {
834       parmtype = TREE_TYPE (parmtype);
835       argtype = TREE_TYPE (argtype);
836       if (TYPE_READONLY (argtype))
837 	parmtype = build_qualified_type (parmtype, TYPE_QUAL_CONST);
838     }
839 
840   return lang_hooks.types_compatible_p (parmtype, argtype);
841 }
842 
843 static inline bool
rs6000_builtin_type_compatible(tree t,int id)844 rs6000_builtin_type_compatible (tree t, int id)
845 {
846   tree builtin_type;
847   builtin_type = rs6000_builtin_type (id);
848   if (t == error_mark_node)
849     return false;
850   if (INTEGRAL_TYPE_P (t) && INTEGRAL_TYPE_P (builtin_type))
851     return true;
852   else if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
853 	   && is_float128_p (t) && is_float128_p (builtin_type))
854     return true;
855   else
856     return lang_hooks.types_compatible_p (t, builtin_type);
857 }
858 
859 
860 /* In addition to calling fold_convert for EXPR of type TYPE, also
861    call c_fully_fold to remove any C_MAYBE_CONST_EXPRs that could be
862    hiding there (PR47197).  */
863 
864 static tree
fully_fold_convert(tree type,tree expr)865 fully_fold_convert (tree type, tree expr)
866 {
867   tree result = fold_convert (type, expr);
868   bool maybe_const = true;
869 
870   if (!c_dialect_cxx ())
871     result = c_fully_fold (result, false, &maybe_const);
872 
873   return result;
874 }
875 
876 /* Build a tree for a function call to an Altivec non-overloaded builtin.
877    The overloaded builtin that matched the types and args is described
878    by DESC.  The N arguments are given in ARGS, respectively.
879 
880    Actually the only thing it does is calling fold_convert on ARGS, with
881    a small exception for vec_{all,any}_{ge,le} predicates. */
882 
883 static tree
altivec_build_resolved_builtin(tree * args,int n,const struct altivec_builtin_types * desc)884 altivec_build_resolved_builtin (tree *args, int n,
885 				const struct altivec_builtin_types *desc)
886 {
887   tree impl_fndecl = rs6000_builtin_decls[desc->overloaded_code];
888   tree ret_type = rs6000_builtin_type (desc->ret_type);
889   tree argtypes = TYPE_ARG_TYPES (TREE_TYPE (impl_fndecl));
890   tree arg_type[4];
891   tree call;
892 
893   int i;
894   for (i = 0; i < n; i++)
895     arg_type[i] = TREE_VALUE (argtypes), argtypes = TREE_CHAIN (argtypes);
896 
897   /* The AltiVec overloading implementation is overall gross, but this
898      is particularly disgusting.  The vec_{all,any}_{ge,le} builtins
899      are completely different for floating-point vs. integer vector
900      types, because the former has vcmpgefp, but the latter should use
901      vcmpgtXX.
902 
903      In practice, the second and third arguments are swapped, and the
904      condition (LT vs. EQ, which is recognizable by bit 1 of the first
905      argument) is reversed.  Patch the arguments here before building
906      the resolved CALL_EXPR.  */
907   if (n == 3
908       && desc->code == ALTIVEC_BUILTIN_VEC_VCMPGE_P
909       && desc->overloaded_code != ALTIVEC_BUILTIN_VCMPGEFP_P
910       && desc->overloaded_code != VSX_BUILTIN_XVCMPGEDP_P)
911     {
912       std::swap (args[1], args[2]);
913       std::swap (arg_type[1], arg_type[2]);
914 
915       args[0] = fold_build2 (BIT_XOR_EXPR, TREE_TYPE (args[0]), args[0],
916 			     build_int_cst (NULL_TREE, 2));
917     }
918 
919   switch (n)
920     {
921     case 0:
922       call = build_call_expr (impl_fndecl, 0);
923       break;
924     case 1:
925       call = build_call_expr (impl_fndecl, 1,
926 			      fully_fold_convert (arg_type[0], args[0]));
927       break;
928     case 2:
929       call = build_call_expr (impl_fndecl, 2,
930 			      fully_fold_convert (arg_type[0], args[0]),
931 			      fully_fold_convert (arg_type[1], args[1]));
932       break;
933     case 3:
934       call = build_call_expr (impl_fndecl, 3,
935 			      fully_fold_convert (arg_type[0], args[0]),
936 			      fully_fold_convert (arg_type[1], args[1]),
937 			      fully_fold_convert (arg_type[2], args[2]));
938       break;
939     case 4:
940       call = build_call_expr (impl_fndecl, 4,
941 			      fully_fold_convert (arg_type[0], args[0]),
942 			      fully_fold_convert (arg_type[1], args[1]),
943 			      fully_fold_convert (arg_type[2], args[2]),
944 			      fully_fold_convert (arg_type[3], args[3]));
945       break;
946     default:
947       gcc_unreachable ();
948     }
949   return fold_convert (ret_type, call);
950 }
951 
952 /* Implementation of the resolve_overloaded_builtin target hook, to
953    support Altivec's overloaded builtins.  */
954 
955 tree
altivec_resolve_overloaded_builtin(location_t loc,tree fndecl,void * passed_arglist)956 altivec_resolve_overloaded_builtin (location_t loc, tree fndecl,
957 				    void *passed_arglist)
958 {
959   if (new_builtins_are_live)
960     return altivec_resolve_new_overloaded_builtin (loc, fndecl,
961 						   passed_arglist);
962 
963   vec<tree, va_gc> *arglist = static_cast<vec<tree, va_gc> *> (passed_arglist);
964   unsigned int nargs = vec_safe_length (arglist);
965   enum rs6000_builtins fcode
966     = (enum rs6000_builtins) DECL_MD_FUNCTION_CODE (fndecl);
967   tree fnargs = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
968   tree types[4], args[4];
969   const struct altivec_builtin_types *desc;
970   unsigned int n;
971 
972   if (!rs6000_overloaded_builtin_p (fcode))
973     return NULL_TREE;
974 
975   if (TARGET_DEBUG_BUILTIN)
976     fprintf (stderr, "altivec_resolve_overloaded_builtin, code = %4d, %s\n",
977 	     (int)fcode, IDENTIFIER_POINTER (DECL_NAME (fndecl)));
978 
979   /* vec_lvsl and vec_lvsr are deprecated for use with LE element order.  */
980   if (fcode == ALTIVEC_BUILTIN_VEC_LVSL && !BYTES_BIG_ENDIAN)
981     warning (OPT_Wdeprecated,
982 	     "%<vec_lvsl%> is deprecated for little endian; use "
983 	     "assignment for unaligned loads and stores");
984   else if (fcode == ALTIVEC_BUILTIN_VEC_LVSR && !BYTES_BIG_ENDIAN)
985     warning (OPT_Wdeprecated,
986 	     "%<vec_lvsr%> is deprecated for little endian; use "
987 	     "assignment for unaligned loads and stores");
988 
989   if (fcode == ALTIVEC_BUILTIN_VEC_MUL)
990     {
991       /* vec_mul needs to be special cased because there are no instructions
992 	 for it for the {un}signed char, {un}signed short, and {un}signed int
993 	 types.  */
994       if (nargs != 2)
995 	{
996 	  error ("builtin %qs only accepts 2 arguments", "vec_mul");
997 	  return error_mark_node;
998 	}
999 
1000       tree arg0 = (*arglist)[0];
1001       tree arg0_type = TREE_TYPE (arg0);
1002       tree arg1 = (*arglist)[1];
1003       tree arg1_type = TREE_TYPE (arg1);
1004 
1005       /* Both arguments must be vectors and the types must be compatible.  */
1006       if (TREE_CODE (arg0_type) != VECTOR_TYPE)
1007 	goto bad;
1008       if (!lang_hooks.types_compatible_p (arg0_type, arg1_type))
1009 	goto bad;
1010 
1011       switch (TYPE_MODE (TREE_TYPE (arg0_type)))
1012 	{
1013 	  case E_QImode:
1014 	  case E_HImode:
1015 	  case E_SImode:
1016 	  case E_DImode:
1017 	  case E_TImode:
1018 	    {
1019 	      /* For scalar types just use a multiply expression.  */
1020 	      return fold_build2_loc (loc, MULT_EXPR, TREE_TYPE (arg0), arg0,
1021 				      fold_convert (TREE_TYPE (arg0), arg1));
1022 	    }
1023 	  case E_SFmode:
1024 	    {
1025 	      /* For floats use the xvmulsp instruction directly.  */
1026 	      tree call = rs6000_builtin_decls[VSX_BUILTIN_XVMULSP];
1027 	      return build_call_expr (call, 2, arg0, arg1);
1028 	    }
1029 	  case E_DFmode:
1030 	    {
1031 	      /* For doubles use the xvmuldp instruction directly.  */
1032 	      tree call = rs6000_builtin_decls[VSX_BUILTIN_XVMULDP];
1033 	      return build_call_expr (call, 2, arg0, arg1);
1034 	    }
1035 	  /* Other types are errors.  */
1036 	  default:
1037 	    goto bad;
1038 	}
1039     }
1040 
1041   if (fcode == ALTIVEC_BUILTIN_VEC_CMPNE)
1042     {
1043       /* vec_cmpne needs to be special cased because there are no instructions
1044 	 for it (prior to power 9).  */
1045       if (nargs != 2)
1046 	{
1047 	  error ("builtin %qs only accepts 2 arguments", "vec_cmpne");
1048 	  return error_mark_node;
1049 	}
1050 
1051       tree arg0 = (*arglist)[0];
1052       tree arg0_type = TREE_TYPE (arg0);
1053       tree arg1 = (*arglist)[1];
1054       tree arg1_type = TREE_TYPE (arg1);
1055 
1056       /* Both arguments must be vectors and the types must be compatible.  */
1057       if (TREE_CODE (arg0_type) != VECTOR_TYPE)
1058 	goto bad;
1059       if (!lang_hooks.types_compatible_p (arg0_type, arg1_type))
1060 	goto bad;
1061 
1062       /* Power9 instructions provide the most efficient implementation of
1063 	 ALTIVEC_BUILTIN_VEC_CMPNE if the mode is not DImode or TImode
1064 	 or SFmode or DFmode.  */
1065       if (!TARGET_P9_VECTOR
1066 	  || (TYPE_MODE (TREE_TYPE (arg0_type)) == DImode)
1067 	  || (TYPE_MODE (TREE_TYPE (arg0_type)) == TImode)
1068 	  || (TYPE_MODE (TREE_TYPE (arg0_type)) == SFmode)
1069 	  || (TYPE_MODE (TREE_TYPE (arg0_type)) == DFmode))
1070 	{
1071 	  switch (TYPE_MODE (TREE_TYPE (arg0_type)))
1072 	    {
1073 	      /* vec_cmpneq (va, vb) == vec_nor (vec_cmpeq (va, vb),
1074 		 vec_cmpeq (va, vb)).  */
1075 	      /* Note:  vec_nand also works but opt changes vec_nand's
1076 		 to vec_nor's anyway.  */
1077 	    case E_QImode:
1078 	    case E_HImode:
1079 	    case E_SImode:
1080 	    case E_DImode:
1081 	    case E_TImode:
1082 	    case E_SFmode:
1083 	    case E_DFmode:
1084 	      {
1085 		/* call = vec_cmpeq (va, vb)
1086 		   result = vec_nor (call, call).  */
1087 		vec<tree, va_gc> *params = make_tree_vector ();
1088 		vec_safe_push (params, arg0);
1089 		vec_safe_push (params, arg1);
1090 		tree call = altivec_resolve_overloaded_builtin
1091 		  (loc, rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_CMPEQ],
1092 		   params);
1093 		/* Use save_expr to ensure that operands used more than once
1094 		   that may have side effects (like calls) are only evaluated
1095 		   once.  */
1096 		call = save_expr (call);
1097 		params = make_tree_vector ();
1098 		vec_safe_push (params, call);
1099 		vec_safe_push (params, call);
1100 		return altivec_resolve_overloaded_builtin
1101 		  (loc, rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_NOR], params);
1102 	      }
1103 	      /* Other types are errors.  */
1104 	    default:
1105 	      goto bad;
1106 	    }
1107 	}
1108       /* else, fall through and process the Power9 alternative below */
1109     }
1110 
1111   if (fcode == ALTIVEC_BUILTIN_VEC_ADDE
1112       || fcode == ALTIVEC_BUILTIN_VEC_SUBE)
1113     {
1114       /* vec_adde needs to be special cased because there is no instruction
1115 	  for the {un}signed int version.  */
1116       if (nargs != 3)
1117 	{
1118 	  const char *name = fcode == ALTIVEC_BUILTIN_VEC_ADDE ?
1119 	    "vec_adde": "vec_sube";
1120 	  error ("builtin %qs only accepts 3 arguments", name);
1121 	  return error_mark_node;
1122 	}
1123 
1124       tree arg0 = (*arglist)[0];
1125       tree arg0_type = TREE_TYPE (arg0);
1126       tree arg1 = (*arglist)[1];
1127       tree arg1_type = TREE_TYPE (arg1);
1128       tree arg2 = (*arglist)[2];
1129       tree arg2_type = TREE_TYPE (arg2);
1130 
1131       /* All 3 arguments must be vectors of (signed or unsigned) (int or
1132 	 __int128) and the types must be compatible.  */
1133       if (TREE_CODE (arg0_type) != VECTOR_TYPE)
1134 	goto bad;
1135       if (!lang_hooks.types_compatible_p (arg0_type, arg1_type)
1136 	  || !lang_hooks.types_compatible_p (arg1_type, arg2_type))
1137 	goto bad;
1138 
1139       switch (TYPE_MODE (TREE_TYPE (arg0_type)))
1140 	{
1141 	  /* For {un}signed ints,
1142 	     vec_adde (va, vb, carryv) == vec_add (vec_add (va, vb),
1143 						   vec_and (carryv, 1)).
1144 	     vec_sube (va, vb, carryv) == vec_sub (vec_sub (va, vb),
1145 						   vec_and (carryv, 1)).  */
1146 	  case E_SImode:
1147 	    {
1148 	      tree add_sub_builtin;
1149 
1150 	      vec<tree, va_gc> *params = make_tree_vector ();
1151 	      vec_safe_push (params, arg0);
1152 	      vec_safe_push (params, arg1);
1153 
1154 	      if (fcode == ALTIVEC_BUILTIN_VEC_ADDE)
1155 		add_sub_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_ADD];
1156 	      else
1157 		add_sub_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_SUB];
1158 
1159 	      tree call = altivec_resolve_overloaded_builtin (loc,
1160 							      add_sub_builtin,
1161 							      params);
1162 	      tree const1 = build_int_cstu (TREE_TYPE (arg0_type), 1);
1163 	      tree ones_vector = build_vector_from_val (arg0_type, const1);
1164 	      tree and_expr = fold_build2_loc (loc, BIT_AND_EXPR, arg0_type,
1165 					       arg2, ones_vector);
1166 	      params = make_tree_vector ();
1167 	      vec_safe_push (params, call);
1168 	      vec_safe_push (params, and_expr);
1169 	      return altivec_resolve_overloaded_builtin (loc, add_sub_builtin,
1170 							 params);
1171 	    }
1172 	  /* For {un}signed __int128s use the vaddeuqm instruction
1173 		directly.  */
1174 	  case E_TImode:
1175 	    {
1176 	       tree bii;
1177 
1178 	       if (fcode == ALTIVEC_BUILTIN_VEC_ADDE)
1179 		 bii = rs6000_builtin_decls[P8V_BUILTIN_VEC_VADDEUQM];
1180 
1181 	       else
1182 		 bii = rs6000_builtin_decls[P8V_BUILTIN_VEC_VSUBEUQM];
1183 
1184 	       return altivec_resolve_overloaded_builtin (loc, bii, arglist);
1185 	    }
1186 
1187 	  /* Types other than {un}signed int and {un}signed __int128
1188 		are errors.  */
1189 	  default:
1190 	    goto bad;
1191 	}
1192     }
1193 
1194   if (fcode == ALTIVEC_BUILTIN_VEC_ADDEC
1195       || fcode == ALTIVEC_BUILTIN_VEC_SUBEC)
1196     {
1197       /* vec_addec and vec_subec needs to be special cased because there is
1198 	 no instruction for the {un}signed int version.  */
1199       if (nargs != 3)
1200 	{
1201 	  const char *name = fcode == ALTIVEC_BUILTIN_VEC_ADDEC ?
1202 	    "vec_addec": "vec_subec";
1203 	  error ("builtin %qs only accepts 3 arguments", name);
1204 	  return error_mark_node;
1205 	}
1206 
1207       tree arg0 = (*arglist)[0];
1208       tree arg0_type = TREE_TYPE (arg0);
1209       tree arg1 = (*arglist)[1];
1210       tree arg1_type = TREE_TYPE (arg1);
1211       tree arg2 = (*arglist)[2];
1212       tree arg2_type = TREE_TYPE (arg2);
1213 
1214       /* All 3 arguments must be vectors of (signed or unsigned) (int or
1215 	 __int128) and the types must be compatible.  */
1216       if (TREE_CODE (arg0_type) != VECTOR_TYPE)
1217 	goto bad;
1218       if (!lang_hooks.types_compatible_p (arg0_type, arg1_type)
1219 	  || !lang_hooks.types_compatible_p (arg1_type, arg2_type))
1220 	goto bad;
1221 
1222       switch (TYPE_MODE (TREE_TYPE (arg0_type)))
1223 	{
1224 	  /* For {un}signed ints,
1225 	      vec_addec (va, vb, carryv) ==
1226 				vec_or (vec_addc (va, vb),
1227 					vec_addc (vec_add (va, vb),
1228 						  vec_and (carryv, 0x1))).  */
1229 	  case E_SImode:
1230 	    {
1231 	    /* Use save_expr to ensure that operands used more than once
1232 		that may have side effects (like calls) are only evaluated
1233 		once.  */
1234 	    tree as_builtin;
1235 	    tree as_c_builtin;
1236 
1237 	    arg0 = save_expr (arg0);
1238 	    arg1 = save_expr (arg1);
1239 	    vec<tree, va_gc> *params = make_tree_vector ();
1240 	    vec_safe_push (params, arg0);
1241 	    vec_safe_push (params, arg1);
1242 
1243 	    if (fcode == ALTIVEC_BUILTIN_VEC_ADDEC)
1244 	      as_c_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_ADDC];
1245 	    else
1246 	      as_c_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_SUBC];
1247 
1248 	    tree call1 = altivec_resolve_overloaded_builtin (loc, as_c_builtin,
1249 							     params);
1250 	    params = make_tree_vector ();
1251 	    vec_safe_push (params, arg0);
1252 	    vec_safe_push (params, arg1);
1253 
1254 
1255 	    if (fcode == ALTIVEC_BUILTIN_VEC_ADDEC)
1256 	      as_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_ADD];
1257 	    else
1258 	      as_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_SUB];
1259 
1260 	    tree call2 = altivec_resolve_overloaded_builtin (loc, as_builtin,
1261 							     params);
1262 	    tree const1 = build_int_cstu (TREE_TYPE (arg0_type), 1);
1263 	    tree ones_vector = build_vector_from_val (arg0_type, const1);
1264 	    tree and_expr = fold_build2_loc (loc, BIT_AND_EXPR, arg0_type,
1265 					     arg2, ones_vector);
1266 	    params = make_tree_vector ();
1267 	    vec_safe_push (params, call2);
1268 	    vec_safe_push (params, and_expr);
1269 	    call2 = altivec_resolve_overloaded_builtin (loc, as_c_builtin,
1270 							params);
1271 	    params = make_tree_vector ();
1272 	    vec_safe_push (params, call1);
1273 	    vec_safe_push (params, call2);
1274 	    tree or_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_OR];
1275 	    return altivec_resolve_overloaded_builtin (loc, or_builtin,
1276 						       params);
1277 	    }
1278 	  /* For {un}signed __int128s use the vaddecuq/vsubbecuq
1279 	     instructions.  */
1280 	  case E_TImode:
1281 	    {
1282 	       tree bii;
1283 
1284 	       if (fcode == ALTIVEC_BUILTIN_VEC_ADDEC)
1285 		 bii = rs6000_builtin_decls[P8V_BUILTIN_VEC_VADDECUQ];
1286 
1287 	       else
1288 		 bii = rs6000_builtin_decls[P8V_BUILTIN_VEC_VSUBECUQ];
1289 
1290 	       return altivec_resolve_overloaded_builtin (loc, bii, arglist);
1291 	    }
1292 	  /* Types other than {un}signed int and {un}signed __int128
1293 		are errors.  */
1294 	  default:
1295 	    goto bad;
1296 	}
1297     }
1298 
1299   /* For now treat vec_splats and vec_promote as the same.  */
1300   if (fcode == ALTIVEC_BUILTIN_VEC_SPLATS
1301       || fcode == ALTIVEC_BUILTIN_VEC_PROMOTE)
1302     {
1303       tree type, arg;
1304       int size;
1305       int i;
1306       bool unsigned_p;
1307       vec<constructor_elt, va_gc> *vec;
1308       const char *name = fcode == ALTIVEC_BUILTIN_VEC_SPLATS ? "vec_splats": "vec_promote";
1309 
1310       if (fcode == ALTIVEC_BUILTIN_VEC_SPLATS && nargs != 1)
1311 	{
1312 	  error ("builtin %qs only accepts 1 argument", name);
1313 	  return error_mark_node;
1314 	}
1315       if (fcode == ALTIVEC_BUILTIN_VEC_PROMOTE && nargs != 2)
1316 	{
1317 	  error ("builtin %qs only accepts 2 arguments", name);
1318 	  return error_mark_node;
1319 	}
1320       /* Ignore promote's element argument.  */
1321       if (fcode == ALTIVEC_BUILTIN_VEC_PROMOTE
1322 	  && !INTEGRAL_TYPE_P (TREE_TYPE ((*arglist)[1])))
1323 	goto bad;
1324 
1325       arg = (*arglist)[0];
1326       type = TREE_TYPE (arg);
1327       if (!SCALAR_FLOAT_TYPE_P (type)
1328 	  && !INTEGRAL_TYPE_P (type))
1329 	goto bad;
1330       unsigned_p = TYPE_UNSIGNED (type);
1331       switch (TYPE_MODE (type))
1332 	{
1333 	  case E_TImode:
1334 	    type = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
1335 	    size = 1;
1336 	    break;
1337 	  case E_DImode:
1338 	    type = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
1339 	    size = 2;
1340 	    break;
1341 	  case E_SImode:
1342 	    type = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
1343 	    size = 4;
1344 	    break;
1345 	  case E_HImode:
1346 	    type = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
1347 	    size = 8;
1348 	    break;
1349 	  case E_QImode:
1350 	    type = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
1351 	    size = 16;
1352 	    break;
1353 	  case E_SFmode: type = V4SF_type_node; size = 4; break;
1354 	  case E_DFmode: type = V2DF_type_node; size = 2; break;
1355 	  default:
1356 	    goto bad;
1357 	}
1358       arg = save_expr (fold_convert (TREE_TYPE (type), arg));
1359       vec_alloc (vec, size);
1360       for(i = 0; i < size; i++)
1361 	{
1362 	  constructor_elt elt = {NULL_TREE, arg};
1363 	  vec->quick_push (elt);
1364 	}
1365 	return build_constructor (type, vec);
1366     }
1367 
1368   /* For now use pointer tricks to do the extraction, unless we are on VSX
1369      extracting a double from a constant offset.  */
1370   if (fcode == ALTIVEC_BUILTIN_VEC_EXTRACT)
1371     {
1372       tree arg1;
1373       tree arg1_type;
1374       tree arg2;
1375       tree arg1_inner_type;
1376       tree decl, stmt;
1377       tree innerptrtype;
1378       machine_mode mode;
1379 
1380       /* No second argument. */
1381       if (nargs != 2)
1382 	{
1383 	  error ("builtin %qs only accepts 2 arguments", "vec_extract");
1384 	  return error_mark_node;
1385 	}
1386 
1387       arg2 = (*arglist)[1];
1388       arg1 = (*arglist)[0];
1389       arg1_type = TREE_TYPE (arg1);
1390 
1391       if (TREE_CODE (arg1_type) != VECTOR_TYPE)
1392 	goto bad;
1393       if (!INTEGRAL_TYPE_P (TREE_TYPE (arg2)))
1394 	goto bad;
1395 
1396       /* See if we can optimize vec_extracts with the current VSX instruction
1397 	 set.  */
1398       mode = TYPE_MODE (arg1_type);
1399       if (VECTOR_MEM_VSX_P (mode))
1400 
1401 	{
1402 	  tree call = NULL_TREE;
1403 	  int nunits = GET_MODE_NUNITS (mode);
1404 
1405 	  arg2 = fold_for_warn (arg2);
1406 
1407 	  /* If the second argument is an integer constant, generate
1408 	     the built-in code if we can.  We need 64-bit and direct
1409 	     move to extract the small integer vectors.  */
1410 	  if (TREE_CODE (arg2) == INTEGER_CST)
1411 	    {
1412 	      wide_int selector = wi::to_wide (arg2);
1413 	      selector = wi::umod_trunc (selector, nunits);
1414 	      arg2 = wide_int_to_tree (TREE_TYPE (arg2), selector);
1415 	      switch (mode)
1416 		{
1417 		default:
1418 		  break;
1419 
1420 		case E_V1TImode:
1421 		  call = rs6000_builtin_decls[VSX_BUILTIN_VEC_EXT_V1TI];
1422 		  break;
1423 
1424 		case E_V2DFmode:
1425 		  call = rs6000_builtin_decls[VSX_BUILTIN_VEC_EXT_V2DF];
1426 		  break;
1427 
1428 		case E_V2DImode:
1429 		  call = rs6000_builtin_decls[VSX_BUILTIN_VEC_EXT_V2DI];
1430 		  break;
1431 
1432 		case E_V4SFmode:
1433 		  call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V4SF];
1434 		  break;
1435 
1436 		case E_V4SImode:
1437 		  if (TARGET_DIRECT_MOVE_64BIT)
1438 		    call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V4SI];
1439 		  break;
1440 
1441 		case E_V8HImode:
1442 		  if (TARGET_DIRECT_MOVE_64BIT)
1443 		    call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V8HI];
1444 		  break;
1445 
1446 		case E_V16QImode:
1447 		  if (TARGET_DIRECT_MOVE_64BIT)
1448 		    call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V16QI];
1449 		  break;
1450 		}
1451 	    }
1452 
1453 	  /* If the second argument is variable, we can optimize it if we are
1454 	     generating 64-bit code on a machine with direct move.  */
1455 	  else if (TREE_CODE (arg2) != INTEGER_CST && TARGET_DIRECT_MOVE_64BIT)
1456 	    {
1457 	      switch (mode)
1458 		{
1459 		default:
1460 		  break;
1461 
1462 		case E_V2DFmode:
1463 		  call = rs6000_builtin_decls[VSX_BUILTIN_VEC_EXT_V2DF];
1464 		  break;
1465 
1466 		case E_V2DImode:
1467 		  call = rs6000_builtin_decls[VSX_BUILTIN_VEC_EXT_V2DI];
1468 		  break;
1469 
1470 		case E_V4SFmode:
1471 		  call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V4SF];
1472 		  break;
1473 
1474 		case E_V4SImode:
1475 		  call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V4SI];
1476 		  break;
1477 
1478 		case E_V8HImode:
1479 		  call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V8HI];
1480 		  break;
1481 
1482 		case E_V16QImode:
1483 		  call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V16QI];
1484 		  break;
1485 		}
1486 	    }
1487 
1488 	  if (call)
1489 	    {
1490 	      tree result = build_call_expr (call, 2, arg1, arg2);
1491 	      /* Coerce the result to vector element type.  May be no-op.  */
1492 	      arg1_inner_type = TREE_TYPE (arg1_type);
1493 	      result = fold_convert (arg1_inner_type, result);
1494 	      return result;
1495 	    }
1496 	}
1497 
1498       /* Build *(((arg1_inner_type*)&(vector type){arg1})+arg2). */
1499       arg1_inner_type = TREE_TYPE (arg1_type);
1500       arg2 = build_binary_op (loc, BIT_AND_EXPR, arg2,
1501 			      build_int_cst (TREE_TYPE (arg2),
1502 					     TYPE_VECTOR_SUBPARTS (arg1_type)
1503 					     - 1), 0);
1504       decl = build_decl (loc, VAR_DECL, NULL_TREE, arg1_type);
1505       DECL_EXTERNAL (decl) = 0;
1506       TREE_PUBLIC (decl) = 0;
1507       DECL_CONTEXT (decl) = current_function_decl;
1508       TREE_USED (decl) = 1;
1509       TREE_TYPE (decl) = arg1_type;
1510       TREE_READONLY (decl) = TYPE_READONLY (arg1_type);
1511       if (c_dialect_cxx ())
1512 	{
1513 	  stmt = build4 (TARGET_EXPR, arg1_type, decl, arg1,
1514 			 NULL_TREE, NULL_TREE);
1515 	  SET_EXPR_LOCATION (stmt, loc);
1516 	}
1517       else
1518 	{
1519 	  DECL_INITIAL (decl) = arg1;
1520 	  stmt = build1 (DECL_EXPR, arg1_type, decl);
1521 	  TREE_ADDRESSABLE (decl) = 1;
1522 	  SET_EXPR_LOCATION (stmt, loc);
1523 	  stmt = build1 (COMPOUND_LITERAL_EXPR, arg1_type, stmt);
1524 	}
1525 
1526       innerptrtype = build_pointer_type (arg1_inner_type);
1527 
1528       stmt = build_unary_op (loc, ADDR_EXPR, stmt, 0);
1529       stmt = convert (innerptrtype, stmt);
1530       stmt = build_binary_op (loc, PLUS_EXPR, stmt, arg2, 1);
1531       stmt = build_indirect_ref (loc, stmt, RO_NULL);
1532 
1533       /* PR83660: We mark this as having side effects so that
1534 	 downstream in fold_build_cleanup_point_expr () it will get a
1535 	 CLEANUP_POINT_EXPR.  If it does not we can run into an ICE
1536 	 later in gimplify_cleanup_point_expr ().  Potentially this
1537 	 causes missed optimization because the actually is no side
1538 	 effect.  */
1539       if (c_dialect_cxx ())
1540 	TREE_SIDE_EFFECTS (stmt) = 1;
1541 
1542       return stmt;
1543     }
1544 
1545   /* For now use pointer tricks to do the insertion, unless we are on VSX
1546      inserting a double to a constant offset..  */
1547   if (fcode == ALTIVEC_BUILTIN_VEC_INSERT)
1548     {
1549       tree arg0;
1550       tree arg1;
1551       tree arg2;
1552       tree arg1_type;
1553       tree decl, stmt;
1554       machine_mode mode;
1555 
1556       /* No second or third arguments. */
1557       if (nargs != 3)
1558 	{
1559 	  error ("builtin %qs only accepts 3 arguments", "vec_insert");
1560 	  return error_mark_node;
1561 	}
1562 
1563       arg0 = (*arglist)[0];
1564       arg1 = (*arglist)[1];
1565       arg1_type = TREE_TYPE (arg1);
1566       arg2 = fold_for_warn ((*arglist)[2]);
1567 
1568       if (TREE_CODE (arg1_type) != VECTOR_TYPE)
1569 	goto bad;
1570       if (!INTEGRAL_TYPE_P (TREE_TYPE (arg2)))
1571 	goto bad;
1572 
1573       /* If we can use the VSX xxpermdi instruction, use that for insert.  */
1574       mode = TYPE_MODE (arg1_type);
1575       if ((mode == V2DFmode || mode == V2DImode) && VECTOR_UNIT_VSX_P (mode)
1576 	  && TREE_CODE (arg2) == INTEGER_CST)
1577 	{
1578 	  wide_int selector = wi::to_wide (arg2);
1579 	  selector = wi::umod_trunc (selector, 2);
1580 	  tree call = NULL_TREE;
1581 
1582 	  arg2 = wide_int_to_tree (TREE_TYPE (arg2), selector);
1583 	  if (mode == V2DFmode)
1584 	    call = rs6000_builtin_decls[VSX_BUILTIN_VEC_SET_V2DF];
1585 	  else if (mode == V2DImode)
1586 	    call = rs6000_builtin_decls[VSX_BUILTIN_VEC_SET_V2DI];
1587 
1588 	  /* Note, __builtin_vec_insert_<xxx> has vector and scalar types
1589 	     reversed.  */
1590 	  if (call)
1591 	    return build_call_expr (call, 3, arg1, arg0, arg2);
1592 	}
1593       else if (mode == V1TImode && VECTOR_UNIT_VSX_P (mode)
1594 	       && TREE_CODE (arg2) == INTEGER_CST)
1595 	{
1596 	  tree call = rs6000_builtin_decls[VSX_BUILTIN_VEC_SET_V1TI];
1597 	  wide_int selector = wi::zero(32);
1598 
1599 	  arg2 = wide_int_to_tree (TREE_TYPE (arg2), selector);
1600 	  /* Note, __builtin_vec_insert_<xxx> has vector and scalar types
1601 	     reversed.  */
1602 	  return build_call_expr (call, 3, arg1, arg0, arg2);
1603 	}
1604 
1605       /* Build *(((arg1_inner_type*)&(vector type){arg1})+arg2) = arg0 with
1606 	 VIEW_CONVERT_EXPR.  i.e.:
1607 	 D.3192 = v1;
1608 	 _1 = n & 3;
1609 	 VIEW_CONVERT_EXPR<int[4]>(D.3192)[_1] = i;
1610 	 v1 = D.3192;
1611 	 D.3194 = v1;  */
1612       if (TYPE_VECTOR_SUBPARTS (arg1_type) == 1)
1613 	arg2 = build_int_cst (TREE_TYPE (arg2), 0);
1614       else
1615 	arg2 = build_binary_op (loc, BIT_AND_EXPR, arg2,
1616 				build_int_cst (TREE_TYPE (arg2),
1617 					       TYPE_VECTOR_SUBPARTS (arg1_type)
1618 					       - 1), 0);
1619       decl = build_decl (loc, VAR_DECL, NULL_TREE, arg1_type);
1620       DECL_EXTERNAL (decl) = 0;
1621       TREE_PUBLIC (decl) = 0;
1622       DECL_CONTEXT (decl) = current_function_decl;
1623       TREE_USED (decl) = 1;
1624       TREE_TYPE (decl) = arg1_type;
1625       TREE_READONLY (decl) = TYPE_READONLY (arg1_type);
1626       TREE_ADDRESSABLE (decl) = 1;
1627       if (c_dialect_cxx ())
1628 	{
1629 	  stmt = build4 (TARGET_EXPR, arg1_type, decl, arg1,
1630 			 NULL_TREE, NULL_TREE);
1631 	  SET_EXPR_LOCATION (stmt, loc);
1632 	}
1633       else
1634 	{
1635 	  DECL_INITIAL (decl) = arg1;
1636 	  stmt = build1 (DECL_EXPR, arg1_type, decl);
1637 	  SET_EXPR_LOCATION (stmt, loc);
1638 	  stmt = build1 (COMPOUND_LITERAL_EXPR, arg1_type, stmt);
1639 	}
1640 
1641       if (TARGET_VSX)
1642 	{
1643 	  stmt = build_array_ref (loc, stmt, arg2);
1644 	  stmt = fold_build2 (MODIFY_EXPR, TREE_TYPE (arg0), stmt,
1645 			      convert (TREE_TYPE (stmt), arg0));
1646 	  stmt = build2 (COMPOUND_EXPR, arg1_type, stmt, decl);
1647 	}
1648       else
1649 	{
1650 	  tree arg1_inner_type;
1651 	  tree innerptrtype;
1652 	  arg1_inner_type = TREE_TYPE (arg1_type);
1653 	  innerptrtype = build_pointer_type (arg1_inner_type);
1654 
1655 	  stmt = build_unary_op (loc, ADDR_EXPR, stmt, 0);
1656 	  stmt = convert (innerptrtype, stmt);
1657 	  stmt = build_binary_op (loc, PLUS_EXPR, stmt, arg2, 1);
1658 	  stmt = build_indirect_ref (loc, stmt, RO_NULL);
1659 	  stmt = build2 (MODIFY_EXPR, TREE_TYPE (stmt), stmt,
1660 			 convert (TREE_TYPE (stmt), arg0));
1661 	  stmt = build2 (COMPOUND_EXPR, arg1_type, stmt, decl);
1662 	}
1663       return stmt;
1664     }
1665 
1666   for (n = 0;
1667        !VOID_TYPE_P (TREE_VALUE (fnargs)) && n < nargs;
1668        fnargs = TREE_CHAIN (fnargs), n++)
1669     {
1670       tree decl_type = TREE_VALUE (fnargs);
1671       tree arg = (*arglist)[n];
1672       tree type;
1673 
1674       if (arg == error_mark_node)
1675 	return error_mark_node;
1676 
1677       if (n >= 4)
1678         abort ();
1679 
1680       arg = default_conversion (arg);
1681 
1682       /* The C++ front-end converts float * to const void * using
1683 	 NOP_EXPR<const void *> (NOP_EXPR<void *> (x)).  */
1684       type = TREE_TYPE (arg);
1685       if (POINTER_TYPE_P (type)
1686 	  && TREE_CODE (arg) == NOP_EXPR
1687 	  && lang_hooks.types_compatible_p (TREE_TYPE (arg),
1688 					    const_ptr_type_node)
1689 	  && lang_hooks.types_compatible_p (TREE_TYPE (TREE_OPERAND (arg, 0)),
1690 					    ptr_type_node))
1691 	{
1692 	  arg = TREE_OPERAND (arg, 0);
1693           type = TREE_TYPE (arg);
1694 	}
1695 
1696       /* Remove the const from the pointers to simplify the overload
1697 	 matching further down.  */
1698       if (POINTER_TYPE_P (decl_type)
1699 	  && POINTER_TYPE_P (type)
1700 	  && TYPE_QUALS (TREE_TYPE (type)) != 0)
1701 	{
1702           if (TYPE_READONLY (TREE_TYPE (type))
1703 	      && !TYPE_READONLY (TREE_TYPE (decl_type)))
1704 	    warning (0, "passing argument %d of %qE discards qualifiers from "
1705 		        "pointer target type", n + 1, fndecl);
1706 	  type = build_pointer_type (build_qualified_type (TREE_TYPE (type),
1707 							   0));
1708 	  arg = fold_convert (type, arg);
1709 	}
1710 
1711       /* For P9V_BUILTIN_VEC_LXVL, convert any const * to its non constant
1712 	 equivalent to simplify the overload matching below.  */
1713       if (fcode == P9V_BUILTIN_VEC_LXVL)
1714 	{
1715 	  if (POINTER_TYPE_P (type)
1716 	      && TYPE_READONLY (TREE_TYPE (type)))
1717 	    {
1718 	      type = build_pointer_type (build_qualified_type (
1719 						TREE_TYPE (type),0));
1720 	      arg = fold_convert (type, arg);
1721 	    }
1722 	}
1723 
1724       args[n] = arg;
1725       types[n] = type;
1726     }
1727 
1728   /* If the number of arguments did not match the prototype, return NULL
1729      and the generic code will issue the appropriate error message.  */
1730   if (!VOID_TYPE_P (TREE_VALUE (fnargs)) || n < nargs)
1731     return NULL;
1732 
1733   if (n == 0)
1734     abort ();
1735 
1736   if (fcode == ALTIVEC_BUILTIN_VEC_STEP)
1737     {
1738       if (TREE_CODE (types[0]) != VECTOR_TYPE)
1739 	goto bad;
1740 
1741       return build_int_cst (NULL_TREE, TYPE_VECTOR_SUBPARTS (types[0]));
1742     }
1743 
1744   {
1745     bool unsupported_builtin = false;
1746     enum rs6000_builtins overloaded_code;
1747     tree result = NULL;
1748     for (desc = altivec_overloaded_builtins;
1749 	 desc->code && desc->code != fcode; desc++)
1750       continue;
1751 
1752     /* Need to special case __builtin_cmp because the overloaded forms
1753        of this function take (unsigned int, unsigned int) or (unsigned
1754        long long int, unsigned long long int).  Since C conventions
1755        allow the respective argument types to be implicitly coerced into
1756        each other, the default handling does not provide adequate
1757        discrimination between the desired forms of the function.  */
1758     if (fcode == P6_OV_BUILTIN_CMPB)
1759       {
1760 	machine_mode arg1_mode = TYPE_MODE (types[0]);
1761 	machine_mode arg2_mode = TYPE_MODE (types[1]);
1762 
1763 	if (nargs != 2)
1764 	  {
1765 	    error ("builtin %qs only accepts 2 arguments", "__builtin_cmpb");
1766 	    return error_mark_node;
1767 	  }
1768 
1769 	/* If any supplied arguments are wider than 32 bits, resolve to
1770 	   64-bit variant of built-in function.  */
1771 	if ((GET_MODE_PRECISION (arg1_mode) > 32)
1772 	    || (GET_MODE_PRECISION (arg2_mode) > 32))
1773 	  {
1774 	    /* Assure all argument and result types are compatible with
1775 	       the built-in function represented by P6_BUILTIN_CMPB.  */
1776 	    overloaded_code = P6_BUILTIN_CMPB;
1777 	  }
1778 	else
1779 	  {
1780 	    /* Assure all argument and result types are compatible with
1781 	       the built-in function represented by P6_BUILTIN_CMPB_32.  */
1782 	    overloaded_code = P6_BUILTIN_CMPB_32;
1783 	  }
1784 
1785 	while (desc->code && desc->code == fcode
1786 	       && desc->overloaded_code != overloaded_code)
1787 	  desc++;
1788 
1789 	if (desc->code && (desc->code == fcode)
1790 	    && rs6000_builtin_type_compatible (types[0], desc->op1)
1791 	    && rs6000_builtin_type_compatible (types[1], desc->op2))
1792 	  {
1793 	    if (rs6000_builtin_decls[desc->overloaded_code] != NULL_TREE)
1794 	      {
1795 		result = altivec_build_resolved_builtin (args, n, desc);
1796 		/* overloaded_code is set above */
1797 		if (!rs6000_builtin_is_supported_p (overloaded_code))
1798 		  unsupported_builtin = true;
1799 		else
1800 		  return result;
1801 	      }
1802 	    else
1803 	      unsupported_builtin = true;
1804 	  }
1805       }
1806     else if (fcode == P9V_BUILTIN_VEC_VSIEDP)
1807       {
1808 	machine_mode arg1_mode = TYPE_MODE (types[0]);
1809 
1810 	if (nargs != 2)
1811 	  {
1812 	    error ("builtin %qs only accepts 2 arguments",
1813 		   "scalar_insert_exp");
1814 	    return error_mark_node;
1815 	  }
1816 
1817 	/* If supplied first argument is wider than 64 bits, resolve to
1818 	   128-bit variant of built-in function.  */
1819 	if (GET_MODE_PRECISION (arg1_mode) > 64)
1820 	  {
1821 	    /* If first argument is of float variety, choose variant
1822 	       that expects __ieee128 argument.  Otherwise, expect
1823 	       __int128 argument.  */
1824 	    if (GET_MODE_CLASS (arg1_mode) == MODE_FLOAT)
1825 	      overloaded_code = P9V_BUILTIN_VSIEQPF;
1826 	    else
1827 	      overloaded_code = P9V_BUILTIN_VSIEQP;
1828 	  }
1829 	else
1830 	  {
1831 	    /* If first argument is of float variety, choose variant
1832 	       that expects double argument.  Otherwise, expect
1833 	       long long int argument.  */
1834 	    if (GET_MODE_CLASS (arg1_mode) == MODE_FLOAT)
1835 	      overloaded_code = P9V_BUILTIN_VSIEDPF;
1836 	    else
1837 	      overloaded_code = P9V_BUILTIN_VSIEDP;
1838 	  }
1839 	while (desc->code && desc->code == fcode
1840 	       && desc->overloaded_code != overloaded_code)
1841 	  desc++;
1842 
1843 	if (desc->code && (desc->code == fcode)
1844 	    && rs6000_builtin_type_compatible (types[0], desc->op1)
1845 	    && rs6000_builtin_type_compatible (types[1], desc->op2))
1846 	  {
1847 	    if (rs6000_builtin_decls[desc->overloaded_code] != NULL_TREE)
1848 	      {
1849 		result = altivec_build_resolved_builtin (args, n, desc);
1850 		/* overloaded_code is set above.  */
1851 		if (!rs6000_builtin_is_supported_p (overloaded_code))
1852 		  unsupported_builtin = true;
1853 		else
1854 		  return result;
1855 	      }
1856 	    else
1857 	      unsupported_builtin = true;
1858 	  }
1859       }
1860     else if ((fcode == P10_BUILTIN_VEC_XXEVAL)
1861 	    || (fcode == P10V_BUILTIN_VXXPERMX))
1862       {
1863 	signed char op3_type;
1864 
1865 	/* Need to special case P10_BUILTIN_VEC_XXEVAL and
1866 	   P10V_BUILTIN_VXXPERMX because they take 4 arguments and the
1867 	   existing infrastructure only handles three.  */
1868 	if (nargs != 4)
1869 	  {
1870 	    const char *name = fcode == P10_BUILTIN_VEC_XXEVAL ?
1871 	      "__builtin_vec_xxeval":"__builtin_vec_xxpermx";
1872 
1873 	    error ("builtin %qs requires 4 arguments", name);
1874 	    return error_mark_node;
1875 	  }
1876 
1877 	for ( ; desc->code == fcode; desc++)
1878 	  {
1879 	    if (fcode == P10_BUILTIN_VEC_XXEVAL)
1880 	      op3_type = desc->op3;
1881 	    else  /* P10V_BUILTIN_VXXPERMX */
1882 	      op3_type = RS6000_BTI_V16QI;
1883 
1884 	    if (rs6000_builtin_type_compatible (types[0], desc->op1)
1885 		&& rs6000_builtin_type_compatible (types[1], desc->op2)
1886 		&& rs6000_builtin_type_compatible (types[2], desc->op3)
1887 		&& rs6000_builtin_type_compatible (types[2], op3_type)
1888 		&& rs6000_builtin_type_compatible (types[3],
1889 						   RS6000_BTI_UINTSI))
1890 	      {
1891 		if (rs6000_builtin_decls[desc->overloaded_code] == NULL_TREE)
1892 		  unsupported_builtin = true;
1893 		else
1894 		  {
1895 		    result = altivec_build_resolved_builtin (args, n, desc);
1896 		    if (rs6000_builtin_is_supported_p (desc->overloaded_code))
1897 		      return result;
1898 		    /* Allow loop to continue in case a different
1899 		       definition is supported.  */
1900 		    overloaded_code = desc->overloaded_code;
1901 		    unsupported_builtin = true;
1902 		  }
1903 	      }
1904 	  }
1905       }
1906     else
1907       {
1908 	/* For arguments after the last, we have RS6000_BTI_NOT_OPAQUE in
1909 	   the opX fields.  */
1910 	for (; desc->code == fcode; desc++)
1911 	  {
1912 	    if ((desc->op1 == RS6000_BTI_NOT_OPAQUE
1913 		 || rs6000_builtin_type_compatible (types[0], desc->op1))
1914 		&& (desc->op2 == RS6000_BTI_NOT_OPAQUE
1915 		    || rs6000_builtin_type_compatible (types[1], desc->op2))
1916 		&& (desc->op3 == RS6000_BTI_NOT_OPAQUE
1917 		    || rs6000_builtin_type_compatible (types[2], desc->op3)))
1918 	      {
1919 		if (rs6000_builtin_decls[desc->overloaded_code] != NULL_TREE)
1920 		  {
1921 		    result = altivec_build_resolved_builtin (args, n, desc);
1922 		    if (!rs6000_builtin_is_supported_p (desc->overloaded_code))
1923 		      {
1924 			/* Allow loop to continue in case a different
1925 			   definition is supported.  */
1926 			overloaded_code = desc->overloaded_code;
1927 			unsupported_builtin = true;
1928 		      }
1929 		    else
1930 		      return result;
1931 		  }
1932 		else
1933 		  unsupported_builtin = true;
1934 	      }
1935 	  }
1936       }
1937 
1938     if (unsupported_builtin)
1939       {
1940 	const char *name = rs6000_overloaded_builtin_name (fcode);
1941 	if (result != NULL)
1942 	  {
1943 	    const char *internal_name
1944 	      = rs6000_overloaded_builtin_name (overloaded_code);
1945 	    /* An error message making reference to the name of the
1946 	       non-overloaded function has already been issued.  Add
1947 	       clarification of the previous message.  */
1948 	    rich_location richloc (line_table, input_location);
1949 	    inform (&richloc,
1950 		    "overloaded builtin %qs is implemented by builtin %qs",
1951 		    name, internal_name);
1952 	  }
1953 	else
1954 	  error ("%qs is not supported in this compiler configuration", name);
1955 	/* If an error-representing  result tree was returned from
1956 	   altivec_build_resolved_builtin above, use it.  */
1957 	return (result != NULL) ? result : error_mark_node;
1958       }
1959   }
1960  bad:
1961   {
1962     const char *name = rs6000_overloaded_builtin_name (fcode);
1963     error ("invalid parameter combination for AltiVec intrinsic %qs", name);
1964     return error_mark_node;
1965   }
1966 }
1967 
1968 /* Build a tree for a function call to an Altivec non-overloaded builtin.
1969    The overloaded builtin that matched the types and args is described
1970    by DESC.  The N arguments are given in ARGS, respectively.
1971 
1972    Actually the only thing it does is calling fold_convert on ARGS, with
1973    a small exception for vec_{all,any}_{ge,le} predicates. */
1974 
1975 static tree
altivec_build_new_resolved_builtin(tree * args,int n,tree fntype,tree ret_type,rs6000_gen_builtins bif_id,rs6000_gen_builtins ovld_id)1976 altivec_build_new_resolved_builtin (tree *args, int n, tree fntype,
1977 				    tree ret_type,
1978 				    rs6000_gen_builtins bif_id,
1979 				    rs6000_gen_builtins ovld_id)
1980 {
1981   tree argtypes = TYPE_ARG_TYPES (fntype);
1982   tree arg_type[MAX_OVLD_ARGS];
1983   tree fndecl = rs6000_builtin_decls_x[bif_id];
1984 
1985   for (int i = 0; i < n; i++)
1986     {
1987       arg_type[i] = TREE_VALUE (argtypes);
1988       argtypes = TREE_CHAIN (argtypes);
1989     }
1990 
1991   /* The AltiVec overloading implementation is overall gross, but this
1992      is particularly disgusting.  The vec_{all,any}_{ge,le} builtins
1993      are completely different for floating-point vs. integer vector
1994      types, because the former has vcmpgefp, but the latter should use
1995      vcmpgtXX.
1996 
1997      In practice, the second and third arguments are swapped, and the
1998      condition (LT vs. EQ, which is recognizable by bit 1 of the first
1999      argument) is reversed.  Patch the arguments here before building
2000      the resolved CALL_EXPR.  */
2001   if (n == 3
2002       && ovld_id == RS6000_OVLD_VEC_CMPGE_P
2003       && bif_id != RS6000_BIF_VCMPGEFP_P
2004       && bif_id != RS6000_BIF_XVCMPGEDP_P)
2005     {
2006       std::swap (args[1], args[2]);
2007       std::swap (arg_type[1], arg_type[2]);
2008 
2009       args[0] = fold_build2 (BIT_XOR_EXPR, TREE_TYPE (args[0]), args[0],
2010 			     build_int_cst (NULL_TREE, 2));
2011     }
2012 
2013   for (int j = 0; j < n; j++)
2014     args[j] = fully_fold_convert (arg_type[j], args[j]);
2015 
2016   /* If the number of arguments to an overloaded function increases,
2017      we must expand this switch.  */
2018   gcc_assert (MAX_OVLD_ARGS <= 4);
2019 
2020   tree call;
2021   switch (n)
2022     {
2023     case 0:
2024       call = build_call_expr (fndecl, 0);
2025       break;
2026     case 1:
2027       call = build_call_expr (fndecl, 1, args[0]);
2028       break;
2029     case 2:
2030       call = build_call_expr (fndecl, 2, args[0], args[1]);
2031       break;
2032     case 3:
2033       call = build_call_expr (fndecl, 3, args[0], args[1], args[2]);
2034       break;
2035     case 4:
2036       call = build_call_expr (fndecl, 4, args[0], args[1], args[2], args[3]);
2037       break;
2038     default:
2039       gcc_unreachable ();
2040     }
2041   return fold_convert (ret_type, call);
2042 }
2043 
2044 /* Implementation of the resolve_overloaded_builtin target hook, to
2045    support Altivec's overloaded builtins.  FIXME: This code needs
2046    to be brutally factored.  */
2047 
2048 static tree
altivec_resolve_new_overloaded_builtin(location_t loc,tree fndecl,void * passed_arglist)2049 altivec_resolve_new_overloaded_builtin (location_t loc, tree fndecl,
2050 					void *passed_arglist)
2051 {
2052   vec<tree, va_gc> *arglist = static_cast<vec<tree, va_gc> *> (passed_arglist);
2053   unsigned int nargs = vec_safe_length (arglist);
2054   enum rs6000_gen_builtins fcode
2055     = (enum rs6000_gen_builtins) DECL_MD_FUNCTION_CODE (fndecl);
2056   tree fnargs = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
2057   tree types[MAX_OVLD_ARGS];
2058   tree args[MAX_OVLD_ARGS];
2059 
2060   /* Return immediately if this isn't an overload.  */
2061   if (fcode <= RS6000_OVLD_NONE)
2062     return NULL_TREE;
2063 
2064   unsigned int adj_fcode = fcode - RS6000_OVLD_NONE;
2065 
2066   if (TARGET_DEBUG_BUILTIN)
2067     fprintf (stderr, "altivec_resolve_overloaded_builtin, code = %4d, %s\n",
2068 	     (int) fcode, IDENTIFIER_POINTER (DECL_NAME (fndecl)));
2069 
2070   /* vec_lvsl and vec_lvsr are deprecated for use with LE element order.  */
2071   if (fcode == RS6000_OVLD_VEC_LVSL && !BYTES_BIG_ENDIAN)
2072     warning (OPT_Wdeprecated,
2073 	     "%<vec_lvsl%> is deprecated for little endian; use "
2074 	     "assignment for unaligned loads and stores");
2075   else if (fcode == RS6000_OVLD_VEC_LVSR && !BYTES_BIG_ENDIAN)
2076     warning (OPT_Wdeprecated,
2077 	     "%<vec_lvsr%> is deprecated for little endian; use "
2078 	     "assignment for unaligned loads and stores");
2079 
2080   if (fcode == RS6000_OVLD_VEC_MUL)
2081     {
2082       /* vec_mul needs to be special cased because there are no instructions
2083 	 for it for the {un}signed char, {un}signed short, and {un}signed int
2084 	 types.  */
2085       if (nargs != 2)
2086 	{
2087 	  error ("builtin %qs only accepts 2 arguments", "vec_mul");
2088 	  return error_mark_node;
2089 	}
2090 
2091       tree arg0 = (*arglist)[0];
2092       tree arg0_type = TREE_TYPE (arg0);
2093       tree arg1 = (*arglist)[1];
2094       tree arg1_type = TREE_TYPE (arg1);
2095 
2096       /* Both arguments must be vectors and the types must be compatible.  */
2097       if (TREE_CODE (arg0_type) != VECTOR_TYPE)
2098 	goto bad;
2099       if (!lang_hooks.types_compatible_p (arg0_type, arg1_type))
2100 	goto bad;
2101 
2102       switch (TYPE_MODE (TREE_TYPE (arg0_type)))
2103 	{
2104 	  case E_QImode:
2105 	  case E_HImode:
2106 	  case E_SImode:
2107 	  case E_DImode:
2108 	  case E_TImode:
2109 	    {
2110 	      /* For scalar types just use a multiply expression.  */
2111 	      return fold_build2_loc (loc, MULT_EXPR, TREE_TYPE (arg0), arg0,
2112 				      fold_convert (TREE_TYPE (arg0), arg1));
2113 	    }
2114 	  case E_SFmode:
2115 	    {
2116 	      /* For floats use the xvmulsp instruction directly.  */
2117 	      tree call = rs6000_builtin_decls_x[RS6000_BIF_XVMULSP];
2118 	      return build_call_expr (call, 2, arg0, arg1);
2119 	    }
2120 	  case E_DFmode:
2121 	    {
2122 	      /* For doubles use the xvmuldp instruction directly.  */
2123 	      tree call = rs6000_builtin_decls_x[RS6000_BIF_XVMULDP];
2124 	      return build_call_expr (call, 2, arg0, arg1);
2125 	    }
2126 	  /* Other types are errors.  */
2127 	  default:
2128 	    goto bad;
2129 	}
2130     }
2131 
2132   if (fcode == RS6000_OVLD_VEC_CMPNE)
2133     {
2134       /* vec_cmpne needs to be special cased because there are no instructions
2135 	 for it (prior to power 9).  */
2136       if (nargs != 2)
2137 	{
2138 	  error ("builtin %qs only accepts 2 arguments", "vec_cmpne");
2139 	  return error_mark_node;
2140 	}
2141 
2142       tree arg0 = (*arglist)[0];
2143       tree arg0_type = TREE_TYPE (arg0);
2144       tree arg1 = (*arglist)[1];
2145       tree arg1_type = TREE_TYPE (arg1);
2146 
2147       /* Both arguments must be vectors and the types must be compatible.  */
2148       if (TREE_CODE (arg0_type) != VECTOR_TYPE)
2149 	goto bad;
2150       if (!lang_hooks.types_compatible_p (arg0_type, arg1_type))
2151 	goto bad;
2152 
2153       /* Power9 instructions provide the most efficient implementation of
2154 	 ALTIVEC_BUILTIN_VEC_CMPNE if the mode is not DImode or TImode
2155 	 or SFmode or DFmode.  */
2156       if (!TARGET_P9_VECTOR
2157 	  || (TYPE_MODE (TREE_TYPE (arg0_type)) == DImode)
2158 	  || (TYPE_MODE (TREE_TYPE (arg0_type)) == TImode)
2159 	  || (TYPE_MODE (TREE_TYPE (arg0_type)) == SFmode)
2160 	  || (TYPE_MODE (TREE_TYPE (arg0_type)) == DFmode))
2161 	{
2162 	  switch (TYPE_MODE (TREE_TYPE (arg0_type)))
2163 	    {
2164 	      /* vec_cmpneq (va, vb) == vec_nor (vec_cmpeq (va, vb),
2165 		 vec_cmpeq (va, vb)).  */
2166 	      /* Note:  vec_nand also works but opt changes vec_nand's
2167 		 to vec_nor's anyway.  */
2168 	    case E_QImode:
2169 	    case E_HImode:
2170 	    case E_SImode:
2171 	    case E_DImode:
2172 	    case E_TImode:
2173 	    case E_SFmode:
2174 	    case E_DFmode:
2175 	      {
2176 		/* call = vec_cmpeq (va, vb)
2177 		   result = vec_nor (call, call).  */
2178 		vec<tree, va_gc> *params = make_tree_vector ();
2179 		vec_safe_push (params, arg0);
2180 		vec_safe_push (params, arg1);
2181 		tree call = altivec_resolve_new_overloaded_builtin
2182 		  (loc, rs6000_builtin_decls_x[RS6000_OVLD_VEC_CMPEQ],
2183 		   params);
2184 		/* Use save_expr to ensure that operands used more than once
2185 		   that may have side effects (like calls) are only evaluated
2186 		   once.  */
2187 		call = save_expr (call);
2188 		params = make_tree_vector ();
2189 		vec_safe_push (params, call);
2190 		vec_safe_push (params, call);
2191 		return altivec_resolve_new_overloaded_builtin
2192 		  (loc, rs6000_builtin_decls_x[RS6000_OVLD_VEC_NOR], params);
2193 	      }
2194 	      /* Other types are errors.  */
2195 	    default:
2196 	      goto bad;
2197 	    }
2198 	}
2199       /* else, fall through and process the Power9 alternative below */
2200     }
2201 
2202   if (fcode == RS6000_OVLD_VEC_ADDE || fcode == RS6000_OVLD_VEC_SUBE)
2203     {
2204       /* vec_adde needs to be special cased because there is no instruction
2205 	  for the {un}signed int version.  */
2206       if (nargs != 3)
2207 	{
2208 	  const char *name;
2209 	  name = fcode == RS6000_OVLD_VEC_ADDE ? "vec_adde" : "vec_sube";
2210 	  error ("builtin %qs only accepts 3 arguments", name);
2211 	  return error_mark_node;
2212 	}
2213 
2214       tree arg0 = (*arglist)[0];
2215       tree arg0_type = TREE_TYPE (arg0);
2216       tree arg1 = (*arglist)[1];
2217       tree arg1_type = TREE_TYPE (arg1);
2218       tree arg2 = (*arglist)[2];
2219       tree arg2_type = TREE_TYPE (arg2);
2220 
2221       /* All 3 arguments must be vectors of (signed or unsigned) (int or
2222 	 __int128) and the types must be compatible.  */
2223       if (TREE_CODE (arg0_type) != VECTOR_TYPE)
2224 	goto bad;
2225       if (!lang_hooks.types_compatible_p (arg0_type, arg1_type)
2226 	  || !lang_hooks.types_compatible_p (arg1_type, arg2_type))
2227 	goto bad;
2228 
2229       switch (TYPE_MODE (TREE_TYPE (arg0_type)))
2230 	{
2231 	  /* For {un}signed ints,
2232 	     vec_adde (va, vb, carryv) == vec_add (vec_add (va, vb),
2233 						   vec_and (carryv, 1)).
2234 	     vec_sube (va, vb, carryv) == vec_sub (vec_sub (va, vb),
2235 						   vec_and (carryv, 1)).  */
2236 	  case E_SImode:
2237 	    {
2238 	      tree add_sub_builtin;
2239 
2240 	      vec<tree, va_gc> *params = make_tree_vector ();
2241 	      vec_safe_push (params, arg0);
2242 	      vec_safe_push (params, arg1);
2243 
2244 	      if (fcode == RS6000_OVLD_VEC_ADDE)
2245 		add_sub_builtin = rs6000_builtin_decls_x[RS6000_OVLD_VEC_ADD];
2246 	      else
2247 		add_sub_builtin = rs6000_builtin_decls_x[RS6000_OVLD_VEC_SUB];
2248 
2249 	      tree call
2250 		= altivec_resolve_new_overloaded_builtin (loc,
2251 							  add_sub_builtin,
2252 							  params);
2253 	      tree const1 = build_int_cstu (TREE_TYPE (arg0_type), 1);
2254 	      tree ones_vector = build_vector_from_val (arg0_type, const1);
2255 	      tree and_expr = fold_build2_loc (loc, BIT_AND_EXPR, arg0_type,
2256 					       arg2, ones_vector);
2257 	      params = make_tree_vector ();
2258 	      vec_safe_push (params, call);
2259 	      vec_safe_push (params, and_expr);
2260 	      return altivec_resolve_new_overloaded_builtin (loc,
2261 							     add_sub_builtin,
2262 							     params);
2263 	    }
2264 	  /* For {un}signed __int128s use the vaddeuqm/vsubeuqm instruction
2265 	     directly.  */
2266 	  case E_TImode:
2267 	    break;
2268 
2269 	  /* Types other than {un}signed int and {un}signed __int128
2270 		are errors.  */
2271 	  default:
2272 	    goto bad;
2273 	}
2274     }
2275 
2276   if (fcode == RS6000_OVLD_VEC_ADDEC || fcode == RS6000_OVLD_VEC_SUBEC)
2277     {
2278       /* vec_addec and vec_subec needs to be special cased because there is
2279 	 no instruction for the {un}signed int version.  */
2280       if (nargs != 3)
2281 	{
2282 	  const char *name;
2283 	  name = fcode == RS6000_OVLD_VEC_ADDEC ? "vec_addec" : "vec_subec";
2284 	  error ("builtin %qs only accepts 3 arguments", name);
2285 	  return error_mark_node;
2286 	}
2287 
2288       tree arg0 = (*arglist)[0];
2289       tree arg0_type = TREE_TYPE (arg0);
2290       tree arg1 = (*arglist)[1];
2291       tree arg1_type = TREE_TYPE (arg1);
2292       tree arg2 = (*arglist)[2];
2293       tree arg2_type = TREE_TYPE (arg2);
2294 
2295       /* All 3 arguments must be vectors of (signed or unsigned) (int or
2296 	 __int128) and the types must be compatible.  */
2297       if (TREE_CODE (arg0_type) != VECTOR_TYPE)
2298 	goto bad;
2299       if (!lang_hooks.types_compatible_p (arg0_type, arg1_type)
2300 	  || !lang_hooks.types_compatible_p (arg1_type, arg2_type))
2301 	goto bad;
2302 
2303       switch (TYPE_MODE (TREE_TYPE (arg0_type)))
2304 	{
2305 	  /* For {un}signed ints,
2306 	      vec_addec (va, vb, carryv) ==
2307 				vec_or (vec_addc (va, vb),
2308 					vec_addc (vec_add (va, vb),
2309 						  vec_and (carryv, 0x1))).  */
2310 	  case E_SImode:
2311 	    {
2312 	    /* Use save_expr to ensure that operands used more than once
2313 		that may have side effects (like calls) are only evaluated
2314 		once.  */
2315 	    tree as_builtin;
2316 	    tree as_c_builtin;
2317 
2318 	    arg0 = save_expr (arg0);
2319 	    arg1 = save_expr (arg1);
2320 	    vec<tree, va_gc> *params = make_tree_vector ();
2321 	    vec_safe_push (params, arg0);
2322 	    vec_safe_push (params, arg1);
2323 
2324 	    if (fcode == RS6000_OVLD_VEC_ADDEC)
2325 	      as_c_builtin = rs6000_builtin_decls_x[RS6000_OVLD_VEC_ADDC];
2326 	    else
2327 	      as_c_builtin = rs6000_builtin_decls_x[RS6000_OVLD_VEC_SUBC];
2328 
2329 	    tree call1 = altivec_resolve_new_overloaded_builtin (loc,
2330 								 as_c_builtin,
2331 								 params);
2332 	    params = make_tree_vector ();
2333 	    vec_safe_push (params, arg0);
2334 	    vec_safe_push (params, arg1);
2335 
2336 	    if (fcode == RS6000_OVLD_VEC_ADDEC)
2337 	      as_builtin = rs6000_builtin_decls_x[RS6000_OVLD_VEC_ADD];
2338 	    else
2339 	      as_builtin = rs6000_builtin_decls_x[RS6000_OVLD_VEC_SUB];
2340 
2341 	    tree call2 = altivec_resolve_new_overloaded_builtin (loc,
2342 								 as_builtin,
2343 								 params);
2344 	    tree const1 = build_int_cstu (TREE_TYPE (arg0_type), 1);
2345 	    tree ones_vector = build_vector_from_val (arg0_type, const1);
2346 	    tree and_expr = fold_build2_loc (loc, BIT_AND_EXPR, arg0_type,
2347 					     arg2, ones_vector);
2348 	    params = make_tree_vector ();
2349 	    vec_safe_push (params, call2);
2350 	    vec_safe_push (params, and_expr);
2351 	    call2 = altivec_resolve_new_overloaded_builtin (loc, as_c_builtin,
2352 							    params);
2353 	    params = make_tree_vector ();
2354 	    vec_safe_push (params, call1);
2355 	    vec_safe_push (params, call2);
2356 	    tree or_builtin = rs6000_builtin_decls_x[RS6000_OVLD_VEC_OR];
2357 	    return altivec_resolve_new_overloaded_builtin (loc, or_builtin,
2358 							   params);
2359 	    }
2360 	  /* For {un}signed __int128s use the vaddecuq/vsubbecuq
2361 	     instructions.  This occurs through normal processing.  */
2362 	  case E_TImode:
2363 	    break;
2364 
2365 	  /* Types other than {un}signed int and {un}signed __int128
2366 		are errors.  */
2367 	  default:
2368 	    goto bad;
2369 	}
2370     }
2371 
2372   /* For now treat vec_splats and vec_promote as the same.  */
2373   if (fcode == RS6000_OVLD_VEC_SPLATS || fcode == RS6000_OVLD_VEC_PROMOTE)
2374     {
2375       tree type, arg;
2376       int size;
2377       int i;
2378       bool unsigned_p;
2379       vec<constructor_elt, va_gc> *vec;
2380       const char *name;
2381       name = fcode == RS6000_OVLD_VEC_SPLATS ? "vec_splats" : "vec_promote";
2382 
2383       if (fcode == RS6000_OVLD_VEC_SPLATS && nargs != 1)
2384 	{
2385 	  error ("builtin %qs only accepts 1 argument", name);
2386 	  return error_mark_node;
2387 	}
2388       if (fcode == RS6000_OVLD_VEC_PROMOTE && nargs != 2)
2389 	{
2390 	  error ("builtin %qs only accepts 2 arguments", name);
2391 	  return error_mark_node;
2392 	}
2393       /* Ignore promote's element argument.  */
2394       if (fcode == RS6000_OVLD_VEC_PROMOTE
2395 	  && !INTEGRAL_TYPE_P (TREE_TYPE ((*arglist)[1])))
2396 	goto bad;
2397 
2398       arg = (*arglist)[0];
2399       type = TREE_TYPE (arg);
2400       if (!SCALAR_FLOAT_TYPE_P (type)
2401 	  && !INTEGRAL_TYPE_P (type))
2402 	goto bad;
2403       unsigned_p = TYPE_UNSIGNED (type);
2404       switch (TYPE_MODE (type))
2405 	{
2406 	  case E_TImode:
2407 	    type = unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node;
2408 	    size = 1;
2409 	    break;
2410 	  case E_DImode:
2411 	    type = unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node;
2412 	    size = 2;
2413 	    break;
2414 	  case E_SImode:
2415 	    type = unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node;
2416 	    size = 4;
2417 	    break;
2418 	  case E_HImode:
2419 	    type = unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node;
2420 	    size = 8;
2421 	    break;
2422 	  case E_QImode:
2423 	    type = unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node;
2424 	    size = 16;
2425 	    break;
2426 	  case E_SFmode:
2427 	    type = V4SF_type_node;
2428 	    size = 4;
2429 	    break;
2430 	  case E_DFmode:
2431 	    type = V2DF_type_node;
2432 	    size = 2;
2433 	    break;
2434 	  default:
2435 	    goto bad;
2436 	}
2437       arg = save_expr (fold_convert (TREE_TYPE (type), arg));
2438       vec_alloc (vec, size);
2439       for (i = 0; i < size; i++)
2440 	{
2441 	  constructor_elt elt = {NULL_TREE, arg};
2442 	  vec->quick_push (elt);
2443 	}
2444       return build_constructor (type, vec);
2445     }
2446 
2447   /* For now use pointer tricks to do the extraction, unless we are on VSX
2448      extracting a double from a constant offset.  */
2449   if (fcode == RS6000_OVLD_VEC_EXTRACT)
2450     {
2451       tree arg1;
2452       tree arg1_type;
2453       tree arg2;
2454       tree arg1_inner_type;
2455       tree decl, stmt;
2456       tree innerptrtype;
2457       machine_mode mode;
2458 
2459       /* No second argument. */
2460       if (nargs != 2)
2461 	{
2462 	  error ("builtin %qs only accepts 2 arguments", "vec_extract");
2463 	  return error_mark_node;
2464 	}
2465 
2466       arg2 = (*arglist)[1];
2467       arg1 = (*arglist)[0];
2468       arg1_type = TREE_TYPE (arg1);
2469 
2470       if (TREE_CODE (arg1_type) != VECTOR_TYPE)
2471 	goto bad;
2472       if (!INTEGRAL_TYPE_P (TREE_TYPE (arg2)))
2473 	goto bad;
2474 
2475       /* See if we can optimize vec_extracts with the current VSX instruction
2476 	 set.  */
2477       mode = TYPE_MODE (arg1_type);
2478       if (VECTOR_MEM_VSX_P (mode))
2479 
2480 	{
2481 	  tree call = NULL_TREE;
2482 	  int nunits = GET_MODE_NUNITS (mode);
2483 
2484 	  arg2 = fold_for_warn (arg2);
2485 
2486 	  /* If the second argument is an integer constant, generate
2487 	     the built-in code if we can.  We need 64-bit and direct
2488 	     move to extract the small integer vectors.  */
2489 	  if (TREE_CODE (arg2) == INTEGER_CST)
2490 	    {
2491 	      wide_int selector = wi::to_wide (arg2);
2492 	      selector = wi::umod_trunc (selector, nunits);
2493 	      arg2 = wide_int_to_tree (TREE_TYPE (arg2), selector);
2494 	      switch (mode)
2495 		{
2496 		default:
2497 		  break;
2498 
2499 		case E_V1TImode:
2500 		  call = rs6000_builtin_decls_x[RS6000_BIF_VEC_EXT_V1TI];
2501 		  break;
2502 
2503 		case E_V2DFmode:
2504 		  call = rs6000_builtin_decls_x[RS6000_BIF_VEC_EXT_V2DF];
2505 		  break;
2506 
2507 		case E_V2DImode:
2508 		  call = rs6000_builtin_decls_x[RS6000_BIF_VEC_EXT_V2DI];
2509 		  break;
2510 
2511 		case E_V4SFmode:
2512 		  call = rs6000_builtin_decls_x[RS6000_BIF_VEC_EXT_V4SF];
2513 		  break;
2514 
2515 		case E_V4SImode:
2516 		  if (TARGET_DIRECT_MOVE_64BIT)
2517 		    call = rs6000_builtin_decls_x[RS6000_BIF_VEC_EXT_V4SI];
2518 		  break;
2519 
2520 		case E_V8HImode:
2521 		  if (TARGET_DIRECT_MOVE_64BIT)
2522 		    call = rs6000_builtin_decls_x[RS6000_BIF_VEC_EXT_V8HI];
2523 		  break;
2524 
2525 		case E_V16QImode:
2526 		  if (TARGET_DIRECT_MOVE_64BIT)
2527 		    call = rs6000_builtin_decls_x[RS6000_BIF_VEC_EXT_V16QI];
2528 		  break;
2529 		}
2530 	    }
2531 
2532 	  /* If the second argument is variable, we can optimize it if we are
2533 	     generating 64-bit code on a machine with direct move.  */
2534 	  else if (TREE_CODE (arg2) != INTEGER_CST && TARGET_DIRECT_MOVE_64BIT)
2535 	    {
2536 	      switch (mode)
2537 		{
2538 		default:
2539 		  break;
2540 
2541 		case E_V2DFmode:
2542 		  call = rs6000_builtin_decls_x[RS6000_BIF_VEC_EXT_V2DF];
2543 		  break;
2544 
2545 		case E_V2DImode:
2546 		  call = rs6000_builtin_decls_x[RS6000_BIF_VEC_EXT_V2DI];
2547 		  break;
2548 
2549 		case E_V4SFmode:
2550 		  call = rs6000_builtin_decls_x[RS6000_BIF_VEC_EXT_V4SF];
2551 		  break;
2552 
2553 		case E_V4SImode:
2554 		  call = rs6000_builtin_decls_x[RS6000_BIF_VEC_EXT_V4SI];
2555 		  break;
2556 
2557 		case E_V8HImode:
2558 		  call = rs6000_builtin_decls_x[RS6000_BIF_VEC_EXT_V8HI];
2559 		  break;
2560 
2561 		case E_V16QImode:
2562 		  call = rs6000_builtin_decls_x[RS6000_BIF_VEC_EXT_V16QI];
2563 		  break;
2564 		}
2565 	    }
2566 
2567 	  if (call)
2568 	    {
2569 	      tree result = build_call_expr (call, 2, arg1, arg2);
2570 	      /* Coerce the result to vector element type.  May be no-op.  */
2571 	      arg1_inner_type = TREE_TYPE (arg1_type);
2572 	      result = fold_convert (arg1_inner_type, result);
2573 	      return result;
2574 	    }
2575 	}
2576 
2577       /* Build *(((arg1_inner_type*)&(vector type){arg1})+arg2). */
2578       arg1_inner_type = TREE_TYPE (arg1_type);
2579       tree subp = build_int_cst (TREE_TYPE (arg2),
2580 				 TYPE_VECTOR_SUBPARTS (arg1_type) - 1);
2581       arg2 = build_binary_op (loc, BIT_AND_EXPR, arg2, subp, 0);
2582       decl = build_decl (loc, VAR_DECL, NULL_TREE, arg1_type);
2583       DECL_EXTERNAL (decl) = 0;
2584       TREE_PUBLIC (decl) = 0;
2585       DECL_CONTEXT (decl) = current_function_decl;
2586       TREE_USED (decl) = 1;
2587       TREE_TYPE (decl) = arg1_type;
2588       TREE_READONLY (decl) = TYPE_READONLY (arg1_type);
2589       if (c_dialect_cxx ())
2590 	{
2591 	  stmt = build4 (TARGET_EXPR, arg1_type, decl, arg1,
2592 			 NULL_TREE, NULL_TREE);
2593 	  SET_EXPR_LOCATION (stmt, loc);
2594 	}
2595       else
2596 	{
2597 	  DECL_INITIAL (decl) = arg1;
2598 	  stmt = build1 (DECL_EXPR, arg1_type, decl);
2599 	  TREE_ADDRESSABLE (decl) = 1;
2600 	  SET_EXPR_LOCATION (stmt, loc);
2601 	  stmt = build1 (COMPOUND_LITERAL_EXPR, arg1_type, stmt);
2602 	}
2603 
2604       innerptrtype = build_pointer_type (arg1_inner_type);
2605 
2606       stmt = build_unary_op (loc, ADDR_EXPR, stmt, 0);
2607       stmt = convert (innerptrtype, stmt);
2608       stmt = build_binary_op (loc, PLUS_EXPR, stmt, arg2, 1);
2609       stmt = build_indirect_ref (loc, stmt, RO_NULL);
2610 
2611       /* PR83660: We mark this as having side effects so that
2612 	 downstream in fold_build_cleanup_point_expr () it will get a
2613 	 CLEANUP_POINT_EXPR.  If it does not we can run into an ICE
2614 	 later in gimplify_cleanup_point_expr ().  Potentially this
2615 	 causes missed optimization because there actually is no side
2616 	 effect.  */
2617       if (c_dialect_cxx ())
2618 	TREE_SIDE_EFFECTS (stmt) = 1;
2619 
2620       return stmt;
2621     }
2622 
2623   /* For now use pointer tricks to do the insertion, unless we are on VSX
2624      inserting a double to a constant offset.  */
2625   if (fcode == RS6000_OVLD_VEC_INSERT)
2626     {
2627       tree arg0;
2628       tree arg1;
2629       tree arg2;
2630       tree arg1_type;
2631       tree decl, stmt;
2632       machine_mode mode;
2633 
2634       /* No second or third arguments. */
2635       if (nargs != 3)
2636 	{
2637 	  error ("builtin %qs only accepts 3 arguments", "vec_insert");
2638 	  return error_mark_node;
2639 	}
2640 
2641       arg0 = (*arglist)[0];
2642       arg1 = (*arglist)[1];
2643       arg1_type = TREE_TYPE (arg1);
2644       arg2 = fold_for_warn ((*arglist)[2]);
2645 
2646       if (TREE_CODE (arg1_type) != VECTOR_TYPE)
2647 	goto bad;
2648       if (!INTEGRAL_TYPE_P (TREE_TYPE (arg2)))
2649 	goto bad;
2650 
2651       /* If we can use the VSX xxpermdi instruction, use that for insert.  */
2652       mode = TYPE_MODE (arg1_type);
2653       if ((mode == V2DFmode || mode == V2DImode) && VECTOR_UNIT_VSX_P (mode)
2654 	  && TREE_CODE (arg2) == INTEGER_CST)
2655 	{
2656 	  wide_int selector = wi::to_wide (arg2);
2657 	  selector = wi::umod_trunc (selector, 2);
2658 	  tree call = NULL_TREE;
2659 
2660 	  arg2 = wide_int_to_tree (TREE_TYPE (arg2), selector);
2661 	  if (mode == V2DFmode)
2662 	    call = rs6000_builtin_decls_x[RS6000_BIF_VEC_SET_V2DF];
2663 	  else if (mode == V2DImode)
2664 	    call = rs6000_builtin_decls_x[RS6000_BIF_VEC_SET_V2DI];
2665 
2666 	  /* Note, __builtin_vec_insert_<xxx> has vector and scalar types
2667 	     reversed.  */
2668 	  if (call)
2669 	    return build_call_expr (call, 3, arg1, arg0, arg2);
2670 	}
2671       else if (mode == V1TImode && VECTOR_UNIT_VSX_P (mode)
2672 	       && TREE_CODE (arg2) == INTEGER_CST)
2673 	{
2674 	  tree call = rs6000_builtin_decls_x[RS6000_BIF_VEC_SET_V1TI];
2675 	  wide_int selector = wi::zero(32);
2676 
2677 	  arg2 = wide_int_to_tree (TREE_TYPE (arg2), selector);
2678 	  /* Note, __builtin_vec_insert_<xxx> has vector and scalar types
2679 	     reversed.  */
2680 	  return build_call_expr (call, 3, arg1, arg0, arg2);
2681 	}
2682 
2683       /* Build *(((arg1_inner_type*)&(vector type){arg1})+arg2) = arg0 with
2684 	 VIEW_CONVERT_EXPR.  i.e.:
2685 	 D.3192 = v1;
2686 	 _1 = n & 3;
2687 	 VIEW_CONVERT_EXPR<int[4]>(D.3192)[_1] = i;
2688 	 v1 = D.3192;
2689 	 D.3194 = v1;  */
2690       if (TYPE_VECTOR_SUBPARTS (arg1_type) == 1)
2691 	arg2 = build_int_cst (TREE_TYPE (arg2), 0);
2692       else
2693 	arg2 = build_binary_op (loc, BIT_AND_EXPR, arg2,
2694 				build_int_cst (TREE_TYPE (arg2),
2695 					       TYPE_VECTOR_SUBPARTS (arg1_type)
2696 					       - 1), 0);
2697       decl = build_decl (loc, VAR_DECL, NULL_TREE, arg1_type);
2698       DECL_EXTERNAL (decl) = 0;
2699       TREE_PUBLIC (decl) = 0;
2700       DECL_CONTEXT (decl) = current_function_decl;
2701       TREE_USED (decl) = 1;
2702       TREE_TYPE (decl) = arg1_type;
2703       TREE_READONLY (decl) = TYPE_READONLY (arg1_type);
2704       TREE_ADDRESSABLE (decl) = 1;
2705       if (c_dialect_cxx ())
2706 	{
2707 	  stmt = build4 (TARGET_EXPR, arg1_type, decl, arg1,
2708 			 NULL_TREE, NULL_TREE);
2709 	  SET_EXPR_LOCATION (stmt, loc);
2710 	}
2711       else
2712 	{
2713 	  DECL_INITIAL (decl) = arg1;
2714 	  stmt = build1 (DECL_EXPR, arg1_type, decl);
2715 	  SET_EXPR_LOCATION (stmt, loc);
2716 	  stmt = build1 (COMPOUND_LITERAL_EXPR, arg1_type, stmt);
2717 	}
2718 
2719       if (TARGET_VSX)
2720 	{
2721 	  stmt = build_array_ref (loc, stmt, arg2);
2722 	  stmt = fold_build2 (MODIFY_EXPR, TREE_TYPE (arg0), stmt,
2723 			      convert (TREE_TYPE (stmt), arg0));
2724 	  stmt = build2 (COMPOUND_EXPR, arg1_type, stmt, decl);
2725 	}
2726       else
2727 	{
2728 	  tree arg1_inner_type;
2729 	  tree innerptrtype;
2730 	  arg1_inner_type = TREE_TYPE (arg1_type);
2731 	  innerptrtype = build_pointer_type (arg1_inner_type);
2732 
2733 	  stmt = build_unary_op (loc, ADDR_EXPR, stmt, 0);
2734 	  stmt = convert (innerptrtype, stmt);
2735 	  stmt = build_binary_op (loc, PLUS_EXPR, stmt, arg2, 1);
2736 	  stmt = build_indirect_ref (loc, stmt, RO_NULL);
2737 	  stmt = build2 (MODIFY_EXPR, TREE_TYPE (stmt), stmt,
2738 			 convert (TREE_TYPE (stmt), arg0));
2739 	  stmt = build2 (COMPOUND_EXPR, arg1_type, stmt, decl);
2740 	}
2741       return stmt;
2742     }
2743 
2744   unsigned int n;
2745   for (n = 0;
2746        !VOID_TYPE_P (TREE_VALUE (fnargs)) && n < nargs;
2747        fnargs = TREE_CHAIN (fnargs), n++)
2748     {
2749       tree decl_type = TREE_VALUE (fnargs);
2750       tree arg = (*arglist)[n];
2751       tree type;
2752 
2753       if (arg == error_mark_node)
2754 	return error_mark_node;
2755 
2756       if (n >= MAX_OVLD_ARGS)
2757 	abort ();
2758 
2759       arg = default_conversion (arg);
2760 
2761       /* The C++ front-end converts float * to const void * using
2762 	 NOP_EXPR<const void *> (NOP_EXPR<void *> (x)).  */
2763       type = TREE_TYPE (arg);
2764       if (POINTER_TYPE_P (type)
2765 	  && TREE_CODE (arg) == NOP_EXPR
2766 	  && lang_hooks.types_compatible_p (TREE_TYPE (arg),
2767 					    const_ptr_type_node)
2768 	  && lang_hooks.types_compatible_p (TREE_TYPE (TREE_OPERAND (arg, 0)),
2769 					    ptr_type_node))
2770 	{
2771 	  arg = TREE_OPERAND (arg, 0);
2772 	  type = TREE_TYPE (arg);
2773 	}
2774 
2775       /* Remove the const from the pointers to simplify the overload
2776 	 matching further down.  */
2777       if (POINTER_TYPE_P (decl_type)
2778 	  && POINTER_TYPE_P (type)
2779 	  && TYPE_QUALS (TREE_TYPE (type)) != 0)
2780 	{
2781 	  if (TYPE_READONLY (TREE_TYPE (type))
2782 	      && !TYPE_READONLY (TREE_TYPE (decl_type)))
2783 	    warning (0, "passing argument %d of %qE discards const qualifier "
2784 		     "from pointer target type", n + 1, fndecl);
2785 	  type = build_qualified_type (TREE_TYPE (type), 0);
2786 	  type = build_pointer_type (type);
2787 	  arg = fold_convert (type, arg);
2788 	}
2789 
2790       /* For RS6000_OVLD_VEC_LXVL, convert any const * to its non constant
2791 	 equivalent to simplify the overload matching below.  */
2792       if (fcode == RS6000_OVLD_VEC_LXVL)
2793 	{
2794 	  if (POINTER_TYPE_P (type)
2795 	      && TYPE_READONLY (TREE_TYPE (type)))
2796 	    {
2797 	      type = build_qualified_type (TREE_TYPE (type), 0);
2798 	      type = build_pointer_type (type);
2799 	      arg = fold_convert (type, arg);
2800 	    }
2801 	}
2802 
2803       args[n] = arg;
2804       types[n] = type;
2805     }
2806 
2807   /* If the number of arguments did not match the prototype, return NULL
2808      and the generic code will issue the appropriate error message.  */
2809   if (!VOID_TYPE_P (TREE_VALUE (fnargs)) || n < nargs)
2810     return NULL;
2811 
2812   if (fcode == RS6000_OVLD_VEC_STEP)
2813     {
2814       if (TREE_CODE (types[0]) != VECTOR_TYPE)
2815 	goto bad;
2816 
2817       return build_int_cst (NULL_TREE, TYPE_VECTOR_SUBPARTS (types[0]));
2818     }
2819 
2820   {
2821     bool unsupported_builtin = false;
2822     enum rs6000_gen_builtins overloaded_code;
2823     bool supported = false;
2824     ovlddata *instance = rs6000_overload_info[adj_fcode].first_instance;
2825     gcc_assert (instance != NULL);
2826 
2827     /* Need to special case __builtin_cmpb because the overloaded forms
2828        of this function take (unsigned int, unsigned int) or (unsigned
2829        long long int, unsigned long long int).  Since C conventions
2830        allow the respective argument types to be implicitly coerced into
2831        each other, the default handling does not provide adequate
2832        discrimination between the desired forms of the function.  */
2833     if (fcode == RS6000_OVLD_SCAL_CMPB)
2834       {
2835 	machine_mode arg1_mode = TYPE_MODE (types[0]);
2836 	machine_mode arg2_mode = TYPE_MODE (types[1]);
2837 
2838 	if (nargs != 2)
2839 	  {
2840 	    error ("builtin %qs only accepts 2 arguments", "__builtin_cmpb");
2841 	    return error_mark_node;
2842 	  }
2843 
2844 	/* If any supplied arguments are wider than 32 bits, resolve to
2845 	   64-bit variant of built-in function.  */
2846 	if (GET_MODE_PRECISION (arg1_mode) > 32
2847 	    || GET_MODE_PRECISION (arg2_mode) > 32)
2848 	  /* Assure all argument and result types are compatible with
2849 	     the built-in function represented by RS6000_BIF_CMPB.  */
2850 	  overloaded_code = RS6000_BIF_CMPB;
2851 	else
2852 	  /* Assure all argument and result types are compatible with
2853 	     the built-in function represented by RS6000_BIF_CMPB_32.  */
2854 	  overloaded_code = RS6000_BIF_CMPB_32;
2855 
2856 	while (instance && instance->bifid != overloaded_code)
2857 	  instance = instance->next;
2858 
2859 	gcc_assert (instance != NULL);
2860 	tree fntype = rs6000_builtin_info_x[instance->bifid].fntype;
2861 	tree parmtype0 = TREE_VALUE (TYPE_ARG_TYPES (fntype));
2862 	tree parmtype1 = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (fntype)));
2863 
2864 	if (rs6000_new_builtin_type_compatible (types[0], parmtype0)
2865 	    && rs6000_new_builtin_type_compatible (types[1], parmtype1))
2866 	  {
2867 	    if (rs6000_builtin_decl (instance->bifid, false) != error_mark_node
2868 		&& rs6000_new_builtin_is_supported (instance->bifid))
2869 	      {
2870 		tree ret_type = TREE_TYPE (instance->fntype);
2871 		return altivec_build_new_resolved_builtin (args, n, fntype,
2872 							   ret_type,
2873 							   instance->bifid,
2874 							   fcode);
2875 	      }
2876 	    else
2877 	      unsupported_builtin = true;
2878 	  }
2879       }
2880     else if (fcode == RS6000_OVLD_VEC_VSIE)
2881       {
2882 	machine_mode arg1_mode = TYPE_MODE (types[0]);
2883 
2884 	if (nargs != 2)
2885 	  {
2886 	    error ("builtin %qs only accepts 2 arguments",
2887 		   "scalar_insert_exp");
2888 	    return error_mark_node;
2889 	  }
2890 
2891 	/* If supplied first argument is wider than 64 bits, resolve to
2892 	   128-bit variant of built-in function.  */
2893 	if (GET_MODE_PRECISION (arg1_mode) > 64)
2894 	  {
2895 	    /* If first argument is of float variety, choose variant
2896 	       that expects __ieee128 argument.  Otherwise, expect
2897 	       __int128 argument.  */
2898 	    if (GET_MODE_CLASS (arg1_mode) == MODE_FLOAT)
2899 	      overloaded_code = RS6000_BIF_VSIEQPF;
2900 	    else
2901 	      overloaded_code = RS6000_BIF_VSIEQP;
2902 	  }
2903 	else
2904 	  {
2905 	    /* If first argument is of float variety, choose variant
2906 	       that expects double argument.  Otherwise, expect
2907 	       long long int argument.  */
2908 	    if (GET_MODE_CLASS (arg1_mode) == MODE_FLOAT)
2909 	      overloaded_code = RS6000_BIF_VSIEDPF;
2910 	    else
2911 	      overloaded_code = RS6000_BIF_VSIEDP;
2912 	  }
2913 
2914 	while (instance && instance->bifid != overloaded_code)
2915 	  instance = instance->next;
2916 
2917 	gcc_assert (instance != NULL);
2918 	tree fntype = rs6000_builtin_info_x[instance->bifid].fntype;
2919 	tree parmtype0 = TREE_VALUE (TYPE_ARG_TYPES (fntype));
2920 	tree parmtype1 = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (fntype)));
2921 
2922 	if (rs6000_new_builtin_type_compatible (types[0], parmtype0)
2923 	    && rs6000_new_builtin_type_compatible (types[1], parmtype1))
2924 	  {
2925 	    if (rs6000_builtin_decl (instance->bifid, false) != error_mark_node
2926 		&& rs6000_new_builtin_is_supported (instance->bifid))
2927 	      {
2928 		tree ret_type = TREE_TYPE (instance->fntype);
2929 		return altivec_build_new_resolved_builtin (args, n, fntype,
2930 							   ret_type,
2931 							   instance->bifid,
2932 							   fcode);
2933 	      }
2934 	    else
2935 	      unsupported_builtin = true;
2936 	  }
2937       }
2938     else
2939       {
2940 	/* Functions with no arguments can have only one overloaded
2941 	   instance.  */
2942 	gcc_assert (n > 0 || !instance->next);
2943 
2944 	for (; instance != NULL; instance = instance->next)
2945 	  {
2946 	    bool mismatch = false;
2947 	    tree nextparm = TYPE_ARG_TYPES (instance->fntype);
2948 
2949 	    for (unsigned int arg_i = 0;
2950 		 arg_i < nargs && nextparm != NULL;
2951 		 arg_i++)
2952 	      {
2953 		tree parmtype = TREE_VALUE (nextparm);
2954 		if (!rs6000_new_builtin_type_compatible (types[arg_i],
2955 							 parmtype))
2956 		  {
2957 		    mismatch = true;
2958 		    break;
2959 		  }
2960 		nextparm = TREE_CHAIN (nextparm);
2961 	      }
2962 
2963 	    if (mismatch)
2964 	      continue;
2965 
2966 	    supported = rs6000_new_builtin_is_supported (instance->bifid);
2967 	    if (rs6000_builtin_decl (instance->bifid, false) != error_mark_node
2968 		&& supported)
2969 	      {
2970 		tree fntype = rs6000_builtin_info_x[instance->bifid].fntype;
2971 		tree ret_type = TREE_TYPE (instance->fntype);
2972 		return altivec_build_new_resolved_builtin (args, n, fntype,
2973 							   ret_type,
2974 							   instance->bifid,
2975 							   fcode);
2976 	      }
2977 	    else
2978 	      {
2979 		unsupported_builtin = true;
2980 		break;
2981 	      }
2982 	  }
2983       }
2984 
2985     if (unsupported_builtin)
2986       {
2987 	const char *name = rs6000_overload_info[adj_fcode].ovld_name;
2988 	if (!supported)
2989 	  {
2990 	    /* Indicate that the instantiation of the overloaded builtin
2991 	       name is not available with the target flags in effect.  */
2992 	    rs6000_gen_builtins fcode = (rs6000_gen_builtins) instance->bifid;
2993 	    rs6000_invalid_new_builtin (fcode);
2994 	    /* Provide clarity of the relationship between the overload
2995 	       and the instantiation.  */
2996 	    const char *internal_name
2997 	      = rs6000_builtin_info_x[instance->bifid].bifname;
2998 	    rich_location richloc (line_table, input_location);
2999 	    inform (&richloc,
3000 		    "overloaded builtin %qs is implemented by builtin %qs",
3001 		    name, internal_name);
3002 	  }
3003 	else
3004 	  error ("%qs is not supported in this compiler configuration", name);
3005 
3006 	return error_mark_node;
3007       }
3008   }
3009  bad:
3010   {
3011     const char *name = rs6000_overload_info[adj_fcode].ovld_name;
3012     error ("invalid parameter combination for AltiVec intrinsic %qs", name);
3013     return error_mark_node;
3014   }
3015 }
3016