xref: /dragonfly/contrib/gcc-8.0/gcc/internal-fn.c (revision 8af44722)
1 /* Internal functions.
2    Copyright (C) 2011-2018 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "target.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "predict.h"
29 #include "stringpool.h"
30 #include "tree-vrp.h"
31 #include "tree-ssanames.h"
32 #include "expmed.h"
33 #include "memmodel.h"
34 #include "optabs.h"
35 #include "emit-rtl.h"
36 #include "diagnostic-core.h"
37 #include "fold-const.h"
38 #include "internal-fn.h"
39 #include "stor-layout.h"
40 #include "dojump.h"
41 #include "expr.h"
42 #include "stringpool.h"
43 #include "attribs.h"
44 #include "asan.h"
45 #include "ubsan.h"
46 #include "recog.h"
47 #include "builtins.h"
48 #include "optabs-tree.h"
49 #include "gimple-ssa.h"
50 #include "tree-phinodes.h"
51 #include "ssa-iterators.h"
52 
53 /* The names of each internal function, indexed by function number.  */
54 const char *const internal_fn_name_array[] = {
55 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) #CODE,
56 #include "internal-fn.def"
57   "<invalid-fn>"
58 };
59 
60 /* The ECF_* flags of each internal function, indexed by function number.  */
61 const int internal_fn_flags_array[] = {
62 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) FLAGS,
63 #include "internal-fn.def"
64   0
65 };
66 
67 /* Fnspec of each internal function, indexed by function number.  */
68 const_tree internal_fn_fnspec_array[IFN_LAST + 1];
69 
70 void
71 init_internal_fns ()
72 {
73 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
74   if (FNSPEC) internal_fn_fnspec_array[IFN_##CODE] = \
75     build_string ((int) sizeof (FNSPEC), FNSPEC ? FNSPEC : "");
76 #include "internal-fn.def"
77   internal_fn_fnspec_array[IFN_LAST] = 0;
78 }
79 
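/* Illustrative note, not part of the original file: the three X-macro
   expansions above turn each DEF_INTERNAL_FN line of internal-fn.def into
   one entry of each table.  For a hypothetical entry

       DEF_INTERNAL_FN (FOO, ECF_LEAF | ECF_NOTHROW, NULL)

   the three includes would contribute, respectively:

       "FOO",                     to internal_fn_name_array
       ECF_LEAF | ECF_NOTHROW,    to internal_fn_flags_array
       nothing                    in init_internal_fns, because the
                                  `if (FNSPEC)' guard rejects the NULL fnspec,

   leaving internal_fn_fnspec_array[IFN_FOO] as a null tree.  */
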
80 /* Create static initializers for the information returned by
81    direct_internal_fn.  */
82 #define not_direct { -2, -2, false }
83 #define mask_load_direct { -1, 2, false }
84 #define load_lanes_direct { -1, -1, false }
85 #define mask_load_lanes_direct { -1, -1, false }
86 #define gather_load_direct { -1, -1, false }
87 #define mask_store_direct { 3, 2, false }
88 #define store_lanes_direct { 0, 0, false }
89 #define mask_store_lanes_direct { 0, 0, false }
90 #define scatter_store_direct { 3, 3, false }
91 #define unary_direct { 0, 0, true }
92 #define binary_direct { 0, 0, true }
93 #define cond_unary_direct { 1, 1, true }
94 #define cond_binary_direct { 1, 1, true }
95 #define while_direct { 0, 2, false }
96 #define fold_extract_direct { 2, 2, false }
97 #define fold_left_direct { 1, 1, false }
98 
99 const direct_internal_fn_info direct_internal_fn_array[IFN_LAST + 1] = {
100 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) not_direct,
101 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) TYPE##_direct,
102 #define DEF_INTERNAL_SIGNED_OPTAB_FN(CODE, FLAGS, SELECTOR, SIGNED_OPTAB, \
103 				     UNSIGNED_OPTAB, TYPE) TYPE##_direct,
104 #include "internal-fn.def"
105   not_direct
106 };
107 
108 /* ARRAY_TYPE is an array of vector modes.  Return the associated insn
109    for load-lanes-style optab OPTAB, or CODE_FOR_nothing if none.  */
110 
111 static enum insn_code
112 get_multi_vector_move (tree array_type, convert_optab optab)
113 {
114   machine_mode imode;
115   machine_mode vmode;
116 
117   gcc_assert (TREE_CODE (array_type) == ARRAY_TYPE);
118   imode = TYPE_MODE (array_type);
119   vmode = TYPE_MODE (TREE_TYPE (array_type));
120 
121   return convert_optab_handler (optab, imode, vmode);
122 }
123 
124 /* Expand LOAD_LANES call STMT using optab OPTAB.  */
125 
126 static void
127 expand_load_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
128 {
129   struct expand_operand ops[2];
130   tree type, lhs, rhs;
131   rtx target, mem;
132 
133   lhs = gimple_call_lhs (stmt);
134   rhs = gimple_call_arg (stmt, 0);
135   type = TREE_TYPE (lhs);
136 
137   target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
138   mem = expand_normal (rhs);
139 
140   gcc_assert (MEM_P (mem));
141   PUT_MODE (mem, TYPE_MODE (type));
142 
143   create_output_operand (&ops[0], target, TYPE_MODE (type));
144   create_fixed_operand (&ops[1], mem);
145   expand_insn (get_multi_vector_move (type, optab), 2, ops);
146 }
147 
148 /* Expand STORE_LANES call STMT using optab OPTAB.  */
149 
150 static void
151 expand_store_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
152 {
153   struct expand_operand ops[2];
154   tree type, lhs, rhs;
155   rtx target, reg;
156 
157   lhs = gimple_call_lhs (stmt);
158   rhs = gimple_call_arg (stmt, 0);
159   type = TREE_TYPE (rhs);
160 
161   target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
162   reg = expand_normal (rhs);
163 
164   gcc_assert (MEM_P (target));
165   PUT_MODE (target, TYPE_MODE (type));
166 
167   create_fixed_operand (&ops[0], target);
168   create_input_operand (&ops[1], reg, TYPE_MODE (type));
169   expand_insn (get_multi_vector_move (type, optab), 2, ops);
170 }
171 
172 static void
173 expand_ANNOTATE (internal_fn, gcall *)
174 {
175   gcc_unreachable ();
176 }
177 
178 /* This should get expanded in omp_device_lower pass.  */
179 
180 static void
181 expand_GOMP_USE_SIMT (internal_fn, gcall *)
182 {
183   gcc_unreachable ();
184 }
185 
186 /* This should get expanded in omp_device_lower pass.  */
187 
188 static void
189 expand_GOMP_SIMT_ENTER (internal_fn, gcall *)
190 {
191   gcc_unreachable ();
192 }
193 
194 /* Allocate per-lane storage and begin non-uniform execution region.  */
195 
196 static void
197 expand_GOMP_SIMT_ENTER_ALLOC (internal_fn, gcall *stmt)
198 {
199   rtx target;
200   tree lhs = gimple_call_lhs (stmt);
201   if (lhs)
202     target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
203   else
204     target = gen_reg_rtx (Pmode);
205   rtx size = expand_normal (gimple_call_arg (stmt, 0));
206   rtx align = expand_normal (gimple_call_arg (stmt, 1));
207   struct expand_operand ops[3];
208   create_output_operand (&ops[0], target, Pmode);
209   create_input_operand (&ops[1], size, Pmode);
210   create_input_operand (&ops[2], align, Pmode);
211   gcc_assert (targetm.have_omp_simt_enter ());
212   expand_insn (targetm.code_for_omp_simt_enter, 3, ops);
213 }
214 
215 /* Deallocate per-lane storage and leave non-uniform execution region.  */
216 
217 static void
218 expand_GOMP_SIMT_EXIT (internal_fn, gcall *stmt)
219 {
220   gcc_checking_assert (!gimple_call_lhs (stmt));
221   rtx arg = expand_normal (gimple_call_arg (stmt, 0));
222   struct expand_operand ops[1];
223   create_input_operand (&ops[0], arg, Pmode);
224   gcc_assert (targetm.have_omp_simt_exit ());
225   expand_insn (targetm.code_for_omp_simt_exit, 1, ops);
226 }
227 
228 /* Lane index on SIMT targets: thread index in the warp on NVPTX.  On targets
229    without SIMT execution this should be expanded in omp_device_lower pass.  */
230 
231 static void
232 expand_GOMP_SIMT_LANE (internal_fn, gcall *stmt)
233 {
234   tree lhs = gimple_call_lhs (stmt);
235   if (!lhs)
236     return;
237 
238   rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
239   gcc_assert (targetm.have_omp_simt_lane ());
240   emit_insn (targetm.gen_omp_simt_lane (target));
241 }
242 
243 /* This should get expanded in omp_device_lower pass.  */
244 
245 static void
246 expand_GOMP_SIMT_VF (internal_fn, gcall *)
247 {
248   gcc_unreachable ();
249 }
250 
251 /* Lane index of the first SIMT lane that supplies a non-zero argument.
252    This is a SIMT counterpart to GOMP_SIMD_LAST_LANE, used to represent the
253    lane that executed the last iteration for handling OpenMP lastprivate.  */
254 
255 static void
256 expand_GOMP_SIMT_LAST_LANE (internal_fn, gcall *stmt)
257 {
258   tree lhs = gimple_call_lhs (stmt);
259   if (!lhs)
260     return;
261 
262   rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
263   rtx cond = expand_normal (gimple_call_arg (stmt, 0));
264   machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
265   struct expand_operand ops[2];
266   create_output_operand (&ops[0], target, mode);
267   create_input_operand (&ops[1], cond, mode);
268   gcc_assert (targetm.have_omp_simt_last_lane ());
269   expand_insn (targetm.code_for_omp_simt_last_lane, 2, ops);
270 }
271 
272 /* Non-transparent predicate used in SIMT lowering of OpenMP "ordered".  */
273 
274 static void
275 expand_GOMP_SIMT_ORDERED_PRED (internal_fn, gcall *stmt)
276 {
277   tree lhs = gimple_call_lhs (stmt);
278   if (!lhs)
279     return;
280 
281   rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
282   rtx ctr = expand_normal (gimple_call_arg (stmt, 0));
283   machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
284   struct expand_operand ops[2];
285   create_output_operand (&ops[0], target, mode);
286   create_input_operand (&ops[1], ctr, mode);
287   gcc_assert (targetm.have_omp_simt_ordered ());
288   expand_insn (targetm.code_for_omp_simt_ordered, 2, ops);
289 }
290 
291 /* "Or" boolean reduction across SIMT lanes: return non-zero in all lanes if
292    any lane supplies a non-zero argument.  */
293 
294 static void
295 expand_GOMP_SIMT_VOTE_ANY (internal_fn, gcall *stmt)
296 {
297   tree lhs = gimple_call_lhs (stmt);
298   if (!lhs)
299     return;
300 
301   rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
302   rtx cond = expand_normal (gimple_call_arg (stmt, 0));
303   machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
304   struct expand_operand ops[2];
305   create_output_operand (&ops[0], target, mode);
306   create_input_operand (&ops[1], cond, mode);
307   gcc_assert (targetm.have_omp_simt_vote_any ());
308   expand_insn (targetm.code_for_omp_simt_vote_any, 2, ops);
309 }
310 
311 /* Exchange between SIMT lanes with a "butterfly" pattern: source lane index
312    is destination lane index XOR given offset.  */
313 
314 static void
315 expand_GOMP_SIMT_XCHG_BFLY (internal_fn, gcall *stmt)
316 {
317   tree lhs = gimple_call_lhs (stmt);
318   if (!lhs)
319     return;
320 
321   rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
322   rtx src = expand_normal (gimple_call_arg (stmt, 0));
323   rtx idx = expand_normal (gimple_call_arg (stmt, 1));
324   machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
325   struct expand_operand ops[3];
326   create_output_operand (&ops[0], target, mode);
327   create_input_operand (&ops[1], src, mode);
328   create_input_operand (&ops[2], idx, SImode);
329   gcc_assert (targetm.have_omp_simt_xchg_bfly ());
330   expand_insn (targetm.code_for_omp_simt_xchg_bfly, 3, ops);
331 }
332 
333 /* Exchange between SIMT lanes according to given source lane index.  */
334 
335 static void
336 expand_GOMP_SIMT_XCHG_IDX (internal_fn, gcall *stmt)
337 {
338   tree lhs = gimple_call_lhs (stmt);
339   if (!lhs)
340     return;
341 
342   rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
343   rtx src = expand_normal (gimple_call_arg (stmt, 0));
344   rtx idx = expand_normal (gimple_call_arg (stmt, 1));
345   machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
346   struct expand_operand ops[3];
347   create_output_operand (&ops[0], target, mode);
348   create_input_operand (&ops[1], src, mode);
349   create_input_operand (&ops[2], idx, SImode);
350   gcc_assert (targetm.have_omp_simt_xchg_idx ());
351   expand_insn (targetm.code_for_omp_simt_xchg_idx, 3, ops);
352 }
353 
354 /* This should get expanded in adjust_simduid_builtins.  */
355 
356 static void
357 expand_GOMP_SIMD_LANE (internal_fn, gcall *)
358 {
359   gcc_unreachable ();
360 }
361 
362 /* This should get expanded in adjust_simduid_builtins.  */
363 
364 static void
365 expand_GOMP_SIMD_VF (internal_fn, gcall *)
366 {
367   gcc_unreachable ();
368 }
369 
370 /* This should get expanded in adjust_simduid_builtins.  */
371 
372 static void
373 expand_GOMP_SIMD_LAST_LANE (internal_fn, gcall *)
374 {
375   gcc_unreachable ();
376 }
377 
378 /* This should get expanded in adjust_simduid_builtins.  */
379 
380 static void
381 expand_GOMP_SIMD_ORDERED_START (internal_fn, gcall *)
382 {
383   gcc_unreachable ();
384 }
385 
386 /* This should get expanded in adjust_simduid_builtins.  */
387 
388 static void
389 expand_GOMP_SIMD_ORDERED_END (internal_fn, gcall *)
390 {
391   gcc_unreachable ();
392 }
393 
394 /* This should get expanded in the sanopt pass.  */
395 
396 static void
397 expand_UBSAN_NULL (internal_fn, gcall *)
398 {
399   gcc_unreachable ();
400 }
401 
402 /* This should get expanded in the sanopt pass.  */
403 
404 static void
405 expand_UBSAN_BOUNDS (internal_fn, gcall *)
406 {
407   gcc_unreachable ();
408 }
409 
410 /* This should get expanded in the sanopt pass.  */
411 
412 static void
413 expand_UBSAN_VPTR (internal_fn, gcall *)
414 {
415   gcc_unreachable ();
416 }
417 
418 /* This should get expanded in the sanopt pass.  */
419 
420 static void
421 expand_UBSAN_PTR (internal_fn, gcall *)
422 {
423   gcc_unreachable ();
424 }
425 
426 /* This should get expanded in the sanopt pass.  */
427 
428 static void
429 expand_UBSAN_OBJECT_SIZE (internal_fn, gcall *)
430 {
431   gcc_unreachable ();
432 }
433 
434 /* This should get expanded in the sanopt pass.  */
435 
436 static void
437 expand_ASAN_CHECK (internal_fn, gcall *)
438 {
439   gcc_unreachable ();
440 }
441 
442 /* This should get expanded in the sanopt pass.  */
443 
444 static void
445 expand_ASAN_MARK (internal_fn, gcall *)
446 {
447   gcc_unreachable ();
448 }
449 
450 /* This should get expanded in the sanopt pass.  */
451 
452 static void
453 expand_ASAN_POISON (internal_fn, gcall *)
454 {
455   gcc_unreachable ();
456 }
457 
458 /* This should get expanded in the sanopt pass.  */
459 
460 static void
461 expand_ASAN_POISON_USE (internal_fn, gcall *)
462 {
463   gcc_unreachable ();
464 }
465 
466 /* This should get expanded in the tsan pass.  */
467 
468 static void
469 expand_TSAN_FUNC_EXIT (internal_fn, gcall *)
470 {
471   gcc_unreachable ();
472 }
473 
474 /* This should get expanded in the lower pass.  */
475 
476 static void
477 expand_FALLTHROUGH (internal_fn, gcall *call)
478 {
479   error_at (gimple_location (call),
480 	    "invalid use of attribute %<fallthrough%>");
481 }
482 
483 /* Return minimum precision needed to represent all values
484    of ARG in SIGNed integral type.  */
485 
486 static int
487 get_min_precision (tree arg, signop sign)
488 {
489   int prec = TYPE_PRECISION (TREE_TYPE (arg));
490   int cnt = 0;
491   signop orig_sign = sign;
492   if (TREE_CODE (arg) == INTEGER_CST)
493     {
494       int p;
495       if (TYPE_SIGN (TREE_TYPE (arg)) != sign)
496 	{
497 	  widest_int w = wi::to_widest (arg);
498 	  w = wi::ext (w, prec, sign);
499 	  p = wi::min_precision (w, sign);
500 	}
501       else
502 	p = wi::min_precision (wi::to_wide (arg), sign);
503       return MIN (p, prec);
504     }
505   while (CONVERT_EXPR_P (arg)
506 	 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
507 	 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
508     {
509       arg = TREE_OPERAND (arg, 0);
510       if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
511 	{
512 	  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
513 	    sign = UNSIGNED;
514 	  else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
515 	    return prec + (orig_sign != sign);
516 	  prec = TYPE_PRECISION (TREE_TYPE (arg));
517 	}
518       if (++cnt > 30)
519 	return prec + (orig_sign != sign);
520     }
521   if (TREE_CODE (arg) != SSA_NAME)
522     return prec + (orig_sign != sign);
523   wide_int arg_min, arg_max;
524   while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
525     {
526       gimple *g = SSA_NAME_DEF_STMT (arg);
527       if (is_gimple_assign (g)
528 	  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
529 	{
530 	  tree t = gimple_assign_rhs1 (g);
531 	  if (INTEGRAL_TYPE_P (TREE_TYPE (t))
532 	      && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
533 	    {
534 	      arg = t;
535 	      if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
536 		{
537 		  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
538 		    sign = UNSIGNED;
539 		  else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
540 		    return prec + (orig_sign != sign);
541 		  prec = TYPE_PRECISION (TREE_TYPE (arg));
542 		}
543 	      if (++cnt > 30)
544 		return prec + (orig_sign != sign);
545 	      continue;
546 	    }
547 	}
548       return prec + (orig_sign != sign);
549     }
550   if (sign == TYPE_SIGN (TREE_TYPE (arg)))
551     {
552       int p1 = wi::min_precision (arg_min, sign);
553       int p2 = wi::min_precision (arg_max, sign);
554       p1 = MAX (p1, p2);
555       prec = MIN (prec, p1);
556     }
557   else if (sign == UNSIGNED && !wi::neg_p (arg_min, SIGNED))
558     {
559       int p = wi::min_precision (arg_max, UNSIGNED);
560       prec = MIN (prec, p);
561     }
562   return prec + (orig_sign != sign);
563 }
564 
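/* Illustrative sketch, not part of the original file: the core of the
   wi::min_precision computation used above, restated for a plain C value so
   the intent of get_min_precision is easier to see.  The function name and
   the use of `long long' are assumptions of this sketch, not GCC API.  */

static int
example_min_precision (long long val, int is_signed)
{
  /* A negative value needs as many bits as its bitwise complement plus a
     sign bit; a non-negative value needs its significant bits, plus a sign
     bit if the type is signed.  */
  unsigned long long x
    = val < 0 ? ~(unsigned long long) val : (unsigned long long) val;
  int bits = 0;
  while (x)
    {
      x >>= 1;
      bits++;
    }
  if (is_signed)
    bits++;
  /* Zero still occupies one bit; exact corner cases may differ slightly
     from the wide-int routine.  */
  return bits ? bits : 1;
}
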
565 /* Helper for expand_*_overflow.  Set the __imag__ part to true
566    (1 except for signed:1 type, in which case store -1).  */
567 
568 static void
569 expand_arith_set_overflow (tree lhs, rtx target)
570 {
571   if (TYPE_PRECISION (TREE_TYPE (TREE_TYPE (lhs))) == 1
572       && !TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs))))
573     write_complex_part (target, constm1_rtx, true);
574   else
575     write_complex_part (target, const1_rtx, true);
576 }
577 
578 /* Helper for expand_*_overflow.  Store RES into the __real__ part
579    of TARGET.  If RES has larger MODE than __real__ part of TARGET,
580    set the __imag__ part to 1 if RES doesn't fit into it.  Similarly
581    if LHS has smaller precision than its mode.  */
582 
583 static void
584 expand_arith_overflow_result_store (tree lhs, rtx target,
585 				    scalar_int_mode mode, rtx res)
586 {
587   scalar_int_mode tgtmode
588     = as_a <scalar_int_mode> (GET_MODE_INNER (GET_MODE (target)));
589   rtx lres = res;
590   if (tgtmode != mode)
591     {
592       rtx_code_label *done_label = gen_label_rtx ();
593       int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
594       lres = convert_modes (tgtmode, mode, res, uns);
595       gcc_assert (GET_MODE_PRECISION (tgtmode) < GET_MODE_PRECISION (mode));
596       do_compare_rtx_and_jump (res, convert_modes (mode, tgtmode, lres, uns),
597 			       EQ, true, mode, NULL_RTX, NULL, done_label,
598 			       profile_probability::very_likely ());
599       expand_arith_set_overflow (lhs, target);
600       emit_label (done_label);
601     }
602   int prec = TYPE_PRECISION (TREE_TYPE (TREE_TYPE (lhs)));
603   int tgtprec = GET_MODE_PRECISION (tgtmode);
604   if (prec < tgtprec)
605     {
606       rtx_code_label *done_label = gen_label_rtx ();
607       int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
608       res = lres;
609       if (uns)
610 	{
611 	  rtx mask
612 	    = immed_wide_int_const (wi::shifted_mask (0, prec, false, tgtprec),
613 				    tgtmode);
614 	  lres = expand_simple_binop (tgtmode, AND, res, mask, NULL_RTX,
615 				      true, OPTAB_LIB_WIDEN);
616 	}
617       else
618 	{
619 	  lres = expand_shift (LSHIFT_EXPR, tgtmode, res, tgtprec - prec,
620 			       NULL_RTX, 1);
621 	  lres = expand_shift (RSHIFT_EXPR, tgtmode, lres, tgtprec - prec,
622 			       NULL_RTX, 0);
623 	}
624       do_compare_rtx_and_jump (res, lres,
625 			       EQ, true, tgtmode, NULL_RTX, NULL, done_label,
626 			       profile_probability::very_likely ());
627       expand_arith_set_overflow (lhs, target);
628       emit_label (done_label);
629     }
630   write_complex_part (target, lres, false);
631 }
632 
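/* Illustrative sketch, not part of the original file: the "does the result
   still fit once truncated to the lhs precision" test that
   expand_arith_overflow_result_store performs above, written out for a
   32-bit container in plain C.  The function name and the assumption of a
   32-bit `unsigned int' are specific to this sketch.  */

static int
example_fits_in_precision (unsigned int res, int prec, int is_unsigned)
{
  const int container_prec = 32;	/* width assumed for unsigned int */
  if (prec <= 0 || prec >= container_prec)
    return 1;
  if (is_unsigned)
    {
      /* Keep only the low PREC bits, as the AND with shifted_mask above
	 does; the value fits iff no higher bit was set.  */
      unsigned int mask = (1u << prec) - 1;
      return (res & mask) == res;
    }
  /* Signed: shift up so bit PREC-1 becomes the container's sign bit, then
     shift back down arithmetically (mirroring the two expand_shift calls
     above); the value fits iff the round trip is lossless.  Relies on the
     usual GCC behaviour of arithmetic right shift on signed int.  */
  int shifted = (int) (res << (container_prec - prec));
  int back = shifted >> (container_prec - prec);
  return (unsigned int) back == res;
}
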
633 /* Helper for expand_*_overflow.  Store RES into TARGET.  */
634 
635 static void
636 expand_ubsan_result_store (rtx target, rtx res)
637 {
638   if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
639     /* If this is a scalar in a register that is stored in a wider mode
640        than the declared mode, compute the result into its declared mode
641        and then convert to the wider mode.  Our value is the computed
642        expression.  */
643     convert_move (SUBREG_REG (target), res, SUBREG_PROMOTED_SIGN (target));
644   else
645     emit_move_insn (target, res);
646 }
647 
648 /* Add sub/add overflow checking to the statement STMT.
649    CODE says whether the operation is +, or -.  */
650    CODE says whether the operation is + or -.  */
651 static void
652 expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
653 			tree arg0, tree arg1, bool unsr_p, bool uns0_p,
654 			bool uns1_p, bool is_ubsan, tree *datap)
655 {
656   rtx res, target = NULL_RTX;
657   tree fn;
658   rtx_code_label *done_label = gen_label_rtx ();
659   rtx_code_label *do_error = gen_label_rtx ();
660   do_pending_stack_adjust ();
661   rtx op0 = expand_normal (arg0);
662   rtx op1 = expand_normal (arg1);
663   scalar_int_mode mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg0));
664   int prec = GET_MODE_PRECISION (mode);
665   rtx sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
666   bool do_xor = false;
667 
668   if (is_ubsan)
669     gcc_assert (!unsr_p && !uns0_p && !uns1_p);
670 
671   if (lhs)
672     {
673       target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
674       if (!is_ubsan)
675 	write_complex_part (target, const0_rtx, true);
676     }
677 
678   /* We assume both operands and result have the same precision
679      here (GET_MODE_BITSIZE (mode)), S stands for signed type
680      with that precision, U for unsigned type with that precision,
681      sgn for unsigned most significant bit in that precision.
682      s1 is signed first operand, u1 is unsigned first operand,
683      s2 is signed second operand, u2 is unsigned second operand,
684      sr is signed result, ur is unsigned result and the following
685      rules say how to compute result (which is always result of
686      the operands as if both were unsigned, cast to the right
687      signedness) and how to compute whether operation overflowed.
688 
689      s1 + s2 -> sr
690 	res = (S) ((U) s1 + (U) s2)
691 	ovf = s2 < 0 ? res > s1 : res < s1 (or jump on overflow)
692      s1 - s2 -> sr
693 	res = (S) ((U) s1 - (U) s2)
694 	ovf = s2 < 0 ? res < s1 : res > s2 (or jump on overflow)
695      u1 + u2 -> ur
696 	res = u1 + u2
697 	ovf = res < u1 (or jump on carry, but RTL opts will handle it)
698      u1 - u2 -> ur
699 	res = u1 - u2
700 	ovf = res > u1 (or jump on carry, but RTL opts will handle it)
701      s1 + u2 -> sr
702 	res = (S) ((U) s1 + u2)
703 	ovf = ((U) res ^ sgn) < u2
704      s1 + u2 -> ur
705 	t1 = (S) (u2 ^ sgn)
706 	t2 = s1 + t1
707 	res = (U) t2 ^ sgn
708 	ovf = t1 < 0 ? t2 > s1 : t2 < s1 (or jump on overflow)
709      s1 - u2 -> sr
710 	res = (S) ((U) s1 - u2)
711 	ovf = u2 > ((U) s1 ^ sgn)
712      s1 - u2 -> ur
713 	res = (U) s1 - u2
714 	ovf = s1 < 0 || u2 > (U) s1
715      u1 - s2 -> sr
716 	res = u1 - (U) s2
717  	ovf = u1 >= ((U) s2 ^ sgn)
718      u1 - s2 -> ur
719 	t1 = u1 ^ sgn
720 	t2 = t1 - (U) s2
721 	res = t2 ^ sgn
722 	ovf = s2 < 0 ? (S) t2 < (S) t1 : (S) t2 > (S) t1 (or jump on overflow)
723      s1 + s2 -> ur
724 	res = (U) s1 + (U) s2
725 	ovf = s2 < 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0)
726      u1 + u2 -> sr
727 	res = (S) (u1 + u2)
728 	ovf = (U) res < u2 || res < 0
729      u1 - u2 -> sr
730 	res = (S) (u1 - u2)
731 	ovf = u1 >= u2 ? res < 0 : res >= 0
732      s1 - s2 -> ur
733 	res = (U) s1 - (U) s2
734 	ovf = s2 >= 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0)  */
735 
736   if (code == PLUS_EXPR && uns0_p && !uns1_p)
737     {
738       /* PLUS_EXPR is commutative, if operand signedness differs,
739 	 canonicalize to the first operand being signed and second
740 	 unsigned to simplify following code.  */
741       std::swap (op0, op1);
742       std::swap (arg0, arg1);
743       uns0_p = false;
744       uns1_p = true;
745     }
746 
747   /* u1 +- u2 -> ur  */
748   if (uns0_p && uns1_p && unsr_p)
749     {
750       insn_code icode = optab_handler (code == PLUS_EXPR ? uaddv4_optab
751                                        : usubv4_optab, mode);
752       if (icode != CODE_FOR_nothing)
753 	{
754 	  struct expand_operand ops[4];
755 	  rtx_insn *last = get_last_insn ();
756 
757 	  res = gen_reg_rtx (mode);
758 	  create_output_operand (&ops[0], res, mode);
759 	  create_input_operand (&ops[1], op0, mode);
760 	  create_input_operand (&ops[2], op1, mode);
761 	  create_fixed_operand (&ops[3], do_error);
762 	  if (maybe_expand_insn (icode, 4, ops))
763 	    {
764 	      last = get_last_insn ();
765 	      if (profile_status_for_fn (cfun) != PROFILE_ABSENT
766 		  && JUMP_P (last)
767 		  && any_condjump_p (last)
768 		  && !find_reg_note (last, REG_BR_PROB, 0))
769 		add_reg_br_prob_note (last,
770 				      profile_probability::very_unlikely ());
771 	      emit_jump (done_label);
772 	      goto do_error_label;
773 	    }
774 
775 	  delete_insns_since (last);
776 	}
777 
778       /* Compute the operation.  On RTL level, the addition is always
779 	 unsigned.  */
780       res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
781 			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
782       rtx tem = op0;
783       /* For PLUS_EXPR, the operation is commutative, so we can pick
784 	 operand to compare against.  For prec <= BITS_PER_WORD, I think
785 	 preferring REG operand is better over CONST_INT, because
786 	 the CONST_INT might enlarge the instruction or CSE would need
787 	 to figure out we'd already loaded it into a register before.
788 	 For prec > BITS_PER_WORD, I think CONST_INT might be more beneficial,
789 	 as then the multi-word comparison can be perhaps simplified.  */
790       if (code == PLUS_EXPR
791 	  && (prec <= BITS_PER_WORD
792 	      ? (CONST_SCALAR_INT_P (op0) && REG_P (op1))
793 	      : CONST_SCALAR_INT_P (op1)))
794 	tem = op1;
795       do_compare_rtx_and_jump (res, tem, code == PLUS_EXPR ? GEU : LEU,
796 			       true, mode, NULL_RTX, NULL, done_label,
797 			       profile_probability::very_likely ());
798       goto do_error_label;
799     }
800 
801   /* s1 +- u2 -> sr  */
802   if (!uns0_p && uns1_p && !unsr_p)
803     {
804       /* Compute the operation.  On RTL level, the addition is always
805 	 unsigned.  */
806       res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
807 			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
808       rtx tem = expand_binop (mode, add_optab,
809 			      code == PLUS_EXPR ? res : op0, sgn,
810 			      NULL_RTX, false, OPTAB_LIB_WIDEN);
811       do_compare_rtx_and_jump (tem, op1, GEU, true, mode, NULL_RTX, NULL,
812 			       done_label, profile_probability::very_likely ());
813       goto do_error_label;
814     }
815 
816   /* s1 + u2 -> ur  */
817   if (code == PLUS_EXPR && !uns0_p && uns1_p && unsr_p)
818     {
819       op1 = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
820 			  OPTAB_LIB_WIDEN);
821       /* As we've changed op1, we have to avoid using the value range
822 	 for the original argument.  */
823       arg1 = error_mark_node;
824       do_xor = true;
825       goto do_signed;
826     }
827 
828   /* u1 - s2 -> ur  */
829   if (code == MINUS_EXPR && uns0_p && !uns1_p && unsr_p)
830     {
831       op0 = expand_binop (mode, add_optab, op0, sgn, NULL_RTX, false,
832 			  OPTAB_LIB_WIDEN);
833       /* As we've changed op0, we have to avoid using the value range
834 	 for the original argument.  */
835       arg0 = error_mark_node;
836       do_xor = true;
837       goto do_signed;
838     }
839 
840   /* s1 - u2 -> ur  */
841   if (code == MINUS_EXPR && !uns0_p && uns1_p && unsr_p)
842     {
843       /* Compute the operation.  On RTL level, the subtraction is always
844 	 unsigned.  */
845       res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
846 			  OPTAB_LIB_WIDEN);
847       int pos_neg = get_range_pos_neg (arg0);
848       if (pos_neg == 2)
849 	/* If ARG0 is known to be always negative, this is always overflow.  */
850 	emit_jump (do_error);
851       else if (pos_neg == 3)
852 	/* If ARG0 is not known to be always positive, check at runtime.  */
853 	do_compare_rtx_and_jump (op0, const0_rtx, LT, false, mode, NULL_RTX,
854 				 NULL, do_error, profile_probability::very_unlikely ());
855       do_compare_rtx_and_jump (op1, op0, LEU, true, mode, NULL_RTX, NULL,
856 			       done_label, profile_probability::very_likely ());
857       goto do_error_label;
858     }
859 
860   /* u1 - s2 -> sr  */
861   if (code == MINUS_EXPR && uns0_p && !uns1_p && !unsr_p)
862     {
863       /* Compute the operation.  On RTL level, the subtraction is always
864 	 unsigned.  */
865       res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
866 			  OPTAB_LIB_WIDEN);
867       rtx tem = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
868 			      OPTAB_LIB_WIDEN);
869       do_compare_rtx_and_jump (op0, tem, LTU, true, mode, NULL_RTX, NULL,
870 			       done_label, profile_probability::very_likely ());
871       goto do_error_label;
872     }
873 
874   /* u1 + u2 -> sr  */
875   if (code == PLUS_EXPR && uns0_p && uns1_p && !unsr_p)
876     {
877       /* Compute the operation.  On RTL level, the addition is always
878 	 unsigned.  */
879       res = expand_binop (mode, add_optab, op0, op1, NULL_RTX, false,
880 			  OPTAB_LIB_WIDEN);
881       do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
882 			       NULL, do_error, profile_probability::very_unlikely ());
883       rtx tem = op1;
884       /* The operation is commutative, so we can pick operand to compare
885 	 against.  For prec <= BITS_PER_WORD, I think preferring REG operand
886 	 is better over CONST_INT, because the CONST_INT might enlarge the
887 	 instruction or CSE would need to figure out we'd already loaded it
888 	 into a register before.  For prec > BITS_PER_WORD, I think CONST_INT
889 	 might be more beneficial, as then the multi-word comparison can be
890 	 perhaps simplified.  */
891       if (prec <= BITS_PER_WORD
892 	  ? (CONST_SCALAR_INT_P (op1) && REG_P (op0))
893 	  : CONST_SCALAR_INT_P (op0))
894 	tem = op0;
895       do_compare_rtx_and_jump (res, tem, GEU, true, mode, NULL_RTX, NULL,
896 			       done_label, profile_probability::very_likely ());
897       goto do_error_label;
898     }
899 
900   /* s1 +- s2 -> ur  */
901   if (!uns0_p && !uns1_p && unsr_p)
902     {
903       /* Compute the operation.  On RTL level, the addition is always
904 	 unsigned.  */
905       res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
906 			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
907       int pos_neg = get_range_pos_neg (arg1);
908       if (code == PLUS_EXPR)
909 	{
910 	  int pos_neg0 = get_range_pos_neg (arg0);
911 	  if (pos_neg0 != 3 && pos_neg == 3)
912 	    {
913 	      std::swap (op0, op1);
914 	      pos_neg = pos_neg0;
915 	    }
916 	}
917       rtx tem;
918       if (pos_neg != 3)
919 	{
920 	  tem = expand_binop (mode, ((pos_neg == 1) ^ (code == MINUS_EXPR))
921 				    ? and_optab : ior_optab,
922 			      op0, res, NULL_RTX, false, OPTAB_LIB_WIDEN);
923 	  do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL,
924 				   NULL, done_label, profile_probability::very_likely ());
925 	}
926       else
927 	{
928 	  rtx_code_label *do_ior_label = gen_label_rtx ();
929 	  do_compare_rtx_and_jump (op1, const0_rtx,
930 				   code == MINUS_EXPR ? GE : LT, false, mode,
931 				   NULL_RTX, NULL, do_ior_label,
932 				   profile_probability::even ());
933 	  tem = expand_binop (mode, and_optab, op0, res, NULL_RTX, false,
934 			      OPTAB_LIB_WIDEN);
935 	  do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
936 				   NULL, done_label, profile_probability::very_likely ());
937 	  emit_jump (do_error);
938 	  emit_label (do_ior_label);
939 	  tem = expand_binop (mode, ior_optab, op0, res, NULL_RTX, false,
940 			      OPTAB_LIB_WIDEN);
941 	  do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
942 				   NULL, done_label, profile_probability::very_likely ());
943 	}
944       goto do_error_label;
945     }
946 
947   /* u1 - u2 -> sr  */
948   if (code == MINUS_EXPR && uns0_p && uns1_p && !unsr_p)
949     {
950       /* Compute the operation.  On RTL level, the subtraction is always
951 	 unsigned.  */
952       res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
953 			  OPTAB_LIB_WIDEN);
954       rtx_code_label *op0_geu_op1 = gen_label_rtx ();
955       do_compare_rtx_and_jump (op0, op1, GEU, true, mode, NULL_RTX, NULL,
956 			       op0_geu_op1, profile_probability::even ());
957       do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
958 			       NULL, done_label, profile_probability::very_likely ());
959       emit_jump (do_error);
960       emit_label (op0_geu_op1);
961       do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
962 			       NULL, done_label, profile_probability::very_likely ());
963       goto do_error_label;
964     }
965 
966   gcc_assert (!uns0_p && !uns1_p && !unsr_p);
967 
968   /* s1 +- s2 -> sr  */
969  do_signed:
970   {
971     insn_code icode = optab_handler (code == PLUS_EXPR ? addv4_optab
972 				     : subv4_optab, mode);
973     if (icode != CODE_FOR_nothing)
974       {
975 	struct expand_operand ops[4];
976 	rtx_insn *last = get_last_insn ();
977 
978 	res = gen_reg_rtx (mode);
979 	create_output_operand (&ops[0], res, mode);
980 	create_input_operand (&ops[1], op0, mode);
981 	create_input_operand (&ops[2], op1, mode);
982 	create_fixed_operand (&ops[3], do_error);
983 	if (maybe_expand_insn (icode, 4, ops))
984 	  {
985 	    last = get_last_insn ();
986 	    if (profile_status_for_fn (cfun) != PROFILE_ABSENT
987 		&& JUMP_P (last)
988 		&& any_condjump_p (last)
989 		&& !find_reg_note (last, REG_BR_PROB, 0))
990 	      add_reg_br_prob_note (last,
991 				    profile_probability::very_unlikely ());
992 	    emit_jump (done_label);
993 	    goto do_error_label;
994 	  }
995 
996 	delete_insns_since (last);
997       }
998 
999     /* Compute the operation.  On RTL level, the addition is always
1000        unsigned.  */
1001     res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
1002 			op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
1003 
1004     /* If we can prove that one of the arguments (for MINUS_EXPR only
1005        the second operand, as subtraction is not commutative) is always
1006        non-negative or always negative, we can do just one comparison
1007        and conditional jump.  */
1008     int pos_neg = get_range_pos_neg (arg1);
1009     if (code == PLUS_EXPR)
1010       {
1011 	int pos_neg0 = get_range_pos_neg (arg0);
1012 	if (pos_neg0 != 3 && pos_neg == 3)
1013 	  {
1014 	    std::swap (op0, op1);
1015 	    pos_neg = pos_neg0;
1016 	  }
1017       }
1018 
1019     /* Addition overflows if and only if the two operands have the same sign,
1020        and the result has the opposite sign.  Subtraction overflows if and
1021        only if the two operands have opposite sign, and the subtrahend has
1022        the same sign as the result.  Here 0 is counted as positive.  */
1023     if (pos_neg == 3)
1024       {
1025 	/* Compute op0 ^ op1 (operands have opposite sign).  */
1026         rtx op_xor = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
1027 				   OPTAB_LIB_WIDEN);
1028 
1029 	/* Compute res ^ op1 (result and 2nd operand have opposite sign).  */
1030 	rtx res_xor = expand_binop (mode, xor_optab, res, op1, NULL_RTX, false,
1031 				    OPTAB_LIB_WIDEN);
1032 
1033 	rtx tem;
1034 	if (code == PLUS_EXPR)
1035 	  {
1036 	    /* Compute (res ^ op1) & ~(op0 ^ op1).  */
1037 	    tem = expand_unop (mode, one_cmpl_optab, op_xor, NULL_RTX, false);
1038 	    tem = expand_binop (mode, and_optab, res_xor, tem, NULL_RTX, false,
1039 				OPTAB_LIB_WIDEN);
1040 	  }
1041 	else
1042 	  {
1043 	    /* Compute (op0 ^ op1) & ~(res ^ op1).  */
1044 	    tem = expand_unop (mode, one_cmpl_optab, res_xor, NULL_RTX, false);
1045 	    tem = expand_binop (mode, and_optab, op_xor, tem, NULL_RTX, false,
1046 				OPTAB_LIB_WIDEN);
1047 	  }
1048 
1049 	/* No overflow if the result has the sign bit cleared.  */
1050 	do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
1051 				 NULL, done_label, profile_probability::very_likely ());
1052       }
1053 
1054     /* Compare the result of the operation with the first operand.
1055        No overflow for addition if second operand is positive and result
1056        is larger or second operand is negative and result is smaller.
1057        Likewise for subtraction with sign of second operand flipped.  */
1058     else
1059       do_compare_rtx_and_jump (res, op0,
1060 			       (pos_neg == 1) ^ (code == MINUS_EXPR) ? GE : LE,
1061 			       false, mode, NULL_RTX, NULL, done_label,
1062 			       profile_probability::very_likely ());
1063   }
1064 
1065  do_error_label:
1066   emit_label (do_error);
1067   if (is_ubsan)
1068     {
1069       /* Expand the ubsan builtin call.  */
1070       push_temp_slots ();
1071       fn = ubsan_build_overflow_builtin (code, loc, TREE_TYPE (arg0),
1072 					 arg0, arg1, datap);
1073       expand_normal (fn);
1074       pop_temp_slots ();
1075       do_pending_stack_adjust ();
1076     }
1077   else if (lhs)
1078     expand_arith_set_overflow (lhs, target);
1079 
1080   /* We're done.  */
1081   emit_label (done_label);
1082 
1083   if (lhs)
1084     {
1085       if (is_ubsan)
1086 	expand_ubsan_result_store (target, res);
1087       else
1088 	{
1089 	  if (do_xor)
1090 	    res = expand_binop (mode, add_optab, res, sgn, NULL_RTX, false,
1091 				OPTAB_LIB_WIDEN);
1092 
1093 	  expand_arith_overflow_result_store (lhs, target, mode, res);
1094 	}
1095     }
1096 }
1097 
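/* Illustrative sketch, not part of the original file: two rows of the rule
   table in expand_addsub_overflow, written as plain 32-bit C so the checks
   are easier to follow.  The "u1 + u2 -> ur" rule flags overflow when the
   wrapped result dips below an operand; the "s1 + s2 -> sr" rule is the
   sign-bit test used in the pos_neg == 3 path above.  Function names are
   specific to this sketch.  */

static int
example_uadd_overflows (unsigned int u1, unsigned int u2, unsigned int *res)
{
  *res = u1 + u2;		/* unsigned addition simply wraps */
  return *res < u1;		/* ovf = res < u1 */
}

static int
example_sadd_overflows (int s1, int s2, int *res)
{
  /* Do the arithmetic in unsigned, as the expander does on the RTL level,
     to avoid undefined signed overflow in C.  */
  unsigned int u1 = (unsigned int) s1, u2 = (unsigned int) s2;
  unsigned int ur = u1 + u2;
  *res = (int) ur;
  /* (res ^ op1) & ~(op0 ^ op1) has its sign bit set exactly when the
     operands agree in sign and the result does not.  */
  return (int) ((ur ^ u2) & ~(u1 ^ u2)) < 0;
}
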
1098 /* Add negate overflow checking to the statement STMT.  */
1099 
1100 static void
1101 expand_neg_overflow (location_t loc, tree lhs, tree arg1, bool is_ubsan,
1102 		     tree *datap)
1103 {
1104   rtx res, op1;
1105   tree fn;
1106   rtx_code_label *done_label, *do_error;
1107   rtx target = NULL_RTX;
1108 
1109   done_label = gen_label_rtx ();
1110   do_error = gen_label_rtx ();
1111 
1112   do_pending_stack_adjust ();
1113   op1 = expand_normal (arg1);
1114 
1115   scalar_int_mode mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg1));
1116   if (lhs)
1117     {
1118       target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
1119       if (!is_ubsan)
1120 	write_complex_part (target, const0_rtx, true);
1121     }
1122 
1123   enum insn_code icode = optab_handler (negv3_optab, mode);
1124   if (icode != CODE_FOR_nothing)
1125     {
1126       struct expand_operand ops[3];
1127       rtx_insn *last = get_last_insn ();
1128 
1129       res = gen_reg_rtx (mode);
1130       create_output_operand (&ops[0], res, mode);
1131       create_input_operand (&ops[1], op1, mode);
1132       create_fixed_operand (&ops[2], do_error);
1133       if (maybe_expand_insn (icode, 3, ops))
1134 	{
1135 	  last = get_last_insn ();
1136 	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
1137 	      && JUMP_P (last)
1138 	      && any_condjump_p (last)
1139 	      && !find_reg_note (last, REG_BR_PROB, 0))
1140 	    add_reg_br_prob_note (last,
1141 				  profile_probability::very_unlikely ());
1142 	  emit_jump (done_label);
1143         }
1144       else
1145 	{
1146 	  delete_insns_since (last);
1147 	  icode = CODE_FOR_nothing;
1148 	}
1149     }
1150 
1151   if (icode == CODE_FOR_nothing)
1152     {
1153       /* Compute the operation.  On RTL level, the negation is always
1154 	 unsigned.  */
1155       res = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
1156 
1157       /* Compare the operand with the most negative value.  */
1158       rtx minv = expand_normal (TYPE_MIN_VALUE (TREE_TYPE (arg1)));
1159       do_compare_rtx_and_jump (op1, minv, NE, true, mode, NULL_RTX, NULL,
1160 			       done_label, profile_probability::very_likely ());
1161     }
1162 
1163   emit_label (do_error);
1164   if (is_ubsan)
1165     {
1166       /* Expand the ubsan builtin call.  */
1167       push_temp_slots ();
1168       fn = ubsan_build_overflow_builtin (NEGATE_EXPR, loc, TREE_TYPE (arg1),
1169 					 arg1, NULL_TREE, datap);
1170       expand_normal (fn);
1171       pop_temp_slots ();
1172       do_pending_stack_adjust ();
1173     }
1174   else if (lhs)
1175     expand_arith_set_overflow (lhs, target);
1176 
1177   /* We're done.  */
1178   emit_label (done_label);
1179 
1180   if (lhs)
1181     {
1182       if (is_ubsan)
1183 	expand_ubsan_result_store (target, res);
1184       else
1185 	expand_arith_overflow_result_store (lhs, target, mode, res);
1186     }
1187 }
1188 
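/* Illustrative sketch, not part of the original file: the fallback test in
   expand_neg_overflow above.  Two's-complement negation overflows only for
   the most negative value, so without a negv pattern it suffices to compare
   the operand against TYPE_MIN_VALUE.  In 32-bit C terms (the function name
   is specific to this sketch):  */

static int
example_neg_overflows (int s1, int *res)
{
  *res = (int) -(unsigned int) s1;	/* negate without signed-overflow UB */
  return s1 == (-2147483647 - 1);	/* only INT_MIN overflows */
}
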
1189 /* Return true if UNS WIDEN_MULT_EXPR with result mode WMODE and operand
1190    mode MODE can be expanded without using a libcall.  */
1191 
1192 static bool
1193 can_widen_mult_without_libcall (scalar_int_mode wmode, scalar_int_mode mode,
1194 				rtx op0, rtx op1, bool uns)
1195 {
1196   if (find_widening_optab_handler (umul_widen_optab, wmode, mode)
1197       != CODE_FOR_nothing)
1198     return true;
1199 
1200   if (find_widening_optab_handler (smul_widen_optab, wmode, mode)
1201       != CODE_FOR_nothing)
1202     return true;
1203 
1204   rtx_insn *last = get_last_insn ();
1205   if (CONSTANT_P (op0))
1206     op0 = convert_modes (wmode, mode, op0, uns);
1207   else
1208     op0 = gen_raw_REG (wmode, LAST_VIRTUAL_REGISTER + 1);
1209   if (CONSTANT_P (op1))
1210     op1 = convert_modes (wmode, mode, op1, uns);
1211   else
1212     op1 = gen_raw_REG (wmode, LAST_VIRTUAL_REGISTER + 2);
1213   rtx ret = expand_mult (wmode, op0, op1, NULL_RTX, uns, true);
1214   delete_insns_since (last);
1215   return ret != NULL_RTX;
1216 }
1217 
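/* Illustrative sketch, not part of the original file: the double-width
   check that expand_mul_overflow uses below when a twice-wider mode is
   available (the "twoxwider" path): multiply in the wide mode and inspect
   the high half, which must be zero for an unsigned multiply and the sign
   extension of the low half for a signed one.  Shown here for 16x16->32 bit
   operands, assuming 16-bit `short' and 32-bit `int'; function names are
   specific to this sketch.  */

static int
example_umul16_overflows (unsigned short u1, unsigned short u2,
			  unsigned short *res)
{
  unsigned int wide = (unsigned int) u1 * (unsigned int) u2;
  *res = (unsigned short) wide;
  return (wide >> 16) != 0;	/* non-zero high half -> overflow */
}

static int
example_smul16_overflows (short s1, short s2, short *res)
{
  int wide = (int) s1 * (int) s2;
  *res = (short) wide;
  /* High half must equal the sign extension of the low half; relies on
     arithmetic right shift of a negative int, as GCC provides.  */
  return (wide >> 16) != (*res < 0 ? -1 : 0);
}
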
1218 /* Add mul overflow checking to the statement STMT.  */
1219 
1220 static void
1221 expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
1222 		     bool unsr_p, bool uns0_p, bool uns1_p, bool is_ubsan,
1223 		     tree *datap)
1224 {
1225   rtx res, op0, op1;
1226   tree fn, type;
1227   rtx_code_label *done_label, *do_error;
1228   rtx target = NULL_RTX;
1229   signop sign;
1230   enum insn_code icode;
1231 
1232   done_label = gen_label_rtx ();
1233   do_error = gen_label_rtx ();
1234 
1235   do_pending_stack_adjust ();
1236   op0 = expand_normal (arg0);
1237   op1 = expand_normal (arg1);
1238 
1239   scalar_int_mode mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg0));
1240   bool uns = unsr_p;
1241   if (lhs)
1242     {
1243       target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
1244       if (!is_ubsan)
1245 	write_complex_part (target, const0_rtx, true);
1246     }
1247 
1248   if (is_ubsan)
1249     gcc_assert (!unsr_p && !uns0_p && !uns1_p);
1250 
1251   /* We assume both operands and result have the same precision
1252      here (GET_MODE_BITSIZE (mode)), S stands for signed type
1253      with that precision, U for unsigned type with that precision,
1254      sgn for unsigned most significant bit in that precision.
1255      s1 is signed first operand, u1 is unsigned first operand,
1256      s2 is signed second operand, u2 is unsigned second operand,
1257      sr is signed result, ur is unsigned result and the following
1258      rules say how to compute result (which is always result of
1259      the operands as if both were unsigned, cast to the right
1260      signedness) and how to compute whether operation overflowed.
1261      main_ovf (false) stands for jump on signed multiplication
1262      overflow or the main algorithm with uns == false.
1263      main_ovf (true) stands for jump on unsigned multiplication
1264      overflow or the main algorithm with uns == true.
1265 
1266      s1 * s2 -> sr
1267 	res = (S) ((U) s1 * (U) s2)
1268 	ovf = main_ovf (false)
1269      u1 * u2 -> ur
1270 	res = u1 * u2
1271 	ovf = main_ovf (true)
1272      s1 * u2 -> ur
1273 	res = (U) s1 * u2
1274 	ovf = (s1 < 0 && u2) || main_ovf (true)
1275      u1 * u2 -> sr
1276 	res = (S) (u1 * u2)
1277 	ovf = res < 0 || main_ovf (true)
1278      s1 * u2 -> sr
1279 	res = (S) ((U) s1 * u2)
1280 	ovf = (S) u2 >= 0 ? main_ovf (false)
1281 			  : (s1 != 0 && (s1 != -1 || u2 != (U) res))
1282      s1 * s2 -> ur
1283 	t1 = (s1 & s2) < 0 ? (-(U) s1) : ((U) s1)
1284 	t2 = (s1 & s2) < 0 ? (-(U) s2) : ((U) s2)
1285 	res = t1 * t2
1286 	ovf = (s1 ^ s2) < 0 ? (s1 && s2) : main_ovf (true)  */
1287 
1288   if (uns0_p && !uns1_p)
1289     {
1290       /* Multiplication is commutative, if operand signedness differs,
1291 	 canonicalize to the first operand being signed and second
1292 	 unsigned to simplify following code.  */
1293       std::swap (op0, op1);
1294       std::swap (arg0, arg1);
1295       uns0_p = false;
1296       uns1_p = true;
1297     }
1298 
1299   int pos_neg0 = get_range_pos_neg (arg0);
1300   int pos_neg1 = get_range_pos_neg (arg1);
1301 
1302   /* s1 * u2 -> ur  */
1303   if (!uns0_p && uns1_p && unsr_p)
1304     {
1305       switch (pos_neg0)
1306 	{
1307 	case 1:
1308 	  /* If s1 is non-negative, just perform normal u1 * u2 -> ur.  */
1309 	  goto do_main;
1310 	case 2:
1311 	  /* If s1 is negative, avoid the main code, just multiply and
1312 	     signal overflow if op1 is not 0.  */
1313 	  struct separate_ops ops;
1314 	  ops.code = MULT_EXPR;
1315 	  ops.type = TREE_TYPE (arg1);
1316 	  ops.op0 = make_tree (ops.type, op0);
1317 	  ops.op1 = make_tree (ops.type, op1);
1318 	  ops.op2 = NULL_TREE;
1319 	  ops.location = loc;
1320 	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1321 	  do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
1322 				   NULL, done_label, profile_probability::very_likely ());
1323 	  goto do_error_label;
1324 	case 3:
1325 	  rtx_code_label *do_main_label;
1326 	  do_main_label = gen_label_rtx ();
1327 	  do_compare_rtx_and_jump (op0, const0_rtx, GE, false, mode, NULL_RTX,
1328 				   NULL, do_main_label, profile_probability::very_likely ());
1329 	  do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
1330 				   NULL, do_main_label, profile_probability::very_likely ());
1331 	  expand_arith_set_overflow (lhs, target);
1332 	  emit_label (do_main_label);
1333 	  goto do_main;
1334 	default:
1335 	  gcc_unreachable ();
1336 	}
1337     }
1338 
1339   /* u1 * u2 -> sr  */
1340   if (uns0_p && uns1_p && !unsr_p)
1341     {
1342       uns = true;
1343       /* Rest of handling of this case after res is computed.  */
1344       goto do_main;
1345     }
1346 
1347   /* s1 * u2 -> sr  */
1348   if (!uns0_p && uns1_p && !unsr_p)
1349     {
1350       switch (pos_neg1)
1351 	{
1352 	case 1:
1353 	  goto do_main;
1354 	case 2:
1355 	  /* If (S) u2 is negative (i.e. u2 is larger than the maximum of S),
1356 	     avoid the main code, just multiply and signal overflow
1357 	     unless 0 * u2 or -1 * ((U) Smin).  */
1358 	  struct separate_ops ops;
1359 	  ops.code = MULT_EXPR;
1360 	  ops.type = TREE_TYPE (arg1);
1361 	  ops.op0 = make_tree (ops.type, op0);
1362 	  ops.op1 = make_tree (ops.type, op1);
1363 	  ops.op2 = NULL_TREE;
1364 	  ops.location = loc;
1365 	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1366 	  do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
1367 				   NULL, done_label, profile_probability::very_likely ());
1368 	  do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
1369 				   NULL, do_error, profile_probability::very_unlikely ());
1370 	  int prec;
1371 	  prec = GET_MODE_PRECISION (mode);
1372 	  rtx sgn;
1373 	  sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
1374 	  do_compare_rtx_and_jump (op1, sgn, EQ, true, mode, NULL_RTX,
1375 				   NULL, done_label, profile_probability::very_likely ());
1376 	  goto do_error_label;
1377 	case 3:
1378 	  /* Rest of handling of this case after res is computed.  */
1379 	  goto do_main;
1380 	default:
1381 	  gcc_unreachable ();
1382 	}
1383     }
1384 
1385   /* s1 * s2 -> ur  */
1386   if (!uns0_p && !uns1_p && unsr_p)
1387     {
1388       rtx tem, tem2;
1389       switch (pos_neg0 | pos_neg1)
1390 	{
1391 	case 1: /* Both operands known to be non-negative.  */
1392 	  goto do_main;
1393 	case 2: /* Both operands known to be negative.  */
1394 	  op0 = expand_unop (mode, neg_optab, op0, NULL_RTX, false);
1395 	  op1 = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
1396 	  /* Avoid looking at arg0/arg1 ranges, as we've changed
1397 	     the arguments.  */
1398 	  arg0 = error_mark_node;
1399 	  arg1 = error_mark_node;
1400 	  goto do_main;
1401 	case 3:
1402 	  if ((pos_neg0 ^ pos_neg1) == 3)
1403 	    {
1404 	      /* If one operand is known to be negative and the other
1405 		 non-negative, this always overflows, unless the non-negative
1406 		 one is 0.  Just do normal multiply and set overflow
1407 		 unless one of the operands is 0.  */
1408 	      struct separate_ops ops;
1409 	      ops.code = MULT_EXPR;
1410 	      ops.type
1411 		= build_nonstandard_integer_type (GET_MODE_PRECISION (mode),
1412 						  1);
1413 	      ops.op0 = make_tree (ops.type, op0);
1414 	      ops.op1 = make_tree (ops.type, op1);
1415 	      ops.op2 = NULL_TREE;
1416 	      ops.location = loc;
1417 	      res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1418 	      tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
1419 				  OPTAB_LIB_WIDEN);
1420 	      do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode,
1421 				       NULL_RTX, NULL, done_label,
1422 				       profile_probability::very_likely ());
1423 	      goto do_error_label;
1424 	    }
1425 	  /* The general case, do all the needed comparisons at runtime.  */
1426 	  rtx_code_label *do_main_label, *after_negate_label;
1427 	  rtx rop0, rop1;
1428 	  rop0 = gen_reg_rtx (mode);
1429 	  rop1 = gen_reg_rtx (mode);
1430 	  emit_move_insn (rop0, op0);
1431 	  emit_move_insn (rop1, op1);
1432 	  op0 = rop0;
1433 	  op1 = rop1;
1434 	  do_main_label = gen_label_rtx ();
1435 	  after_negate_label = gen_label_rtx ();
1436 	  tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
1437 			      OPTAB_LIB_WIDEN);
1438 	  do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
1439 				   NULL, after_negate_label, profile_probability::very_likely ());
1440 	  /* Both arguments negative here, negate them and continue with
1441 	     normal unsigned overflow checking multiplication.  */
1442 	  emit_move_insn (op0, expand_unop (mode, neg_optab, op0,
1443 					    NULL_RTX, false));
1444 	  emit_move_insn (op1, expand_unop (mode, neg_optab, op1,
1445 					    NULL_RTX, false));
1446 	  /* Avoid looking at arg0/arg1 ranges, as we might have changed
1447 	     the arguments.  */
1448 	  arg0 = error_mark_node;
1449 	  arg1 = error_mark_node;
1450 	  emit_jump (do_main_label);
1451 	  emit_label (after_negate_label);
1452 	  tem2 = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
1453 			       OPTAB_LIB_WIDEN);
1454 	  do_compare_rtx_and_jump (tem2, const0_rtx, GE, false, mode, NULL_RTX,
1455 				   NULL, do_main_label, profile_probability::very_likely ());
1456 	  /* One argument is negative here, the other positive.  This
1457 	     always overflows, unless one of the arguments is 0.  But
1458 	     if e.g. s2 is 0, (U) s1 * 0 doesn't overflow, whatever s1
1459 	     is, thus we can keep do_main code oring in overflow as is.  */
1460 	  do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode, NULL_RTX,
1461 				   NULL, do_main_label, profile_probability::very_likely ());
1462 	  expand_arith_set_overflow (lhs, target);
1463 	  emit_label (do_main_label);
1464 	  goto do_main;
1465 	default:
1466 	  gcc_unreachable ();
1467 	}
1468     }
1469 
1470  do_main:
1471   type = build_nonstandard_integer_type (GET_MODE_PRECISION (mode), uns);
1472   sign = uns ? UNSIGNED : SIGNED;
1473   icode = optab_handler (uns ? umulv4_optab : mulv4_optab, mode);
1474   if (uns
1475       && (integer_pow2p (arg0) || integer_pow2p (arg1))
1476       && (optimize_insn_for_speed_p () || icode == CODE_FOR_nothing))
1477     {
1478       /* Optimize unsigned multiplication by power of 2 constant
1479 	 using 2 shifts, one for result, one to extract the shifted
1480 	 out bits to see if they are all zero.
1481 	 Don't do this if optimizing for size and we have umulv4_optab,
1482 	 in that case assume multiplication will be shorter.
1483 	 This is heuristics based on the single target that provides
1484 	 umulv4 right now (i?86/x86_64), if further targets add it, this
1485 	 might need to be revisited.
1486 	 Cases where both operands are constant should be folded already
1487 	 during GIMPLE, and cases where one operand is constant but not
1488 	 power of 2 are questionable, either the WIDEN_MULT_EXPR case
1489 	 below can be done without multiplication, just by shifts and adds,
1490 	 or we'd need to divide the result (and hope it actually doesn't
1491 	 really divide nor multiply) and compare the result of the division
1492 	 with the original operand.  */
1493       rtx opn0 = op0;
1494       rtx opn1 = op1;
1495       tree argn0 = arg0;
1496       tree argn1 = arg1;
1497       if (integer_pow2p (arg0))
1498 	{
1499 	  std::swap (opn0, opn1);
1500 	  std::swap (argn0, argn1);
1501 	}
1502       int cnt = tree_log2 (argn1);
1503       if (cnt >= 0 && cnt < GET_MODE_PRECISION (mode))
1504 	{
1505 	  rtx upper = const0_rtx;
1506 	  res = expand_shift (LSHIFT_EXPR, mode, opn0, cnt, NULL_RTX, uns);
1507 	  if (cnt != 0)
1508 	    upper = expand_shift (RSHIFT_EXPR, mode, opn0,
1509 				  GET_MODE_PRECISION (mode) - cnt,
1510 				  NULL_RTX, uns);
1511 	  do_compare_rtx_and_jump (upper, const0_rtx, EQ, true, mode,
1512 				   NULL_RTX, NULL, done_label,
1513 				   profile_probability::very_likely ());
1514 	  goto do_error_label;
1515 	}
1516     }
1517   if (icode != CODE_FOR_nothing)
1518     {
1519       struct expand_operand ops[4];
1520       rtx_insn *last = get_last_insn ();
1521 
1522       res = gen_reg_rtx (mode);
1523       create_output_operand (&ops[0], res, mode);
1524       create_input_operand (&ops[1], op0, mode);
1525       create_input_operand (&ops[2], op1, mode);
1526       create_fixed_operand (&ops[3], do_error);
1527       if (maybe_expand_insn (icode, 4, ops))
1528 	{
1529 	  last = get_last_insn ();
1530 	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
1531 	      && JUMP_P (last)
1532 	      && any_condjump_p (last)
1533 	      && !find_reg_note (last, REG_BR_PROB, 0))
1534 	    add_reg_br_prob_note (last,
1535 				  profile_probability::very_unlikely ());
1536 	  emit_jump (done_label);
1537         }
1538       else
1539 	{
1540 	  delete_insns_since (last);
1541 	  icode = CODE_FOR_nothing;
1542 	}
1543     }
1544 
1545   if (icode == CODE_FOR_nothing)
1546     {
1547       struct separate_ops ops;
1548       int prec = GET_MODE_PRECISION (mode);
1549       scalar_int_mode hmode, wmode;
1550       ops.op0 = make_tree (type, op0);
1551       ops.op1 = make_tree (type, op1);
1552       ops.op2 = NULL_TREE;
1553       ops.location = loc;
1554 
1555       /* Optimize unsigned overflow check where we don't use the
1556 	 multiplication result, just whether overflow happened.
1557 	 If we can do MULT_HIGHPART_EXPR, that followed by
1558 	 comparison of the result against zero is cheapest.
1559 	 We'll still compute res, but it should be DCEd later.  */
1560       use_operand_p use;
1561       gimple *use_stmt;
1562       if (!is_ubsan
1563 	  && lhs
1564 	  && uns
1565 	  && !(uns0_p && uns1_p && !unsr_p)
1566 	  && can_mult_highpart_p (mode, uns) == 1
1567 	  && single_imm_use (lhs, &use, &use_stmt)
1568 	  && is_gimple_assign (use_stmt)
1569 	  && gimple_assign_rhs_code (use_stmt) == IMAGPART_EXPR)
1570 	goto highpart;
1571 
1572       if (GET_MODE_2XWIDER_MODE (mode).exists (&wmode)
1573 	  && targetm.scalar_mode_supported_p (wmode)
1574 	  && can_widen_mult_without_libcall (wmode, mode, op0, op1, uns))
1575 	{
1576 	twoxwider:
1577 	  ops.code = WIDEN_MULT_EXPR;
1578 	  ops.type
1579 	    = build_nonstandard_integer_type (GET_MODE_PRECISION (wmode), uns);
1580 
1581 	  res = expand_expr_real_2 (&ops, NULL_RTX, wmode, EXPAND_NORMAL);
1582 	  rtx hipart = expand_shift (RSHIFT_EXPR, wmode, res, prec,
1583 				     NULL_RTX, uns);
1584 	  hipart = convert_modes (mode, wmode, hipart, uns);
1585 	  res = convert_modes (mode, wmode, res, uns);
1586 	  if (uns)
1587 	    /* For the unsigned multiplication, there was overflow if
1588 	       HIPART is non-zero.  */
1589 	    do_compare_rtx_and_jump (hipart, const0_rtx, EQ, true, mode,
1590 				     NULL_RTX, NULL, done_label,
1591 				     profile_probability::very_likely ());
1592 	  else
1593 	    {
1594 	      rtx signbit = expand_shift (RSHIFT_EXPR, mode, res, prec - 1,
1595 					  NULL_RTX, 0);
1596 	      /* RES is low half of the double width result, HIPART
1597 		 the high half.  There was overflow if
1598 		 HIPART is different from RES < 0 ? -1 : 0.  */
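	      /* For example, with signed 8-bit operands, -100 * 3 = -300
		 = 0xfed4 as a 16-bit value: RES is 0xd4 = -44 < 0, so the
		 expected high half is -1, but HIPART is 0xfe = -2, hence
		 overflow is reported.  */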
1599 	      do_compare_rtx_and_jump (signbit, hipart, EQ, true, mode,
1600 				       NULL_RTX, NULL, done_label,
1601 				       profile_probability::very_likely ());
1602 	    }
1603 	}
1604       else if (can_mult_highpart_p (mode, uns) == 1)
1605 	{
1606 	highpart:
1607 	  ops.code = MULT_HIGHPART_EXPR;
1608 	  ops.type = type;
1609 
1610 	  rtx hipart = expand_expr_real_2 (&ops, NULL_RTX, mode,
1611 					   EXPAND_NORMAL);
1612 	  ops.code = MULT_EXPR;
1613 	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1614 	  if (uns)
1615 	    /* For the unsigned multiplication, there was overflow if
1616 	       HIPART is non-zero.  */
1617 	    do_compare_rtx_and_jump (hipart, const0_rtx, EQ, true, mode,
1618 				     NULL_RTX, NULL, done_label,
1619 				     profile_probability::very_likely ());
1620 	  else
1621 	    {
1622 	      rtx signbit = expand_shift (RSHIFT_EXPR, mode, res, prec - 1,
1623 					  NULL_RTX, 0);
1624 	      /* RES is low half of the double width result, HIPART
1625 		 the high half.  There was overflow if
1626 		 HIPART is different from RES < 0 ? -1 : 0.  */
1627 	      do_compare_rtx_and_jump (signbit, hipart, EQ, true, mode,
1628 				       NULL_RTX, NULL, done_label,
1629 				       profile_probability::very_likely ());
1630 	    }
1631 
1632 	}
1633       else if (int_mode_for_size (prec / 2, 1).exists (&hmode)
1634 	       && 2 * GET_MODE_PRECISION (hmode) == prec)
1635 	{
1636 	  rtx_code_label *large_op0 = gen_label_rtx ();
1637 	  rtx_code_label *small_op0_large_op1 = gen_label_rtx ();
1638 	  rtx_code_label *one_small_one_large = gen_label_rtx ();
1639 	  rtx_code_label *both_ops_large = gen_label_rtx ();
1640 	  rtx_code_label *after_hipart_neg = uns ? NULL : gen_label_rtx ();
1641 	  rtx_code_label *after_lopart_neg = uns ? NULL : gen_label_rtx ();
1642 	  rtx_code_label *do_overflow = gen_label_rtx ();
1643 	  rtx_code_label *hipart_different = uns ? NULL : gen_label_rtx ();
1644 
1645 	  unsigned int hprec = GET_MODE_PRECISION (hmode);
1646 	  rtx hipart0 = expand_shift (RSHIFT_EXPR, mode, op0, hprec,
1647 				      NULL_RTX, uns);
1648 	  hipart0 = convert_modes (hmode, mode, hipart0, uns);
1649 	  rtx lopart0 = convert_modes (hmode, mode, op0, uns);
1650 	  rtx signbit0 = const0_rtx;
1651 	  if (!uns)
1652 	    signbit0 = expand_shift (RSHIFT_EXPR, hmode, lopart0, hprec - 1,
1653 				     NULL_RTX, 0);
1654 	  rtx hipart1 = expand_shift (RSHIFT_EXPR, mode, op1, hprec,
1655 				      NULL_RTX, uns);
1656 	  hipart1 = convert_modes (hmode, mode, hipart1, uns);
1657 	  rtx lopart1 = convert_modes (hmode, mode, op1, uns);
1658 	  rtx signbit1 = const0_rtx;
1659 	  if (!uns)
1660 	    signbit1 = expand_shift (RSHIFT_EXPR, hmode, lopart1, hprec - 1,
1661 				     NULL_RTX, 0);
1662 
1663 	  res = gen_reg_rtx (mode);
1664 
1665 	  /* True if op0 resp. op1 are known to be in the range of
1666 	     halfstype.  */
1667 	  bool op0_small_p = false;
1668 	  bool op1_small_p = false;
1669 	  /* True if op0 resp. op1 are known to have all zeros or all ones
1670 	     in the upper half of bits, but are not known to be
1671 	     op{0,1}_small_p.  */
1672 	  bool op0_medium_p = false;
1673 	  bool op1_medium_p = false;
1674 	  /* -1 if op{0,1} is known to be negative, 0 if it is known to be
1675 	     nonnegative, 1 if unknown.  */
1676 	  int op0_sign = 1;
1677 	  int op1_sign = 1;
1678 
1679 	  if (pos_neg0 == 1)
1680 	    op0_sign = 0;
1681 	  else if (pos_neg0 == 2)
1682 	    op0_sign = -1;
1683 	  if (pos_neg1 == 1)
1684 	    op1_sign = 0;
1685 	  else if (pos_neg1 == 2)
1686 	    op1_sign = -1;
1687 
1688 	  unsigned int mprec0 = prec;
1689 	  if (arg0 != error_mark_node)
1690 	    mprec0 = get_min_precision (arg0, sign);
1691 	  if (mprec0 <= hprec)
1692 	    op0_small_p = true;
1693 	  else if (!uns && mprec0 <= hprec + 1)
1694 	    op0_medium_p = true;
1695 	  unsigned int mprec1 = prec;
1696 	  if (arg1 != error_mark_node)
1697 	    mprec1 = get_min_precision (arg1, sign);
1698 	  if (mprec1 <= hprec)
1699 	    op1_small_p = true;
1700 	  else if (!uns && mprec1 <= hprec + 1)
1701 	    op1_medium_p = true;
1702 
1703 	  int smaller_sign = 1;
1704 	  int larger_sign = 1;
1705 	  if (op0_small_p)
1706 	    {
1707 	      smaller_sign = op0_sign;
1708 	      larger_sign = op1_sign;
1709 	    }
1710 	  else if (op1_small_p)
1711 	    {
1712 	      smaller_sign = op1_sign;
1713 	      larger_sign = op0_sign;
1714 	    }
1715 	  else if (op0_sign == op1_sign)
1716 	    {
1717 	      smaller_sign = op0_sign;
1718 	      larger_sign = op0_sign;
1719 	    }
1720 
1721 	  if (!op0_small_p)
1722 	    do_compare_rtx_and_jump (signbit0, hipart0, NE, true, hmode,
1723 				     NULL_RTX, NULL, large_op0,
1724 				     profile_probability::unlikely ());
1725 
1726 	  if (!op1_small_p)
1727 	    do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
1728 				     NULL_RTX, NULL, small_op0_large_op1,
1729 				     profile_probability::unlikely ());
1730 
1731 	  /* If both op0 and op1 are sign (!uns) or zero (uns) extended from
1732 	     hmode to mode, the multiplication will never overflow.  We can
1733 	     do just one hmode x hmode => mode widening multiplication.  */
1734 	  rtx lopart0s = lopart0, lopart1s = lopart1;
1735 	  if (GET_CODE (lopart0) == SUBREG)
1736 	    {
1737 	      lopart0s = shallow_copy_rtx (lopart0);
1738 	      SUBREG_PROMOTED_VAR_P (lopart0s) = 1;
1739 	      SUBREG_PROMOTED_SET (lopart0s, uns ? SRP_UNSIGNED : SRP_SIGNED);
1740 	    }
1741 	  if (GET_CODE (lopart1) == SUBREG)
1742 	    {
1743 	      lopart1s = shallow_copy_rtx (lopart1);
1744 	      SUBREG_PROMOTED_VAR_P (lopart1s) = 1;
1745 	      SUBREG_PROMOTED_SET (lopart1s, uns ? SRP_UNSIGNED : SRP_SIGNED);
1746 	    }
1747 	  tree halfstype = build_nonstandard_integer_type (hprec, uns);
1748 	  ops.op0 = make_tree (halfstype, lopart0s);
1749 	  ops.op1 = make_tree (halfstype, lopart1s);
1750 	  ops.code = WIDEN_MULT_EXPR;
1751 	  ops.type = type;
1752 	  rtx thisres
1753 	    = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1754 	  emit_move_insn (res, thisres);
1755 	  emit_jump (done_label);
1756 
1757 	  emit_label (small_op0_large_op1);
1758 
1759 	  /* If op0 is sign (!uns) or zero (uns) extended from hmode to mode,
1760 	     but op1 is not, just swap the arguments and handle it as op1
1761 	     sign/zero extended, op0 not.  */
1762 	  rtx larger = gen_reg_rtx (mode);
1763 	  rtx hipart = gen_reg_rtx (hmode);
1764 	  rtx lopart = gen_reg_rtx (hmode);
1765 	  emit_move_insn (larger, op1);
1766 	  emit_move_insn (hipart, hipart1);
1767 	  emit_move_insn (lopart, lopart0);
1768 	  emit_jump (one_small_one_large);
1769 
1770 	  emit_label (large_op0);
1771 
1772 	  if (!op1_small_p)
1773 	    do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
1774 				     NULL_RTX, NULL, both_ops_large,
1775 				     profile_probability::unlikely ());
1776 
1777 	  /* If op1 is sign (!uns) or zero (uns) extended from hmode to mode,
1778 	     but op0 is not, prepare larger, hipart and lopart pseudos and
1779 	     handle it together with small_op0_large_op1.  */
1780 	  emit_move_insn (larger, op0);
1781 	  emit_move_insn (hipart, hipart0);
1782 	  emit_move_insn (lopart, lopart1);
1783 
1784 	  emit_label (one_small_one_large);
1785 
1786 	  /* lopart is the low part of the operand that is sign extended
1787 	     to mode, larger is the other operand, hipart is the
1788 	     high part of larger, and lopart0 and lopart1 are the low parts
1789 	     of both operands.
1790 	     We perform lopart0 * lopart1 and lopart * hipart widening
1791 	     multiplications.  */
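	  /* In the unsigned case this amounts to, with h == hprec:
	     the product is (lopart * hipart) << h  +  lopart0 * lopart1.
	     LOXHI below accumulates lopart * hipart plus the high half of
	     lopart0 * lopart1, and the result fits in MODE iff the high
	     half of LOXHI ends up zero.  */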
1792 	  tree halfutype = build_nonstandard_integer_type (hprec, 1);
1793 	  ops.op0 = make_tree (halfutype, lopart0);
1794 	  ops.op1 = make_tree (halfutype, lopart1);
1795 	  rtx lo0xlo1
1796 	    = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1797 
1798 	  ops.op0 = make_tree (halfutype, lopart);
1799 	  ops.op1 = make_tree (halfutype, hipart);
1800 	  rtx loxhi = gen_reg_rtx (mode);
1801 	  rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1802 	  emit_move_insn (loxhi, tem);
1803 
1804 	  if (!uns)
1805 	    {
1806 	      /* if (hipart < 0) loxhi -= lopart << (bitsize / 2);  */
1807 	      if (larger_sign == 0)
1808 		emit_jump (after_hipart_neg);
1809 	      else if (larger_sign != -1)
1810 		do_compare_rtx_and_jump (hipart, const0_rtx, GE, false, hmode,
1811 					 NULL_RTX, NULL, after_hipart_neg,
1812 					 profile_probability::even ());
1813 
1814 	      tem = convert_modes (mode, hmode, lopart, 1);
1815 	      tem = expand_shift (LSHIFT_EXPR, mode, tem, hprec, NULL_RTX, 1);
1816 	      tem = expand_simple_binop (mode, MINUS, loxhi, tem, NULL_RTX,
1817 					 1, OPTAB_WIDEN);
1818 	      emit_move_insn (loxhi, tem);
1819 
1820 	      emit_label (after_hipart_neg);
1821 
1822 	      /* if (lopart < 0) loxhi -= larger;  */
1823 	      if (smaller_sign == 0)
1824 		emit_jump (after_lopart_neg);
1825 	      else if (smaller_sign != -1)
1826 		do_compare_rtx_and_jump (lopart, const0_rtx, GE, false, hmode,
1827 					 NULL_RTX, NULL, after_lopart_neg,
1828 					 profile_probability::even ());
1829 
1830 	      tem = expand_simple_binop (mode, MINUS, loxhi, larger, NULL_RTX,
1831 					 1, OPTAB_WIDEN);
1832 	      emit_move_insn (loxhi, tem);
1833 
1834 	      emit_label (after_lopart_neg);
1835 	    }
1836 
1837 	  /* loxhi += (uns) lo0xlo1 >> (bitsize / 2);  */
1838 	  tem = expand_shift (RSHIFT_EXPR, mode, lo0xlo1, hprec, NULL_RTX, 1);
1839 	  tem = expand_simple_binop (mode, PLUS, loxhi, tem, NULL_RTX,
1840 				     1, OPTAB_WIDEN);
1841 	  emit_move_insn (loxhi, tem);
1842 
1843 	  /* if (loxhi >> (bitsize / 2)
1844 		 == (hmode) loxhi >> (bitsize / 2 - 1))  (if !uns)
1845 	     if (loxhi >> (bitsize / 2) == 0)		 (if uns).  */
1846 	  rtx hipartloxhi = expand_shift (RSHIFT_EXPR, mode, loxhi, hprec,
1847 					  NULL_RTX, 0);
1848 	  hipartloxhi = convert_modes (hmode, mode, hipartloxhi, 0);
1849 	  rtx signbitloxhi = const0_rtx;
1850 	  if (!uns)
1851 	    signbitloxhi = expand_shift (RSHIFT_EXPR, hmode,
1852 					 convert_modes (hmode, mode,
1853 							loxhi, 0),
1854 					 hprec - 1, NULL_RTX, 0);
1855 
1856 	  do_compare_rtx_and_jump (signbitloxhi, hipartloxhi, NE, true, hmode,
1857 				   NULL_RTX, NULL, do_overflow,
1858 				   profile_probability::very_unlikely ());
1859 
1860 	  /* res = (loxhi << (bitsize / 2)) | (hmode) lo0xlo1;  */
1861 	  rtx loxhishifted = expand_shift (LSHIFT_EXPR, mode, loxhi, hprec,
1862 					   NULL_RTX, 1);
1863 	  tem = convert_modes (mode, hmode,
1864 			       convert_modes (hmode, mode, lo0xlo1, 1), 1);
1865 
1866 	  tem = expand_simple_binop (mode, IOR, loxhishifted, tem, res,
1867 				     1, OPTAB_WIDEN);
1868 	  if (tem != res)
1869 	    emit_move_insn (res, tem);
1870 	  emit_jump (done_label);
1871 
1872 	  emit_label (both_ops_large);
1873 
1874 	  /* If both operands are large (not sign (!uns) or zero (uns)
1875 	     extended from hmode), then perform the full multiplication
1876 	     which will be the result of the operation.
1877 	     The only cases which don't overflow are, for signed multiplication,
1878 	     some cases where both hipart0 and hipart1 are 0 or -1.
1879 	     For unsigned multiplication, when the high parts are both non-zero
1880 	     this always overflows.  */
1881 	  ops.code = MULT_EXPR;
1882 	  ops.op0 = make_tree (type, op0);
1883 	  ops.op1 = make_tree (type, op1);
1884 	  tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1885 	  emit_move_insn (res, tem);
1886 
1887 	  if (!uns)
1888 	    {
1889 	      if (!op0_medium_p)
1890 		{
1891 		  tem = expand_simple_binop (hmode, PLUS, hipart0, const1_rtx,
1892 					     NULL_RTX, 1, OPTAB_WIDEN);
1893 		  do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
1894 					   NULL_RTX, NULL, do_error,
1895 					   profile_probability::very_unlikely ());
1896 		}
1897 
1898 	      if (!op1_medium_p)
1899 		{
1900 		  tem = expand_simple_binop (hmode, PLUS, hipart1, const1_rtx,
1901 					     NULL_RTX, 1, OPTAB_WIDEN);
1902 		  do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
1903 					   NULL_RTX, NULL, do_error,
1904 					   profile_probability::very_unlikely ());
1905 		}
1906 
1907 	      /* At this point hipart{0,1} are both in [-1, 0].  If they are
1908 		 the same, overflow happened if res is non-positive; if they
1909 		 are different, overflow happened if res is positive.  */
1910 	      if (op0_sign != 1 && op1_sign != 1 && op0_sign != op1_sign)
1911 		emit_jump (hipart_different);
1912 	      else if (op0_sign == 1 || op1_sign == 1)
1913 		do_compare_rtx_and_jump (hipart0, hipart1, NE, true, hmode,
1914 					 NULL_RTX, NULL, hipart_different,
1915 					 profile_probability::even ());
1916 
1917 	      do_compare_rtx_and_jump (res, const0_rtx, LE, false, mode,
1918 				       NULL_RTX, NULL, do_error,
1919 				       profile_probability::very_unlikely ());
1920 	      emit_jump (done_label);
1921 
1922 	      emit_label (hipart_different);
1923 
1924 	      do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode,
1925 				       NULL_RTX, NULL, do_error,
1926 				       profile_probability::very_unlikely ());
1927 	      emit_jump (done_label);
1928 	    }
1929 
1930 	  emit_label (do_overflow);
1931 
1932 	  /* Overflow, do full multiplication and fallthru into do_error.  */
1933 	  ops.op0 = make_tree (type, op0);
1934 	  ops.op1 = make_tree (type, op1);
1935 	  tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1936 	  emit_move_insn (res, tem);
1937 	}
1938       else if (GET_MODE_2XWIDER_MODE (mode).exists (&wmode)
1939 	       && targetm.scalar_mode_supported_p (wmode))
1940 	/* Even emitting a libcall is better than not detecting overflow
1941 	   at all.  */
1942 	goto twoxwider;
1943       else
1944 	{
1945 	  gcc_assert (!is_ubsan);
1946 	  ops.code = MULT_EXPR;
1947 	  ops.type = type;
1948 	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1949 	  emit_jump (done_label);
1950 	}
1951     }
1952 
1953  do_error_label:
1954   emit_label (do_error);
1955   if (is_ubsan)
1956     {
1957       /* Expand the ubsan builtin call.  */
1958       push_temp_slots ();
1959       fn = ubsan_build_overflow_builtin (MULT_EXPR, loc, TREE_TYPE (arg0),
1960 					 arg0, arg1, datap);
1961       expand_normal (fn);
1962       pop_temp_slots ();
1963       do_pending_stack_adjust ();
1964     }
1965   else if (lhs)
1966     expand_arith_set_overflow (lhs, target);
1967 
1968   /* We're done.  */
1969   emit_label (done_label);
1970 
1971   /* u1 * u2 -> sr  */
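  /* For example, with 8-bit operands, 12u * 11u = 132 fits the unsigned
     range but is negative (-124) when reinterpreted as the signed result,
     so the additional RES >= 0 check below flags it as an overflow.  */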
1972   if (uns0_p && uns1_p && !unsr_p)
1973     {
1974       rtx_code_label *all_done_label = gen_label_rtx ();
1975       do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
1976 			       NULL, all_done_label, profile_probability::very_likely ());
1977       expand_arith_set_overflow (lhs, target);
1978       emit_label (all_done_label);
1979     }
1980 
1981   /* s1 * u2 -> sr  */
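  /* When op1 has its sign bit set the result usually overflows; the
     exceptions handled below are op0 == 0 and, e.g. for 8-bit operands,
     op0 == -1 with op1 == 128u, where res = -128 is still representable.  */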
1982   if (!uns0_p && uns1_p && !unsr_p && pos_neg1 == 3)
1983     {
1984       rtx_code_label *all_done_label = gen_label_rtx ();
1985       rtx_code_label *set_noovf = gen_label_rtx ();
1986       do_compare_rtx_and_jump (op1, const0_rtx, GE, false, mode, NULL_RTX,
1987 			       NULL, all_done_label, profile_probability::very_likely ());
1988       expand_arith_set_overflow (lhs, target);
1989       do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
1990 			       NULL, set_noovf, profile_probability::very_likely ());
1991       do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
1992 			       NULL, all_done_label, profile_probability::very_unlikely ());
1993       do_compare_rtx_and_jump (op1, res, NE, true, mode, NULL_RTX, NULL,
1994 			       all_done_label, profile_probability::very_unlikely ());
1995       emit_label (set_noovf);
1996       write_complex_part (target, const0_rtx, true);
1997       emit_label (all_done_label);
1998     }
1999 
2000   if (lhs)
2001     {
2002       if (is_ubsan)
2003 	expand_ubsan_result_store (target, res);
2004       else
2005 	expand_arith_overflow_result_store (lhs, target, mode, res);
2006     }
2007 }
2008 
2009 /* Expand UBSAN_CHECK_* internal function if it has vector operands.  */
2010 
2011 static void
2012 expand_vector_ubsan_overflow (location_t loc, enum tree_code code, tree lhs,
2013 			      tree arg0, tree arg1)
2014 {
2015   poly_uint64 cnt = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
2016   rtx_code_label *loop_lab = NULL;
2017   rtx cntvar = NULL_RTX;
2018   tree cntv = NULL_TREE;
2019   tree eltype = TREE_TYPE (TREE_TYPE (arg0));
2020   tree sz = TYPE_SIZE (eltype);
2021   tree data = NULL_TREE;
2022   tree resv = NULL_TREE;
2023   rtx lhsr = NULL_RTX;
2024   rtx resvr = NULL_RTX;
2025   unsigned HOST_WIDE_INT const_cnt = 0;
2026   bool use_loop_p = (!cnt.is_constant (&const_cnt) || const_cnt > 4);
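  /* A constant-length vector with at most four elements (e.g. V4SI) is
     checked with unrolled per-element code; longer or variable-length
     vectors use a runtime loop over an array view of the operands.  */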
2027 
2028   if (lhs)
2029     {
2030       optab op;
2031       lhsr = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2032       if (!VECTOR_MODE_P (GET_MODE (lhsr))
2033 	  || (op = optab_for_tree_code (code, TREE_TYPE (arg0),
2034 					optab_default)) == unknown_optab
2035 	  || (optab_handler (op, TYPE_MODE (TREE_TYPE (arg0)))
2036 	      == CODE_FOR_nothing))
2037 	{
2038 	  if (MEM_P (lhsr))
2039 	    resv = make_tree (TREE_TYPE (lhs), lhsr);
2040 	  else
2041 	    {
2042 	      resvr = assign_temp (TREE_TYPE (lhs), 1, 1);
2043 	      resv = make_tree (TREE_TYPE (lhs), resvr);
2044 	    }
2045 	}
2046     }
2047   if (use_loop_p)
2048     {
2049       do_pending_stack_adjust ();
2050       loop_lab = gen_label_rtx ();
2051       cntvar = gen_reg_rtx (TYPE_MODE (sizetype));
2052       cntv = make_tree (sizetype, cntvar);
2053       emit_move_insn (cntvar, const0_rtx);
2054       emit_label (loop_lab);
2055     }
2056   if (TREE_CODE (arg0) != VECTOR_CST)
2057     {
2058       rtx arg0r = expand_normal (arg0);
2059       arg0 = make_tree (TREE_TYPE (arg0), arg0r);
2060     }
2061   if (TREE_CODE (arg1) != VECTOR_CST)
2062     {
2063       rtx arg1r = expand_normal (arg1);
2064       arg1 = make_tree (TREE_TYPE (arg1), arg1r);
2065     }
2066   for (unsigned int i = 0; i < (use_loop_p ? 1 : const_cnt); i++)
2067     {
2068       tree op0, op1, res = NULL_TREE;
2069       if (use_loop_p)
2070 	{
2071 	  tree atype = build_array_type_nelts (eltype, cnt);
2072 	  op0 = uniform_vector_p (arg0);
2073 	  if (op0 == NULL_TREE)
2074 	    {
2075 	      op0 = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, arg0);
2076 	      op0 = build4_loc (loc, ARRAY_REF, eltype, op0, cntv,
2077 				NULL_TREE, NULL_TREE);
2078 	    }
2079 	  op1 = uniform_vector_p (arg1);
2080 	  if (op1 == NULL_TREE)
2081 	    {
2082 	      op1 = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, arg1);
2083 	      op1 = build4_loc (loc, ARRAY_REF, eltype, op1, cntv,
2084 				NULL_TREE, NULL_TREE);
2085 	    }
2086 	  if (resv)
2087 	    {
2088 	      res = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, resv);
2089 	      res = build4_loc (loc, ARRAY_REF, eltype, res, cntv,
2090 				NULL_TREE, NULL_TREE);
2091 	    }
2092 	}
2093       else
2094 	{
2095 	  tree bitpos = bitsize_int (tree_to_uhwi (sz) * i);
2096 	  op0 = fold_build3_loc (loc, BIT_FIELD_REF, eltype, arg0, sz, bitpos);
2097 	  op1 = fold_build3_loc (loc, BIT_FIELD_REF, eltype, arg1, sz, bitpos);
2098 	  if (resv)
2099 	    res = fold_build3_loc (loc, BIT_FIELD_REF, eltype, resv, sz,
2100 				   bitpos);
2101 	}
2102       switch (code)
2103 	{
2104 	case PLUS_EXPR:
2105 	  expand_addsub_overflow (loc, PLUS_EXPR, res, op0, op1,
2106 				  false, false, false, true, &data);
2107 	  break;
2108 	case MINUS_EXPR:
2109 	  if (use_loop_p ? integer_zerop (arg0) : integer_zerop (op0))
2110 	    expand_neg_overflow (loc, res, op1, true, &data);
2111 	  else
2112 	    expand_addsub_overflow (loc, MINUS_EXPR, res, op0, op1,
2113 				    false, false, false, true, &data);
2114 	  break;
2115 	case MULT_EXPR:
2116 	  expand_mul_overflow (loc, res, op0, op1, false, false, false,
2117 			       true, &data);
2118 	  break;
2119 	default:
2120 	  gcc_unreachable ();
2121 	}
2122     }
2123   if (use_loop_p)
2124     {
2125       struct separate_ops ops;
2126       ops.code = PLUS_EXPR;
2127       ops.type = TREE_TYPE (cntv);
2128       ops.op0 = cntv;
2129       ops.op1 = build_int_cst (TREE_TYPE (cntv), 1);
2130       ops.op2 = NULL_TREE;
2131       ops.location = loc;
2132       rtx ret = expand_expr_real_2 (&ops, cntvar, TYPE_MODE (sizetype),
2133 				    EXPAND_NORMAL);
2134       if (ret != cntvar)
2135 	emit_move_insn (cntvar, ret);
2136       rtx cntrtx = gen_int_mode (cnt, TYPE_MODE (sizetype));
2137       do_compare_rtx_and_jump (cntvar, cntrtx, NE, false,
2138 			       TYPE_MODE (sizetype), NULL_RTX, NULL, loop_lab,
2139 			       profile_probability::very_likely ());
2140     }
2141   if (lhs && resv == NULL_TREE)
2142     {
2143       struct separate_ops ops;
2144       ops.code = code;
2145       ops.type = TREE_TYPE (arg0);
2146       ops.op0 = arg0;
2147       ops.op1 = arg1;
2148       ops.op2 = NULL_TREE;
2149       ops.location = loc;
2150       rtx ret = expand_expr_real_2 (&ops, lhsr, TYPE_MODE (TREE_TYPE (arg0)),
2151 				    EXPAND_NORMAL);
2152       if (ret != lhsr)
2153 	emit_move_insn (lhsr, ret);
2154     }
2155   else if (resvr)
2156     emit_move_insn (lhsr, resvr);
2157 }
2158 
2159 /* Expand UBSAN_CHECK_ADD call STMT.  */
2160 
2161 static void
2162 expand_UBSAN_CHECK_ADD (internal_fn, gcall *stmt)
2163 {
2164   location_t loc = gimple_location (stmt);
2165   tree lhs = gimple_call_lhs (stmt);
2166   tree arg0 = gimple_call_arg (stmt, 0);
2167   tree arg1 = gimple_call_arg (stmt, 1);
2168   if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
2169     expand_vector_ubsan_overflow (loc, PLUS_EXPR, lhs, arg0, arg1);
2170   else
2171     expand_addsub_overflow (loc, PLUS_EXPR, lhs, arg0, arg1,
2172 			    false, false, false, true, NULL);
2173 }
2174 
2175 /* Expand UBSAN_CHECK_SUB call STMT.  */
2176 
2177 static void
2178 expand_UBSAN_CHECK_SUB (internal_fn, gcall *stmt)
2179 {
2180   location_t loc = gimple_location (stmt);
2181   tree lhs = gimple_call_lhs (stmt);
2182   tree arg0 = gimple_call_arg (stmt, 0);
2183   tree arg1 = gimple_call_arg (stmt, 1);
2184   if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
2185     expand_vector_ubsan_overflow (loc, MINUS_EXPR, lhs, arg0, arg1);
2186   else if (integer_zerop (arg0))
2187     expand_neg_overflow (loc, lhs, arg1, true, NULL);
2188   else
2189     expand_addsub_overflow (loc, MINUS_EXPR, lhs, arg0, arg1,
2190 			    false, false, false, true, NULL);
2191 }
2192 
2193 /* Expand UBSAN_CHECK_MUL call STMT.  */
2194 
2195 static void
2196 expand_UBSAN_CHECK_MUL (internal_fn, gcall *stmt)
2197 {
2198   location_t loc = gimple_location (stmt);
2199   tree lhs = gimple_call_lhs (stmt);
2200   tree arg0 = gimple_call_arg (stmt, 0);
2201   tree arg1 = gimple_call_arg (stmt, 1);
2202   if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
2203     expand_vector_ubsan_overflow (loc, MULT_EXPR, lhs, arg0, arg1);
2204   else
2205     expand_mul_overflow (loc, lhs, arg0, arg1, false, false, false, true,
2206 			 NULL);
2207 }
2208 
2209 /* Helper function for {ADD,SUB,MUL}_OVERFLOW call stmt expansion.  */
2210 
2211 static void
2212 expand_arith_overflow (enum tree_code code, gimple *stmt)
2213 {
2214   tree lhs = gimple_call_lhs (stmt);
2215   if (lhs == NULL_TREE)
2216     return;
2217   tree arg0 = gimple_call_arg (stmt, 0);
2218   tree arg1 = gimple_call_arg (stmt, 1);
2219   tree type = TREE_TYPE (TREE_TYPE (lhs));
2220   int uns0_p = TYPE_UNSIGNED (TREE_TYPE (arg0));
2221   int uns1_p = TYPE_UNSIGNED (TREE_TYPE (arg1));
2222   int unsr_p = TYPE_UNSIGNED (type);
2223   int prec0 = TYPE_PRECISION (TREE_TYPE (arg0));
2224   int prec1 = TYPE_PRECISION (TREE_TYPE (arg1));
2225   int precres = TYPE_PRECISION (type);
2226   location_t loc = gimple_location (stmt);
2227   if (!uns0_p && get_range_pos_neg (arg0) == 1)
2228     uns0_p = true;
2229   if (!uns1_p && get_range_pos_neg (arg1) == 1)
2230     uns1_p = true;
2231   int pr = get_min_precision (arg0, uns0_p ? UNSIGNED : SIGNED);
2232   prec0 = MIN (prec0, pr);
2233   pr = get_min_precision (arg1, uns1_p ? UNSIGNED : SIGNED);
2234   prec1 = MIN (prec1, pr);
2235 
2236   /* If uns0_p && uns1_p, precop is minimum needed precision
2237      of unsigned type to hold the exact result, otherwise
2238      precop is minimum needed precision of signed type to
2239      hold the exact result.  */
2240   int precop;
2241   if (code == MULT_EXPR)
2242     precop = prec0 + prec1 + (uns0_p != uns1_p);
2243   else
2244     {
2245       if (uns0_p == uns1_p)
2246 	precop = MAX (prec0, prec1) + 1;
2247       else if (uns0_p)
2248 	precop = MAX (prec0 + 1, prec1) + 1;
2249       else
2250 	precop = MAX (prec0, prec1 + 1) + 1;
2251     }
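  /* For example, multiplying a signed 32-bit by an unsigned 16-bit
     operand gives precop = 32 + 16 + 1 = 49, while adding two signed
     32-bit operands gives precop = MAX (32, 32) + 1 = 33.  */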
2252   int orig_precres = precres;
2253 
2254   do
2255     {
2256       if ((uns0_p && uns1_p)
2257 	  ? ((precop + !unsr_p) <= precres
2258 	     /* u1 - u2 -> ur can overflow, no matter what precision
2259 		the result has.  */
2260 	     && (code != MINUS_EXPR || !unsr_p))
2261 	  : (!unsr_p && precop <= precres))
2262 	{
2263 	  /* The infinite precision result will always fit into the result.  */
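	  /* E.g. adding two unsigned 16-bit operands into an unsigned
	     64-bit result: precop = 17 <= 64, so the addition is done
	     directly in the result type and the overflow flag stays 0.  */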
2264 	  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2265 	  write_complex_part (target, const0_rtx, true);
2266 	  scalar_int_mode mode = SCALAR_INT_TYPE_MODE (type);
2267 	  struct separate_ops ops;
2268 	  ops.code = code;
2269 	  ops.type = type;
2270 	  ops.op0 = fold_convert_loc (loc, type, arg0);
2271 	  ops.op1 = fold_convert_loc (loc, type, arg1);
2272 	  ops.op2 = NULL_TREE;
2273 	  ops.location = loc;
2274 	  rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
2275 	  expand_arith_overflow_result_store (lhs, target, mode, tem);
2276 	  return;
2277 	}
2278 
2279       /* For operations with low precision, if the target doesn't support them,
2280 	 start with precres widening right away, otherwise do it only if the
2281 	 simplest cases can't be used.  */
2282       const int min_precision = targetm.min_arithmetic_precision ();
2283       if (orig_precres == precres && precres < min_precision)
2284 	;
2285       else if ((uns0_p && uns1_p && unsr_p && prec0 <= precres
2286 		&& prec1 <= precres)
2287 	  || ((!uns0_p || !uns1_p) && !unsr_p
2288 	      && prec0 + uns0_p <= precres
2289 	      && prec1 + uns1_p <= precres))
2290 	{
2291 	  arg0 = fold_convert_loc (loc, type, arg0);
2292 	  arg1 = fold_convert_loc (loc, type, arg1);
2293 	  switch (code)
2294 	    {
2295 	    case MINUS_EXPR:
2296 	      if (integer_zerop (arg0) && !unsr_p)
2297 		{
2298 		  expand_neg_overflow (loc, lhs, arg1, false, NULL);
2299 		  return;
2300 		}
2301 	      /* FALLTHRU */
2302 	    case PLUS_EXPR:
2303 	      expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
2304 				      unsr_p, unsr_p, false, NULL);
2305 	      return;
2306 	    case MULT_EXPR:
2307 	      expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
2308 				   unsr_p, unsr_p, false, NULL);
2309 	      return;
2310 	    default:
2311 	      gcc_unreachable ();
2312 	    }
2313 	}
2314 
2315       /* For sub-word operations, retry with a wider type first.  */
2316       if (orig_precres == precres && precop <= BITS_PER_WORD)
2317 	{
2318 	  int p = MAX (min_precision, precop);
2319 	  scalar_int_mode m = smallest_int_mode_for_size (p);
2320 	  tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
2321 							uns0_p && uns1_p
2322 							&& unsr_p);
2323 	  p = TYPE_PRECISION (optype);
2324 	  if (p > precres)
2325 	    {
2326 	      precres = p;
2327 	      unsr_p = TYPE_UNSIGNED (optype);
2328 	      type = optype;
2329 	      continue;
2330 	    }
2331 	}
2332 
2333       if (prec0 <= precres && prec1 <= precres)
2334 	{
2335 	  tree types[2];
2336 	  if (unsr_p)
2337 	    {
2338 	      types[0] = build_nonstandard_integer_type (precres, 0);
2339 	      types[1] = type;
2340 	    }
2341 	  else
2342 	    {
2343 	      types[0] = type;
2344 	      types[1] = build_nonstandard_integer_type (precres, 1);
2345 	    }
2346 	  arg0 = fold_convert_loc (loc, types[uns0_p], arg0);
2347 	  arg1 = fold_convert_loc (loc, types[uns1_p], arg1);
2348 	  if (code != MULT_EXPR)
2349 	    expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
2350 				    uns0_p, uns1_p, false, NULL);
2351 	  else
2352 	    expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
2353 				 uns0_p, uns1_p, false, NULL);
2354 	  return;
2355 	}
2356 
2357       /* Retry with a wider type.  */
2358       if (orig_precres == precres)
2359 	{
2360 	  int p = MAX (prec0, prec1);
2361 	  scalar_int_mode m = smallest_int_mode_for_size (p);
2362 	  tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
2363 							uns0_p && uns1_p
2364 							&& unsr_p);
2365 	  p = TYPE_PRECISION (optype);
2366 	  if (p > precres)
2367 	    {
2368 	      precres = p;
2369 	      unsr_p = TYPE_UNSIGNED (optype);
2370 	      type = optype;
2371 	      continue;
2372 	    }
2373 	}
2374 
2375       gcc_unreachable ();
2376     }
2377   while (1);
2378 }
2379 
2380 /* Expand ADD_OVERFLOW STMT.  */
2381 
2382 static void
2383 expand_ADD_OVERFLOW (internal_fn, gcall *stmt)
2384 {
2385   expand_arith_overflow (PLUS_EXPR, stmt);
2386 }
2387 
2388 /* Expand SUB_OVERFLOW STMT.  */
2389 
2390 static void
2391 expand_SUB_OVERFLOW (internal_fn, gcall *stmt)
2392 {
2393   expand_arith_overflow (MINUS_EXPR, stmt);
2394 }
2395 
2396 /* Expand MUL_OVERFLOW STMT.  */
2397 
2398 static void
2399 expand_MUL_OVERFLOW (internal_fn, gcall *stmt)
2400 {
2401   expand_arith_overflow (MULT_EXPR, stmt);
2402 }
2403 
2404 /* This should get folded in tree-vectorizer.c.  */
2405 
2406 static void
2407 expand_LOOP_VECTORIZED (internal_fn, gcall *)
2408 {
2409   gcc_unreachable ();
2410 }
2411 
2412 /* This should get folded in tree-vectorizer.c.  */
2413 
2414 static void
2415 expand_LOOP_DIST_ALIAS (internal_fn, gcall *)
2416 {
2417   gcc_unreachable ();
2418 }
2419 
2420 /* Return a memory reference of type TYPE for argument INDEX of STMT.
2421    Use argument INDEX + 1 to derive the second (TBAA) operand.  */
2422 
2423 static tree
2424 expand_call_mem_ref (tree type, gcall *stmt, int index)
2425 {
2426   tree addr = gimple_call_arg (stmt, index);
2427   tree alias_ptr_type = TREE_TYPE (gimple_call_arg (stmt, index + 1));
2428   unsigned int align = tree_to_shwi (gimple_call_arg (stmt, index + 1));
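  /* Note that the INDEX + 1 argument does double duty: its type carries
     the TBAA alias pointer type while its value gives the alignment
     assumed for the access.  */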
2429   if (TYPE_ALIGN (type) != align)
2430     type = build_aligned_type (type, align);
2431 
2432   tree tmp = addr;
2433   if (TREE_CODE (tmp) == SSA_NAME)
2434     {
2435       gimple *def = SSA_NAME_DEF_STMT (tmp);
2436       if (gimple_assign_single_p (def))
2437 	tmp = gimple_assign_rhs1 (def);
2438     }
2439 
2440   if (TREE_CODE (tmp) == ADDR_EXPR)
2441     {
2442       tree mem = TREE_OPERAND (tmp, 0);
2443       if (TREE_CODE (mem) == TARGET_MEM_REF
2444 	  && types_compatible_p (TREE_TYPE (mem), type))
2445 	{
2446 	  tree offset = TMR_OFFSET (mem);
2447 	  if (type != TREE_TYPE (mem)
2448 	      || alias_ptr_type != TREE_TYPE (offset)
2449 	      || !integer_zerop (offset))
2450 	    {
2451 	      mem = copy_node (mem);
2452 	      TMR_OFFSET (mem) = wide_int_to_tree (alias_ptr_type,
2453 						   wi::to_poly_wide (offset));
2454 	      TREE_TYPE (mem) = type;
2455 	    }
2456 	  return mem;
2457 	}
2458     }
2459 
2460   return fold_build2 (MEM_REF, type, addr, build_int_cst (alias_ptr_type, 0));
2461 }
2462 
2463 /* Expand MASK_LOAD{,_LANES} call STMT using optab OPTAB.  */
2464 
2465 static void
2466 expand_mask_load_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
2467 {
2468   struct expand_operand ops[3];
2469   tree type, lhs, rhs, maskt;
2470   rtx mem, target, mask;
2471   insn_code icode;
2472 
2473   maskt = gimple_call_arg (stmt, 2);
2474   lhs = gimple_call_lhs (stmt);
2475   if (lhs == NULL_TREE)
2476     return;
2477   type = TREE_TYPE (lhs);
2478   rhs = expand_call_mem_ref (type, stmt, 0);
2479 
2480   if (optab == vec_mask_load_lanes_optab)
2481     icode = get_multi_vector_move (type, optab);
2482   else
2483     icode = convert_optab_handler (optab, TYPE_MODE (type),
2484 				   TYPE_MODE (TREE_TYPE (maskt)));
2485 
2486   mem = expand_expr (rhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2487   gcc_assert (MEM_P (mem));
2488   mask = expand_normal (maskt);
2489   target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2490   create_output_operand (&ops[0], target, TYPE_MODE (type));
2491   create_fixed_operand (&ops[1], mem);
2492   create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
2493   expand_insn (icode, 3, ops);
2494 }
2495 
2496 #define expand_mask_load_lanes_optab_fn expand_mask_load_optab_fn
2497 
2498 /* Expand MASK_STORE{,_LANES} call STMT using optab OPTAB.  */
2499 
2500 static void
2501 expand_mask_store_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
2502 {
2503   struct expand_operand ops[3];
2504   tree type, lhs, rhs, maskt;
2505   rtx mem, reg, mask;
2506   insn_code icode;
2507 
2508   maskt = gimple_call_arg (stmt, 2);
2509   rhs = gimple_call_arg (stmt, 3);
2510   type = TREE_TYPE (rhs);
2511   lhs = expand_call_mem_ref (type, stmt, 0);
2512 
2513   if (optab == vec_mask_store_lanes_optab)
2514     icode = get_multi_vector_move (type, optab);
2515   else
2516     icode = convert_optab_handler (optab, TYPE_MODE (type),
2517 				   TYPE_MODE (TREE_TYPE (maskt)));
2518 
2519   mem = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2520   gcc_assert (MEM_P (mem));
2521   mask = expand_normal (maskt);
2522   reg = expand_normal (rhs);
2523   create_fixed_operand (&ops[0], mem);
2524   create_input_operand (&ops[1], reg, TYPE_MODE (type));
2525   create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
2526   expand_insn (icode, 3, ops);
2527 }
2528 
2529 #define expand_mask_store_lanes_optab_fn expand_mask_store_optab_fn
2530 
2531 static void
2532 expand_ABNORMAL_DISPATCHER (internal_fn, gcall *)
2533 {
2534 }
2535 
2536 static void
2537 expand_BUILTIN_EXPECT (internal_fn, gcall *stmt)
2538 {
2539   /* When guessing was done, the hints should already have been stripped away.  */
2540   gcc_assert (!flag_guess_branch_prob || optimize == 0 || seen_error ());
2541 
2542   rtx target;
2543   tree lhs = gimple_call_lhs (stmt);
2544   if (lhs)
2545     target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2546   else
2547     target = const0_rtx;
2548   rtx val = expand_expr (gimple_call_arg (stmt, 0), target, VOIDmode, EXPAND_NORMAL);
2549   if (lhs && val != target)
2550     emit_move_insn (target, val);
2551 }
2552 
2553 /* IFN_VA_ARG is supposed to be expanded at pass_stdarg.  So this dummy function
2554    should never be called.  */
2555 
2556 static void
2557 expand_VA_ARG (internal_fn, gcall *)
2558 {
2559   gcc_unreachable ();
2560 }
2561 
2562 /* Expand the IFN_UNIQUE function according to its first argument.  */
2563 
2564 static void
2565 expand_UNIQUE (internal_fn, gcall *stmt)
2566 {
2567   rtx pattern = NULL_RTX;
2568   enum ifn_unique_kind kind
2569     = (enum ifn_unique_kind) TREE_INT_CST_LOW (gimple_call_arg (stmt, 0));
2570 
2571   switch (kind)
2572     {
2573     default:
2574       gcc_unreachable ();
2575 
2576     case IFN_UNIQUE_UNSPEC:
2577       if (targetm.have_unique ())
2578 	pattern = targetm.gen_unique ();
2579       break;
2580 
2581     case IFN_UNIQUE_OACC_FORK:
2582     case IFN_UNIQUE_OACC_JOIN:
2583       if (targetm.have_oacc_fork () && targetm.have_oacc_join ())
2584 	{
2585 	  tree lhs = gimple_call_lhs (stmt);
2586 	  rtx target = const0_rtx;
2587 
2588 	  if (lhs)
2589 	    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2590 
2591 	  rtx data_dep = expand_normal (gimple_call_arg (stmt, 1));
2592 	  rtx axis = expand_normal (gimple_call_arg (stmt, 2));
2593 
2594 	  if (kind == IFN_UNIQUE_OACC_FORK)
2595 	    pattern = targetm.gen_oacc_fork (target, data_dep, axis);
2596 	  else
2597 	    pattern = targetm.gen_oacc_join (target, data_dep, axis);
2598 	}
2599       else
2600 	gcc_unreachable ();
2601       break;
2602     }
2603 
2604   if (pattern)
2605     emit_insn (pattern);
2606 }
2607 
2608 /* The size of an OpenACC compute dimension.  */
2609 
2610 static void
2611 expand_GOACC_DIM_SIZE (internal_fn, gcall *stmt)
2612 {
2613   tree lhs = gimple_call_lhs (stmt);
2614 
2615   if (!lhs)
2616     return;
2617 
2618   rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2619   if (targetm.have_oacc_dim_size ())
2620     {
2621       rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
2622 			     VOIDmode, EXPAND_NORMAL);
2623       emit_insn (targetm.gen_oacc_dim_size (target, dim));
2624     }
2625   else
2626     emit_move_insn (target, GEN_INT (1));
2627 }
2628 
2629 /* The position of an OpenACC execution engine along one compute axis.  */
2630 
2631 static void
2632 expand_GOACC_DIM_POS (internal_fn, gcall *stmt)
2633 {
2634   tree lhs = gimple_call_lhs (stmt);
2635 
2636   if (!lhs)
2637     return;
2638 
2639   rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2640   if (targetm.have_oacc_dim_pos ())
2641     {
2642       rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
2643 			     VOIDmode, EXPAND_NORMAL);
2644       emit_insn (targetm.gen_oacc_dim_pos (target, dim));
2645     }
2646   else
2647     emit_move_insn (target, const0_rtx);
2648 }
2649 
2650 /* This is expanded by oacc_device_lower pass.  */
2651 
2652 static void
2653 expand_GOACC_LOOP (internal_fn, gcall *)
2654 {
2655   gcc_unreachable ();
2656 }
2657 
2658 /* This is expanded by oacc_device_lower pass.  */
2659 
2660 static void
2661 expand_GOACC_REDUCTION (internal_fn, gcall *)
2662 {
2663   gcc_unreachable ();
2664 }
2665 
2666 /* This is expanded by oacc_device_lower pass.  */
2667 
2668 static void
2669 expand_GOACC_TILE (internal_fn, gcall *)
2670 {
2671   gcc_unreachable ();
2672 }
2673 
2674 /* Set errno to EDOM.  */
2675 
2676 static void
2677 expand_SET_EDOM (internal_fn, gcall *)
2678 {
2679 #ifdef TARGET_EDOM
2680 #ifdef GEN_ERRNO_RTX
2681   rtx errno_rtx = GEN_ERRNO_RTX;
2682 #else
2683   rtx errno_rtx = gen_rtx_MEM (word_mode, gen_rtx_SYMBOL_REF (Pmode, "errno"));
2684 #endif
2685   emit_move_insn (errno_rtx,
2686 		  gen_int_mode (TARGET_EDOM, GET_MODE (errno_rtx)));
2687 #else
2688   gcc_unreachable ();
2689 #endif
2690 }
2691 
2692 /* Expand atomic bit test and set.  */
2693 
2694 static void
2695 expand_ATOMIC_BIT_TEST_AND_SET (internal_fn, gcall *call)
2696 {
2697   expand_ifn_atomic_bit_test_and (call);
2698 }
2699 
2700 /* Expand atomic bit test and complement.  */
2701 
2702 static void
2703 expand_ATOMIC_BIT_TEST_AND_COMPLEMENT (internal_fn, gcall *call)
2704 {
2705   expand_ifn_atomic_bit_test_and (call);
2706 }
2707 
2708 /* Expand atomic bit test and reset.  */
2709 
2710 static void
2711 expand_ATOMIC_BIT_TEST_AND_RESET (internal_fn, gcall *call)
2712 {
2713   expand_ifn_atomic_bit_test_and (call);
2714 }
2715 
2716 /* Expand atomic compare and exchange.  */
2717 
2718 static void
2719 expand_ATOMIC_COMPARE_EXCHANGE (internal_fn, gcall *call)
2720 {
2721   expand_ifn_atomic_compare_exchange (call);
2722 }
2723 
2724 /* Expand LAUNDER to assignment, lhs = arg0.  */
2725 
2726 static void
2727 expand_LAUNDER (internal_fn, gcall *call)
2728 {
2729   tree lhs = gimple_call_lhs (call);
2730 
2731   if (!lhs)
2732     return;
2733 
2734   expand_assignment (lhs, gimple_call_arg (call, 0), false);
2735 }
2736 
2737 /* Expand {MASK_,}SCATTER_STORE{S,U} call CALL using optab OPTAB.  */
2738 
2739 static void
2740 expand_scatter_store_optab_fn (internal_fn, gcall *stmt, direct_optab optab)
2741 {
2742   internal_fn ifn = gimple_call_internal_fn (stmt);
2743   int rhs_index = internal_fn_stored_value_index (ifn);
2744   int mask_index = internal_fn_mask_index (ifn);
2745   tree base = gimple_call_arg (stmt, 0);
2746   tree offset = gimple_call_arg (stmt, 1);
2747   tree scale = gimple_call_arg (stmt, 2);
2748   tree rhs = gimple_call_arg (stmt, rhs_index);
2749 
2750   rtx base_rtx = expand_normal (base);
2751   rtx offset_rtx = expand_normal (offset);
2752   HOST_WIDE_INT scale_int = tree_to_shwi (scale);
2753   rtx rhs_rtx = expand_normal (rhs);
2754 
2755   struct expand_operand ops[6];
2756   int i = 0;
2757   create_address_operand (&ops[i++], base_rtx);
2758   create_input_operand (&ops[i++], offset_rtx, TYPE_MODE (TREE_TYPE (offset)));
2759   create_integer_operand (&ops[i++], TYPE_UNSIGNED (TREE_TYPE (offset)));
2760   create_integer_operand (&ops[i++], scale_int);
2761   create_input_operand (&ops[i++], rhs_rtx, TYPE_MODE (TREE_TYPE (rhs)));
2762   if (mask_index >= 0)
2763     {
2764       tree mask = gimple_call_arg (stmt, mask_index);
2765       rtx mask_rtx = expand_normal (mask);
2766       create_input_operand (&ops[i++], mask_rtx, TYPE_MODE (TREE_TYPE (mask)));
2767     }
2768 
2769   insn_code icode = direct_optab_handler (optab, TYPE_MODE (TREE_TYPE (rhs)));
2770   expand_insn (icode, i, ops);
2771 }
2772 
2773 /* Expand {MASK_,}GATHER_LOAD call CALL using optab OPTAB.  */
2774 
2775 static void
2776 expand_gather_load_optab_fn (internal_fn, gcall *stmt, direct_optab optab)
2777 {
2778   tree lhs = gimple_call_lhs (stmt);
2779   tree base = gimple_call_arg (stmt, 0);
2780   tree offset = gimple_call_arg (stmt, 1);
2781   tree scale = gimple_call_arg (stmt, 2);
2782 
2783   rtx lhs_rtx = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2784   rtx base_rtx = expand_normal (base);
2785   rtx offset_rtx = expand_normal (offset);
2786   HOST_WIDE_INT scale_int = tree_to_shwi (scale);
2787 
2788   int i = 0;
2789   struct expand_operand ops[6];
2790   create_output_operand (&ops[i++], lhs_rtx, TYPE_MODE (TREE_TYPE (lhs)));
2791   create_address_operand (&ops[i++], base_rtx);
2792   create_input_operand (&ops[i++], offset_rtx, TYPE_MODE (TREE_TYPE (offset)));
2793   create_integer_operand (&ops[i++], TYPE_UNSIGNED (TREE_TYPE (offset)));
2794   create_integer_operand (&ops[i++], scale_int);
2795   if (optab == mask_gather_load_optab)
2796     {
2797       tree mask = gimple_call_arg (stmt, 3);
2798       rtx mask_rtx = expand_normal (mask);
2799       create_input_operand (&ops[i++], mask_rtx, TYPE_MODE (TREE_TYPE (mask)));
2800     }
2801   insn_code icode = direct_optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs)));
2802   expand_insn (icode, i, ops);
2803 }
2804 
2805 /* Expand DIVMOD () using:
2806  a) the optab handler for udivmod/sdivmod if it is available.
2807  b) If no optab handler exists, generate a call to the
2808     target-specific divmod libfunc.  */
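/* For instance, DIVMOD (7, 3) produces the complex pair with real part
   7 / 3 == 2 and imaginary part 7 % 3 == 1.  */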
2809 
2810 static void
2811 expand_DIVMOD (internal_fn, gcall *call_stmt)
2812 {
2813   tree lhs = gimple_call_lhs (call_stmt);
2814   tree arg0 = gimple_call_arg (call_stmt, 0);
2815   tree arg1 = gimple_call_arg (call_stmt, 1);
2816 
2817   gcc_assert (TREE_CODE (TREE_TYPE (lhs)) == COMPLEX_TYPE);
2818   tree type = TREE_TYPE (TREE_TYPE (lhs));
2819   machine_mode mode = TYPE_MODE (type);
2820   bool unsignedp = TYPE_UNSIGNED (type);
2821   optab tab = (unsignedp) ? udivmod_optab : sdivmod_optab;
2822 
2823   rtx op0 = expand_normal (arg0);
2824   rtx op1 = expand_normal (arg1);
2825   rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2826 
2827   rtx quotient, remainder, libfunc;
2828 
2829   /* Check if optab_handler exists for divmod_optab for given mode.  */
2830   if (optab_handler (tab, mode) != CODE_FOR_nothing)
2831     {
2832       quotient = gen_reg_rtx (mode);
2833       remainder = gen_reg_rtx (mode);
2834       expand_twoval_binop (tab, op0, op1, quotient, remainder, unsignedp);
2835     }
2836 
2837   /* Generate call to divmod libfunc if it exists.  */
2838   else if ((libfunc = optab_libfunc (tab, mode)) != NULL_RTX)
2839     targetm.expand_divmod_libfunc (libfunc, mode, op0, op1,
2840 				   &quotient, &remainder);
2841 
2842   else
2843     gcc_unreachable ();
2844 
2845   /* Wrap the return value (quotient, remainder) within COMPLEX_EXPR.  */
2846   expand_expr (build2 (COMPLEX_EXPR, TREE_TYPE (lhs),
2847 		       make_tree (TREE_TYPE (arg0), quotient),
2848 		       make_tree (TREE_TYPE (arg1), remainder)),
2849 	       target, VOIDmode, EXPAND_NORMAL);
2850 }
2851 
2852 /* Expand a NOP.  */
2853 
2854 static void
2855 expand_NOP (internal_fn, gcall *)
2856 {
2857   /* Nothing.  But it shouldn't really prevail.  */
2858 }
2859 
2860 /* Expand a call to FN using the operands in STMT.  FN has a single
2861    output operand and NARGS input operands.  */
2862 
2863 static void
2864 expand_direct_optab_fn (internal_fn fn, gcall *stmt, direct_optab optab,
2865 			unsigned int nargs)
2866 {
2867   expand_operand *ops = XALLOCAVEC (expand_operand, nargs + 1);
2868 
2869   tree_pair types = direct_internal_fn_types (fn, stmt);
2870   insn_code icode = direct_optab_handler (optab, TYPE_MODE (types.first));
2871 
2872   tree lhs = gimple_call_lhs (stmt);
2873   tree lhs_type = TREE_TYPE (lhs);
2874   rtx lhs_rtx = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2875 
2876   /* Do not assign directly to a promoted subreg, since there is no
2877      guarantee that the instruction will leave the upper bits of the
2878      register in the state required by SUBREG_PROMOTED_SIGN.  */
2879   rtx dest = lhs_rtx;
2880   if (GET_CODE (dest) == SUBREG && SUBREG_PROMOTED_VAR_P (dest))
2881     dest = NULL_RTX;
2882 
2883   create_output_operand (&ops[0], dest, insn_data[icode].operand[0].mode);
2884 
2885   for (unsigned int i = 0; i < nargs; ++i)
2886     {
2887       tree rhs = gimple_call_arg (stmt, i);
2888       tree rhs_type = TREE_TYPE (rhs);
2889       rtx rhs_rtx = expand_normal (rhs);
2890       if (INTEGRAL_TYPE_P (rhs_type))
2891 	create_convert_operand_from (&ops[i + 1], rhs_rtx,
2892 				     TYPE_MODE (rhs_type),
2893 				     TYPE_UNSIGNED (rhs_type));
2894       else
2895 	create_input_operand (&ops[i + 1], rhs_rtx, TYPE_MODE (rhs_type));
2896     }
2897 
2898   expand_insn (icode, nargs + 1, ops);
2899   if (!rtx_equal_p (lhs_rtx, ops[0].value))
2900     {
2901       /* If the return value has an integral type, convert the instruction
2902 	 result to that type.  This is useful for things that return an
2903 	 int regardless of the size of the input.  If the instruction result
2904 	 is smaller than required, assume that it is signed.
2905 
2906 	 If the return value has a nonintegral type, its mode must match
2907 	 the instruction result.  */
2908       if (GET_CODE (lhs_rtx) == SUBREG && SUBREG_PROMOTED_VAR_P (lhs_rtx))
2909 	{
2910 	  /* If this is a scalar in a register that is stored in a wider
2911 	     mode than the declared mode, compute the result into its
2912 	     declared mode and then convert to the wider mode.  */
2913 	  gcc_checking_assert (INTEGRAL_TYPE_P (lhs_type));
2914 	  rtx tmp = convert_to_mode (GET_MODE (lhs_rtx), ops[0].value, 0);
2915 	  convert_move (SUBREG_REG (lhs_rtx), tmp,
2916 			SUBREG_PROMOTED_SIGN (lhs_rtx));
2917 	}
2918       else if (GET_MODE (lhs_rtx) == GET_MODE (ops[0].value))
2919 	emit_move_insn (lhs_rtx, ops[0].value);
2920       else
2921 	{
2922 	  gcc_checking_assert (INTEGRAL_TYPE_P (lhs_type));
2923 	  convert_move (lhs_rtx, ops[0].value, 0);
2924 	}
2925     }
2926 }
2927 
2928 /* Expand WHILE_ULT call STMT using optab OPTAB.  */
2929 
2930 static void
2931 expand_while_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
2932 {
2933   expand_operand ops[3];
2934   tree rhs_type[2];
2935 
2936   tree lhs = gimple_call_lhs (stmt);
2937   tree lhs_type = TREE_TYPE (lhs);
2938   rtx lhs_rtx = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2939   create_output_operand (&ops[0], lhs_rtx, TYPE_MODE (lhs_type));
2940 
2941   for (unsigned int i = 0; i < 2; ++i)
2942     {
2943       tree rhs = gimple_call_arg (stmt, i);
2944       rhs_type[i] = TREE_TYPE (rhs);
2945       rtx rhs_rtx = expand_normal (rhs);
2946       create_input_operand (&ops[i + 1], rhs_rtx, TYPE_MODE (rhs_type[i]));
2947     }
2948 
2949   insn_code icode = convert_optab_handler (optab, TYPE_MODE (rhs_type[0]),
2950 					   TYPE_MODE (lhs_type));
2951 
2952   expand_insn (icode, 3, ops);
2953   if (!rtx_equal_p (lhs_rtx, ops[0].value))
2954     emit_move_insn (lhs_rtx, ops[0].value);
2955 }
2956 
2957 /* Expanders for optabs that can use expand_direct_optab_fn.  */
2958 
2959 #define expand_unary_optab_fn(FN, STMT, OPTAB) \
2960   expand_direct_optab_fn (FN, STMT, OPTAB, 1)
2961 
2962 #define expand_binary_optab_fn(FN, STMT, OPTAB) \
2963   expand_direct_optab_fn (FN, STMT, OPTAB, 2)
2964 
2965 #define expand_cond_unary_optab_fn(FN, STMT, OPTAB) \
2966   expand_direct_optab_fn (FN, STMT, OPTAB, 2)
2967 
2968 #define expand_cond_binary_optab_fn(FN, STMT, OPTAB) \
2969   expand_direct_optab_fn (FN, STMT, OPTAB, 3)
2970 
2971 #define expand_fold_extract_optab_fn(FN, STMT, OPTAB) \
2972   expand_direct_optab_fn (FN, STMT, OPTAB, 3)
2973 
2974 #define expand_fold_left_optab_fn(FN, STMT, OPTAB) \
2975   expand_direct_optab_fn (FN, STMT, OPTAB, 2)
2976 
2977 /* RETURN_TYPE and ARGS are a return type and argument list that are
2978    in principle compatible with FN (which satisfies direct_internal_fn_p).
2979    Return the types that should be used to determine whether the
2980    target supports FN.  */
2981 
2982 tree_pair
2983 direct_internal_fn_types (internal_fn fn, tree return_type, tree *args)
2984 {
2985   const direct_internal_fn_info &info = direct_internal_fn (fn);
2986   tree type0 = (info.type0 < 0 ? return_type : TREE_TYPE (args[info.type0]));
2987   tree type1 = (info.type1 < 0 ? return_type : TREE_TYPE (args[info.type1]));
2988   return tree_pair (type0, type1);
2989 }
2990 
2991 /* CALL is a call whose return type and arguments are in principle
2992    compatible with FN (which satisfies direct_internal_fn_p).  Return the
2993    types that should be used to determine whether the target supports FN.  */
2994 
2995 tree_pair
2996 direct_internal_fn_types (internal_fn fn, gcall *call)
2997 {
2998   const direct_internal_fn_info &info = direct_internal_fn (fn);
2999   tree op0 = (info.type0 < 0
3000 	      ? gimple_call_lhs (call)
3001 	      : gimple_call_arg (call, info.type0));
3002   tree op1 = (info.type1 < 0
3003 	      ? gimple_call_lhs (call)
3004 	      : gimple_call_arg (call, info.type1));
3005   return tree_pair (TREE_TYPE (op0), TREE_TYPE (op1));
3006 }
3007 
3008 /* Return true if OPTAB is supported for TYPES (whose modes should be
3009    the same) when the optimization type is OPT_TYPE.  Used for simple
3010    direct optabs.  */
3011 
3012 static bool
3013 direct_optab_supported_p (direct_optab optab, tree_pair types,
3014 			  optimization_type opt_type)
3015 {
3016   machine_mode mode = TYPE_MODE (types.first);
3017   gcc_checking_assert (mode == TYPE_MODE (types.second));
3018   return direct_optab_handler (optab, mode, opt_type) != CODE_FOR_nothing;
3019 }
3020 
3021 /* Return true if OPTAB is supported for TYPES, where the first type
3022    is the destination and the second type is the source.  Used for
3023    convert optabs.  */
3024 
3025 static bool
3026 convert_optab_supported_p (convert_optab optab, tree_pair types,
3027 			   optimization_type opt_type)
3028 {
3029   return (convert_optab_handler (optab, TYPE_MODE (types.first),
3030 				 TYPE_MODE (types.second), opt_type)
3031 	  != CODE_FOR_nothing);
3032 }
3033 
3034 /* Return true if load/store lanes optab OPTAB is supported for
3035    array type TYPES.first when the optimization type is OPT_TYPE.  */
3036 
3037 static bool
3038 multi_vector_optab_supported_p (convert_optab optab, tree_pair types,
3039 				optimization_type opt_type)
3040 {
3041   gcc_assert (TREE_CODE (types.first) == ARRAY_TYPE);
3042   machine_mode imode = TYPE_MODE (types.first);
3043   machine_mode vmode = TYPE_MODE (TREE_TYPE (types.first));
3044   return (convert_optab_handler (optab, imode, vmode, opt_type)
3045 	  != CODE_FOR_nothing);
3046 }
3047 
3048 #define direct_unary_optab_supported_p direct_optab_supported_p
3049 #define direct_binary_optab_supported_p direct_optab_supported_p
3050 #define direct_cond_unary_optab_supported_p direct_optab_supported_p
3051 #define direct_cond_binary_optab_supported_p direct_optab_supported_p
3052 #define direct_mask_load_optab_supported_p direct_optab_supported_p
3053 #define direct_load_lanes_optab_supported_p multi_vector_optab_supported_p
3054 #define direct_mask_load_lanes_optab_supported_p multi_vector_optab_supported_p
3055 #define direct_gather_load_optab_supported_p direct_optab_supported_p
3056 #define direct_mask_store_optab_supported_p direct_optab_supported_p
3057 #define direct_store_lanes_optab_supported_p multi_vector_optab_supported_p
3058 #define direct_mask_store_lanes_optab_supported_p multi_vector_optab_supported_p
3059 #define direct_scatter_store_optab_supported_p direct_optab_supported_p
3060 #define direct_while_optab_supported_p convert_optab_supported_p
3061 #define direct_fold_extract_optab_supported_p direct_optab_supported_p
3062 #define direct_fold_left_optab_supported_p direct_optab_supported_p
3063 
3064 /* Return the optab used by internal function FN.  */
3065 
3066 static optab
3067 direct_internal_fn_optab (internal_fn fn, tree_pair types)
3068 {
3069   switch (fn)
3070     {
3071 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
3072     case IFN_##CODE: break;
3073 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
3074     case IFN_##CODE: return OPTAB##_optab;
3075 #define DEF_INTERNAL_SIGNED_OPTAB_FN(CODE, FLAGS, SELECTOR, SIGNED_OPTAB, \
3076 				     UNSIGNED_OPTAB, TYPE)		\
3077     case IFN_##CODE: return (TYPE_UNSIGNED (types.SELECTOR)		\
3078 			     ? UNSIGNED_OPTAB ## _optab			\
3079 			     : SIGNED_OPTAB ## _optab);
3080 #include "internal-fn.def"
3081 
3082     case IFN_LAST:
3083       break;
3084     }
3085   gcc_unreachable ();
3086 }
3087 
3088 /* Return the optab used by internal function FN.  */
3089 
3090 static optab
3091 direct_internal_fn_optab (internal_fn fn)
3092 {
3093   switch (fn)
3094     {
3095 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
3096     case IFN_##CODE: break;
3097 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
3098     case IFN_##CODE: return OPTAB##_optab;
3099 #include "internal-fn.def"
3100 
3101     case IFN_LAST:
3102       break;
3103     }
3104   gcc_unreachable ();
3105 }
3106 
3107 /* Return true if FN is supported for the types in TYPES when the
3108    optimization type is OPT_TYPE.  The types are those associated with
3109    the "type0" and "type1" fields of FN's direct_internal_fn_info
3110    structure.  */
3111 
3112 bool
3113 direct_internal_fn_supported_p (internal_fn fn, tree_pair types,
3114 				optimization_type opt_type)
3115 {
3116   switch (fn)
3117     {
3118 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
3119     case IFN_##CODE: break;
3120 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
3121     case IFN_##CODE: \
3122       return direct_##TYPE##_optab_supported_p (OPTAB##_optab, types, \
3123 						opt_type);
3124 #define DEF_INTERNAL_SIGNED_OPTAB_FN(CODE, FLAGS, SELECTOR, SIGNED_OPTAB, \
3125 				     UNSIGNED_OPTAB, TYPE)		\
3126     case IFN_##CODE:							\
3127       {									\
3128 	optab which_optab = (TYPE_UNSIGNED (types.SELECTOR)		\
3129 			     ? UNSIGNED_OPTAB ## _optab			\
3130 			     : SIGNED_OPTAB ## _optab);			\
3131 	return direct_##TYPE##_optab_supported_p (which_optab, types,	\
3132 						  opt_type);		\
3133       }
3134 #include "internal-fn.def"
3135 
3136     case IFN_LAST:
3137       break;
3138     }
3139   gcc_unreachable ();
3140 }
3141 
3142 /* Return true if FN is supported for type TYPE when the optimization
3143    type is OPT_TYPE.  The caller knows that the "type0" and "type1"
3144    fields of FN's direct_internal_fn_info structure are the same.  */
3145 
3146 bool
3147 direct_internal_fn_supported_p (internal_fn fn, tree type,
3148 				optimization_type opt_type)
3149 {
3150   const direct_internal_fn_info &info = direct_internal_fn (fn);
3151   gcc_checking_assert (info.type0 == info.type1);
3152   return direct_internal_fn_supported_p (fn, tree_pair (type, type), opt_type);
3153 }
3154 
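/* As an illustration (with vectype standing for some caller-supplied vector
   type, not a variable defined here), a caller could ask

       direct_internal_fn_supported_p (IFN_COND_ADD, vectype,
                                       OPTIMIZE_FOR_SPEED)

   to test whether the target can expand IFN_COND_ADD directly for that
   vector type when optimizing for speed.  */
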
3155 /* Return true if IFN_SET_EDOM is supported.  */
3156 
3157 bool
3158 set_edom_supported_p (void)
3159 {
3160 #ifdef TARGET_EDOM
3161   return true;
3162 #else
3163   return false;
3164 #endif
3165 }
3166 
3167 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
3168   static void						\
3169   expand_##CODE (internal_fn fn, gcall *stmt)		\
3170   {							\
3171     expand_##TYPE##_optab_fn (fn, stmt, OPTAB##_optab);	\
3172   }
3173 #define DEF_INTERNAL_SIGNED_OPTAB_FN(CODE, FLAGS, SELECTOR, SIGNED_OPTAB, \
3174 				     UNSIGNED_OPTAB, TYPE)		\
3175   static void								\
3176   expand_##CODE (internal_fn fn, gcall *stmt)				\
3177   {									\
3178     tree_pair types = direct_internal_fn_types (fn, stmt);		\
3179     optab which_optab = direct_internal_fn_optab (fn, types);		\
3180     expand_##TYPE##_optab_fn (fn, stmt, which_optab);			\
3181   }
3182 #include "internal-fn.def"
3183 
3184 /* Routines to expand each internal function, indexed by function number.
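/* As an example of what the macros above generate, a hypothetical entry

       DEF_INTERNAL_OPTAB_FN (FOO, ECF_CONST, foo, binary)

   would produce:

       static void
       expand_FOO (internal_fn fn, gcall *stmt)
       {
         expand_binary_optab_fn (fn, stmt, foo_optab);
       }

   with FOO and foo_optab again being illustrative names only.  */
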
3185    Each routine has the prototype:
3186 
3187        expand_<NAME> (internal_fn fn, gcall *stmt)
3188 
3189    where FN is the function being expanded and STMT is the call statement.  */
3190 static void (*const internal_fn_expanders[]) (internal_fn, gcall *) = {
3191 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) expand_##CODE,
3192 #include "internal-fn.def"
3193   0
3194 };
3195 
3196 /* Return a function that performs the conditional form of CODE, i.e.:
3197 
3198      LHS = RHS1 ? RHS2 CODE RHS3 : RHS2
3199 
3200    (operating elementwise if the operands are vectors).  Return IFN_LAST
3201    if no such function exists.  */
3202 
3203 internal_fn
3204 get_conditional_internal_fn (tree_code code)
3205 {
3206   switch (code)
3207     {
3208     case PLUS_EXPR:
3209       return IFN_COND_ADD;
3210     case MINUS_EXPR:
3211       return IFN_COND_SUB;
3212     case MIN_EXPR:
3213       return IFN_COND_MIN;
3214     case MAX_EXPR:
3215       return IFN_COND_MAX;
3216     case BIT_AND_EXPR:
3217       return IFN_COND_AND;
3218     case BIT_IOR_EXPR:
3219       return IFN_COND_IOR;
3220     case BIT_XOR_EXPR:
3221       return IFN_COND_XOR;
3222     default:
3223       return IFN_LAST;
3224     }
3225 }
3226 
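/* For example, get_conditional_internal_fn (PLUS_EXPR) is IFN_COND_ADD, so

       LHS = IFN_COND_ADD (COND, A, B);

   computes A + B in the elements selected by COND and copies A elsewhere,
   i.e. the LHS = COND ? A + B : A form described above.  */
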
3227 /* Return true if FN is some form of load from memory.  */
3228 
3229 bool
3230 internal_load_fn_p (internal_fn fn)
3231 {
3232   switch (fn)
3233     {
3234     case IFN_MASK_LOAD:
3235     case IFN_LOAD_LANES:
3236     case IFN_MASK_LOAD_LANES:
3237     case IFN_GATHER_LOAD:
3238     case IFN_MASK_GATHER_LOAD:
3239       return true;
3240 
3241     default:
3242       return false;
3243     }
3244 }
3245 
3246 /* Return true if FN is some form of store to memory.  */
3247 
3248 bool
3249 internal_store_fn_p (internal_fn fn)
3250 {
3251   switch (fn)
3252     {
3253     case IFN_MASK_STORE:
3254     case IFN_STORE_LANES:
3255     case IFN_MASK_STORE_LANES:
3256     case IFN_SCATTER_STORE:
3257     case IFN_MASK_SCATTER_STORE:
3258       return true;
3259 
3260     default:
3261       return false;
3262     }
3263 }
3264 
3265 /* Return true if FN is some form of gather load or scatter store.  */
3266 
3267 bool
3268 internal_gather_scatter_fn_p (internal_fn fn)
3269 {
3270   switch (fn)
3271     {
3272     case IFN_GATHER_LOAD:
3273     case IFN_MASK_GATHER_LOAD:
3274     case IFN_SCATTER_STORE:
3275     case IFN_MASK_SCATTER_STORE:
3276       return true;
3277 
3278     default:
3279       return false;
3280     }
3281 }
3282 
3283 /* If FN takes a vector mask argument, return the index of that argument,
3284    otherwise return -1.  */
3285 
3286 int
3287 internal_fn_mask_index (internal_fn fn)
3288 {
3289   switch (fn)
3290     {
3291     case IFN_MASK_LOAD:
3292     case IFN_MASK_LOAD_LANES:
3293     case IFN_MASK_STORE:
3294     case IFN_MASK_STORE_LANES:
3295       return 2;
3296 
3297     case IFN_MASK_GATHER_LOAD:
3298       return 3;
3299 
3300     case IFN_MASK_SCATTER_STORE:
3301       return 4;
3302 
3303     default:
3304       return -1;
3305     }
3306 }
3307 
3308 /* If FN takes a value that should be stored to memory, return the index
3309    of that argument, otherwise return -1.  */
3310 
3311 int
3312 internal_fn_stored_value_index (internal_fn fn)
3313 {
3314   switch (fn)
3315     {
3316     case IFN_MASK_STORE:
3317     case IFN_SCATTER_STORE:
3318     case IFN_MASK_SCATTER_STORE:
3319       return 3;
3320 
3321     default:
3322       return -1;
3323     }
3324 }
3325 
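/* As an illustration of the two index functions above, an IFN_MASK_STORE
   call has (roughly) the argument layout

       MASK_STORE (<base address>, <alignment cookie>, <mask>, <stored value>)

   so its mask is argument 2 and its stored value is argument 3.  */
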
3326 /* Return true if the target supports gather load or scatter store function
3327    IFN.  For loads, VECTOR_TYPE is the vector type of the load result,
3328    while for stores it is the vector type of the stored data argument.
3329    MEMORY_ELEMENT_TYPE is the type of the memory elements being loaded
3330    or stored.  OFFSET_SIGN is the sign of the offset argument, which is
3331    only relevant when the offset is narrower than an address.  SCALE is
3332    the amount by which the offset should be multiplied *after* it has
3333    been extended to address width.  */
3334 
3335 bool
3336 internal_gather_scatter_fn_supported_p (internal_fn ifn, tree vector_type,
3337 					tree memory_element_type,
3338 					signop offset_sign, int scale)
3339 {
3340   if (!tree_int_cst_equal (TYPE_SIZE (TREE_TYPE (vector_type)),
3341 			   TYPE_SIZE (memory_element_type)))
3342     return false;
3343   optab optab = direct_internal_fn_optab (ifn);
3344   insn_code icode = direct_optab_handler (optab, TYPE_MODE (vector_type));
3345   int output_ops = internal_load_fn_p (ifn) ? 1 : 0;
3346   return (icode != CODE_FOR_nothing
3347 	  && insn_operand_matches (icode, 2 + output_ops,
3348 				   GEN_INT (offset_sign == UNSIGNED))
3349 	  && insn_operand_matches (icode, 3 + output_ops,
3350 				   GEN_INT (scale)));
3351 }
3352 
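/* An illustrative query (vectype again standing for a caller-supplied
   vector type): a caller that wants to gather elements of vectype's
   element type using unsigned offsets scaled by 4 would test

       internal_gather_scatter_fn_supported_p (IFN_GATHER_LOAD, vectype,
                                               TREE_TYPE (vectype),
                                               UNSIGNED, 4)

   before committing to that form.  */
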
3353 /* Expand STMT as though it were a call to internal function FN.  */
3354 
3355 void
3356 expand_internal_call (internal_fn fn, gcall *stmt)
3357 {
3358   internal_fn_expanders[fn] (fn, stmt);
3359 }
3360 
3361 /* Expand STMT, which is a call to an internal function.  */
3362 
3363 void
3364 expand_internal_call (gcall *stmt)
3365 {
3366   expand_internal_call (gimple_call_internal_fn (stmt), stmt);
3367 }
3368 
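/* IFN_PHI calls should never reach this expander; trap if one does.  */
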
3369 void
3370 expand_PHI (internal_fn, gcall *)
3371 {
3372   gcc_unreachable ();
3373 }
3374