xref: /dragonfly/contrib/gcc-8.0/gcc/expmed.c (revision 73b5ca6b)
1 /* Medium-level subroutines: convert bit-field store and extract
2    and shifts, multiplies and divides to rtl instructions.
3    Copyright (C) 1987-2018 Free Software Foundation, Inc.
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11 
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15 for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.  If not see
19 <http://www.gnu.org/licenses/>.  */
20 
21 
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "predict.h"
30 #include "memmodel.h"
31 #include "tm_p.h"
32 #include "expmed.h"
33 #include "optabs.h"
34 #include "regs.h"
35 #include "emit-rtl.h"
36 #include "diagnostic-core.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "dojump.h"
40 #include "explow.h"
41 #include "expr.h"
42 #include "langhooks.h"
43 #include "tree-vector-builder.h"
44 
45 struct target_expmed default_target_expmed;
46 #if SWITCHABLE_TARGET
47 struct target_expmed *this_target_expmed = &default_target_expmed;
48 #endif
49 
50 static bool store_integral_bit_field (rtx, opt_scalar_int_mode,
51 				      unsigned HOST_WIDE_INT,
52 				      unsigned HOST_WIDE_INT,
53 				      poly_uint64, poly_uint64,
54 				      machine_mode, rtx, bool, bool);
55 static void store_fixed_bit_field (rtx, opt_scalar_int_mode,
56 				   unsigned HOST_WIDE_INT,
57 				   unsigned HOST_WIDE_INT,
58 				   poly_uint64, poly_uint64,
59 				   rtx, scalar_int_mode, bool);
60 static void store_fixed_bit_field_1 (rtx, scalar_int_mode,
61 				     unsigned HOST_WIDE_INT,
62 				     unsigned HOST_WIDE_INT,
63 				     rtx, scalar_int_mode, bool);
64 static void store_split_bit_field (rtx, opt_scalar_int_mode,
65 				   unsigned HOST_WIDE_INT,
66 				   unsigned HOST_WIDE_INT,
67 				   poly_uint64, poly_uint64,
68 				   rtx, scalar_int_mode, bool);
69 static rtx extract_integral_bit_field (rtx, opt_scalar_int_mode,
70 				       unsigned HOST_WIDE_INT,
71 				       unsigned HOST_WIDE_INT, int, rtx,
72 				       machine_mode, machine_mode, bool, bool);
73 static rtx extract_fixed_bit_field (machine_mode, rtx, opt_scalar_int_mode,
74 				    unsigned HOST_WIDE_INT,
75 				    unsigned HOST_WIDE_INT, rtx, int, bool);
76 static rtx extract_fixed_bit_field_1 (machine_mode, rtx, scalar_int_mode,
77 				      unsigned HOST_WIDE_INT,
78 				      unsigned HOST_WIDE_INT, rtx, int, bool);
79 static rtx lshift_value (machine_mode, unsigned HOST_WIDE_INT, int);
80 static rtx extract_split_bit_field (rtx, opt_scalar_int_mode,
81 				    unsigned HOST_WIDE_INT,
82 				    unsigned HOST_WIDE_INT, int, bool);
83 static void do_cmp_and_jump (rtx, rtx, enum rtx_code, machine_mode, rtx_code_label *);
84 static rtx expand_smod_pow2 (scalar_int_mode, rtx, HOST_WIDE_INT);
85 static rtx expand_sdiv_pow2 (scalar_int_mode, rtx, HOST_WIDE_INT);
86 
87 /* Return a constant integer mask value of mode MODE with BITSIZE ones
88    followed by BITPOS zeros, or the complement of that if COMPLEMENT.
89    The mask is truncated if necessary to the width of mode MODE.  The
90    mask is zero-extended if BITSIZE+BITPOS is too small for MODE.  */
91 
92 static inline rtx
93 mask_rtx (scalar_int_mode mode, int bitpos, int bitsize, bool complement)
94 {
95   return immed_wide_int_const
96     (wi::shifted_mask (bitpos, bitsize, complement,
97 		       GET_MODE_PRECISION (mode)), mode);
98 }
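
/* For illustration (assuming SImode has 32-bit precision):
   mask_rtx (SImode, 4, 3, false) builds the constant 0x00000070,
   i.e. three one bits starting at bit 4, and passing true for
   COMPLEMENT yields the complemented mask 0xffffff8f.  */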
99 
100 /* Test whether a value is zero or a power of two.  */
101 #define EXACT_POWER_OF_2_OR_ZERO_P(x) \
102   (((x) & ((x) - HOST_WIDE_INT_1U)) == 0)
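
/* For example, 0, 1, 2, 4 and 8 all satisfy the predicate, since
   x & (x - 1) is zero for each of them, whereas 6 does not
   (6 & 5 == 4).  */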
103 
104 struct init_expmed_rtl
105 {
106   rtx reg;
107   rtx plus;
108   rtx neg;
109   rtx mult;
110   rtx sdiv;
111   rtx udiv;
112   rtx sdiv_32;
113   rtx smod_32;
114   rtx wide_mult;
115   rtx wide_lshr;
116   rtx wide_trunc;
117   rtx shift;
118   rtx shift_mult;
119   rtx shift_add;
120   rtx shift_sub0;
121   rtx shift_sub1;
122   rtx zext;
123   rtx trunc;
124 
125   rtx pow2[MAX_BITS_PER_WORD];
126   rtx cint[MAX_BITS_PER_WORD];
127 };
128 
129 static void
130 init_expmed_one_conv (struct init_expmed_rtl *all, scalar_int_mode to_mode,
131 		      scalar_int_mode from_mode, bool speed)
132 {
133   int to_size, from_size;
134   rtx which;
135 
136   to_size = GET_MODE_PRECISION (to_mode);
137   from_size = GET_MODE_PRECISION (from_mode);
138 
139   /* Most partial integers have a precision less than the "full"
140      integer they require for storage.  If one does not, reduce its
141      bit size by one here so that it still compares as narrower than
142      the full integer mode.  */
143   if (GET_MODE_CLASS (to_mode) == MODE_PARTIAL_INT
144       && pow2p_hwi (to_size))
145     to_size --;
146   if (GET_MODE_CLASS (from_mode) == MODE_PARTIAL_INT
147       && pow2p_hwi (from_size))
148     from_size --;
149 
150   /* Assume cost of zero-extend and sign-extend is the same.  */
151   which = (to_size < from_size ? all->trunc : all->zext);
152 
153   PUT_MODE (all->reg, from_mode);
154   set_convert_cost (to_mode, from_mode, speed,
155 		    set_src_cost (which, to_mode, speed));
156 }
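
/* As an illustration of the adjustment above: if a target had a
   (hypothetical) partial-integer mode with a full 32-bit precision,
   the precision would be treated as 31 bits here, so a conversion
   from SImode to that mode would be costed as a truncation rather
   than as an extension.  */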
157 
158 static void
159 init_expmed_one_mode (struct init_expmed_rtl *all,
160 		      machine_mode mode, int speed)
161 {
162   int m, n, mode_bitsize;
163   machine_mode mode_from;
164 
165   mode_bitsize = GET_MODE_UNIT_BITSIZE (mode);
166 
167   PUT_MODE (all->reg, mode);
168   PUT_MODE (all->plus, mode);
169   PUT_MODE (all->neg, mode);
170   PUT_MODE (all->mult, mode);
171   PUT_MODE (all->sdiv, mode);
172   PUT_MODE (all->udiv, mode);
173   PUT_MODE (all->sdiv_32, mode);
174   PUT_MODE (all->smod_32, mode);
175   PUT_MODE (all->wide_trunc, mode);
176   PUT_MODE (all->shift, mode);
177   PUT_MODE (all->shift_mult, mode);
178   PUT_MODE (all->shift_add, mode);
179   PUT_MODE (all->shift_sub0, mode);
180   PUT_MODE (all->shift_sub1, mode);
181   PUT_MODE (all->zext, mode);
182   PUT_MODE (all->trunc, mode);
183 
184   set_add_cost (speed, mode, set_src_cost (all->plus, mode, speed));
185   set_neg_cost (speed, mode, set_src_cost (all->neg, mode, speed));
186   set_mul_cost (speed, mode, set_src_cost (all->mult, mode, speed));
187   set_sdiv_cost (speed, mode, set_src_cost (all->sdiv, mode, speed));
188   set_udiv_cost (speed, mode, set_src_cost (all->udiv, mode, speed));
189 
190   set_sdiv_pow2_cheap (speed, mode, (set_src_cost (all->sdiv_32, mode, speed)
191 				     <= 2 * add_cost (speed, mode)));
192   set_smod_pow2_cheap (speed, mode, (set_src_cost (all->smod_32, mode, speed)
193 				     <= 4 * add_cost (speed, mode)));
194 
195   set_shift_cost (speed, mode, 0, 0);
196   {
197     int cost = add_cost (speed, mode);
198     set_shiftadd_cost (speed, mode, 0, cost);
199     set_shiftsub0_cost (speed, mode, 0, cost);
200     set_shiftsub1_cost (speed, mode, 0, cost);
201   }
202 
203   n = MIN (MAX_BITS_PER_WORD, mode_bitsize);
204   for (m = 1; m < n; m++)
205     {
206       XEXP (all->shift, 1) = all->cint[m];
207       XEXP (all->shift_mult, 1) = all->pow2[m];
208 
209       set_shift_cost (speed, mode, m, set_src_cost (all->shift, mode, speed));
210       set_shiftadd_cost (speed, mode, m, set_src_cost (all->shift_add, mode,
211 						       speed));
212       set_shiftsub0_cost (speed, mode, m, set_src_cost (all->shift_sub0, mode,
213 							speed));
214       set_shiftsub1_cost (speed, mode, m, set_src_cost (all->shift_sub1, mode,
215 							speed));
216     }
217 
218   scalar_int_mode int_mode_to;
219   if (is_a <scalar_int_mode> (mode, &int_mode_to))
220     {
221       for (mode_from = MIN_MODE_INT; mode_from <= MAX_MODE_INT;
222 	   mode_from = (machine_mode)(mode_from + 1))
223 	init_expmed_one_conv (all, int_mode_to,
224 			      as_a <scalar_int_mode> (mode_from), speed);
225 
226       scalar_int_mode wider_mode;
227       if (GET_MODE_CLASS (int_mode_to) == MODE_INT
228 	  && GET_MODE_WIDER_MODE (int_mode_to).exists (&wider_mode))
229 	{
230 	  PUT_MODE (all->zext, wider_mode);
231 	  PUT_MODE (all->wide_mult, wider_mode);
232 	  PUT_MODE (all->wide_lshr, wider_mode);
233 	  XEXP (all->wide_lshr, 1)
234 	    = gen_int_shift_amount (wider_mode, mode_bitsize);
235 
236 	  set_mul_widen_cost (speed, wider_mode,
237 			      set_src_cost (all->wide_mult, wider_mode, speed));
238 	  set_mul_highpart_cost (speed, int_mode_to,
239 				 set_src_cost (all->wide_trunc,
240 					       int_mode_to, speed));
241 	}
242     }
243 }
244 
245 void
246 init_expmed (void)
247 {
248   struct init_expmed_rtl all;
249   machine_mode mode = QImode;
250   int m, speed;
251 
252   memset (&all, 0, sizeof all);
253   for (m = 1; m < MAX_BITS_PER_WORD; m++)
254     {
255       all.pow2[m] = GEN_INT (HOST_WIDE_INT_1 << m);
256       all.cint[m] = GEN_INT (m);
257     }
258 
259   /* Avoid using hard regs in ways which may be unsupported.  */
260   all.reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
261   all.plus = gen_rtx_PLUS (mode, all.reg, all.reg);
262   all.neg = gen_rtx_NEG (mode, all.reg);
263   all.mult = gen_rtx_MULT (mode, all.reg, all.reg);
264   all.sdiv = gen_rtx_DIV (mode, all.reg, all.reg);
265   all.udiv = gen_rtx_UDIV (mode, all.reg, all.reg);
266   all.sdiv_32 = gen_rtx_DIV (mode, all.reg, all.pow2[5]);
267   all.smod_32 = gen_rtx_MOD (mode, all.reg, all.pow2[5]);
268   all.zext = gen_rtx_ZERO_EXTEND (mode, all.reg);
269   all.wide_mult = gen_rtx_MULT (mode, all.zext, all.zext);
270   all.wide_lshr = gen_rtx_LSHIFTRT (mode, all.wide_mult, all.reg);
271   all.wide_trunc = gen_rtx_TRUNCATE (mode, all.wide_lshr);
272   all.shift = gen_rtx_ASHIFT (mode, all.reg, all.reg);
273   all.shift_mult = gen_rtx_MULT (mode, all.reg, all.reg);
274   all.shift_add = gen_rtx_PLUS (mode, all.shift_mult, all.reg);
275   all.shift_sub0 = gen_rtx_MINUS (mode, all.shift_mult, all.reg);
276   all.shift_sub1 = gen_rtx_MINUS (mode, all.reg, all.shift_mult);
277   all.trunc = gen_rtx_TRUNCATE (mode, all.reg);
278 
279   for (speed = 0; speed < 2; speed++)
280     {
281       crtl->maybe_hot_insn_p = speed;
282       set_zero_cost (speed, set_src_cost (const0_rtx, mode, speed));
283 
284       for (mode = MIN_MODE_INT; mode <= MAX_MODE_INT;
285 	   mode = (machine_mode)(mode + 1))
286 	init_expmed_one_mode (&all, mode, speed);
287 
288       if (MIN_MODE_PARTIAL_INT != VOIDmode)
289 	for (mode = MIN_MODE_PARTIAL_INT; mode <= MAX_MODE_PARTIAL_INT;
290 	     mode = (machine_mode)(mode + 1))
291 	  init_expmed_one_mode (&all, mode, speed);
292 
293       if (MIN_MODE_VECTOR_INT != VOIDmode)
294 	for (mode = MIN_MODE_VECTOR_INT; mode <= MAX_MODE_VECTOR_INT;
295 	     mode = (machine_mode)(mode + 1))
296 	  init_expmed_one_mode (&all, mode, speed);
297     }
298 
299   if (alg_hash_used_p ())
300     {
301       struct alg_hash_entry *p = alg_hash_entry_ptr (0);
302       memset (p, 0, sizeof (*p) * NUM_ALG_HASH_ENTRIES);
303     }
304   else
305     set_alg_hash_used_p (true);
306   default_rtl_profile ();
307 
308   ggc_free (all.trunc);
309   ggc_free (all.shift_sub1);
310   ggc_free (all.shift_sub0);
311   ggc_free (all.shift_add);
312   ggc_free (all.shift_mult);
313   ggc_free (all.shift);
314   ggc_free (all.wide_trunc);
315   ggc_free (all.wide_lshr);
316   ggc_free (all.wide_mult);
317   ggc_free (all.zext);
318   ggc_free (all.smod_32);
319   ggc_free (all.sdiv_32);
320   ggc_free (all.udiv);
321   ggc_free (all.sdiv);
322   ggc_free (all.mult);
323   ggc_free (all.neg);
324   ggc_free (all.plus);
325   ggc_free (all.reg);
326 }
327 
328 /* Return an rtx representing minus the value of X.
329    MODE is the intended mode of the result,
330    useful if X is a CONST_INT.  */
331 
332 rtx
333 negate_rtx (machine_mode mode, rtx x)
334 {
335   rtx result = simplify_unary_operation (NEG, mode, x, mode);
336 
337   if (result == 0)
338     result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);
339 
340   return result;
341 }
342 
343 /* Whether reverse storage order is supported on the target.  */
344 static int reverse_storage_order_supported = -1;
345 
346 /* Check whether reverse storage order is supported on the target.  */
347 
348 static void
349 check_reverse_storage_order_support (void)
350 {
351   if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
352     {
353       reverse_storage_order_supported = 0;
354       sorry ("reverse scalar storage order");
355     }
356   else
357     reverse_storage_order_supported = 1;
358 }
359 
360 /* Whether reverse FP storage order is supported on the target.  */
361 static int reverse_float_storage_order_supported = -1;
362 
363 /* Check whether reverse FP storage order is supported on the target.  */
364 
365 static void
366 check_reverse_float_storage_order_support (void)
367 {
368   if (FLOAT_WORDS_BIG_ENDIAN != WORDS_BIG_ENDIAN)
369     {
370       reverse_float_storage_order_supported = 0;
371       sorry ("reverse floating-point scalar storage order");
372     }
373   else
374     reverse_float_storage_order_supported = 1;
375 }
376 
377 /* Return an rtx representing value of X with reverse storage order.
378    MODE is the intended mode of the result,
379    useful if X is a CONST_INT.  */
380 
381 rtx
382 flip_storage_order (machine_mode mode, rtx x)
383 {
384   scalar_int_mode int_mode;
385   rtx result;
386 
387   if (mode == QImode)
388     return x;
389 
390   if (COMPLEX_MODE_P (mode))
391     {
392       rtx real = read_complex_part (x, false);
393       rtx imag = read_complex_part (x, true);
394 
395       real = flip_storage_order (GET_MODE_INNER (mode), real);
396       imag = flip_storage_order (GET_MODE_INNER (mode), imag);
397 
398       return gen_rtx_CONCAT (mode, real, imag);
399     }
400 
401   if (__builtin_expect (reverse_storage_order_supported < 0, 0))
402     check_reverse_storage_order_support ();
403 
404   if (!is_a <scalar_int_mode> (mode, &int_mode))
405     {
406       if (FLOAT_MODE_P (mode)
407 	  && __builtin_expect (reverse_float_storage_order_supported < 0, 0))
408 	check_reverse_float_storage_order_support ();
409 
410       if (!int_mode_for_size (GET_MODE_PRECISION (mode), 0).exists (&int_mode))
411 	{
412 	  sorry ("reverse storage order for %smode", GET_MODE_NAME (mode));
413 	  return x;
414 	}
415       x = gen_lowpart (int_mode, x);
416     }
417 
418   result = simplify_unary_operation (BSWAP, int_mode, x, int_mode);
419   if (result == 0)
420     result = expand_unop (int_mode, bswap_optab, x, NULL_RTX, 1);
421 
422   if (int_mode != mode)
423     result = gen_lowpart (mode, result);
424 
425   return result;
426 }
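
/* For example, flipping the storage order of a 32-bit SImode value is
   a byte swap: 0x12345678 becomes 0x78563412.  A QImode value is
   returned unchanged, and complex modes are handled one part at a
   time.  */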
427 
428 /* If MODE is set, adjust bitfield memory MEM so that it points to the
429    first unit of mode MODE that contains a bitfield of size BITSIZE at
430    bit position BITNUM.  If MODE is not set, return a BLKmode reference
431    to every byte in the bitfield.  Set *NEW_BITNUM to the bit position
432    of the field within the new memory.  */
433 
434 static rtx
435 narrow_bit_field_mem (rtx mem, opt_scalar_int_mode mode,
436 		      unsigned HOST_WIDE_INT bitsize,
437 		      unsigned HOST_WIDE_INT bitnum,
438 		      unsigned HOST_WIDE_INT *new_bitnum)
439 {
440   scalar_int_mode imode;
441   if (mode.exists (&imode))
442     {
443       unsigned int unit = GET_MODE_BITSIZE (imode);
444       *new_bitnum = bitnum % unit;
445       HOST_WIDE_INT offset = (bitnum - *new_bitnum) / BITS_PER_UNIT;
446       return adjust_bitfield_address (mem, imode, offset);
447     }
448   else
449     {
450       *new_bitnum = bitnum % BITS_PER_UNIT;
451       HOST_WIDE_INT offset = bitnum / BITS_PER_UNIT;
452       HOST_WIDE_INT size = ((*new_bitnum + bitsize + BITS_PER_UNIT - 1)
453 			    / BITS_PER_UNIT);
454       return adjust_bitfield_address_size (mem, BLKmode, offset, size);
455     }
456 }
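
/* A worked example, assuming 8-bit units: narrowing a reference to a
   3-bit field at bit 37 with MODE = HImode (16 bits) gives
   *NEW_BITNUM = 37 % 16 = 5 and a byte offset of (37 - 5) / 8 = 4.
   Without a MODE, the BLKmode reference starts at byte 37 / 8 = 4,
   *NEW_BITNUM = 5, and covers (5 + 3 + 7) / 8 = 1 byte.  */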
457 
458 /* The caller wants to perform insertion or extraction PATTERN on a
459    bitfield of size BITSIZE at BITNUM bits into memory operand OP0.
460    BITREGION_START and BITREGION_END are as for store_bit_field
461    and FIELDMODE is the natural mode of the field.
462 
463    Search for a mode that is compatible with the memory access
464    restrictions and (where applicable) with a register insertion or
465    extraction.  Return the new memory on success, storing the adjusted
466    bit position in *NEW_BITNUM.  Return null otherwise.  */
467 
468 static rtx
469 adjust_bit_field_mem_for_reg (enum extraction_pattern pattern,
470 			      rtx op0, HOST_WIDE_INT bitsize,
471 			      HOST_WIDE_INT bitnum,
472 			      poly_uint64 bitregion_start,
473 			      poly_uint64 bitregion_end,
474 			      machine_mode fieldmode,
475 			      unsigned HOST_WIDE_INT *new_bitnum)
476 {
477   bit_field_mode_iterator iter (bitsize, bitnum, bitregion_start,
478 				bitregion_end, MEM_ALIGN (op0),
479 				MEM_VOLATILE_P (op0));
480   scalar_int_mode best_mode;
481   if (iter.next_mode (&best_mode))
482     {
483       /* We can use a memory in BEST_MODE.  See whether this is true for
484 	 any wider modes.  All other things being equal, we prefer to
485 	 use the widest mode possible because it tends to expose more
486 	 CSE opportunities.  */
487       if (!iter.prefer_smaller_modes ())
488 	{
489 	  /* Limit the search to the mode required by the corresponding
490 	     register insertion or extraction instruction, if any.  */
491 	  scalar_int_mode limit_mode = word_mode;
492 	  extraction_insn insn;
493 	  if (get_best_reg_extraction_insn (&insn, pattern,
494 					    GET_MODE_BITSIZE (best_mode),
495 					    fieldmode))
496 	    limit_mode = insn.field_mode;
497 
498 	  scalar_int_mode wider_mode;
499 	  while (iter.next_mode (&wider_mode)
500 		 && GET_MODE_SIZE (wider_mode) <= GET_MODE_SIZE (limit_mode))
501 	    best_mode = wider_mode;
502 	}
503       return narrow_bit_field_mem (op0, best_mode, bitsize, bitnum,
504 				   new_bitnum);
505     }
506   return NULL_RTX;
507 }
508 
509 /* Return true if a bitfield of size BITSIZE at bit number BITNUM within
510    a structure of mode STRUCT_MODE represents a lowpart subreg.   The subreg
511    offset is then BITNUM / BITS_PER_UNIT.  */
512 
513 static bool
514 lowpart_bit_field_p (poly_uint64 bitnum, poly_uint64 bitsize,
515 		     machine_mode struct_mode)
516 {
517   poly_uint64 regsize = REGMODE_NATURAL_SIZE (struct_mode);
518   if (BYTES_BIG_ENDIAN)
519     return (multiple_p (bitnum, BITS_PER_UNIT)
520 	    && (known_eq (bitnum + bitsize, GET_MODE_BITSIZE (struct_mode))
521 		|| multiple_p (bitnum + bitsize,
522 			       regsize * BITS_PER_UNIT)));
523   else
524     return multiple_p (bitnum, regsize * BITS_PER_UNIT);
525 }
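
/* For example, on a little-endian target whose natural register size
   for DImode is 8 bytes, a field starting at bit 0 of a DImode value
   is a lowpart, while one starting at bit 32 is not.  On a big-endian
   target the field must instead be byte-aligned and end either at the
   last bit of the mode or on a register boundary.  */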
526 
527 /* Return true if -fstrict-volatile-bitfields applies to an access of OP0
528    containing BITSIZE bits starting at BITNUM, with field mode FIELDMODE.
529    Return false if the access would touch memory outside the range
530    BITREGION_START to BITREGION_END for conformance to the C++ memory
531    model.  */
532 
533 static bool
534 strict_volatile_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize,
535 			    unsigned HOST_WIDE_INT bitnum,
536 			    scalar_int_mode fieldmode,
537 			    poly_uint64 bitregion_start,
538 			    poly_uint64 bitregion_end)
539 {
540   unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (fieldmode);
541 
542   /* -fstrict-volatile-bitfields must be enabled and we must have a
543      volatile MEM.  */
544   if (!MEM_P (op0)
545       || !MEM_VOLATILE_P (op0)
546       || flag_strict_volatile_bitfields <= 0)
547     return false;
548 
549   /* The bit size must not be larger than the field mode, and
550      the field mode must not be larger than a word.  */
551   if (bitsize > modesize || modesize > BITS_PER_WORD)
552     return false;
553 
554   /* Check for cases of unaligned fields that must be split.  */
555   if (bitnum % modesize + bitsize > modesize)
556     return false;
557 
558   /* The memory must be sufficiently aligned for a MODESIZE access.
559      This condition guarantees, that the memory access will not
560      touch anything after the end of the structure.  */
561   if (MEM_ALIGN (op0) < modesize)
562     return false;
563 
564   /* Check for cases where the C++ memory model applies.  */
565   if (maybe_ne (bitregion_end, 0U)
566       && (maybe_lt (bitnum - bitnum % modesize, bitregion_start)
567 	  || maybe_gt (bitnum - bitnum % modesize + modesize - 1,
568 		       bitregion_end)))
569     return false;
570 
571   return true;
572 }
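
/* For example, on a target with 32-bit or wider words and with
   -fstrict-volatile-bitfields enabled, a volatile 8-bit field at bit 4
   of a 4-byte-aligned object, accessed with SImode as the field mode,
   passes all of the checks above.  The same field would be rejected if
   it straddled a 32-bit boundary or if the object were only
   byte-aligned.  */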
573 
574 /* Return true if OP is a memory and if a bitfield of size BITSIZE at
575    bit number BITNUM can be treated as a simple value of mode MODE.
576    Store the byte offset in *BYTENUM if so.  */
577 
578 static bool
579 simple_mem_bitfield_p (rtx op0, poly_uint64 bitsize, poly_uint64 bitnum,
580 		       machine_mode mode, poly_uint64 *bytenum)
581 {
582   return (MEM_P (op0)
583 	  && multiple_p (bitnum, BITS_PER_UNIT, bytenum)
584 	  && known_eq (bitsize, GET_MODE_BITSIZE (mode))
585 	  && (!targetm.slow_unaligned_access (mode, MEM_ALIGN (op0))
586 	      || (multiple_p (bitnum, GET_MODE_ALIGNMENT (mode))
587 		  && MEM_ALIGN (op0) >= GET_MODE_ALIGNMENT (mode))));
588 }
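
/* For example, a 32-bit field at bit 64 of a MEM is simple in SImode
   with *BYTENUM = 8, provided that unaligned SImode accesses are cheap
   on the target or that the MEM is sufficiently aligned.  */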
589 
590 /* Try to use instruction INSV to store VALUE into a field of OP0.
591    If OP0_MODE is defined, it is the mode of OP0, otherwise OP0 is a
592    BLKmode MEM.  VALUE_MODE is the mode of VALUE.  BITSIZE and BITNUM
593    are as for store_bit_field.  */
594 
595 static bool
596 store_bit_field_using_insv (const extraction_insn *insv, rtx op0,
597 			    opt_scalar_int_mode op0_mode,
598 			    unsigned HOST_WIDE_INT bitsize,
599 			    unsigned HOST_WIDE_INT bitnum,
600 			    rtx value, scalar_int_mode value_mode)
601 {
602   struct expand_operand ops[4];
603   rtx value1;
604   rtx xop0 = op0;
605   rtx_insn *last = get_last_insn ();
606   bool copy_back = false;
607 
608   scalar_int_mode op_mode = insv->field_mode;
609   unsigned int unit = GET_MODE_BITSIZE (op_mode);
610   if (bitsize == 0 || bitsize > unit)
611     return false;
612 
613   if (MEM_P (xop0))
614     /* Get a reference to the first byte of the field.  */
615     xop0 = narrow_bit_field_mem (xop0, insv->struct_mode, bitsize, bitnum,
616 				 &bitnum);
617   else
618     {
619       /* Convert from counting within OP0 to counting in OP_MODE.  */
620       if (BYTES_BIG_ENDIAN)
621 	bitnum += unit - GET_MODE_BITSIZE (op0_mode.require ());
622 
623       /* If xop0 is a register, we need it in OP_MODE
624 	 to make it acceptable to the format of insv.  */
625       if (GET_CODE (xop0) == SUBREG)
626 	/* We can't just change the mode, because this might clobber op0,
627 	   and we will need the original value of op0 if insv fails.  */
628 	xop0 = gen_rtx_SUBREG (op_mode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
629       if (REG_P (xop0) && GET_MODE (xop0) != op_mode)
630 	xop0 = gen_lowpart_SUBREG (op_mode, xop0);
631     }
632 
633   /* If the destination is a paradoxical subreg such that we need a
634      truncate to the inner mode, perform the insertion on a temporary and
635      truncate the result to the original destination.  Note that we can't
636      just truncate the paradoxical subreg as (truncate:N (subreg:W (reg:N
637      X) 0)) is (reg:N X).  */
638   if (GET_CODE (xop0) == SUBREG
639       && REG_P (SUBREG_REG (xop0))
640       && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (SUBREG_REG (xop0)),
641 					 op_mode))
642     {
643       rtx tem = gen_reg_rtx (op_mode);
644       emit_move_insn (tem, xop0);
645       xop0 = tem;
646       copy_back = true;
647     }
648 
649   /* There is a similar overflow check at the start of store_bit_field_1,
650      but it only handles the case where the field lies completely outside
651      the register.  The field can also lie partially in the register, and
652      in that case we need to adjust BITSIZE for the partial overflow.
653      Without this adjustment, pr48335-2.c would break on big-endian
654      targets that have a bit-insert instruction, such as arm and
655      aarch64.  */
656   if (bitsize + bitnum > unit && bitnum < unit)
657     {
658       warning (OPT_Wextra, "write of %wu-bit data outside the bound of "
659 	       "destination object, data truncated into %wu-bit",
660 	       bitsize, unit - bitnum);
661       bitsize = unit - bitnum;
662     }
663 
664   /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
665      "backwards" from the size of the unit we are inserting into.
666      Otherwise, we count bits from the most significant on a
667      BYTES/BITS_BIG_ENDIAN machine.  */
668 
669   if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
670     bitnum = unit - bitsize - bitnum;
671 
672   /* Convert VALUE to op_mode (which insv insn wants) in VALUE1.  */
673   value1 = value;
674   if (value_mode != op_mode)
675     {
676       if (GET_MODE_BITSIZE (value_mode) >= bitsize)
677 	{
678 	  rtx tmp;
679 	  /* Optimization: Don't bother really extending VALUE
680 	     if it has all the bits we will actually use.  However,
681 	     if we must narrow it, be sure we do it correctly.  */
682 
683 	  if (GET_MODE_SIZE (value_mode) < GET_MODE_SIZE (op_mode))
684 	    {
685 	      tmp = simplify_subreg (op_mode, value1, value_mode, 0);
686 	      if (! tmp)
687 		tmp = simplify_gen_subreg (op_mode,
688 					   force_reg (value_mode, value1),
689 					   value_mode, 0);
690 	    }
691 	  else
692 	    {
693 	      tmp = gen_lowpart_if_possible (op_mode, value1);
694 	      if (! tmp)
695 		tmp = gen_lowpart (op_mode, force_reg (value_mode, value1));
696 	    }
697 	  value1 = tmp;
698 	}
699       else if (CONST_INT_P (value))
700 	value1 = gen_int_mode (INTVAL (value), op_mode);
701       else
702 	/* Parse phase is supposed to make VALUE's data type
703 	   match that of the component reference, which is a type
704 	   at least as wide as the field; so VALUE should have
705 	   a mode that corresponds to that type.  */
706 	gcc_assert (CONSTANT_P (value));
707     }
708 
709   create_fixed_operand (&ops[0], xop0);
710   create_integer_operand (&ops[1], bitsize);
711   create_integer_operand (&ops[2], bitnum);
712   create_input_operand (&ops[3], value1, op_mode);
713   if (maybe_expand_insn (insv->icode, 4, ops))
714     {
715       if (copy_back)
716 	convert_move (op0, xop0, true);
717       return true;
718     }
719   delete_insns_since (last);
720   return false;
721 }
722 
723 /* A subroutine of store_bit_field, with the same arguments.  Return true
724    if the operation could be implemented.
725 
726    If FALLBACK_P is true, fall back to store_fixed_bit_field if we have
727    no other way of implementing the operation.  If FALLBACK_P is false,
728    return false instead.  */
729 
730 static bool
731 store_bit_field_1 (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum,
732 		   poly_uint64 bitregion_start, poly_uint64 bitregion_end,
733 		   machine_mode fieldmode,
734 		   rtx value, bool reverse, bool fallback_p)
735 {
736   rtx op0 = str_rtx;
737 
738   while (GET_CODE (op0) == SUBREG)
739     {
740       bitnum += subreg_memory_offset (op0) * BITS_PER_UNIT;
741       op0 = SUBREG_REG (op0);
742     }
743 
744   /* No action is needed if the target is a register and if the field
745      lies completely outside that register.  This can occur if the source
746      code contains an out-of-bounds access to a small array.  */
747   if (REG_P (op0) && known_ge (bitnum, GET_MODE_BITSIZE (GET_MODE (op0))))
748     return true;
749 
750   /* Use vec_set patterns for inserting parts of vectors whenever
751      available.  */
752   machine_mode outermode = GET_MODE (op0);
753   scalar_mode innermode = GET_MODE_INNER (outermode);
754   poly_uint64 pos;
755   if (VECTOR_MODE_P (outermode)
756       && !MEM_P (op0)
757       && optab_handler (vec_set_optab, outermode) != CODE_FOR_nothing
758       && fieldmode == innermode
759       && known_eq (bitsize, GET_MODE_BITSIZE (innermode))
760       && multiple_p (bitnum, GET_MODE_BITSIZE (innermode), &pos))
761     {
762       struct expand_operand ops[3];
763       enum insn_code icode = optab_handler (vec_set_optab, outermode);
764 
765       create_fixed_operand (&ops[0], op0);
766       create_input_operand (&ops[1], value, innermode);
767       create_integer_operand (&ops[2], pos);
768       if (maybe_expand_insn (icode, 3, ops))
769 	return true;
770     }
771 
772   /* If the target is a register, overwriting the entire object, or storing
773      a full-word or multi-word field can be done with just a SUBREG.  */
774   if (!MEM_P (op0)
775       && known_eq (bitsize, GET_MODE_BITSIZE (fieldmode)))
776     {
777       /* Use the subreg machinery either to narrow OP0 to the required
778 	 words or to cope with mode punning between equal-sized modes.
779 	 In the latter case, use subreg on the rhs side, not lhs.  */
780       rtx sub;
781       HOST_WIDE_INT regnum;
782       poly_uint64 regsize = REGMODE_NATURAL_SIZE (GET_MODE (op0));
783       if (known_eq (bitnum, 0U)
784 	  && known_eq (bitsize, GET_MODE_BITSIZE (GET_MODE (op0))))
785 	{
786 	  sub = simplify_gen_subreg (GET_MODE (op0), value, fieldmode, 0);
787 	  if (sub)
788 	    {
789 	      if (reverse)
790 		sub = flip_storage_order (GET_MODE (op0), sub);
791 	      emit_move_insn (op0, sub);
792 	      return true;
793 	    }
794 	}
795       else if (constant_multiple_p (bitnum, regsize * BITS_PER_UNIT, &regnum)
796 	       && multiple_p (bitsize, regsize * BITS_PER_UNIT))
797 	{
798 	  sub = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
799 				     regnum * regsize);
800 	  if (sub)
801 	    {
802 	      if (reverse)
803 		value = flip_storage_order (fieldmode, value);
804 	      emit_move_insn (sub, value);
805 	      return true;
806 	    }
807 	}
808     }
809 
810   /* If the target is memory, storing any naturally aligned field can be
811      done with a simple store.  For targets that support fast unaligned
812      memory, any naturally sized, unit aligned field can be done directly.  */
813   poly_uint64 bytenum;
814   if (simple_mem_bitfield_p (op0, bitsize, bitnum, fieldmode, &bytenum))
815     {
816       op0 = adjust_bitfield_address (op0, fieldmode, bytenum);
817       if (reverse)
818 	value = flip_storage_order (fieldmode, value);
819       emit_move_insn (op0, value);
820       return true;
821     }
822 
823   /* It's possible we'll need to handle other cases here for
824      polynomial bitnum and bitsize.  */
825 
826   /* From here on we need to be looking at a fixed-size insertion.  */
827   unsigned HOST_WIDE_INT ibitsize = bitsize.to_constant ();
828   unsigned HOST_WIDE_INT ibitnum = bitnum.to_constant ();
829 
830   /* Make sure we are playing with integral modes.  Pun with subregs
831      if we aren't.  This must come after the entire register case above,
832      since that case is valid for any mode.  The following cases are only
833      valid for integral modes.  */
834   opt_scalar_int_mode op0_mode = int_mode_for_mode (GET_MODE (op0));
835   scalar_int_mode imode;
836   if (!op0_mode.exists (&imode) || imode != GET_MODE (op0))
837     {
838       if (MEM_P (op0))
839 	op0 = adjust_bitfield_address_size (op0, op0_mode.else_blk (),
840 					    0, MEM_SIZE (op0));
841       else
842 	op0 = gen_lowpart (op0_mode.require (), op0);
843     }
844 
845   return store_integral_bit_field (op0, op0_mode, ibitsize, ibitnum,
846 				   bitregion_start, bitregion_end,
847 				   fieldmode, value, reverse, fallback_p);
848 }
849 
850 /* Subroutine of store_bit_field_1, with the same arguments, except
851    that BITSIZE and BITNUM are constant.  Handle cases specific to
852    integral modes.  If OP0_MODE is defined, it is the mode of OP0,
853    otherwise OP0 is a BLKmode MEM.  */
854 
855 static bool
856 store_integral_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
857 			  unsigned HOST_WIDE_INT bitsize,
858 			  unsigned HOST_WIDE_INT bitnum,
859 			  poly_uint64 bitregion_start,
860 			  poly_uint64 bitregion_end,
861 			  machine_mode fieldmode,
862 			  rtx value, bool reverse, bool fallback_p)
863 {
864   /* Storing an lsb-aligned field in a register
865      can be done with a movstrict instruction.  */
866 
867   if (!MEM_P (op0)
868       && !reverse
869       && lowpart_bit_field_p (bitnum, bitsize, op0_mode.require ())
870       && known_eq (bitsize, GET_MODE_BITSIZE (fieldmode))
871       && optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing)
872     {
873       struct expand_operand ops[2];
874       enum insn_code icode = optab_handler (movstrict_optab, fieldmode);
875       rtx arg0 = op0;
876       unsigned HOST_WIDE_INT subreg_off;
877 
878       if (GET_CODE (arg0) == SUBREG)
879 	{
880 	  /* Else we've got some float mode source being extracted into
881 	     a different float mode destination -- this combination of
882 	     subregs results in Severe Tire Damage.  */
883 	  gcc_assert (GET_MODE (SUBREG_REG (arg0)) == fieldmode
884 		      || GET_MODE_CLASS (fieldmode) == MODE_INT
885 		      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
886 	  arg0 = SUBREG_REG (arg0);
887 	}
888 
889       subreg_off = bitnum / BITS_PER_UNIT;
890       if (validate_subreg (fieldmode, GET_MODE (arg0), arg0, subreg_off))
891 	{
892 	  arg0 = gen_rtx_SUBREG (fieldmode, arg0, subreg_off);
893 
894 	  create_fixed_operand (&ops[0], arg0);
895 	  /* Shrink the source operand to FIELDMODE.  */
896 	  create_convert_operand_to (&ops[1], value, fieldmode, false);
897 	  if (maybe_expand_insn (icode, 2, ops))
898 	    return true;
899 	}
900     }
901 
902   /* Handle fields bigger than a word.  */
903 
904   if (bitsize > BITS_PER_WORD)
905     {
906       /* Here we transfer the words of the field
907 	 in the order least significant first.
908 	 This is because the most significant word is the one which may
909 	 be less than full.
910 	 However, only do that if the value is not BLKmode.  */
911 
912       const bool backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
913       unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
914       unsigned int i;
915       rtx_insn *last;
916 
917       /* This is the mode we must force value to, so that there will be enough
918 	 subwords to extract.  Note that fieldmode will often (always?) be
919 	 VOIDmode, because that is what store_field uses to indicate that this
920 	 is a bit field, but passing VOIDmode to operand_subword_force
921 	 is not allowed.
922 
923 	 The mode must be fixed-size, since insertions into variable-sized
924 	 objects are meant to be handled before calling this function.  */
925       fixed_size_mode value_mode = as_a <fixed_size_mode> (GET_MODE (value));
926       if (value_mode == VOIDmode)
927 	value_mode = smallest_int_mode_for_size (nwords * BITS_PER_WORD);
928 
929       last = get_last_insn ();
930       for (i = 0; i < nwords; i++)
931 	{
932 	  /* If I is 0, use the low-order word in both field and target;
933 	     if I is 1, use the next to lowest word; and so on.  */
934 	  unsigned int wordnum = (backwards
935 				  ? GET_MODE_SIZE (value_mode) / UNITS_PER_WORD
936 				  - i - 1
937 				  : i);
938 	  unsigned int bit_offset = (backwards ^ reverse
939 				     ? MAX ((int) bitsize - ((int) i + 1)
940 					    * BITS_PER_WORD,
941 					    0)
942 				     : (int) i * BITS_PER_WORD);
943 	  rtx value_word = operand_subword_force (value, wordnum, value_mode);
944 	  unsigned HOST_WIDE_INT new_bitsize =
945 	    MIN (BITS_PER_WORD, bitsize - i * BITS_PER_WORD);
946 
947 	  /* If the remaining chunk doesn't have full wordsize we have
948 	     to make sure that for big-endian machines the higher order
949 	     bits are used.  */
950 	  if (new_bitsize < BITS_PER_WORD && BYTES_BIG_ENDIAN && !backwards)
951 	    {
952 	      int shift = BITS_PER_WORD - new_bitsize;
953 	      rtx shift_rtx = gen_int_shift_amount (word_mode, shift);
954 	      value_word = simplify_expand_binop (word_mode, lshr_optab,
955 						  value_word, shift_rtx,
956 						  NULL_RTX, true,
957 						  OPTAB_LIB_WIDEN);
958 	    }
959 
960 	  if (!store_bit_field_1 (op0, new_bitsize,
961 				  bitnum + bit_offset,
962 				  bitregion_start, bitregion_end,
963 				  word_mode,
964 				  value_word, reverse, fallback_p))
965 	    {
966 	      delete_insns_since (last);
967 	      return false;
968 	    }
969 	}
970       return true;
971     }
972 
973   /* If VALUE has a floating-point or complex mode, access it as an
974      integer of the corresponding size.  This can occur on a machine
975      with 64 bit registers that uses SFmode for float.  It can also
976      occur for unaligned float or complex fields.  */
977   rtx orig_value = value;
978   scalar_int_mode value_mode;
979   if (GET_MODE (value) == VOIDmode)
980     /* By this point we've dealt with values that are bigger than a word,
981        so word_mode is a conservatively correct choice.  */
982     value_mode = word_mode;
983   else if (!is_a <scalar_int_mode> (GET_MODE (value), &value_mode))
984     {
985       value_mode = int_mode_for_mode (GET_MODE (value)).require ();
986       value = gen_reg_rtx (value_mode);
987       emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
988     }
989 
990   /* If OP0 is a multi-word register, narrow it to the affected word.
991      If the region spans two words, defer to store_split_bit_field.
992      Don't do this if op0 is a single hard register wider than word
993      such as a float or vector register.  */
994   if (!MEM_P (op0)
995       && GET_MODE_SIZE (op0_mode.require ()) > UNITS_PER_WORD
996       && (!REG_P (op0)
997 	  || !HARD_REGISTER_P (op0)
998 	  || hard_regno_nregs (REGNO (op0), op0_mode.require ()) != 1))
999     {
1000       if (bitnum % BITS_PER_WORD + bitsize > BITS_PER_WORD)
1001 	{
1002 	  if (!fallback_p)
1003 	    return false;
1004 
1005 	  store_split_bit_field (op0, op0_mode, bitsize, bitnum,
1006 				 bitregion_start, bitregion_end,
1007 				 value, value_mode, reverse);
1008 	  return true;
1009 	}
1010       op0 = simplify_gen_subreg (word_mode, op0, op0_mode.require (),
1011 				 bitnum / BITS_PER_WORD * UNITS_PER_WORD);
1012       gcc_assert (op0);
1013       op0_mode = word_mode;
1014       bitnum %= BITS_PER_WORD;
1015     }
1016 
1017   /* From here on we can assume that the field to be stored in fits
1018      within a word.  If the destination is a register, it too fits
1019      in a word.  */
1020 
1021   extraction_insn insv;
1022   if (!MEM_P (op0)
1023       && !reverse
1024       && get_best_reg_extraction_insn (&insv, EP_insv,
1025 				       GET_MODE_BITSIZE (op0_mode.require ()),
1026 				       fieldmode)
1027       && store_bit_field_using_insv (&insv, op0, op0_mode,
1028 				     bitsize, bitnum, value, value_mode))
1029     return true;
1030 
1031   /* If OP0 is a memory, try copying it to a register and seeing if a
1032      cheap register alternative is available.  */
1033   if (MEM_P (op0) && !reverse)
1034     {
1035       if (get_best_mem_extraction_insn (&insv, EP_insv, bitsize, bitnum,
1036 					fieldmode)
1037 	  && store_bit_field_using_insv (&insv, op0, op0_mode,
1038 					 bitsize, bitnum, value, value_mode))
1039 	return true;
1040 
1041       rtx_insn *last = get_last_insn ();
1042 
1043       /* Try loading part of OP0 into a register, inserting the bitfield
1044 	 into that, and then copying the result back to OP0.  */
1045       unsigned HOST_WIDE_INT bitpos;
1046       rtx xop0 = adjust_bit_field_mem_for_reg (EP_insv, op0, bitsize, bitnum,
1047 					       bitregion_start, bitregion_end,
1048 					       fieldmode, &bitpos);
1049       if (xop0)
1050 	{
1051 	  rtx tempreg = copy_to_reg (xop0);
1052 	  if (store_bit_field_1 (tempreg, bitsize, bitpos,
1053 				 bitregion_start, bitregion_end,
1054 				 fieldmode, orig_value, reverse, false))
1055 	    {
1056 	      emit_move_insn (xop0, tempreg);
1057 	      return true;
1058 	    }
1059 	  delete_insns_since (last);
1060 	}
1061     }
1062 
1063   if (!fallback_p)
1064     return false;
1065 
1066   store_fixed_bit_field (op0, op0_mode, bitsize, bitnum, bitregion_start,
1067 			 bitregion_end, value, value_mode, reverse);
1068   return true;
1069 }
1070 
1071 /* Generate code to store value from rtx VALUE
1072    into a bit-field within structure STR_RTX
1073    containing BITSIZE bits starting at bit BITNUM.
1074 
1075    BITREGION_START is bitpos of the first bitfield in this region.
1076    BITREGION_END is the bitpos of the ending bitfield in this region.
1077    These two fields are 0, if the C++ memory model does not apply,
1078    or we are not interested in keeping track of bitfield regions.
1079 
1080    FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
1081 
1082    If REVERSE is true, the store is to be done in reverse order.  */
1083 
1084 void
1085 store_bit_field (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum,
1086 		 poly_uint64 bitregion_start, poly_uint64 bitregion_end,
1087 		 machine_mode fieldmode,
1088 		 rtx value, bool reverse)
1089 {
1090   /* Handle -fstrict-volatile-bitfields in the cases where it applies.  */
1091   unsigned HOST_WIDE_INT ibitsize = 0, ibitnum = 0;
1092   scalar_int_mode int_mode;
1093   if (bitsize.is_constant (&ibitsize)
1094       && bitnum.is_constant (&ibitnum)
1095       && is_a <scalar_int_mode> (fieldmode, &int_mode)
1096       && strict_volatile_bitfield_p (str_rtx, ibitsize, ibitnum, int_mode,
1097 				     bitregion_start, bitregion_end))
1098     {
1099       /* Storing of a full word can be done with a simple store.
1100 	 We know here that the field can be accessed with one single
1101 	 instruction.  For targets that support unaligned memory,
1102 	 an unaligned access may be necessary.  */
1103       if (ibitsize == GET_MODE_BITSIZE (int_mode))
1104 	{
1105 	  str_rtx = adjust_bitfield_address (str_rtx, int_mode,
1106 					     ibitnum / BITS_PER_UNIT);
1107 	  if (reverse)
1108 	    value = flip_storage_order (int_mode, value);
1109 	  gcc_assert (ibitnum % BITS_PER_UNIT == 0);
1110 	  emit_move_insn (str_rtx, value);
1111 	}
1112       else
1113 	{
1114 	  rtx temp;
1115 
1116 	  str_rtx = narrow_bit_field_mem (str_rtx, int_mode, ibitsize,
1117 					  ibitnum, &ibitnum);
1118 	  gcc_assert (ibitnum + ibitsize <= GET_MODE_BITSIZE (int_mode));
1119 	  temp = copy_to_reg (str_rtx);
1120 	  if (!store_bit_field_1 (temp, ibitsize, ibitnum, 0, 0,
1121 				  int_mode, value, reverse, true))
1122 	    gcc_unreachable ();
1123 
1124 	  emit_move_insn (str_rtx, temp);
1125 	}
1126 
1127       return;
1128     }
1129 
1130   /* Under the C++0x memory model, we must not touch bits outside the
1131      bit region.  Adjust the address to start at the beginning of the
1132      bit region.  */
1133   if (MEM_P (str_rtx) && maybe_ne (bitregion_start, 0U))
1134     {
1135       scalar_int_mode best_mode;
1136       machine_mode addr_mode = VOIDmode;
1137 
1138       poly_uint64 offset = exact_div (bitregion_start, BITS_PER_UNIT);
1139       bitnum -= bitregion_start;
1140       poly_int64 size = bits_to_bytes_round_up (bitnum + bitsize);
1141       bitregion_end -= bitregion_start;
1142       bitregion_start = 0;
1143       if (bitsize.is_constant (&ibitsize)
1144 	  && bitnum.is_constant (&ibitnum)
1145 	  && get_best_mode (ibitsize, ibitnum,
1146 			    bitregion_start, bitregion_end,
1147 			    MEM_ALIGN (str_rtx), INT_MAX,
1148 			    MEM_VOLATILE_P (str_rtx), &best_mode))
1149 	addr_mode = best_mode;
1150       str_rtx = adjust_bitfield_address_size (str_rtx, addr_mode,
1151 					      offset, size);
1152     }
1153 
1154   if (!store_bit_field_1 (str_rtx, bitsize, bitnum,
1155 			  bitregion_start, bitregion_end,
1156 			  fieldmode, value, reverse, true))
1157     gcc_unreachable ();
1158 }
1159 
1160 /* Use shifts and boolean operations to store VALUE into a bit field of
1161    width BITSIZE in OP0, starting at bit BITNUM.  If OP0_MODE is defined,
1162    it is the mode of OP0, otherwise OP0 is a BLKmode MEM.  VALUE_MODE is
1163    the mode of VALUE.
1164 
1165    If REVERSE is true, the store is to be done in reverse order.  */
1166 
1167 static void
1168 store_fixed_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
1169 		       unsigned HOST_WIDE_INT bitsize,
1170 		       unsigned HOST_WIDE_INT bitnum,
1171 		       poly_uint64 bitregion_start, poly_uint64 bitregion_end,
1172 		       rtx value, scalar_int_mode value_mode, bool reverse)
1173 {
1174   /* There is a case not handled here:
1175      a structure with a known alignment of just a halfword
1176      and a field split across two aligned halfwords within the structure.
1177      Or likewise a structure with a known alignment of just a byte
1178      and a field split across two bytes.
1179      Such cases are not supposed to be able to occur.  */
1180 
1181   scalar_int_mode best_mode;
1182   if (MEM_P (op0))
1183     {
1184       unsigned int max_bitsize = BITS_PER_WORD;
1185       scalar_int_mode imode;
1186       if (op0_mode.exists (&imode) && GET_MODE_BITSIZE (imode) < max_bitsize)
1187 	max_bitsize = GET_MODE_BITSIZE (imode);
1188 
1189       if (!get_best_mode (bitsize, bitnum, bitregion_start, bitregion_end,
1190 			  MEM_ALIGN (op0), max_bitsize, MEM_VOLATILE_P (op0),
1191 			  &best_mode))
1192 	{
1193 	  /* The only way this should occur is if the field spans word
1194 	     boundaries.  */
1195 	  store_split_bit_field (op0, op0_mode, bitsize, bitnum,
1196 				 bitregion_start, bitregion_end,
1197 				 value, value_mode, reverse);
1198 	  return;
1199 	}
1200 
1201       op0 = narrow_bit_field_mem (op0, best_mode, bitsize, bitnum, &bitnum);
1202     }
1203   else
1204     best_mode = op0_mode.require ();
1205 
1206   store_fixed_bit_field_1 (op0, best_mode, bitsize, bitnum,
1207 			   value, value_mode, reverse);
1208 }
1209 
1210 /* Helper function for store_fixed_bit_field, stores
1211    the bit field always using MODE, which is the mode of OP0.  The other
1212    arguments are as for store_fixed_bit_field.  */
1213 
1214 static void
1215 store_fixed_bit_field_1 (rtx op0, scalar_int_mode mode,
1216 			 unsigned HOST_WIDE_INT bitsize,
1217 			 unsigned HOST_WIDE_INT bitnum,
1218 			 rtx value, scalar_int_mode value_mode, bool reverse)
1219 {
1220   rtx temp;
1221   int all_zero = 0;
1222   int all_one = 0;
1223 
1224   /* Note that bitsize + bitnum can be greater than GET_MODE_BITSIZE (mode)
1225      for invalid input, such as f5 from gcc.dg/pr48335-2.c.  */
1226 
1227   if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
1228     /* BITNUM is the distance between our msb
1229        and that of the containing datum.
1230        Convert it to the distance from the lsb.  */
1231     bitnum = GET_MODE_BITSIZE (mode) - bitsize - bitnum;
1232 
1233   /* Now BITNUM is always the distance between our lsb
1234      and that of OP0.  */
1235 
1236   /* Shift VALUE left by BITNUM bits.  If VALUE is not constant,
1237      we must first convert its mode to MODE.  */
1238 
1239   if (CONST_INT_P (value))
1240     {
1241       unsigned HOST_WIDE_INT v = UINTVAL (value);
1242 
1243       if (bitsize < HOST_BITS_PER_WIDE_INT)
1244 	v &= (HOST_WIDE_INT_1U << bitsize) - 1;
1245 
1246       if (v == 0)
1247 	all_zero = 1;
1248       else if ((bitsize < HOST_BITS_PER_WIDE_INT
1249 		&& v == (HOST_WIDE_INT_1U << bitsize) - 1)
1250 	       || (bitsize == HOST_BITS_PER_WIDE_INT
1251 		   && v == HOST_WIDE_INT_M1U))
1252 	all_one = 1;
1253 
1254       value = lshift_value (mode, v, bitnum);
1255     }
1256   else
1257     {
1258       int must_and = (GET_MODE_BITSIZE (value_mode) != bitsize
1259 		      && bitnum + bitsize != GET_MODE_BITSIZE (mode));
1260 
1261       if (value_mode != mode)
1262 	value = convert_to_mode (mode, value, 1);
1263 
1264       if (must_and)
1265 	value = expand_binop (mode, and_optab, value,
1266 			      mask_rtx (mode, 0, bitsize, 0),
1267 			      NULL_RTX, 1, OPTAB_LIB_WIDEN);
1268       if (bitnum > 0)
1269 	value = expand_shift (LSHIFT_EXPR, mode, value,
1270 			      bitnum, NULL_RTX, 1);
1271     }
1272 
1273   if (reverse)
1274     value = flip_storage_order (mode, value);
1275 
1276   /* Now clear the chosen bits in OP0,
1277      except that if VALUE is -1 we need not bother.  */
1278   /* We keep the intermediates in registers to allow CSE to combine
1279      consecutive bitfield assignments.  */
1280 
1281   temp = force_reg (mode, op0);
1282 
1283   if (! all_one)
1284     {
1285       rtx mask = mask_rtx (mode, bitnum, bitsize, 1);
1286       if (reverse)
1287 	mask = flip_storage_order (mode, mask);
1288       temp = expand_binop (mode, and_optab, temp, mask,
1289 			   NULL_RTX, 1, OPTAB_LIB_WIDEN);
1290       temp = force_reg (mode, temp);
1291     }
1292 
1293   /* Now logical-or VALUE into OP0, unless it is zero.  */
1294 
1295   if (! all_zero)
1296     {
1297       temp = expand_binop (mode, ior_optab, temp, value,
1298 			   NULL_RTX, 1, OPTAB_LIB_WIDEN);
1299       temp = force_reg (mode, temp);
1300     }
1301 
1302   if (op0 != temp)
1303     {
1304       op0 = copy_rtx (op0);
1305       emit_move_insn (op0, temp);
1306     }
1307 }
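
/* In effect, for a non-constant VALUE the sequence above performs the
   usual read-modify-write idiom.  For an 8-bit field at BITNUM 8
   (counted from the least significant bit) of a 32-bit word this is

     temp = (op0 & ~(0xff << 8)) | ((value & 0xff) << 8);

   where the masking of VALUE is skipped when the field already fills
   its mode or reaches the top of OP0.  */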
1308 
1309 /* Store a bit field that is split across multiple accessible memory objects.
1310 
1311    OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
1312    BITSIZE is the field width; BITPOS the position of its first bit
1313    (within the word).
1314    VALUE is the value to store, which has mode VALUE_MODE.
1315    If OP0_MODE is defined, it is the mode of OP0, otherwise OP0 is
1316    a BLKmode MEM.
1317 
1318    If REVERSE is true, the store is to be done in reverse order.
1319 
1320    This does not yet handle fields wider than BITS_PER_WORD.  */
1321 
1322 static void
1323 store_split_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
1324 		       unsigned HOST_WIDE_INT bitsize,
1325 		       unsigned HOST_WIDE_INT bitpos,
1326 		       poly_uint64 bitregion_start, poly_uint64 bitregion_end,
1327 		       rtx value, scalar_int_mode value_mode, bool reverse)
1328 {
1329   unsigned int unit, total_bits, bitsdone = 0;
1330 
1331   /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1332      much at a time.  */
1333   if (REG_P (op0) || GET_CODE (op0) == SUBREG)
1334     unit = BITS_PER_WORD;
1335   else
1336     unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1337 
1338   /* If OP0 is a memory with a mode, then UNIT must not be larger than
1339      OP0's mode as well.  Otherwise, store_fixed_bit_field will call us
1340      again, and we will mutually recurse forever.  */
1341   if (MEM_P (op0) && op0_mode.exists ())
1342     unit = MIN (unit, GET_MODE_BITSIZE (op0_mode.require ()));
1343 
1344   /* If VALUE is a constant other than a CONST_INT, get it into a register in
1345      WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
1346      that VALUE might be a floating-point constant.  */
1347   if (CONSTANT_P (value) && !CONST_INT_P (value))
1348     {
1349       rtx word = gen_lowpart_common (word_mode, value);
1350 
1351       if (word && (value != word))
1352 	value = word;
1353       else
1354 	value = gen_lowpart_common (word_mode, force_reg (value_mode, value));
1355       value_mode = word_mode;
1356     }
1357 
1358   total_bits = GET_MODE_BITSIZE (value_mode);
1359 
1360   while (bitsdone < bitsize)
1361     {
1362       unsigned HOST_WIDE_INT thissize;
1363       unsigned HOST_WIDE_INT thispos;
1364       unsigned HOST_WIDE_INT offset;
1365       rtx part;
1366 
1367       offset = (bitpos + bitsdone) / unit;
1368       thispos = (bitpos + bitsdone) % unit;
1369 
1370       /* When the region of bytes we can touch is restricted, decrease
1371 	 UNIT close to the end of the region as needed.  If op0 is a REG
1372 	 or a SUBREG of a REG, don't do this, as there can't be data races
1373 	 on a register and we can expand shorter code in some cases.  */
1374       if (maybe_ne (bitregion_end, 0U)
1375 	  && unit > BITS_PER_UNIT
1376 	  && maybe_gt (bitpos + bitsdone - thispos + unit, bitregion_end + 1)
1377 	  && !REG_P (op0)
1378 	  && (GET_CODE (op0) != SUBREG || !REG_P (SUBREG_REG (op0))))
1379 	{
1380 	  unit = unit / 2;
1381 	  continue;
1382 	}
1383 
1384       /* THISSIZE must not overrun a word boundary.  Otherwise,
1385 	 store_fixed_bit_field will call us again, and we will mutually
1386 	 recurse forever.  */
1387       thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1388       thissize = MIN (thissize, unit - thispos);
1389 
1390       if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
1391 	{
1392 	  /* Fetch successively less significant portions.  */
1393 	  if (CONST_INT_P (value))
1394 	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
1395 			     >> (bitsize - bitsdone - thissize))
1396 			    & ((HOST_WIDE_INT_1 << thissize) - 1));
1397           /* Likewise, but the source is little-endian.  */
1398           else if (reverse)
1399 	    part = extract_fixed_bit_field (word_mode, value, value_mode,
1400 					    thissize,
1401 					    bitsize - bitsdone - thissize,
1402 					    NULL_RTX, 1, false);
1403 	  else
1404 	    /* The args are chosen so that the last part includes the
1405 	       lsb.  Give extract_bit_field the value it needs (with
1406 	       endianness compensation) to fetch the piece we want.  */
1407 	    part = extract_fixed_bit_field (word_mode, value, value_mode,
1408 					    thissize,
1409 					    total_bits - bitsize + bitsdone,
1410 					    NULL_RTX, 1, false);
1411 	}
1412       else
1413 	{
1414 	  /* Fetch successively more significant portions.  */
1415 	  if (CONST_INT_P (value))
1416 	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
1417 			     >> bitsdone)
1418 			    & ((HOST_WIDE_INT_1 << thissize) - 1));
1419 	  /* Likewise, but the source is big-endian.  */
1420           else if (reverse)
1421 	    part = extract_fixed_bit_field (word_mode, value, value_mode,
1422 					    thissize,
1423 					    total_bits - bitsdone - thissize,
1424 					    NULL_RTX, 1, false);
1425 	  else
1426 	    part = extract_fixed_bit_field (word_mode, value, value_mode,
1427 					    thissize, bitsdone, NULL_RTX,
1428 					    1, false);
1429 	}
1430 
1431       /* If OP0 is a register, then handle OFFSET here.  */
1432       rtx op0_piece = op0;
1433       opt_scalar_int_mode op0_piece_mode = op0_mode;
1434       if (SUBREG_P (op0) || REG_P (op0))
1435 	{
1436 	  scalar_int_mode imode;
1437 	  if (op0_mode.exists (&imode)
1438 	      && GET_MODE_SIZE (imode) < UNITS_PER_WORD)
1439 	    {
1440 	      if (offset)
1441 		op0_piece = const0_rtx;
1442 	    }
1443 	  else
1444 	    {
1445 	      op0_piece = operand_subword_force (op0,
1446 						 offset * unit / BITS_PER_WORD,
1447 						 GET_MODE (op0));
1448 	      op0_piece_mode = word_mode;
1449 	    }
1450 	  offset &= BITS_PER_WORD / unit - 1;
1451 	}
1452 
1453       /* OFFSET is in UNITs, and UNIT is in bits.  If OP0_PIECE is
1454 	 const0_rtx, it is just an out-of-bounds access.  Ignore it.  */
1455       if (op0_piece != const0_rtx)
1456 	store_fixed_bit_field (op0_piece, op0_piece_mode, thissize,
1457 			       offset * unit + thispos, bitregion_start,
1458 			       bitregion_end, part, word_mode, reverse);
1459       bitsdone += thissize;
1460     }
1461 }
1462 
1463 /* A subroutine of extract_bit_field_1 that converts return value X
1464    to either MODE or TMODE.  MODE, TMODE and UNSIGNEDP are arguments
1465    to extract_bit_field.  */
1466 
1467 static rtx
1468 convert_extracted_bit_field (rtx x, machine_mode mode,
1469 			     machine_mode tmode, bool unsignedp)
1470 {
1471   if (GET_MODE (x) == tmode || GET_MODE (x) == mode)
1472     return x;
1473 
1474   /* If the x mode is not a scalar integral, first convert to the
1475      integer mode of that size and then access it as a floating-point
1476      value via a SUBREG.  */
1477   if (!SCALAR_INT_MODE_P (tmode))
1478     {
1479       scalar_int_mode int_mode = int_mode_for_mode (tmode).require ();
1480       x = convert_to_mode (int_mode, x, unsignedp);
1481       x = force_reg (int_mode, x);
1482       return gen_lowpart (tmode, x);
1483     }
1484 
1485   return convert_to_mode (tmode, x, unsignedp);
1486 }
1487 
1488 /* Try to use an ext(z)v pattern to extract a field from OP0.
1489    Return the extracted value on success, otherwise return null.
1490    EXTV describes the extraction instruction to use.  If OP0_MODE
1491    is defined, it is the mode of OP0, otherwise OP0 is a BLKmode MEM.
1492    The other arguments are as for extract_bit_field.  */
1493 
1494 static rtx
1495 extract_bit_field_using_extv (const extraction_insn *extv, rtx op0,
1496 			      opt_scalar_int_mode op0_mode,
1497 			      unsigned HOST_WIDE_INT bitsize,
1498 			      unsigned HOST_WIDE_INT bitnum,
1499 			      int unsignedp, rtx target,
1500 			      machine_mode mode, machine_mode tmode)
1501 {
1502   struct expand_operand ops[4];
1503   rtx spec_target = target;
1504   rtx spec_target_subreg = 0;
1505   scalar_int_mode ext_mode = extv->field_mode;
1506   unsigned unit = GET_MODE_BITSIZE (ext_mode);
1507 
1508   if (bitsize == 0 || unit < bitsize)
1509     return NULL_RTX;
1510 
1511   if (MEM_P (op0))
1512     /* Get a reference to the first byte of the field.  */
1513     op0 = narrow_bit_field_mem (op0, extv->struct_mode, bitsize, bitnum,
1514 				&bitnum);
1515   else
1516     {
1517       /* Convert from counting within OP0 to counting in EXT_MODE.  */
1518       if (BYTES_BIG_ENDIAN)
1519 	bitnum += unit - GET_MODE_BITSIZE (op0_mode.require ());
1520 
1521       /* If op0 is a register, we need it in EXT_MODE to make it
1522 	 acceptable to the format of ext(z)v.  */
1523       if (GET_CODE (op0) == SUBREG && op0_mode.require () != ext_mode)
1524 	return NULL_RTX;
1525       if (REG_P (op0) && op0_mode.require () != ext_mode)
1526 	op0 = gen_lowpart_SUBREG (ext_mode, op0);
1527     }
1528 
1529   /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
1530      "backwards" from the size of the unit we are extracting from.
1531      Otherwise, we count bits from the most significant on a
1532      BYTES/BITS_BIG_ENDIAN machine.  */
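  /* E.g. (illustrative) with a 32-bit EXT_MODE, an 8-bit field that starts
     at bit 5 under one numbering starts at bit 32 - 8 - 5 = 19 under the
     other.  */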
1533 
1534   if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
1535     bitnum = unit - bitsize - bitnum;
1536 
1537   if (target == 0)
1538     target = spec_target = gen_reg_rtx (tmode);
1539 
1540   if (GET_MODE (target) != ext_mode)
1541     {
1542       /* Don't use LHS paradoxical subreg if explicit truncation is needed
1543 	 between the mode of the extraction (word_mode) and the target
1544 	 mode.  Instead, create a temporary and use convert_move to set
1545 	 the target.  */
1546       if (REG_P (target)
1547 	  && TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (target), ext_mode))
1548 	{
1549 	  target = gen_lowpart (ext_mode, target);
1550 	  if (partial_subreg_p (GET_MODE (spec_target), ext_mode))
1551 	    spec_target_subreg = target;
1552 	}
1553       else
1554 	target = gen_reg_rtx (ext_mode);
1555     }
1556 
1557   create_output_operand (&ops[0], target, ext_mode);
1558   create_fixed_operand (&ops[1], op0);
1559   create_integer_operand (&ops[2], bitsize);
1560   create_integer_operand (&ops[3], bitnum);
1561   if (maybe_expand_insn (extv->icode, 4, ops))
1562     {
1563       target = ops[0].value;
1564       if (target == spec_target)
1565 	return target;
1566       if (target == spec_target_subreg)
1567 	return spec_target;
1568       return convert_extracted_bit_field (target, mode, tmode, unsignedp);
1569     }
1570   return NULL_RTX;
1571 }
1572 
1573 /* See whether it would be valid to extract the part of OP0 described
1574    by BITNUM and BITSIZE into a value of mode MODE using a subreg
1575    operation.  Return the subreg if so, otherwise return null.  */
1576 
1577 static rtx
1578 extract_bit_field_as_subreg (machine_mode mode, rtx op0,
1579 			     poly_uint64 bitsize, poly_uint64 bitnum)
1580 {
1581   poly_uint64 bytenum;
1582   if (multiple_p (bitnum, BITS_PER_UNIT, &bytenum)
1583       && known_eq (bitsize, GET_MODE_BITSIZE (mode))
1584       && lowpart_bit_field_p (bitnum, bitsize, GET_MODE (op0))
1585       && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op0)))
1586     return simplify_gen_subreg (mode, op0, GET_MODE (op0), bytenum);
1587   return NULL_RTX;
1588 }
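
/* A minimal illustration for extract_bit_field_as_subreg: taking the low
   32 bits of a DImode register as SImode (BITSIZE 32, BITNUM 0) passes
   every check above on a little-endian target where SImode/DImode
   truncation is a no-op, so the access becomes (subreg:SI (reg:DI) 0).  */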
1589 
1590 /* A subroutine of extract_bit_field, with the same arguments.
1591    If FALLBACK_P is true, fall back to extract_fixed_bit_field
1592    if we can find no other means of implementing the operation.
1593    If FALLBACK_P is false, return NULL instead.  */
1594 
1595 static rtx
1596 extract_bit_field_1 (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum,
1597 		     int unsignedp, rtx target, machine_mode mode,
1598 		     machine_mode tmode, bool reverse, bool fallback_p,
1599 		     rtx *alt_rtl)
1600 {
1601   rtx op0 = str_rtx;
1602   machine_mode mode1;
1603 
1604   if (tmode == VOIDmode)
1605     tmode = mode;
1606 
1607   while (GET_CODE (op0) == SUBREG)
1608     {
1609       bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
1610       op0 = SUBREG_REG (op0);
1611     }
1612 
1613   /* If we have an out-of-bounds access to a register, just return an
1614      uninitialized register of the required mode.  This can occur if the
1615      source code contains an out-of-bounds access to a small array.  */
1616   if (REG_P (op0) && known_ge (bitnum, GET_MODE_BITSIZE (GET_MODE (op0))))
1617     return gen_reg_rtx (tmode);
1618 
1619   if (REG_P (op0)
1620       && mode == GET_MODE (op0)
1621       && known_eq (bitnum, 0U)
1622       && known_eq (bitsize, GET_MODE_BITSIZE (GET_MODE (op0))))
1623     {
1624       if (reverse)
1625 	op0 = flip_storage_order (mode, op0);
1626       /* We're trying to extract a full register from itself.  */
1627       return op0;
1628     }
1629 
1630   /* First try to check for vector from vector extractions.  */
1631   if (VECTOR_MODE_P (GET_MODE (op0))
1632       && !MEM_P (op0)
1633       && VECTOR_MODE_P (tmode)
1634       && known_eq (bitsize, GET_MODE_BITSIZE (tmode))
1635       && maybe_gt (GET_MODE_SIZE (GET_MODE (op0)), GET_MODE_SIZE (tmode)))
1636     {
1637       machine_mode new_mode = GET_MODE (op0);
1638       if (GET_MODE_INNER (new_mode) != GET_MODE_INNER (tmode))
1639 	{
1640 	  scalar_mode inner_mode = GET_MODE_INNER (tmode);
1641 	  poly_uint64 nunits;
1642 	  if (!multiple_p (GET_MODE_BITSIZE (GET_MODE (op0)),
1643 			   GET_MODE_UNIT_BITSIZE (tmode), &nunits)
1644 	      || !mode_for_vector (inner_mode, nunits).exists (&new_mode)
1645 	      || !VECTOR_MODE_P (new_mode)
1646 	      || maybe_ne (GET_MODE_SIZE (new_mode),
1647 			   GET_MODE_SIZE (GET_MODE (op0)))
1648 	      || GET_MODE_INNER (new_mode) != GET_MODE_INNER (tmode)
1649 	      || !targetm.vector_mode_supported_p (new_mode))
1650 	    new_mode = VOIDmode;
1651 	}
1652       poly_uint64 pos;
1653       if (new_mode != VOIDmode
1654 	  && (convert_optab_handler (vec_extract_optab, new_mode, tmode)
1655 	      != CODE_FOR_nothing)
1656 	  && multiple_p (bitnum, GET_MODE_BITSIZE (tmode), &pos))
1657 	{
1658 	  struct expand_operand ops[3];
1659 	  machine_mode outermode = new_mode;
1660 	  machine_mode innermode = tmode;
1661 	  enum insn_code icode
1662 	    = convert_optab_handler (vec_extract_optab, outermode, innermode);
1663 
1664 	  if (new_mode != GET_MODE (op0))
1665 	    op0 = gen_lowpart (new_mode, op0);
1666 	  create_output_operand (&ops[0], target, innermode);
1667 	  ops[0].target = 1;
1668 	  create_input_operand (&ops[1], op0, outermode);
1669 	  create_integer_operand (&ops[2], pos);
1670 	  if (maybe_expand_insn (icode, 3, ops))
1671 	    {
1672 	      if (alt_rtl && ops[0].target)
1673 		*alt_rtl = target;
1674 	      target = ops[0].value;
1675 	      if (GET_MODE (target) != mode)
1676 		return gen_lowpart (tmode, target);
1677 	      return target;
1678 	    }
1679 	}
1680     }
1681 
1682   /* See if we can get a better vector mode before extracting.  */
1683   if (VECTOR_MODE_P (GET_MODE (op0))
1684       && !MEM_P (op0)
1685       && GET_MODE_INNER (GET_MODE (op0)) != tmode)
1686     {
1687       machine_mode new_mode;
1688 
1689       if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
1690 	new_mode = MIN_MODE_VECTOR_FLOAT;
1691       else if (GET_MODE_CLASS (tmode) == MODE_FRACT)
1692 	new_mode = MIN_MODE_VECTOR_FRACT;
1693       else if (GET_MODE_CLASS (tmode) == MODE_UFRACT)
1694 	new_mode = MIN_MODE_VECTOR_UFRACT;
1695       else if (GET_MODE_CLASS (tmode) == MODE_ACCUM)
1696 	new_mode = MIN_MODE_VECTOR_ACCUM;
1697       else if (GET_MODE_CLASS (tmode) == MODE_UACCUM)
1698 	new_mode = MIN_MODE_VECTOR_UACCUM;
1699       else
1700 	new_mode = MIN_MODE_VECTOR_INT;
1701 
1702       FOR_EACH_MODE_FROM (new_mode, new_mode)
1703 	if (known_eq (GET_MODE_SIZE (new_mode), GET_MODE_SIZE (GET_MODE (op0)))
1704 	    && known_eq (GET_MODE_UNIT_SIZE (new_mode), GET_MODE_SIZE (tmode))
1705 	    && targetm.vector_mode_supported_p (new_mode))
1706 	  break;
1707       if (new_mode != VOIDmode)
1708 	op0 = gen_lowpart (new_mode, op0);
1709     }
1710 
1711   /* Use vec_extract patterns for extracting parts of vectors whenever
1712      available.  If that fails, see whether the current modes and bitregion
1713      give a natural subreg.  */
1714   machine_mode outermode = GET_MODE (op0);
1715   if (VECTOR_MODE_P (outermode) && !MEM_P (op0))
1716     {
1717       scalar_mode innermode = GET_MODE_INNER (outermode);
1718       enum insn_code icode
1719 	= convert_optab_handler (vec_extract_optab, outermode, innermode);
1720       poly_uint64 pos;
1721       if (icode != CODE_FOR_nothing
1722 	  && known_eq (bitsize, GET_MODE_BITSIZE (innermode))
1723 	  && multiple_p (bitnum, GET_MODE_BITSIZE (innermode), &pos))
1724 	{
1725 	  struct expand_operand ops[3];
1726 
1727 	  create_output_operand (&ops[0], target, innermode);
1728 	  ops[0].target = 1;
1729 	  create_input_operand (&ops[1], op0, outermode);
1730 	  create_integer_operand (&ops[2], pos);
1731 	  if (maybe_expand_insn (icode, 3, ops))
1732 	    {
1733 	      if (alt_rtl && ops[0].target)
1734 		*alt_rtl = target;
1735 	      target = ops[0].value;
1736 	      if (GET_MODE (target) != mode)
1737 		return gen_lowpart (tmode, target);
1738 	      return target;
1739 	    }
1740 	}
1741       /* Using subregs is useful if we're extracting one register vector
1742 	 from a multi-register vector.  extract_bit_field_as_subreg checks
1743 	 for valid bitsize and bitnum, so we don't need to do that here.  */
1744       if (VECTOR_MODE_P (mode))
1745 	{
1746 	  rtx sub = extract_bit_field_as_subreg (mode, op0, bitsize, bitnum);
1747 	  if (sub)
1748 	    return sub;
1749 	}
1750     }
1751 
1752   /* Make sure we are playing with integral modes.  Pun with subregs
1753      if we aren't.  */
1754   opt_scalar_int_mode op0_mode = int_mode_for_mode (GET_MODE (op0));
1755   scalar_int_mode imode;
1756   if (!op0_mode.exists (&imode) || imode != GET_MODE (op0))
1757     {
1758       if (MEM_P (op0))
1759 	op0 = adjust_bitfield_address_size (op0, op0_mode.else_blk (),
1760 					    0, MEM_SIZE (op0));
1761       else if (op0_mode.exists (&imode))
1762 	{
1763 	  op0 = gen_lowpart (imode, op0);
1764 
1765 	  /* If we got a SUBREG, force it into a register since we
1766 	     aren't going to be able to do another SUBREG on it.  */
1767 	  if (GET_CODE (op0) == SUBREG)
1768 	    op0 = force_reg (imode, op0);
1769 	}
1770       else
1771 	{
1772 	  poly_int64 size = GET_MODE_SIZE (GET_MODE (op0));
1773 	  rtx mem = assign_stack_temp (GET_MODE (op0), size);
1774 	  emit_move_insn (mem, op0);
1775 	  op0 = adjust_bitfield_address_size (mem, BLKmode, 0, size);
1776 	}
1777     }
1778 
1779   /* ??? We currently assume TARGET is at least as big as BITSIZE.
1780      If that's wrong, the solution is to test for it and set TARGET to 0
1781      if needed.  */
1782 
1783   /* Get the mode of the field to use for atomic access or subreg
1784      conversion.  */
1785   if (!SCALAR_INT_MODE_P (tmode)
1786       || !mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0).exists (&mode1))
1787     mode1 = mode;
1788   gcc_assert (mode1 != BLKmode);
1789 
1790   /* Extraction of a full MODE1 value can be done with a subreg as long
1791      as the least significant bit of the value is the least significant
1792      bit of either OP0 or a word of OP0.  */
1793   if (!MEM_P (op0) && !reverse)
1794     {
1795       rtx sub = extract_bit_field_as_subreg (mode1, op0, bitsize, bitnum);
1796       if (sub)
1797 	return convert_extracted_bit_field (sub, mode, tmode, unsignedp);
1798     }
1799 
1800   /* Extraction of a full MODE1 value can be done with a load as long as
1801      the field is on a byte boundary and is sufficiently aligned.  */
1802   poly_uint64 bytenum;
1803   if (simple_mem_bitfield_p (op0, bitsize, bitnum, mode1, &bytenum))
1804     {
1805       op0 = adjust_bitfield_address (op0, mode1, bytenum);
1806       if (reverse)
1807 	op0 = flip_storage_order (mode1, op0);
1808       return convert_extracted_bit_field (op0, mode, tmode, unsignedp);
1809     }
1810 
1811   /* If we have a memory source and a non-constant bit offset, restrict
1812      the memory to the referenced bytes.  This is a worst-case fallback
1813      but is useful for things like vector booleans.  */
1814   if (MEM_P (op0) && !bitnum.is_constant ())
1815     {
1816       bytenum = bits_to_bytes_round_down (bitnum);
1817       bitnum = num_trailing_bits (bitnum);
1818       poly_uint64 bytesize = bits_to_bytes_round_up (bitnum + bitsize);
1819       op0 = adjust_bitfield_address_size (op0, BLKmode, bytenum, bytesize);
1820       op0_mode = opt_scalar_int_mode ();
1821     }
1822 
1823   /* It's possible we'll need to handle other cases here for
1824      polynomial bitnum and bitsize.  */
1825 
1826   /* From here on we need to be looking at a fixed-size extraction.  */
1827   return extract_integral_bit_field (op0, op0_mode, bitsize.to_constant (),
1828 				     bitnum.to_constant (), unsignedp,
1829 				     target, mode, tmode, reverse, fallback_p);
1830 }
1831 
1832 /* Subroutine of extract_bit_field_1, with the same arguments, except
1833    that BITSIZE and BITNUM are constant.  Handle cases specific to
1834    integral modes.  If OP0_MODE is defined, it is the mode of OP0,
1835    otherwise OP0 is a BLKmode MEM.  */
1836 
1837 static rtx
1838 extract_integral_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
1839 			    unsigned HOST_WIDE_INT bitsize,
1840 			    unsigned HOST_WIDE_INT bitnum, int unsignedp,
1841 			    rtx target, machine_mode mode, machine_mode tmode,
1842 			    bool reverse, bool fallback_p)
1843 {
1844   /* Handle fields bigger than a word.  */
1845 
1846   if (bitsize > BITS_PER_WORD)
1847     {
1848       /* Here we transfer the words of the field
1849 	 in the order least significant first.
1850 	 This is because the most significant word is the one which may
1851 	 be less than full.  */
1852 
1853       const bool backwards = WORDS_BIG_ENDIAN;
1854       unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
1855       unsigned int i;
1856       rtx_insn *last;
1857 
1858       if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
1859 	target = gen_reg_rtx (mode);
1860 
1861       /* In case we're about to clobber a base register or something
1862 	 (see gcc.c-torture/execute/20040625-1.c).   */
1863       if (reg_mentioned_p (target, op0))
1864 	target = gen_reg_rtx (mode);
1865 
1866       /* Indicate for flow that the entire target reg is being set.  */
1867       emit_clobber (target);
1868 
1869       /* The mode must be fixed-size, since extract_bit_field_1 handles
1870 	 extractions from variable-sized objects before calling this
1871 	 function.  */
1872       unsigned int target_size
1873 	= GET_MODE_SIZE (GET_MODE (target)).to_constant ();
1874       last = get_last_insn ();
1875       for (i = 0; i < nwords; i++)
1876 	{
1877 	  /* If I is 0, use the low-order word in both field and target;
1878 	     if I is 1, use the next to lowest word; and so on.  */
1879 	  /* Word number in TARGET to use.  */
1880 	  unsigned int wordnum
1881 	    = (backwards ? target_size / UNITS_PER_WORD - i - 1 : i);
1882 	  /* Offset from start of field in OP0.  */
1883 	  unsigned int bit_offset = (backwards ^ reverse
1884 				     ? MAX ((int) bitsize - ((int) i + 1)
1885 					    * BITS_PER_WORD,
1886 					    0)
1887 				     : (int) i * BITS_PER_WORD);
1888 	  rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
1889 	  rtx result_part
1890 	    = extract_bit_field_1 (op0, MIN (BITS_PER_WORD,
1891 					     bitsize - i * BITS_PER_WORD),
1892 				   bitnum + bit_offset, 1, target_part,
1893 				   mode, word_mode, reverse, fallback_p, NULL);
1894 
1895 	  gcc_assert (target_part);
1896 	  if (!result_part)
1897 	    {
1898 	      delete_insns_since (last);
1899 	      return NULL;
1900 	    }
1901 
1902 	  if (result_part != target_part)
1903 	    emit_move_insn (target_part, result_part);
1904 	}
1905 
1906       if (unsignedp)
1907 	{
1908 	  /* Unless we've filled TARGET, the upper regs in a multi-reg value
1909 	     need to be zero'd out.  */
1910 	  if (target_size > nwords * UNITS_PER_WORD)
1911 	    {
1912 	      unsigned int i, total_words;
1913 
1914 	      total_words = target_size / UNITS_PER_WORD;
1915 	      for (i = nwords; i < total_words; i++)
1916 		emit_move_insn
1917 		  (operand_subword (target,
1918 				    backwards ? total_words - i - 1 : i,
1919 				    1, VOIDmode),
1920 		   const0_rtx);
1921 	    }
1922 	  return target;
1923 	}
1924 
1925       /* Signed bit field: sign-extend with two arithmetic shifts.  */
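      /* For instance (illustrative): with a 64-bit MODE and a 40-bit field,
	 both shifts below are by 24; the left shift puts the field's sign
	 bit in the mode's msb and the arithmetic right shift replicates it
	 through the upper 24 bits.  */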
1926       target = expand_shift (LSHIFT_EXPR, mode, target,
1927 			     GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
1928       return expand_shift (RSHIFT_EXPR, mode, target,
1929 			   GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
1930     }
1931 
1932   /* If OP0 is a multi-word register, narrow it to the affected word.
1933      If the region spans two words, defer to extract_split_bit_field.  */
1934   if (!MEM_P (op0) && GET_MODE_SIZE (op0_mode.require ()) > UNITS_PER_WORD)
1935     {
1936       if (bitnum % BITS_PER_WORD + bitsize > BITS_PER_WORD)
1937 	{
1938 	  if (!fallback_p)
1939 	    return NULL_RTX;
1940 	  target = extract_split_bit_field (op0, op0_mode, bitsize, bitnum,
1941 					    unsignedp, reverse);
1942 	  return convert_extracted_bit_field (target, mode, tmode, unsignedp);
1943 	}
1944       op0 = simplify_gen_subreg (word_mode, op0, op0_mode.require (),
1945 				 bitnum / BITS_PER_WORD * UNITS_PER_WORD);
1946       op0_mode = word_mode;
1947       bitnum %= BITS_PER_WORD;
1948     }
1949 
1950   /* From here on we know the desired field is smaller than a word.
1951      If OP0 is a register, it too fits within a word.  */
1952   enum extraction_pattern pattern = unsignedp ? EP_extzv : EP_extv;
1953   extraction_insn extv;
1954   if (!MEM_P (op0)
1955       && !reverse
1956       /* ??? We could limit the structure size to the part of OP0 that
1957 	 contains the field, with appropriate checks for endianness
1958 	 and TARGET_TRULY_NOOP_TRUNCATION.  */
1959       && get_best_reg_extraction_insn (&extv, pattern,
1960 				       GET_MODE_BITSIZE (op0_mode.require ()),
1961 				       tmode))
1962     {
1963       rtx result = extract_bit_field_using_extv (&extv, op0, op0_mode,
1964 						 bitsize, bitnum,
1965 						 unsignedp, target, mode,
1966 						 tmode);
1967       if (result)
1968 	return result;
1969     }
1970 
1971   /* If OP0 is a memory, try copying it to a register and seeing if a
1972      cheap register alternative is available.  */
1973   if (MEM_P (op0) && !reverse)
1974     {
1975       if (get_best_mem_extraction_insn (&extv, pattern, bitsize, bitnum,
1976 					tmode))
1977 	{
1978 	  rtx result = extract_bit_field_using_extv (&extv, op0, op0_mode,
1979 						     bitsize, bitnum,
1980 						     unsignedp, target, mode,
1981 						     tmode);
1982 	  if (result)
1983 	    return result;
1984 	}
1985 
1986       rtx_insn *last = get_last_insn ();
1987 
1988       /* Try loading part of OP0 into a register and extracting the
1989 	 bitfield from that.  */
1990       unsigned HOST_WIDE_INT bitpos;
1991       rtx xop0 = adjust_bit_field_mem_for_reg (pattern, op0, bitsize, bitnum,
1992 					       0, 0, tmode, &bitpos);
1993       if (xop0)
1994 	{
1995 	  xop0 = copy_to_reg (xop0);
1996 	  rtx result = extract_bit_field_1 (xop0, bitsize, bitpos,
1997 					    unsignedp, target,
1998 					    mode, tmode, reverse, false, NULL);
1999 	  if (result)
2000 	    return result;
2001 	  delete_insns_since (last);
2002 	}
2003     }
2004 
2005   if (!fallback_p)
2006     return NULL;
2007 
2008   /* Find a correspondingly-sized integer field, so we can apply
2009      shifts and masks to it.  */
2010   scalar_int_mode int_mode;
2011   if (!int_mode_for_mode (tmode).exists (&int_mode))
2012     /* If this fails, we should probably push op0 out to memory and then
2013        do a load.  */
2014     int_mode = int_mode_for_mode (mode).require ();
2015 
2016   target = extract_fixed_bit_field (int_mode, op0, op0_mode, bitsize,
2017 				    bitnum, target, unsignedp, reverse);
2018 
2019   /* Complex values must be reversed piecewise, so we need to undo the global
2020      reversal, convert to the complex mode and reverse again.  */
2021   if (reverse && COMPLEX_MODE_P (tmode))
2022     {
2023       target = flip_storage_order (int_mode, target);
2024       target = convert_extracted_bit_field (target, mode, tmode, unsignedp);
2025       target = flip_storage_order (tmode, target);
2026     }
2027   else
2028     target = convert_extracted_bit_field (target, mode, tmode, unsignedp);
2029 
2030   return target;
2031 }
2032 
2033 /* Generate code to extract a byte-field from STR_RTX
2034    containing BITSIZE bits, starting at BITNUM,
2035    and put it in TARGET if possible (if TARGET is nonzero).
2036    Regardless of TARGET, we return the rtx for where the value is placed.
2037 
2038    STR_RTX is the structure containing the byte (a REG or MEM).
2039    UNSIGNEDP is nonzero if this is an unsigned bit field.
2040    MODE is the natural mode of the field value once extracted.
2041    TMODE is the mode the caller would like the value to have;
2042    but the value may be returned with type MODE instead.
2043 
2044    If REVERSE is true, the extraction is to be done in reverse order.
2045 
2046    If a TARGET is specified and we can store in it at no extra cost,
2047    we do so, and return TARGET.
2048    Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
2049    if they are equally easy.  */
2050 
2051 rtx
2052 extract_bit_field (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum,
2053 		   int unsignedp, rtx target, machine_mode mode,
2054 		   machine_mode tmode, bool reverse, rtx *alt_rtl)
2055 {
2056   machine_mode mode1;
2057 
2058   /* Handle -fstrict-volatile-bitfields in the cases where it applies.  */
2059   if (maybe_ne (GET_MODE_BITSIZE (GET_MODE (str_rtx)), 0))
2060     mode1 = GET_MODE (str_rtx);
2061   else if (target && maybe_ne (GET_MODE_BITSIZE (GET_MODE (target)), 0))
2062     mode1 = GET_MODE (target);
2063   else
2064     mode1 = tmode;
2065 
2066   unsigned HOST_WIDE_INT ibitsize, ibitnum;
2067   scalar_int_mode int_mode;
2068   if (bitsize.is_constant (&ibitsize)
2069       && bitnum.is_constant (&ibitnum)
2070       && is_a <scalar_int_mode> (mode1, &int_mode)
2071       && strict_volatile_bitfield_p (str_rtx, ibitsize, ibitnum,
2072 				     int_mode, 0, 0))
2073     {
2074       /* Extraction of a full INT_MODE value can be done with a simple load.
2075 	 We know here that the field can be accessed with one single
2076 	 instruction.  For targets that support unaligned memory,
2077 	 an unaligned access may be necessary.  */
2078       if (ibitsize == GET_MODE_BITSIZE (int_mode))
2079 	{
2080 	  rtx result = adjust_bitfield_address (str_rtx, int_mode,
2081 						ibitnum / BITS_PER_UNIT);
2082 	  if (reverse)
2083 	    result = flip_storage_order (int_mode, result);
2084 	  gcc_assert (ibitnum % BITS_PER_UNIT == 0);
2085 	  return convert_extracted_bit_field (result, mode, tmode, unsignedp);
2086 	}
2087 
2088       str_rtx = narrow_bit_field_mem (str_rtx, int_mode, ibitsize, ibitnum,
2089 				      &ibitnum);
2090       gcc_assert (ibitnum + ibitsize <= GET_MODE_BITSIZE (int_mode));
2091       str_rtx = copy_to_reg (str_rtx);
2092       return extract_bit_field_1 (str_rtx, ibitsize, ibitnum, unsignedp,
2093 				  target, mode, tmode, reverse, true, alt_rtl);
2094     }
2095 
2096   return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp,
2097 			      target, mode, tmode, reverse, true, alt_rtl);
2098 }
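
/* A hedged usage sketch (not code from this file; "mem" is a placeholder
   SImode MEM): a caller wanting the unsigned 16-bit field that starts
   8 bits into the object might write

     rtx val = extract_bit_field (mem, 16, 8, 1, NULL_RTX,
				  HImode, SImode, false, NULL);

   and, per the comment above, must be prepared for VAL to come back in
   either SImode or HImode.  */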
2099 
2100 /* Use shifts and boolean operations to extract a field of BITSIZE bits
2101    from bit BITNUM of OP0.  If OP0_MODE is defined, it is the mode of OP0,
2102    otherwise OP0 is a BLKmode MEM.
2103 
2104    UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
2105    If REVERSE is true, the extraction is to be done in reverse order.
2106 
2107    If TARGET is nonzero, attempts to store the value there
2108    and return TARGET, but this is not guaranteed.
2109    If TARGET is not used, create a pseudo-reg of mode TMODE for the value.  */
2110 
2111 static rtx
2112 extract_fixed_bit_field (machine_mode tmode, rtx op0,
2113 			 opt_scalar_int_mode op0_mode,
2114 			 unsigned HOST_WIDE_INT bitsize,
2115 			 unsigned HOST_WIDE_INT bitnum, rtx target,
2116 			 int unsignedp, bool reverse)
2117 {
2118   scalar_int_mode mode;
2119   if (MEM_P (op0))
2120     {
2121       if (!get_best_mode (bitsize, bitnum, 0, 0, MEM_ALIGN (op0),
2122 			  BITS_PER_WORD, MEM_VOLATILE_P (op0), &mode))
2123 	/* The only way this should occur is if the field spans word
2124 	   boundaries.  */
2125 	return extract_split_bit_field (op0, op0_mode, bitsize, bitnum,
2126 					unsignedp, reverse);
2127 
2128       op0 = narrow_bit_field_mem (op0, mode, bitsize, bitnum, &bitnum);
2129     }
2130   else
2131     mode = op0_mode.require ();
2132 
2133   return extract_fixed_bit_field_1 (tmode, op0, mode, bitsize, bitnum,
2134 				    target, unsignedp, reverse);
2135 }
2136 
2137 /* Helper function for extract_fixed_bit_field, extracts
2138    the bit field always using MODE, which is the mode of OP0.
2139    The other arguments are as for extract_fixed_bit_field.  */
2140 
2141 static rtx
2142 extract_fixed_bit_field_1 (machine_mode tmode, rtx op0, scalar_int_mode mode,
2143 			   unsigned HOST_WIDE_INT bitsize,
2144 			   unsigned HOST_WIDE_INT bitnum, rtx target,
2145 			   int unsignedp, bool reverse)
2146 {
2147   /* Note that bitsize + bitnum can be greater than GET_MODE_BITSIZE (mode)
2148      for invalid input, such as extracting the equivalent of f5 from
2149      gcc.dg/pr48335-2.c.  */
2150 
2151   if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
2152     /* BITNUM is the distance between our msb and that of OP0.
2153        Convert it to the distance from the lsb.  */
2154     bitnum = GET_MODE_BITSIZE (mode) - bitsize - bitnum;
2155 
2156   /* Now BITNUM is always the distance between the field's lsb and that of OP0.
2157      We have reduced the big-endian case to the little-endian case.  */
2158   if (reverse)
2159     op0 = flip_storage_order (mode, op0);
2160 
2161   if (unsignedp)
2162     {
2163       if (bitnum)
2164 	{
2165 	  /* If the field does not already start at the lsb,
2166 	     shift it so it does.  */
2167 	  /* Maybe propagate the target for the shift.  */
2168 	  rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
2169 	  if (tmode != mode)
2170 	    subtarget = 0;
2171 	  op0 = expand_shift (RSHIFT_EXPR, mode, op0, bitnum, subtarget, 1);
2172 	}
2173       /* Convert the value to the desired mode.  TMODE must also be a
2174 	 scalar integer for this conversion to make sense, since we
2175 	 shouldn't reinterpret the bits.  */
2176       scalar_int_mode new_mode = as_a <scalar_int_mode> (tmode);
2177       if (mode != new_mode)
2178 	op0 = convert_to_mode (new_mode, op0, 1);
2179 
2180       /* Unless the msb of the field used to be the msb when we shifted,
2181 	 mask out the upper bits.  */
2182 
2183       if (GET_MODE_BITSIZE (mode) != bitnum + bitsize)
2184 	return expand_binop (new_mode, and_optab, op0,
2185 			     mask_rtx (new_mode, 0, bitsize, 0),
2186 			     target, 1, OPTAB_LIB_WIDEN);
2187       return op0;
2188     }
2189 
2190   /* To extract a signed bit-field, first shift its msb to the msb of the word,
2191      then arithmetic-shift its lsb to the lsb of the word.  */
2192   op0 = force_reg (mode, op0);
2193 
2194   /* Find the narrowest integer mode that contains the field.  */
2195 
2196   opt_scalar_int_mode mode_iter;
2197   FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
2198     if (GET_MODE_BITSIZE (mode_iter.require ()) >= bitsize + bitnum)
2199       break;
2200 
2201   mode = mode_iter.require ();
2202   op0 = convert_to_mode (mode, op0, 0);
2203 
2204   if (mode != tmode)
2205     target = 0;
2206 
2207   if (GET_MODE_BITSIZE (mode) != (bitsize + bitnum))
2208     {
2209       int amount = GET_MODE_BITSIZE (mode) - (bitsize + bitnum);
2210       /* Maybe propagate the target for the shift.  */
2211       rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
2212       op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
2213     }
2214 
2215   return expand_shift (RSHIFT_EXPR, mode, op0,
2216 		       GET_MODE_BITSIZE (mode) - bitsize, target, 0);
2217 }
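
/* A worked example for extract_fixed_bit_field_1 (illustrative only,
   little-endian, REVERSE false): a 5-bit field at bit 3 of a 32-bit MODE.
   Unsigned: shift right by 3, convert, then mask with 0x1f because the
   field's msb was not the word's msb.  Signed: the narrowest integer mode
   covering bits 0..7 is the 8-bit one, so the value is truncated to it
   and arithmetic-shifted right by 8 - 5 = 3, propagating the field's
   sign bit.  */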
2218 
2219 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
2220    VALUE << BITPOS.  */
2221 
2222 static rtx
2223 lshift_value (machine_mode mode, unsigned HOST_WIDE_INT value,
2224 	      int bitpos)
2225 {
2226   return immed_wide_int_const (wi::lshift (value, bitpos), mode);
2227 }
2228 
2229 /* Extract a bit field that is split across two words
2230    and return an RTX for the result.
2231 
2232    OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
2233    BITSIZE is the field width; BITPOS, position of its first bit, in the word.
2234    UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.
2235    If OP0_MODE is defined, it is the mode of OP0, otherwise OP0 is
2236    a BLKmode MEM.
2237 
2238    If REVERSE is true, the extraction is to be done in reverse order.  */
2239 
2240 static rtx
2241 extract_split_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
2242 			 unsigned HOST_WIDE_INT bitsize,
2243 			 unsigned HOST_WIDE_INT bitpos, int unsignedp,
2244 			 bool reverse)
2245 {
2246   unsigned int unit;
2247   unsigned int bitsdone = 0;
2248   rtx result = NULL_RTX;
2249   int first = 1;
2250 
2251   /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle
2252      that much at a time.  */
2253   if (REG_P (op0) || GET_CODE (op0) == SUBREG)
2254     unit = BITS_PER_WORD;
2255   else
2256     unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
2257 
2258   while (bitsdone < bitsize)
2259     {
2260       unsigned HOST_WIDE_INT thissize;
2261       rtx part;
2262       unsigned HOST_WIDE_INT thispos;
2263       unsigned HOST_WIDE_INT offset;
2264 
2265       offset = (bitpos + bitsdone) / unit;
2266       thispos = (bitpos + bitsdone) % unit;
2267 
2268       /* THISSIZE must not overrun a word boundary.  Otherwise,
2269 	 extract_fixed_bit_field will call us again, and we will mutually
2270 	 recurse forever.  */
2271       thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
2272       thissize = MIN (thissize, unit - thispos);
2273 
2274       /* If OP0 is a register, then handle OFFSET here.  */
2275       rtx op0_piece = op0;
2276       opt_scalar_int_mode op0_piece_mode = op0_mode;
2277       if (SUBREG_P (op0) || REG_P (op0))
2278 	{
2279 	  op0_piece = operand_subword_force (op0, offset, op0_mode.require ());
2280 	  op0_piece_mode = word_mode;
2281 	  offset = 0;
2282 	}
2283 
2284       /* Extract the parts in bit-counting order,
2285 	 whose meaning is determined by BYTES_PER_UNIT.
2286 	 OFFSET is in UNITs, and UNIT is in bits.  */
2287       part = extract_fixed_bit_field (word_mode, op0_piece, op0_piece_mode,
2288 				      thissize, offset * unit + thispos,
2289 				      0, 1, reverse);
2290       bitsdone += thissize;
2291 
2292       /* Shift this part into place for the result.  */
2293       if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
2294 	{
2295 	  if (bitsize != bitsdone)
2296 	    part = expand_shift (LSHIFT_EXPR, word_mode, part,
2297 				 bitsize - bitsdone, 0, 1);
2298 	}
2299       else
2300 	{
2301 	  if (bitsdone != thissize)
2302 	    part = expand_shift (LSHIFT_EXPR, word_mode, part,
2303 				 bitsdone - thissize, 0, 1);
2304 	}
2305 
2306       if (first)
2307 	result = part;
2308       else
2309 	/* Combine the parts with bitwise or.  This works
2310 	   because we extracted each part as an unsigned bit field.  */
2311 	result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
2312 			       OPTAB_LIB_WIDEN);
2313 
2314       first = 0;
2315     }
2316 
2317   /* Unsigned bit field: we are done.  */
2318   if (unsignedp)
2319     return result;
2320   /* Signed bit field: sign-extend with two arithmetic shifts.  */
2321   result = expand_shift (LSHIFT_EXPR, word_mode, result,
2322 			 BITS_PER_WORD - bitsize, NULL_RTX, 0);
2323   return expand_shift (RSHIFT_EXPR, word_mode, result,
2324 		       BITS_PER_WORD - bitsize, NULL_RTX, 0);
2325 }
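
/* Sketch of the loop above on a 32-bit-word target (illustrative): a
   12-bit field starting at bit 26 of a REG is gathered in two pieces,
   6 bits from word 0 and 6 bits from word 1; each piece is extracted
   unsigned, shifted into position and IORed into RESULT, and the final
   two-shift sign extension runs only when UNSIGNEDP is zero.  */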
2326 
2327 /* Try to read the low bits of SRC as an rvalue of mode MODE, preserving
2328    the bit pattern.  SRC_MODE is the mode of SRC; if this is smaller than
2329    MODE, fill the upper bits with zeros.  Fail if the layout of either
2330    mode is unknown (as for CC modes) or if the extraction would involve
2331    unprofitable mode punning.  Return the value on success, otherwise
2332    return null.
2333 
2334    This is different from gen_lowpart* in these respects:
2335 
2336      - the returned value must always be considered an rvalue
2337 
2338      - when MODE is wider than SRC_MODE, the extraction involves
2339        a zero extension
2340 
2341      - when MODE is smaller than SRC_MODE, the extraction involves
2342        a truncation (and is thus subject to TARGET_TRULY_NOOP_TRUNCATION).
2343 
2344    In other words, this routine performs a computation, whereas the
2345    gen_lowpart* routines are conceptually lvalue or rvalue subreg
2346    operations.  */
2347 
2348 rtx
2349 extract_low_bits (machine_mode mode, machine_mode src_mode, rtx src)
2350 {
2351   scalar_int_mode int_mode, src_int_mode;
2352 
2353   if (mode == src_mode)
2354     return src;
2355 
2356   if (CONSTANT_P (src))
2357     {
2358       /* simplify_gen_subreg can't be used here, as if simplify_subreg
2359 	 fails, it will happily create (subreg (symbol_ref)) or similar
2360 	 invalid SUBREGs.  */
2361       poly_uint64 byte = subreg_lowpart_offset (mode, src_mode);
2362       rtx ret = simplify_subreg (mode, src, src_mode, byte);
2363       if (ret)
2364 	return ret;
2365 
2366       if (GET_MODE (src) == VOIDmode
2367 	  || !validate_subreg (mode, src_mode, src, byte))
2368 	return NULL_RTX;
2369 
2370       src = force_reg (GET_MODE (src), src);
2371       return gen_rtx_SUBREG (mode, src, byte);
2372     }
2373 
2374   if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
2375     return NULL_RTX;
2376 
2377   if (known_eq (GET_MODE_BITSIZE (mode), GET_MODE_BITSIZE (src_mode))
2378       && targetm.modes_tieable_p (mode, src_mode))
2379     {
2380       rtx x = gen_lowpart_common (mode, src);
2381       if (x)
2382         return x;
2383     }
2384 
2385   if (!int_mode_for_mode (src_mode).exists (&src_int_mode)
2386       || !int_mode_for_mode (mode).exists (&int_mode))
2387     return NULL_RTX;
2388 
2389   if (!targetm.modes_tieable_p (src_int_mode, src_mode))
2390     return NULL_RTX;
2391   if (!targetm.modes_tieable_p (int_mode, mode))
2392     return NULL_RTX;
2393 
2394   src = gen_lowpart (src_int_mode, src);
2395   src = convert_modes (int_mode, src_int_mode, src, true);
2396   src = gen_lowpart (mode, src);
2397   return src;
2398 }
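
/* For instance (illustrative, and only on targets where the mode-tieable
   checks succeed): reading the low 32 bits of a DFmode value as SImode
   takes the integer route above: view the DFmode source as DImode,
   truncate that to SImode with convert_modes, and return the result (the
   final gen_lowpart is then a no-op).  */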
2399 
2400 /* Add INC into TARGET.  */
2401 
2402 void
2403 expand_inc (rtx target, rtx inc)
2404 {
2405   rtx value = expand_binop (GET_MODE (target), add_optab,
2406 			    target, inc,
2407 			    target, 0, OPTAB_LIB_WIDEN);
2408   if (value != target)
2409     emit_move_insn (target, value);
2410 }
2411 
2412 /* Subtract DEC from TARGET.  */
2413 
2414 void
2415 expand_dec (rtx target, rtx dec)
2416 {
2417   rtx value = expand_binop (GET_MODE (target), sub_optab,
2418 			    target, dec,
2419 			    target, 0, OPTAB_LIB_WIDEN);
2420   if (value != target)
2421     emit_move_insn (target, value);
2422 }
2423 
2424 /* Output a shift instruction for expression code CODE,
2425    with SHIFTED being the rtx for the value to shift,
2426    and AMOUNT the rtx for the amount to shift by.
2427    Store the result in the rtx TARGET, if that is convenient.
2428    If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2429    Return the rtx for where the value is.
2430    If that cannot be done, abort the compilation unless MAY_FAIL is true,
2431    in which case 0 is returned.  */
2432 
2433 static rtx
2434 expand_shift_1 (enum tree_code code, machine_mode mode, rtx shifted,
2435 		rtx amount, rtx target, int unsignedp, bool may_fail = false)
2436 {
2437   rtx op1, temp = 0;
2438   int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
2439   int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
2440   optab lshift_optab = ashl_optab;
2441   optab rshift_arith_optab = ashr_optab;
2442   optab rshift_uns_optab = lshr_optab;
2443   optab lrotate_optab = rotl_optab;
2444   optab rrotate_optab = rotr_optab;
2445   machine_mode op1_mode;
2446   scalar_mode scalar_mode = GET_MODE_INNER (mode);
2447   int attempt;
2448   bool speed = optimize_insn_for_speed_p ();
2449 
2450   op1 = amount;
2451   op1_mode = GET_MODE (op1);
2452 
2453   /* Determine whether the shift/rotate amount is a vector, or scalar.  If the
2454      shift amount is a vector, use the vector/vector shift patterns.  */
2455   if (VECTOR_MODE_P (mode) && VECTOR_MODE_P (op1_mode))
2456     {
2457       lshift_optab = vashl_optab;
2458       rshift_arith_optab = vashr_optab;
2459       rshift_uns_optab = vlshr_optab;
2460       lrotate_optab = vrotl_optab;
2461       rrotate_optab = vrotr_optab;
2462     }
2463 
2464   /* Previously detected shift-counts computed by NEGATE_EXPR
2465      and shifted in the other direction; but that does not work
2466      on all machines.  */
2467 
2468   if (SHIFT_COUNT_TRUNCATED)
2469     {
2470       if (CONST_INT_P (op1)
2471 	  && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
2472 	      (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (scalar_mode)))
2473 	op1 = gen_int_shift_amount (mode,
2474 				    (unsigned HOST_WIDE_INT) INTVAL (op1)
2475 				    % GET_MODE_BITSIZE (scalar_mode));
2476       else if (GET_CODE (op1) == SUBREG
2477 	       && subreg_lowpart_p (op1)
2478 	       && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op1)))
2479 	       && SCALAR_INT_MODE_P (GET_MODE (op1)))
2480 	op1 = SUBREG_REG (op1);
2481     }
2482 
2483   /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
2484      prefer left rotation; if op1 is from bitsize / 2 + 1 to
2485      bitsize - 1, use the other direction of rotate with an amount of
2486      1 .. bitsize / 2 - 1 instead.  */
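  /* E.g. (illustrative) in a 32-bit mode a rotate left by 24 becomes a
     rotate right by 8, and a rotate right by 16 becomes a rotate left by
     16, since ties at bitsize / 2 prefer the left form.  */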
2487   if (rotate
2488       && CONST_INT_P (op1)
2489       && IN_RANGE (INTVAL (op1), GET_MODE_BITSIZE (scalar_mode) / 2 + left,
2490 		   GET_MODE_BITSIZE (scalar_mode) - 1))
2491     {
2492       op1 = gen_int_shift_amount (mode, (GET_MODE_BITSIZE (scalar_mode)
2493 					 - INTVAL (op1)));
2494       left = !left;
2495       code = left ? LROTATE_EXPR : RROTATE_EXPR;
2496     }
2497 
2498   /* Rotation of 16bit values by 8 bits is effectively equivalent to a bswaphi.
2499      Note that this is not the case for bigger values.  For instance a rotation
2500      of 0x01020304 by 16 bits gives 0x03040102 which is different from
2501      0x04030201 (bswapsi).  */
2502   if (rotate
2503       && CONST_INT_P (op1)
2504       && INTVAL (op1) == BITS_PER_UNIT
2505       && GET_MODE_SIZE (scalar_mode) == 2
2506       && optab_handler (bswap_optab, mode) != CODE_FOR_nothing)
2507     return expand_unop (mode, bswap_optab, shifted, NULL_RTX, unsignedp);
2508 
2509   if (op1 == const0_rtx)
2510     return shifted;
2511 
2512   /* Check whether it's cheaper to implement a left shift by a constant
2513      bit count as a sequence of additions.  */
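  /* E.g. (illustrative) if shift_cost reports a shift by 3 as more
     expensive than three additions, x << 3 is emitted below as three
     successive doublings.  */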
2514   if (code == LSHIFT_EXPR
2515       && CONST_INT_P (op1)
2516       && INTVAL (op1) > 0
2517       && INTVAL (op1) < GET_MODE_PRECISION (scalar_mode)
2518       && INTVAL (op1) < MAX_BITS_PER_WORD
2519       && (shift_cost (speed, mode, INTVAL (op1))
2520 	  > INTVAL (op1) * add_cost (speed, mode))
2521       && shift_cost (speed, mode, INTVAL (op1)) != MAX_COST)
2522     {
2523       int i;
2524       for (i = 0; i < INTVAL (op1); i++)
2525 	{
2526 	  temp = force_reg (mode, shifted);
2527 	  shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
2528 				  unsignedp, OPTAB_LIB_WIDEN);
2529 	}
2530       return shifted;
2531     }
2532 
2533   for (attempt = 0; temp == 0 && attempt < 3; attempt++)
2534     {
2535       enum optab_methods methods;
2536 
2537       if (attempt == 0)
2538 	methods = OPTAB_DIRECT;
2539       else if (attempt == 1)
2540 	methods = OPTAB_WIDEN;
2541       else
2542 	methods = OPTAB_LIB_WIDEN;
2543 
2544       if (rotate)
2545 	{
2546 	  /* Widening does not work for rotation.  */
2547 	  if (methods == OPTAB_WIDEN)
2548 	    continue;
2549 	  else if (methods == OPTAB_LIB_WIDEN)
2550 	    {
2551 	      /* If we have been unable to open-code this by a rotation,
2552 		 do it as the IOR of two shifts.  I.e., to rotate A
2553 		 by N bits, compute
2554 		 (A << N) | ((unsigned) A >> ((-N) & (C - 1)))
2555 		 where C is the bitsize of A.
2556 
2557 		 It is theoretically possible that the target machine might
2558 		 not be able to perform either shift and hence we would
2559 		 be making two libcalls rather than just the one for the
2560 		 shift (similarly if IOR could not be done).  We will allow
2561 		 this extremely unlikely lossage to avoid complicating the
2562 		 code below.  */
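	      /* Concretely (illustrative): rotating A left by 8 in a
		 32-bit mode becomes (A << 8) | ((unsigned) A >> 24); for
		 a variable N the second shift amount is computed as
		 (-N) & 31.  */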
2563 
2564 	      rtx subtarget = target == shifted ? 0 : target;
2565 	      rtx new_amount, other_amount;
2566 	      rtx temp1;
2567 
2568 	      new_amount = op1;
2569 	      if (op1 == const0_rtx)
2570 		return shifted;
2571 	      else if (CONST_INT_P (op1))
2572 		other_amount = gen_int_shift_amount
2573 		  (mode, GET_MODE_BITSIZE (scalar_mode) - INTVAL (op1));
2574 	      else
2575 		{
2576 		  other_amount
2577 		    = simplify_gen_unary (NEG, GET_MODE (op1),
2578 					  op1, GET_MODE (op1));
2579 		  HOST_WIDE_INT mask = GET_MODE_PRECISION (scalar_mode) - 1;
2580 		  other_amount
2581 		    = simplify_gen_binary (AND, GET_MODE (op1), other_amount,
2582 					   gen_int_mode (mask, GET_MODE (op1)));
2583 		}
2584 
2585 	      shifted = force_reg (mode, shifted);
2586 
2587 	      temp = expand_shift_1 (left ? LSHIFT_EXPR : RSHIFT_EXPR,
2588 				     mode, shifted, new_amount, 0, 1);
2589 	      temp1 = expand_shift_1 (left ? RSHIFT_EXPR : LSHIFT_EXPR,
2590 				      mode, shifted, other_amount,
2591 				      subtarget, 1);
2592 	      return expand_binop (mode, ior_optab, temp, temp1, target,
2593 				   unsignedp, methods);
2594 	    }
2595 
2596 	  temp = expand_binop (mode,
2597 			       left ? lrotate_optab : rrotate_optab,
2598 			       shifted, op1, target, unsignedp, methods);
2599 	}
2600       else if (unsignedp)
2601 	temp = expand_binop (mode,
2602 			     left ? lshift_optab : rshift_uns_optab,
2603 			     shifted, op1, target, unsignedp, methods);
2604 
2605       /* Do arithmetic shifts.
2606 	 Also, if we are going to widen the operand, we can just as well
2607 	 use an arithmetic right-shift instead of a logical one.  */
2608       if (temp == 0 && ! rotate
2609 	  && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2610 	{
2611 	  enum optab_methods methods1 = methods;
2612 
2613 	  /* If trying to widen a log shift to an arithmetic shift,
2614 	     don't accept an arithmetic shift of the same size.  */
2615 	  if (unsignedp)
2616 	    methods1 = OPTAB_MUST_WIDEN;
2617 
2618 	  /* Arithmetic shift */
2619 
2620 	  temp = expand_binop (mode,
2621 			       left ? lshift_optab : rshift_arith_optab,
2622 			       shifted, op1, target, unsignedp, methods1);
2623 	}
2624 
2625       /* We used to try extzv here for logical right shifts, but that was
2626 	 only useful for one machine, the VAX, and caused poor code
2627 	 generation there for lshrdi3, so the code was deleted and a
2628 	 define_expand for lshrsi3 was added to vax.md.  */
2629     }
2630 
2631   gcc_assert (temp != NULL_RTX || may_fail);
2632   return temp;
2633 }
2634 
2635 /* Output a shift instruction for expression code CODE,
2636    with SHIFTED being the rtx for the value to shift,
2637    and AMOUNT the amount to shift by.
2638    Store the result in the rtx TARGET, if that is convenient.
2639    If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2640    Return the rtx for where the value is.  */
2641 
2642 rtx
2643 expand_shift (enum tree_code code, machine_mode mode, rtx shifted,
2644 	      poly_int64 amount, rtx target, int unsignedp)
2645 {
2646   return expand_shift_1 (code, mode, shifted,
2647 			 gen_int_shift_amount (mode, amount),
2648 			 target, unsignedp);
2649 }
2650 
2651 /* Likewise, but return 0 if that cannot be done.  */
2652 
2653 static rtx
2654 maybe_expand_shift (enum tree_code code, machine_mode mode, rtx shifted,
2655 		    int amount, rtx target, int unsignedp)
2656 {
2657   return expand_shift_1 (code, mode,
2658 			 shifted, GEN_INT (amount), target, unsignedp, true);
2659 }
2660 
2661 /* Output a shift instruction for expression code CODE,
2662    with SHIFTED being the rtx for the value to shift,
2663    and AMOUNT the tree for the amount to shift by.
2664    Store the result in the rtx TARGET, if that is convenient.
2665    If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2666    Return the rtx for where the value is.  */
2667 
2668 rtx
2669 expand_variable_shift (enum tree_code code, machine_mode mode, rtx shifted,
2670 		       tree amount, rtx target, int unsignedp)
2671 {
2672   return expand_shift_1 (code, mode,
2673 			 shifted, expand_normal (amount), target, unsignedp);
2674 }
2675 
2676 
2677 static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
2678 			const struct mult_cost *, machine_mode mode);
2679 static rtx expand_mult_const (machine_mode, rtx, HOST_WIDE_INT, rtx,
2680 			      const struct algorithm *, enum mult_variant);
2681 static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2682 static rtx extract_high_half (scalar_int_mode, rtx);
2683 static rtx expmed_mult_highpart (scalar_int_mode, rtx, rtx, rtx, int, int);
2684 static rtx expmed_mult_highpart_optab (scalar_int_mode, rtx, rtx, rtx,
2685 				       int, int);
2686 /* Compute and return the best algorithm for multiplying by T.
2687    The algorithm must cost less than COST_LIMIT.
2688    If retval.cost >= COST_LIMIT, no algorithm was found and all
2689    other fields of the returned struct are undefined.
2690    MODE is the machine mode of the multiplication.  */
2691 
2692 static void
2693 synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
2694 	    const struct mult_cost *cost_limit, machine_mode mode)
2695 {
2696   int m;
2697   struct algorithm *alg_in, *best_alg;
2698   struct mult_cost best_cost;
2699   struct mult_cost new_limit;
2700   int op_cost, op_latency;
2701   unsigned HOST_WIDE_INT orig_t = t;
2702   unsigned HOST_WIDE_INT q;
2703   int maxm, hash_index;
2704   bool cache_hit = false;
2705   enum alg_code cache_alg = alg_zero;
2706   bool speed = optimize_insn_for_speed_p ();
2707   scalar_int_mode imode;
2708   struct alg_hash_entry *entry_ptr;
2709 
2710   /* Indicate that no algorithm is yet found.  If no algorithm
2711      is found, this value will be returned and indicate failure.  */
2712   alg_out->cost.cost = cost_limit->cost + 1;
2713   alg_out->cost.latency = cost_limit->latency + 1;
2714 
2715   if (cost_limit->cost < 0
2716       || (cost_limit->cost == 0 && cost_limit->latency <= 0))
2717     return;
2718 
2719   /* Be prepared for vector modes.  */
2720   imode = as_a <scalar_int_mode> (GET_MODE_INNER (mode));
2721 
2722   maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (imode));
2723 
2724   /* Restrict the bits of "t" to the multiplication's mode.  */
2725   t &= GET_MODE_MASK (imode);
2726 
2727   /* t == 1 can be done in zero cost.  */
2728   if (t == 1)
2729     {
2730       alg_out->ops = 1;
2731       alg_out->cost.cost = 0;
2732       alg_out->cost.latency = 0;
2733       alg_out->op[0] = alg_m;
2734       return;
2735     }
2736 
2737   /* t == 0 sometimes has a cost.  If it does and it exceeds our limit,
2738      fail now.  */
2739   if (t == 0)
2740     {
2741       if (MULT_COST_LESS (cost_limit, zero_cost (speed)))
2742 	return;
2743       else
2744 	{
2745 	  alg_out->ops = 1;
2746 	  alg_out->cost.cost = zero_cost (speed);
2747 	  alg_out->cost.latency = zero_cost (speed);
2748 	  alg_out->op[0] = alg_zero;
2749 	  return;
2750 	}
2751     }
2752 
2753   /* We'll be needing a couple extra algorithm structures now.  */
2754 
2755   alg_in = XALLOCA (struct algorithm);
2756   best_alg = XALLOCA (struct algorithm);
2757   best_cost = *cost_limit;
2758 
2759   /* Compute the hash index.  */
2760   hash_index = (t ^ (unsigned int) mode ^ (speed * 256)) % NUM_ALG_HASH_ENTRIES;
2761 
2762   /* See if we already know what to do for T.  */
2763   entry_ptr = alg_hash_entry_ptr (hash_index);
2764   if (entry_ptr->t == t
2765       && entry_ptr->mode == mode
2766       && entry_ptr->speed == speed
2767       && entry_ptr->alg != alg_unknown)
2768     {
2769       cache_alg = entry_ptr->alg;
2770 
2771       if (cache_alg == alg_impossible)
2772 	{
2773 	  /* The cache tells us that it's impossible to synthesize
2774 	     multiplication by T within entry_ptr->cost.  */
2775 	  if (!CHEAPER_MULT_COST (&entry_ptr->cost, cost_limit))
2776 	    /* COST_LIMIT is at least as restrictive as the one
2777 	       recorded in the hash table, in which case we have no
2778 	       hope of synthesizing a multiplication.  Just
2779 	       return.  */
2780 	    return;
2781 
2782 	  /* If we get here, COST_LIMIT is less restrictive than the
2783 	     one recorded in the hash table, so we may be able to
2784 	     synthesize a multiplication.  Proceed as if we didn't
2785 	     have the cache entry.  */
2786 	}
2787       else
2788 	{
2789 	  if (CHEAPER_MULT_COST (cost_limit, &entry_ptr->cost))
2790 	    /* The cached algorithm shows that this multiplication
2791 	       requires more cost than COST_LIMIT.  Just return.  This
2792 	       way, we don't clobber this cache entry with
2793 	       alg_impossible but retain useful information.  */
2794 	    return;
2795 
2796 	  cache_hit = true;
2797 
2798 	  switch (cache_alg)
2799 	    {
2800 	    case alg_shift:
2801 	      goto do_alg_shift;
2802 
2803 	    case alg_add_t_m2:
2804 	    case alg_sub_t_m2:
2805 	      goto do_alg_addsub_t_m2;
2806 
2807 	    case alg_add_factor:
2808 	    case alg_sub_factor:
2809 	      goto do_alg_addsub_factor;
2810 
2811 	    case alg_add_t2_m:
2812 	      goto do_alg_add_t2_m;
2813 
2814 	    case alg_sub_t2_m:
2815 	      goto do_alg_sub_t2_m;
2816 
2817 	    default:
2818 	      gcc_unreachable ();
2819 	    }
2820 	}
2821     }
2822 
2823   /* If we have a group of zero bits at the low-order part of T, try
2824      multiplying by the remaining bits and then doing a shift.  */
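  /* E.g. (illustrative) t = 20 has two low zero bits, so the candidate
     below is "multiply by 5, then shift left by 2", costed against the
     best alternative found so far.  */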
2825 
2826   if ((t & 1) == 0)
2827     {
2828     do_alg_shift:
2829       m = ctz_or_zero (t); /* m = number of low zero bits */
2830       if (m < maxm)
2831 	{
2832 	  q = t >> m;
2833 	  /* The function expand_shift will choose between a shift and
2834 	     a sequence of additions, so the observed cost is given as
2835 	     MIN (m * add_cost(speed, mode), shift_cost(speed, mode, m)).  */
2836 	  op_cost = m * add_cost (speed, mode);
2837 	  if (shift_cost (speed, mode, m) < op_cost)
2838 	    op_cost = shift_cost (speed, mode, m);
2839 	  new_limit.cost = best_cost.cost - op_cost;
2840 	  new_limit.latency = best_cost.latency - op_cost;
2841 	  synth_mult (alg_in, q, &new_limit, mode);
2842 
2843 	  alg_in->cost.cost += op_cost;
2844 	  alg_in->cost.latency += op_cost;
2845 	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2846 	    {
2847 	      best_cost = alg_in->cost;
2848 	      std::swap (alg_in, best_alg);
2849 	      best_alg->log[best_alg->ops] = m;
2850 	      best_alg->op[best_alg->ops] = alg_shift;
2851 	    }
2852 
2853 	  /* See if treating ORIG_T as a signed number yields a better
2854 	     sequence.  Try this sequence only for a negative ORIG_T
2855 	     as it would be useless for a non-negative ORIG_T.  */
2856 	  if ((HOST_WIDE_INT) orig_t < 0)
2857 	    {
2858 	      /* Shift ORIG_T as follows because a right shift of a
2859 		 negative-valued signed type is implementation
2860 		 defined.  */
2861 	      q = ~(~orig_t >> m);
2862 	      /* The function expand_shift will choose between a shift
2863 		 and a sequence of additions, so the observed cost is
2864 		 given as MIN (m * add_cost(speed, mode),
2865 		 shift_cost(speed, mode, m)).  */
2866 	      op_cost = m * add_cost (speed, mode);
2867 	      if (shift_cost (speed, mode, m) < op_cost)
2868 		op_cost = shift_cost (speed, mode, m);
2869 	      new_limit.cost = best_cost.cost - op_cost;
2870 	      new_limit.latency = best_cost.latency - op_cost;
2871 	      synth_mult (alg_in, q, &new_limit, mode);
2872 
2873 	      alg_in->cost.cost += op_cost;
2874 	      alg_in->cost.latency += op_cost;
2875 	      if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2876 		{
2877 		  best_cost = alg_in->cost;
2878 		  std::swap (alg_in, best_alg);
2879 		  best_alg->log[best_alg->ops] = m;
2880 		  best_alg->op[best_alg->ops] = alg_shift;
2881 		}
2882 	    }
2883 	}
2884       if (cache_hit)
2885 	goto done;
2886     }
2887 
2888   /* If we have an odd number, add or subtract one.  */
2889   if ((t & 1) != 0)
2890     {
2891       unsigned HOST_WIDE_INT w;
2892 
2893     do_alg_addsub_t_m2:
2894       for (w = 1; (w & t) != 0; w <<= 1)
2895 	;
2896       /* If T was -1, then W will be zero after the loop.  This is another
2897 	 case where T ends with ...111.  Handling it by multiplying by (T + 1)
2898 	 and subtracting 1 produces slightly better code and makes algorithm
2899 	 selection much faster than treating it like the ...0111 case
2900 	 below.  */
2901       if (w == 0
2902 	  || (w > 2
2903 	      /* Reject the case where t is 3.
2904 		 Thus we prefer addition in that case.  */
2905 	      && t != 3))
2906 	{
2907 	  /* T ends with ...111.  Multiply by (T + 1) and subtract 1.  */
2908 
2909 	  op_cost = add_cost (speed, mode);
2910 	  new_limit.cost = best_cost.cost - op_cost;
2911 	  new_limit.latency = best_cost.latency - op_cost;
2912 	  synth_mult (alg_in, t + 1, &new_limit, mode);
2913 
2914 	  alg_in->cost.cost += op_cost;
2915 	  alg_in->cost.latency += op_cost;
2916 	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2917 	    {
2918 	      best_cost = alg_in->cost;
2919 	      std::swap (alg_in, best_alg);
2920 	      best_alg->log[best_alg->ops] = 0;
2921 	      best_alg->op[best_alg->ops] = alg_sub_t_m2;
2922 	    }
2923 	}
2924       else
2925 	{
2926 	  /* T ends with ...01 or ...011.  Multiply by (T - 1) and add 1.  */
2927 
2928 	  op_cost = add_cost (speed, mode);
2929 	  new_limit.cost = best_cost.cost - op_cost;
2930 	  new_limit.latency = best_cost.latency - op_cost;
2931 	  synth_mult (alg_in, t - 1, &new_limit, mode);
2932 
2933 	  alg_in->cost.cost += op_cost;
2934 	  alg_in->cost.latency += op_cost;
2935 	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2936 	    {
2937 	      best_cost = alg_in->cost;
2938 	      std::swap (alg_in, best_alg);
2939 	      best_alg->log[best_alg->ops] = 0;
2940 	      best_alg->op[best_alg->ops] = alg_add_t_m2;
2941 	    }
2942 	}
2943 
2944       /* We may be able to calculate a * -7, a * -15, a * -31, etc
2945 	 quickly with a - a * n for some appropriate constant n.  */
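      /* E.g. (illustrative) a * -7 = a - (a << 3): here m = 3 and the
	 recursive call synthesizes multiplication by 1.  */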
2946       m = exact_log2 (-orig_t + 1);
2947       if (m >= 0 && m < maxm)
2948 	{
2949 	  op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
2950 	  /* If the target has a cheap shift-and-subtract insn use
2951 	     that in preference to a shift insn followed by a sub insn.
2952 	     Assume that the shift-and-sub is "atomic" with a latency
2953 	     equal to its cost, otherwise assume that on superscalar
2954 	     hardware the shift may be executed concurrently with the
2955 	     earlier steps in the algorithm.  */
2956 	  if (shiftsub1_cost (speed, mode, m) <= op_cost)
2957 	    {
2958 	      op_cost = shiftsub1_cost (speed, mode, m);
2959 	      op_latency = op_cost;
2960 	    }
2961 	  else
2962 	    op_latency = add_cost (speed, mode);
2963 
2964 	  new_limit.cost = best_cost.cost - op_cost;
2965 	  new_limit.latency = best_cost.latency - op_latency;
2966 	  synth_mult (alg_in, (unsigned HOST_WIDE_INT) (-orig_t + 1) >> m,
2967 		      &new_limit, mode);
2968 
2969 	  alg_in->cost.cost += op_cost;
2970 	  alg_in->cost.latency += op_latency;
2971 	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2972 	    {
2973 	      best_cost = alg_in->cost;
2974 	      std::swap (alg_in, best_alg);
2975 	      best_alg->log[best_alg->ops] = m;
2976 	      best_alg->op[best_alg->ops] = alg_sub_t_m2;
2977 	    }
2978 	}
2979 
2980       if (cache_hit)
2981 	goto done;
2982     }
2983 
2984   /* Look for factors of t of the form
2985      t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2986      If we find such a factor, we can multiply by t using an algorithm that
2987      multiplies by q, shift the result by m and add/subtract it to itself.
2988 
2989      We search for large factors first and loop down, even if large factors
2990      are less probable than small; if we find a large factor we will find a
2991      good sequence quickly, and therefore be able to prune (by decreasing
2992      COST_LIMIT) the search.  */
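  /* For example (illustrative only): t = 45 factors as 5 * (2**3 + 1),
     so synth_mult can first form x*5 = (x << 2) + x and then
     x*45 = (x*5 << 3) + x*5 with a single extra shift-and-add.  */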
2993 
2994  do_alg_addsub_factor:
2995   for (m = floor_log2 (t - 1); m >= 2; m--)
2996     {
2997       unsigned HOST_WIDE_INT d;
2998 
2999       d = (HOST_WIDE_INT_1U << m) + 1;
3000       if (t % d == 0 && t > d && m < maxm
3001 	  && (!cache_hit || cache_alg == alg_add_factor))
3002 	{
3003 	  op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
3004 	  if (shiftadd_cost (speed, mode, m) <= op_cost)
3005 	    op_cost = shiftadd_cost (speed, mode, m);
3006 
3007 	  op_latency = op_cost;
3008 
3009 
3010 	  new_limit.cost = best_cost.cost - op_cost;
3011 	  new_limit.latency = best_cost.latency - op_latency;
3012 	  synth_mult (alg_in, t / d, &new_limit, mode);
3013 
3014 	  alg_in->cost.cost += op_cost;
3015 	  alg_in->cost.latency += op_latency;
3016 	  if (alg_in->cost.latency < op_cost)
3017 	    alg_in->cost.latency = op_cost;
3018 	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
3019 	    {
3020 	      best_cost = alg_in->cost;
3021 	      std::swap (alg_in, best_alg);
3022 	      best_alg->log[best_alg->ops] = m;
3023 	      best_alg->op[best_alg->ops] = alg_add_factor;
3024 	    }
3025 	  /* Other factors will have been taken care of in the recursion.  */
3026 	  break;
3027 	}
3028 
3029       d = (HOST_WIDE_INT_1U << m) - 1;
3030       if (t % d == 0 && t > d && m < maxm
3031 	  && (!cache_hit || cache_alg == alg_sub_factor))
3032 	{
3033 	  op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
3034 	  if (shiftsub0_cost (speed, mode, m) <= op_cost)
3035 	    op_cost = shiftsub0_cost (speed, mode, m);
3036 
3037 	  op_latency = op_cost;
3038 
3039 	  new_limit.cost = best_cost.cost - op_cost;
3040 	  new_limit.latency = best_cost.latency - op_latency;
3041 	  synth_mult (alg_in, t / d, &new_limit, mode);
3042 
3043 	  alg_in->cost.cost += op_cost;
3044 	  alg_in->cost.latency += op_latency;
3045 	  if (alg_in->cost.latency < op_cost)
3046 	    alg_in->cost.latency = op_cost;
3047 	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
3048 	    {
3049 	      best_cost = alg_in->cost;
3050 	      std::swap (alg_in, best_alg);
3051 	      best_alg->log[best_alg->ops] = m;
3052 	      best_alg->op[best_alg->ops] = alg_sub_factor;
3053 	    }
3054 	  break;
3055 	}
3056     }
3057   if (cache_hit)
3058     goto done;
3059 
3060   /* Try shift-and-add (load effective address) instructions,
3061      i.e. do a*3, a*5, a*9.  */
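  /* Illustrative example: for t = 9 we get q = 8 and m = 3, the
     recursion reduces to the multiplier 1, and one shift-and-add forms
     x*9 = (x << 3) + x, which many targets can emit as a single
     LEA-style instruction.  */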
3062   if ((t & 1) != 0)
3063     {
3064     do_alg_add_t2_m:
3065       q = t - 1;
3066       m = ctz_hwi (q);
3067       if (q && m < maxm)
3068 	{
3069 	  op_cost = shiftadd_cost (speed, mode, m);
3070 	  new_limit.cost = best_cost.cost - op_cost;
3071 	  new_limit.latency = best_cost.latency - op_cost;
3072 	  synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);
3073 
3074 	  alg_in->cost.cost += op_cost;
3075 	  alg_in->cost.latency += op_cost;
3076 	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
3077 	    {
3078 	      best_cost = alg_in->cost;
3079 	      std::swap (alg_in, best_alg);
3080 	      best_alg->log[best_alg->ops] = m;
3081 	      best_alg->op[best_alg->ops] = alg_add_t2_m;
3082 	    }
3083 	}
3084       if (cache_hit)
3085 	goto done;
3086 
3087     do_alg_sub_t2_m:
3088       q = t + 1;
3089       m = ctz_hwi (q);
3090       if (q && m < maxm)
3091 	{
3092 	  op_cost = shiftsub0_cost (speed, mode, m);
3093 	  new_limit.cost = best_cost.cost - op_cost;
3094 	  new_limit.latency = best_cost.latency - op_cost;
3095 	  synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);
3096 
3097 	  alg_in->cost.cost += op_cost;
3098 	  alg_in->cost.latency += op_cost;
3099 	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
3100 	    {
3101 	      best_cost = alg_in->cost;
3102 	      std::swap (alg_in, best_alg);
3103 	      best_alg->log[best_alg->ops] = m;
3104 	      best_alg->op[best_alg->ops] = alg_sub_t2_m;
3105 	    }
3106 	}
3107       if (cache_hit)
3108 	goto done;
3109     }
3110 
3111  done:
3112   /* If best_cost has not decreased, we have not found any algorithm.  */
3113   if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
3114     {
3115       /* We failed to find an algorithm.  Record alg_impossible for
3116 	 this case (that is, <T, MODE, COST_LIMIT>) so that next time
3117 	 we are asked to find an algorithm for T within the same or
3118 	 lower COST_LIMIT, we can immediately return to the
3119 	 caller.  */
3120       entry_ptr->t = t;
3121       entry_ptr->mode = mode;
3122       entry_ptr->speed = speed;
3123       entry_ptr->alg = alg_impossible;
3124       entry_ptr->cost = *cost_limit;
3125       return;
3126     }
3127 
3128   /* Cache the result.  */
3129   if (!cache_hit)
3130     {
3131       entry_ptr->t = t;
3132       entry_ptr->mode = mode;
3133       entry_ptr->speed = speed;
3134       entry_ptr->alg = best_alg->op[best_alg->ops];
3135       entry_ptr->cost.cost = best_cost.cost;
3136       entry_ptr->cost.latency = best_cost.latency;
3137     }
3138 
3139   /* If we are getting a sequence too long for `struct algorithm'
3140      to record, make this search fail.  */
3141   if (best_alg->ops == MAX_BITS_PER_WORD)
3142     return;
3143 
3144   /* Copy the algorithm from temporary space to the space at alg_out.
3145      We avoid using structure assignment because the majority of
3146      best_alg is normally undefined, and this is a critical function.  */
3147   alg_out->ops = best_alg->ops + 1;
3148   alg_out->cost = best_cost;
3149   memcpy (alg_out->op, best_alg->op,
3150 	  alg_out->ops * sizeof *alg_out->op);
3151   memcpy (alg_out->log, best_alg->log,
3152 	  alg_out->ops * sizeof *alg_out->log);
3153 }
3154 
3155 /* Find the cheapest way of multiplying a value of mode MODE by VAL.
3156    Try three variations:
3157 
3158        - a shift/add sequence based on VAL itself
3159        - a shift/add sequence based on -VAL, followed by a negation
3160        - a shift/add sequence based on VAL - 1, followed by an addition.
3161 
3162    Return true if the cheapest of these cost less than MULT_COST,
3163    Return true if the cheapest of these costs less than MULT_COST,
3164 
3165 bool
3166 choose_mult_variant (machine_mode mode, HOST_WIDE_INT val,
3167 		     struct algorithm *alg, enum mult_variant *variant,
3168 		     int mult_cost)
3169 {
3170   struct algorithm alg2;
3171   struct mult_cost limit;
3172   int op_cost;
3173   bool speed = optimize_insn_for_speed_p ();
3174 
3175   /* Fail quickly for impossible bounds.  */
3176   if (mult_cost < 0)
3177     return false;
3178 
3179   /* Ensure that mult_cost provides a reasonable upper bound.
3180      Any constant multiplication can be performed with less
3181      than 2 * bits additions.  */
3182   op_cost = 2 * GET_MODE_UNIT_BITSIZE (mode) * add_cost (speed, mode);
3183   if (mult_cost > op_cost)
3184     mult_cost = op_cost;
3185 
3186   *variant = basic_variant;
3187   limit.cost = mult_cost;
3188   limit.latency = mult_cost;
3189   synth_mult (alg, val, &limit, mode);
3190 
3191   /* This works only if the inverted value actually fits in an
3192      `unsigned int'.  */
3193   if (HOST_BITS_PER_INT >= GET_MODE_UNIT_BITSIZE (mode))
3194     {
3195       op_cost = neg_cost (speed, mode);
3196       if (MULT_COST_LESS (&alg->cost, mult_cost))
3197 	{
3198 	  limit.cost = alg->cost.cost - op_cost;
3199 	  limit.latency = alg->cost.latency - op_cost;
3200 	}
3201       else
3202 	{
3203 	  limit.cost = mult_cost - op_cost;
3204 	  limit.latency = mult_cost - op_cost;
3205 	}
3206 
3207       synth_mult (&alg2, -val, &limit, mode);
3208       alg2.cost.cost += op_cost;
3209       alg2.cost.latency += op_cost;
3210       if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
3211 	*alg = alg2, *variant = negate_variant;
3212     }
3213 
3214   /* This proves very useful for division-by-constant.  */
3215   op_cost = add_cost (speed, mode);
3216   if (MULT_COST_LESS (&alg->cost, mult_cost))
3217     {
3218       limit.cost = alg->cost.cost - op_cost;
3219       limit.latency = alg->cost.latency - op_cost;
3220     }
3221   else
3222     {
3223       limit.cost = mult_cost - op_cost;
3224       limit.latency = mult_cost - op_cost;
3225     }
3226 
3227   synth_mult (&alg2, val - 1, &limit, mode);
3228   alg2.cost.cost += op_cost;
3229   alg2.cost.latency += op_cost;
3230   if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
3231     *alg = alg2, *variant = add_variant;
3232 
3233   return MULT_COST_LESS (&alg->cost, mult_cost);
3234 }
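
/* As a concrete illustration (which variant wins depends on the target's
   shift, add and negate costs): for VAL = 7 the three candidates above are
   x*7 = (x << 3) - x, the negated form -(x - (x << 3)), and one possible
   add_variant form (((x << 1) + x) << 1) + x built from VAL - 1 = 6.  */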
3235 
3236 /* A subroutine of expand_mult, used for constant multiplications.
3237    Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
3238    convenient.  Use the shift/add sequence described by ALG and apply
3239    the final fixup specified by VARIANT.  */
3240 
3241 static rtx
3242 expand_mult_const (machine_mode mode, rtx op0, HOST_WIDE_INT val,
3243 		   rtx target, const struct algorithm *alg,
3244 		   enum mult_variant variant)
3245 {
3246   unsigned HOST_WIDE_INT val_so_far;
3247   rtx_insn *insn;
3248   rtx accum, tem;
3249   int opno;
3250   machine_mode nmode;
3251 
3252   /* Avoid referencing memory over and over, and avoid invalid
3253      sharing of SUBREGs.  */
3254   op0 = force_reg (mode, op0);
3255 
3256   /* ACCUM starts out either as OP0 or as a zero, depending on
3257      the first operation.  */
3258 
3259   if (alg->op[0] == alg_zero)
3260     {
3261       accum = copy_to_mode_reg (mode, CONST0_RTX (mode));
3262       val_so_far = 0;
3263     }
3264   else if (alg->op[0] == alg_m)
3265     {
3266       accum = copy_to_mode_reg (mode, op0);
3267       val_so_far = 1;
3268     }
3269   else
3270     gcc_unreachable ();
3271 
3272   for (opno = 1; opno < alg->ops; opno++)
3273     {
3274       int log = alg->log[opno];
3275       rtx shift_subtarget = optimize ? 0 : accum;
3276       rtx add_target
3277 	= (opno == alg->ops - 1 && target != 0 && variant != add_variant
3278 	   && !optimize)
3279 	  ? target : 0;
3280       rtx accum_target = optimize ? 0 : accum;
3281       rtx accum_inner;
3282 
3283       switch (alg->op[opno])
3284 	{
3285 	case alg_shift:
3286 	  tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
3287 	  /* REG_EQUAL note will be attached to the following insn.  */
3288 	  emit_move_insn (accum, tem);
3289 	  val_so_far <<= log;
3290 	  break;
3291 
3292 	case alg_add_t_m2:
3293 	  tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
3294 	  accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
3295 				 add_target ? add_target : accum_target);
3296 	  val_so_far += HOST_WIDE_INT_1U << log;
3297 	  break;
3298 
3299 	case alg_sub_t_m2:
3300 	  tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
3301 	  accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
3302 				 add_target ? add_target : accum_target);
3303 	  val_so_far -= HOST_WIDE_INT_1U << log;
3304 	  break;
3305 
3306 	case alg_add_t2_m:
3307 	  accum = expand_shift (LSHIFT_EXPR, mode, accum,
3308 				log, shift_subtarget, 0);
3309 	  accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
3310 				 add_target ? add_target : accum_target);
3311 	  val_so_far = (val_so_far << log) + 1;
3312 	  break;
3313 
3314 	case alg_sub_t2_m:
3315 	  accum = expand_shift (LSHIFT_EXPR, mode, accum,
3316 				log, shift_subtarget, 0);
3317 	  accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
3318 				 add_target ? add_target : accum_target);
3319 	  val_so_far = (val_so_far << log) - 1;
3320 	  break;
3321 
3322 	case alg_add_factor:
3323 	  tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
3324 	  accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
3325 				 add_target ? add_target : accum_target);
3326 	  val_so_far += val_so_far << log;
3327 	  break;
3328 
3329 	case alg_sub_factor:
3330 	  tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
3331 	  accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
3332 				 (add_target
3333 				  ? add_target : (optimize ? 0 : tem)));
3334 	  val_so_far = (val_so_far << log) - val_so_far;
3335 	  break;
3336 
3337 	default:
3338 	  gcc_unreachable ();
3339 	}
3340 
3341       if (SCALAR_INT_MODE_P (mode))
3342 	{
3343 	  /* Write a REG_EQUAL note on the last insn so that we can cse
3344 	     multiplication sequences.  Note that if ACCUM is a SUBREG,
3345 	     we've set the inner register and must properly indicate that.  */
3346 	  tem = op0, nmode = mode;
3347 	  accum_inner = accum;
3348 	  if (GET_CODE (accum) == SUBREG)
3349 	    {
3350 	      accum_inner = SUBREG_REG (accum);
3351 	      nmode = GET_MODE (accum_inner);
3352 	      tem = gen_lowpart (nmode, op0);
3353 	    }
3354 
3355 	  insn = get_last_insn ();
3356 	  wide_int wval_so_far
3357 	    = wi::uhwi (val_so_far,
3358 			GET_MODE_PRECISION (as_a <scalar_mode> (nmode)));
3359 	  rtx c = immed_wide_int_const (wval_so_far, nmode);
3360 	  set_dst_reg_note (insn, REG_EQUAL, gen_rtx_MULT (nmode, tem, c),
3361 			    accum_inner);
3362 	}
3363     }
3364 
3365   if (variant == negate_variant)
3366     {
3367       val_so_far = -val_so_far;
3368       accum = expand_unop (mode, neg_optab, accum, target, 0);
3369     }
3370   else if (variant == add_variant)
3371     {
3372       val_so_far = val_so_far + 1;
3373       accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
3374     }
3375 
3376   /* Compare only the bits of val and val_so_far that are significant
3377      in the result mode, to avoid sign-/zero-extension confusion.  */
3378   nmode = GET_MODE_INNER (mode);
3379   val &= GET_MODE_MASK (nmode);
3380   val_so_far &= GET_MODE_MASK (nmode);
3381   gcc_assert (val == (HOST_WIDE_INT) val_so_far);
3382 
3383   return accum;
3384 }
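
/* A minimal host-side sketch of the kind of sequence the loop above emits
   for a multiplier of 45, using the factor algorithm; the exact steps
   depend on what synth_mult found and on target costs, so both the
   function name and the step order below are illustrative only.  */
#if 0
static unsigned int
example_mul45 (unsigned int x)
{
  unsigned int accum = x;	/* alg_m:          val_so_far = 1  */
  accum += accum << 2;		/* alg_add_factor: val_so_far = 5  */
  accum += accum << 3;		/* alg_add_factor: val_so_far = 45 */
  return accum;
}
#endif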
3385 
3386 /* Perform a multiplication and return an rtx for the result.
3387    MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3388    TARGET is a suggestion for where to store the result (an rtx).
3389 
3390    We check specially for a constant integer as OP1.
3391    If you want this check for OP0 as well, then before calling
3392    you should swap the two operands if OP0 would be constant.  */
3393 
3394 rtx
3395 expand_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
3396 	     int unsignedp, bool no_libcall)
3397 {
3398   enum mult_variant variant;
3399   struct algorithm algorithm;
3400   rtx scalar_op1;
3401   int max_cost;
3402   bool speed = optimize_insn_for_speed_p ();
3403   bool do_trapv = flag_trapv && SCALAR_INT_MODE_P (mode) && !unsignedp;
3404 
3405   if (CONSTANT_P (op0))
3406     std::swap (op0, op1);
3407 
3408   /* For vectors, there are several simplifications that can be made if
3409      all elements of the vector constant are identical.  */
3410   scalar_op1 = unwrap_const_vec_duplicate (op1);
3411 
3412   if (INTEGRAL_MODE_P (mode))
3413     {
3414       rtx fake_reg;
3415       HOST_WIDE_INT coeff;
3416       bool is_neg;
3417       int mode_bitsize;
3418 
3419       if (op1 == CONST0_RTX (mode))
3420 	return op1;
3421       if (op1 == CONST1_RTX (mode))
3422 	return op0;
3423       if (op1 == CONSTM1_RTX (mode))
3424 	return expand_unop (mode, do_trapv ? negv_optab : neg_optab,
3425 			    op0, target, 0);
3426 
3427       if (do_trapv)
3428 	goto skip_synth;
3429 
3430       /* If mode is an integer vector mode, check whether the backend
3431 	 supports vector lshift (by scalar or vector) at all.  If not, we
3432 	 can't use a synthesized multiply.  */
3433       if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
3434 	  && optab_handler (vashl_optab, mode) == CODE_FOR_nothing
3435 	  && optab_handler (ashl_optab, mode) == CODE_FOR_nothing)
3436 	goto skip_synth;
3437 
3438       /* These are the operations that are potentially turned into
3439 	 a sequence of shifts and additions.  */
3440       mode_bitsize = GET_MODE_UNIT_BITSIZE (mode);
3441 
3442       /* synth_mult does an `unsigned int' multiply.  As long as the mode is
3443 	 less than or equal in size to `unsigned int' this doesn't matter.
3444 	 If the mode is larger than `unsigned int', then synth_mult works
3445 	 only if the constant value exactly fits in an `unsigned int' without
3446 	 any truncation.  This means that multiplying by negative values does
3447 	 not work; results are off by 2^32 on a 32 bit machine.  */
3448       if (CONST_INT_P (scalar_op1))
3449 	{
3450 	  coeff = INTVAL (scalar_op1);
3451 	  is_neg = coeff < 0;
3452 	}
3453 #if TARGET_SUPPORTS_WIDE_INT
3454       else if (CONST_WIDE_INT_P (scalar_op1))
3455 #else
3456       else if (CONST_DOUBLE_AS_INT_P (scalar_op1))
3457 #endif
3458 	{
3459 	  int shift = wi::exact_log2 (rtx_mode_t (scalar_op1, mode));
3460 	  /* Perfect power of 2 (other than 1, which is handled above).  */
3461 	  if (shift > 0)
3462 	    return expand_shift (LSHIFT_EXPR, mode, op0,
3463 				 shift, target, unsignedp);
3464 	  else
3465 	    goto skip_synth;
3466 	}
3467       else
3468 	goto skip_synth;
3469 
3470       /* We used to test optimize here, on the grounds that it's better to
3471 	 produce a smaller program when -O is not used.  But this causes
3472 	 such a terrible slowdown sometimes that it seems better to always
3473 	 use synth_mult.  */
3474 
3475       /* Special case powers of two.  */
3476       if (EXACT_POWER_OF_2_OR_ZERO_P (coeff)
3477 	  && !(is_neg && mode_bitsize > HOST_BITS_PER_WIDE_INT))
3478 	return expand_shift (LSHIFT_EXPR, mode, op0,
3479 			     floor_log2 (coeff), target, unsignedp);
3480 
3481       fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
3482 
3483       /* Attempt to handle multiplication of DImode values by negative
3484 	 coefficients, by performing the multiplication by a positive
3485 	 multiplier and then inverting the result.  */
3486       if (is_neg && mode_bitsize > HOST_BITS_PER_WIDE_INT)
3487 	{
3488 	  /* It's safe to use -coeff even for INT_MIN, as the
3489 	     result is interpreted as an unsigned coefficient.
3490 	     Exclude the cost of op0 from max_cost to match the cost
3491 	     calculation of synth_mult.  */
3492 	  coeff = -(unsigned HOST_WIDE_INT) coeff;
3493 	  max_cost = (set_src_cost (gen_rtx_MULT (mode, fake_reg, op1),
3494 				    mode, speed)
3495 		      - neg_cost (speed, mode));
3496 	  if (max_cost <= 0)
3497 	    goto skip_synth;
3498 
3499 	  /* Special case powers of two.  */
3500 	  if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3501 	    {
3502 	      rtx temp = expand_shift (LSHIFT_EXPR, mode, op0,
3503 				       floor_log2 (coeff), target, unsignedp);
3504 	      return expand_unop (mode, neg_optab, temp, target, 0);
3505 	    }
3506 
3507 	  if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3508 				   max_cost))
3509 	    {
3510 	      rtx temp = expand_mult_const (mode, op0, coeff, NULL_RTX,
3511 					    &algorithm, variant);
3512 	      return expand_unop (mode, neg_optab, temp, target, 0);
3513 	    }
3514 	  goto skip_synth;
3515 	}
3516 
3517       /* Exclude the cost of op0 from max_cost to match the cost
3518 	 calculation of synth_mult.  */
3519       max_cost = set_src_cost (gen_rtx_MULT (mode, fake_reg, op1), mode, speed);
3520       if (choose_mult_variant (mode, coeff, &algorithm, &variant, max_cost))
3521 	return expand_mult_const (mode, op0, coeff, target,
3522 				  &algorithm, variant);
3523     }
3524  skip_synth:
3525 
3526   /* Expand x*2.0 as x+x.  */
3527   if (CONST_DOUBLE_AS_FLOAT_P (scalar_op1)
3528       && real_equal (CONST_DOUBLE_REAL_VALUE (scalar_op1), &dconst2))
3529     {
3530       op0 = force_reg (GET_MODE (op0), op0);
3531       return expand_binop (mode, add_optab, op0, op0,
3532 			   target, unsignedp,
3533 			   no_libcall ? OPTAB_WIDEN : OPTAB_LIB_WIDEN);
3534     }
3535 
3536   /* This used to use umul_optab if unsigned, but for non-widening multiply
3537      there is no difference between signed and unsigned.  */
3538   op0 = expand_binop (mode, do_trapv ? smulv_optab : smul_optab,
3539 		      op0, op1, target, unsignedp,
3540 		      no_libcall ? OPTAB_WIDEN : OPTAB_LIB_WIDEN);
3541   gcc_assert (op0 || no_libcall);
3542   return op0;
3543 }
3544 
3545 /* Return a cost estimate for multiplying a register by the given
3546    COEFFicient in the given MODE and SPEED.  */
3547 
3548 int
3549 mult_by_coeff_cost (HOST_WIDE_INT coeff, machine_mode mode, bool speed)
3550 {
3551   int max_cost;
3552   struct algorithm algorithm;
3553   enum mult_variant variant;
3554 
3555   rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
3556   max_cost = set_src_cost (gen_rtx_MULT (mode, fake_reg, fake_reg),
3557 			   mode, speed);
3558   if (choose_mult_variant (mode, coeff, &algorithm, &variant, max_cost))
3559     return algorithm.cost.cost;
3560   else
3561     return max_cost;
3562 }
3563 
3564 /* Perform a widening multiplication and return an rtx for the result.
3565    MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3566    TARGET is a suggestion for where to store the result (an rtx).
3567    THIS_OPTAB is the optab we should use, it must be either umul_widen_optab
3568    or smul_widen_optab.
3569 
3570    We check specially for a constant integer as OP1, comparing the
3571    cost of a widening multiply against the cost of a sequence of shifts
3572    and adds.  */
3573 
3574 rtx
3575 expand_widening_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
3576 		      int unsignedp, optab this_optab)
3577 {
3578   bool speed = optimize_insn_for_speed_p ();
3579   rtx cop1;
3580 
3581   if (CONST_INT_P (op1)
3582       && GET_MODE (op0) != VOIDmode
3583       && (cop1 = convert_modes (mode, GET_MODE (op0), op1,
3584 				this_optab == umul_widen_optab))
3585       && CONST_INT_P (cop1)
3586       && (INTVAL (cop1) >= 0
3587 	  || HWI_COMPUTABLE_MODE_P (mode)))
3588     {
3589       HOST_WIDE_INT coeff = INTVAL (cop1);
3590       int max_cost;
3591       enum mult_variant variant;
3592       struct algorithm algorithm;
3593 
3594       if (coeff == 0)
3595 	return CONST0_RTX (mode);
3596 
3597       /* Special case powers of two.  */
3598       if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3599 	{
3600 	  op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
3601 	  return expand_shift (LSHIFT_EXPR, mode, op0,
3602 			       floor_log2 (coeff), target, unsignedp);
3603 	}
3604 
3605       /* Exclude the cost of op0 from max_cost to match the cost
3606 	 calculation of synth_mult.  */
3607       max_cost = mul_widen_cost (speed, mode);
3608       if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3609 			       max_cost))
3610 	{
3611 	  op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
3612 	  return expand_mult_const (mode, op0, coeff, target,
3613 				    &algorithm, variant);
3614 	}
3615     }
3616   return expand_binop (mode, this_optab, op0, op1, target,
3617 		       unsignedp, OPTAB_LIB_WIDEN);
3618 }
3619 
3620 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
3621    replace division by D, and put the least significant N bits of the result
3622    in *MULTIPLIER_PTR and return the most significant bit.
3623 
3624    The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
3625    needed precision is in PRECISION (should be <= N).
3626 
3627    PRECISION should be as small as possible so this function can choose
3628    the multiplier more freely.
3629 
3630    The rounded-up logarithm of D is placed in *LGUP_PTR.  A shift count that
3631    is to be used for a final right shift is placed in *POST_SHIFT_PTR.
3632 
3633    Using this function, x/D will be equal to (x * m) >> (N + *POST_SHIFT_PTR),
3634    where m is the full N + 1 bit multiplier.  */
3635 
3636 unsigned HOST_WIDE_INT
3637 choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
3638 		   unsigned HOST_WIDE_INT *multiplier_ptr,
3639 		   int *post_shift_ptr, int *lgup_ptr)
3640 {
3641   int lgup, post_shift;
3642   int pow, pow2;
3643 
3644   /* lgup = ceil(log2(divisor)); */
3645   lgup = ceil_log2 (d);
3646 
3647   gcc_assert (lgup <= n);
3648 
3649   pow = n + lgup;
3650   pow2 = n + lgup - precision;
3651 
3652   /* mlow = 2^(N + lgup)/d */
3653   wide_int val = wi::set_bit_in_zero (pow, HOST_BITS_PER_DOUBLE_INT);
3654   wide_int mlow = wi::udiv_trunc (val, d);
3655 
3656   /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
3657   val |= wi::set_bit_in_zero (pow2, HOST_BITS_PER_DOUBLE_INT);
3658   wide_int mhigh = wi::udiv_trunc (val, d);
3659 
3660   /* If precision == N, then mlow, mhigh exceed 2^N
3661      (but they do not exceed 2^(N+1)).  */
3662 
3663   /* Reduce to lowest terms.  */
3664   for (post_shift = lgup; post_shift > 0; post_shift--)
3665     {
3666       unsigned HOST_WIDE_INT ml_lo = wi::extract_uhwi (mlow, 1,
3667 						       HOST_BITS_PER_WIDE_INT);
3668       unsigned HOST_WIDE_INT mh_lo = wi::extract_uhwi (mhigh, 1,
3669 						       HOST_BITS_PER_WIDE_INT);
3670       if (ml_lo >= mh_lo)
3671 	break;
3672 
3673       mlow = wi::uhwi (ml_lo, HOST_BITS_PER_DOUBLE_INT);
3674       mhigh = wi::uhwi (mh_lo, HOST_BITS_PER_DOUBLE_INT);
3675     }
3676 
3677   *post_shift_ptr = post_shift;
3678   *lgup_ptr = lgup;
3679   if (n < HOST_BITS_PER_WIDE_INT)
3680     {
3681       unsigned HOST_WIDE_INT mask = (HOST_WIDE_INT_1U << n) - 1;
3682       *multiplier_ptr = mhigh.to_uhwi () & mask;
3683       return mhigh.to_uhwi () >= mask;
3684     }
3685   else
3686     {
3687       *multiplier_ptr = mhigh.to_uhwi ();
3688       return wi::extract_uhwi (mhigh, HOST_BITS_PER_WIDE_INT, 1);
3689     }
3690 }
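
/* A self-contained sketch of how the values computed above are used, for
   the common case N = PRECISION = 32 and D = 7 (the constants below were
   derived from the formulas above and are illustrative only, as is the
   function name): choose_multiplier returns 1, meaning the multiplier
   needs N + 1 bits, with *MULTIPLIER_PTR = 0x24924925 and
   *POST_SHIFT_PTR = 3, and the caller recombines the truncated multiplier
   with an add-and-halve step as in expand_divmod further below.  */
#if 0
static unsigned int
example_udiv_by_7 (unsigned int x)
{
  unsigned int t1 = ((unsigned long long) x * 0x24924925u) >> 32;
  unsigned int t2 = (x - t1) >> 1;
  return (t1 + t2) >> 2;	/* post_shift - 1 */
}
#endif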
3691 
3692 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
3693    congruent to 1 (mod 2**N).  */
3694 
3695 static unsigned HOST_WIDE_INT
3696 invert_mod2n (unsigned HOST_WIDE_INT x, int n)
3697 {
3698   /* Solve x*y == 1 (mod 2^n), where x is odd.  Return y.  */
3699 
3700   /* The algorithm notes that the choice y = x satisfies
3701      x*y == 1 mod 2^3, since x is assumed odd.
3702      Each iteration doubles the number of bits of significance in y.  */
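  /* Illustrative example: invert_mod2n (7, 32) yields 0xb6db6db7, and
     indeed 7 * 0xb6db6db7 == 0x500000001 == 1 (mod 2**32).  */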
3703 
3704   unsigned HOST_WIDE_INT mask;
3705   unsigned HOST_WIDE_INT y = x;
3706   int nbit = 3;
3707 
3708   mask = (n == HOST_BITS_PER_WIDE_INT
3709 	  ? HOST_WIDE_INT_M1U
3710 	  : (HOST_WIDE_INT_1U << n) - 1);
3711 
3712   while (nbit < n)
3713     {
3714       y = y * (2 - x*y) & mask;		/* Modulo 2^N */
3715       nbit *= 2;
3716     }
3717   return y;
3718 }
3719 
3720 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
3721    flavor of OP0 and OP1.  ADJ_OPERAND is already the high half of the
3722    product OP0 x OP1.  If UNSIGNEDP is nonzero, adjust the signed product
3723    to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
3724    become signed.
3725 
3726    The result is put in TARGET if that is convenient.
3727 
3728    MODE is the mode of operation.  */
3729 
3730 rtx
3731 expand_mult_highpart_adjust (scalar_int_mode mode, rtx adj_operand, rtx op0,
3732 			     rtx op1, rtx target, int unsignedp)
3733 {
3734   rtx tem;
3735   enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
3736 
3737   tem = expand_shift (RSHIFT_EXPR, mode, op0,
3738 		      GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
3739   tem = expand_and (mode, tem, op1, NULL_RTX);
3740   adj_operand
3741     = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3742 		     adj_operand);
3743 
3744   tem = expand_shift (RSHIFT_EXPR, mode, op1,
3745 		      GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
3746   tem = expand_and (mode, tem, op0, NULL_RTX);
3747   target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3748 			  target);
3749 
3750   return target;
3751 }
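
/* A host-side sketch of the same adjustment for a 16-bit multiply in the
   UNSIGNEDP == 0 direction (the function name is illustrative; the code
   assumes 32-bit int and GCC's usual modulo-2**16 conversion to short):
   the signed high part is the unsigned high part minus OP1 when OP0 is
   negative and minus OP0 when OP1 is negative, taken in the 16-bit mode.  */
#if 0
static short
example_smulhi16 (short a, short b)
{
  unsigned int prod = (unsigned int) (unsigned short) a
		      * (unsigned int) (unsigned short) b;
  int hi = (int) (prod >> 16);
  if (a < 0)
    hi -= (unsigned short) b;
  if (b < 0)
    hi -= (unsigned short) a;
  return (short) hi;		/* the signed high half of a * b */
}
#endif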
3752 
3753 /* Subroutine of expmed_mult_highpart.  Return the MODE high part of OP.  */
3754 
3755 static rtx
3756 extract_high_half (scalar_int_mode mode, rtx op)
3757 {
3758   if (mode == word_mode)
3759     return gen_highpart (mode, op);
3760 
3761   scalar_int_mode wider_mode = GET_MODE_WIDER_MODE (mode).require ();
3762 
3763   op = expand_shift (RSHIFT_EXPR, wider_mode, op,
3764 		     GET_MODE_BITSIZE (mode), 0, 1);
3765   return convert_modes (mode, wider_mode, op, 0);
3766 }
3767 
3768 /* Like expmed_mult_highpart, but only consider using a multiplication
3769    optab.  OP1 is an rtx for the constant operand.  */
3770 
3771 static rtx
3772 expmed_mult_highpart_optab (scalar_int_mode mode, rtx op0, rtx op1,
3773 			    rtx target, int unsignedp, int max_cost)
3774 {
3775   rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
3776   optab moptab;
3777   rtx tem;
3778   int size;
3779   bool speed = optimize_insn_for_speed_p ();
3780 
3781   scalar_int_mode wider_mode = GET_MODE_WIDER_MODE (mode).require ();
3782 
3783   size = GET_MODE_BITSIZE (mode);
3784 
3785   /* Firstly, try using a multiplication insn that only generates the needed
3786      high part of the product, and in the sign flavor of unsignedp.  */
3787   if (mul_highpart_cost (speed, mode) < max_cost)
3788     {
3789       moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
3790       tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3791 			  unsignedp, OPTAB_DIRECT);
3792       if (tem)
3793 	return tem;
3794     }
3795 
3796   /* Secondly, same as above, but use sign flavor opposite of unsignedp.
3797      Need to adjust the result after the multiplication.  */
3798   if (size - 1 < BITS_PER_WORD
3799       && (mul_highpart_cost (speed, mode)
3800 	  + 2 * shift_cost (speed, mode, size-1)
3801 	  + 4 * add_cost (speed, mode) < max_cost))
3802     {
3803       moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
3804       tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3805 			  unsignedp, OPTAB_DIRECT);
3806       if (tem)
3807 	/* We used the wrong signedness.  Adjust the result.  */
3808 	return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3809 					    tem, unsignedp);
3810     }
3811 
3812   /* Try widening multiplication.  */
3813   moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
3814   if (convert_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
3815       && mul_widen_cost (speed, wider_mode) < max_cost)
3816     {
3817       tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
3818 			  unsignedp, OPTAB_WIDEN);
3819       if (tem)
3820 	return extract_high_half (mode, tem);
3821     }
3822 
3823   /* Try widening the mode and performing a non-widening multiplication.  */
3824   if (optab_handler (smul_optab, wider_mode) != CODE_FOR_nothing
3825       && size - 1 < BITS_PER_WORD
3826       && (mul_cost (speed, wider_mode) + shift_cost (speed, mode, size-1)
3827 	  < max_cost))
3828     {
3829       rtx_insn *insns;
3830       rtx wop0, wop1;
3831 
3832       /* We need to widen the operands, for example to ensure the
3833 	 constant multiplier is correctly sign or zero extended.
3834 	 Use a sequence to clean up any instructions emitted by
3835 	 the conversions if things don't work out.  */
3836       start_sequence ();
3837       wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
3838       wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
3839       tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
3840 			  unsignedp, OPTAB_WIDEN);
3841       insns = get_insns ();
3842       end_sequence ();
3843 
3844       if (tem)
3845 	{
3846 	  emit_insn (insns);
3847 	  return extract_high_half (mode, tem);
3848 	}
3849     }
3850 
3851   /* Try widening multiplication of opposite signedness, and adjust.  */
3852   moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
3853   if (convert_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
3854       && size - 1 < BITS_PER_WORD
3855       && (mul_widen_cost (speed, wider_mode)
3856 	  + 2 * shift_cost (speed, mode, size-1)
3857 	  + 4 * add_cost (speed, mode) < max_cost))
3858     {
3859       tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
3860 			  NULL_RTX, ! unsignedp, OPTAB_WIDEN);
3861       if (tem != 0)
3862 	{
3863 	  tem = extract_high_half (mode, tem);
3864 	  /* We used the wrong signedness.  Adjust the result.  */
3865 	  return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3866 					      target, unsignedp);
3867 	}
3868     }
3869 
3870   return 0;
3871 }
3872 
3873 /* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
3874    putting the high half of the result in TARGET if that is convenient,
3875    and return where the result is.  If the operation cannot be performed,
3876    0 is returned.
3877 
3878    MODE is the mode of operation and result.
3879 
3880    UNSIGNEDP nonzero means unsigned multiply.
3881 
3882    MAX_COST is the total allowed cost for the expanded RTL.  */
3883 
3884 static rtx
3885 expmed_mult_highpart (scalar_int_mode mode, rtx op0, rtx op1,
3886 		      rtx target, int unsignedp, int max_cost)
3887 {
3888   unsigned HOST_WIDE_INT cnst1;
3889   int extra_cost;
3890   bool sign_adjust = false;
3891   enum mult_variant variant;
3892   struct algorithm alg;
3893   rtx tem;
3894   bool speed = optimize_insn_for_speed_p ();
3895 
3896   /* We can't support modes wider than HOST_BITS_PER_WIDE_INT.  */
3897   gcc_assert (HWI_COMPUTABLE_MODE_P (mode));
3898 
3899   cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);
3900 
3901   /* We can't optimize modes wider than BITS_PER_WORD.
3902      ??? We might be able to perform double-word arithmetic if
3903      mode == word_mode, however all the cost calculations in
3904      synth_mult etc. assume single-word operations.  */
3905   scalar_int_mode wider_mode = GET_MODE_WIDER_MODE (mode).require ();
3906   if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
3907     return expmed_mult_highpart_optab (mode, op0, op1, target,
3908 				       unsignedp, max_cost);
3909 
3910   extra_cost = shift_cost (speed, mode, GET_MODE_BITSIZE (mode) - 1);
3911 
3912   /* Check whether we try to multiply by a negative constant.  */
3913   if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
3914     {
3915       sign_adjust = true;
3916       extra_cost += add_cost (speed, mode);
3917     }
3918 
3919   /* See whether shift/add multiplication is cheap enough.  */
3920   if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
3921 			   max_cost - extra_cost))
3922     {
3923       /* See whether the specialized multiplication optabs are
3924 	 cheaper than the shift/add version.  */
3925       tem = expmed_mult_highpart_optab (mode, op0, op1, target, unsignedp,
3926 					alg.cost.cost + extra_cost);
3927       if (tem)
3928 	return tem;
3929 
3930       tem = convert_to_mode (wider_mode, op0, unsignedp);
3931       tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
3932       tem = extract_high_half (mode, tem);
3933 
3934       /* Adjust result for signedness.  */
3935       if (sign_adjust)
3936 	tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);
3937 
3938       return tem;
3939     }
3940   return expmed_mult_highpart_optab (mode, op0, op1, target,
3941 				     unsignedp, max_cost);
3942 }
3943 
3944 
3945 /* Expand signed modulus of OP0 by a power of two D in mode MODE.  */
3946 
3947 static rtx
3948 expand_smod_pow2 (scalar_int_mode mode, rtx op0, HOST_WIDE_INT d)
3949 {
3950   rtx result, temp, shift;
3951   rtx_code_label *label;
3952   int logd;
3953   int prec = GET_MODE_PRECISION (mode);
3954 
3955   logd = floor_log2 (d);
3956   result = gen_reg_rtx (mode);
3957 
3958   /* Avoid conditional branches when they're expensive.  */
3959   if (BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2
3960       && optimize_insn_for_speed_p ())
3961     {
3962       rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
3963 				      mode, 0, -1);
3964       if (signmask)
3965 	{
3966 	  HOST_WIDE_INT masklow = (HOST_WIDE_INT_1 << logd) - 1;
3967 	  signmask = force_reg (mode, signmask);
3968 	  shift = gen_int_shift_amount (mode, GET_MODE_BITSIZE (mode) - logd);
3969 
3970 	  /* Use the rtx_cost of a LSHIFTRT instruction to determine
3971 	     which instruction sequence to use.  If logical right shifts
3972 	     are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
3973 	     use a LSHIFTRT, 1 ADD, 1 SUB and an AND.  */
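	  /* In C terms, for D = 8 (LOGD = 3) and signmask = x < 0 ? -1 : 0,
	     the cheap-shift form below computes
	       m = (unsigned) signmask >> (bits - 3);     [7 if x < 0, else 0]
	       r = ((x + m) & 7) - m;
	     while the XOR form computes
	       r = ((((x ^ signmask) - signmask) & 7) ^ signmask) - signmask;
	     both give the truncating remainder x % 8.  This is an
	     illustrative sketch; the RTL emitted below is authoritative.  */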
3974 
3975 	  temp = gen_rtx_LSHIFTRT (mode, result, shift);
3976 	  if (optab_handler (lshr_optab, mode) == CODE_FOR_nothing
3977 	      || (set_src_cost (temp, mode, optimize_insn_for_speed_p ())
3978 		  > COSTS_N_INSNS (2)))
3979 	    {
3980 	      temp = expand_binop (mode, xor_optab, op0, signmask,
3981 				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
3982 	      temp = expand_binop (mode, sub_optab, temp, signmask,
3983 				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
3984 	      temp = expand_binop (mode, and_optab, temp,
3985 				   gen_int_mode (masklow, mode),
3986 				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
3987 	      temp = expand_binop (mode, xor_optab, temp, signmask,
3988 				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
3989 	      temp = expand_binop (mode, sub_optab, temp, signmask,
3990 				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
3991 	    }
3992 	  else
3993 	    {
3994 	      signmask = expand_binop (mode, lshr_optab, signmask, shift,
3995 				       NULL_RTX, 1, OPTAB_LIB_WIDEN);
3996 	      signmask = force_reg (mode, signmask);
3997 
3998 	      temp = expand_binop (mode, add_optab, op0, signmask,
3999 				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
4000 	      temp = expand_binop (mode, and_optab, temp,
4001 				   gen_int_mode (masklow, mode),
4002 				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
4003 	      temp = expand_binop (mode, sub_optab, temp, signmask,
4004 				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
4005 	    }
4006 	  return temp;
4007 	}
4008     }
4009 
4010   /* Mask contains the mode's signbit and the significant bits of the
4011      modulus.  By including the signbit in the operation, many targets
4012      can avoid an explicit compare operation in the following comparison
4013      against zero.  */
4014   wide_int mask = wi::mask (logd, false, prec);
4015   mask = wi::set_bit (mask, prec - 1);
4016 
4017   temp = expand_binop (mode, and_optab, op0,
4018 		       immed_wide_int_const (mask, mode),
4019 		       result, 1, OPTAB_LIB_WIDEN);
4020   if (temp != result)
4021     emit_move_insn (result, temp);
4022 
4023   label = gen_label_rtx ();
4024   do_cmp_and_jump (result, const0_rtx, GE, mode, label);
4025 
4026   temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
4027 		       0, OPTAB_LIB_WIDEN);
4028 
4029   mask = wi::mask (logd, true, prec);
4030   temp = expand_binop (mode, ior_optab, temp,
4031 		       immed_wide_int_const (mask, mode),
4032 		       result, 1, OPTAB_LIB_WIDEN);
4033   temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
4034 		       0, OPTAB_LIB_WIDEN);
4035   if (temp != result)
4036     emit_move_insn (result, temp);
4037   emit_label (label);
4038   return result;
4039 }
4040 
4041 /* Expand signed division of OP0 by a power of two D in mode MODE.
4042    This routine is only called for positive values of D.  */
4043 
4044 static rtx
4045 expand_sdiv_pow2 (scalar_int_mode mode, rtx op0, HOST_WIDE_INT d)
4046 {
4047   rtx temp;
4048   rtx_code_label *label;
4049   int logd;
4050 
4051   logd = floor_log2 (d);
4052 
4053   if (d == 2
4054       && BRANCH_COST (optimize_insn_for_speed_p (),
4055 		      false) >= 1)
4056     {
4057       temp = gen_reg_rtx (mode);
4058       temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
4059       temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
4060 			   0, OPTAB_LIB_WIDEN);
4061       return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
4062     }
4063 
4064   if (HAVE_conditional_move
4065       && BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2)
4066     {
4067       rtx temp2;
4068 
4069       start_sequence ();
4070       temp2 = copy_to_mode_reg (mode, op0);
4071       temp = expand_binop (mode, add_optab, temp2, gen_int_mode (d - 1, mode),
4072 			   NULL_RTX, 0, OPTAB_LIB_WIDEN);
4073       temp = force_reg (mode, temp);
4074 
4075       /* Construct "temp2 = (temp2 < 0) ? temp : temp2".  */
4076       temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
4077 				     mode, temp, temp2, mode, 0);
4078       if (temp2)
4079 	{
4080 	  rtx_insn *seq = get_insns ();
4081 	  end_sequence ();
4082 	  emit_insn (seq);
4083 	  return expand_shift (RSHIFT_EXPR, mode, temp2, logd, NULL_RTX, 0);
4084 	}
4085       end_sequence ();
4086     }
4087 
4088   if (BRANCH_COST (optimize_insn_for_speed_p (),
4089 		   false) >= 2)
4090     {
4091       int ushift = GET_MODE_BITSIZE (mode) - logd;
4092 
4093       temp = gen_reg_rtx (mode);
4094       temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
4095       if (GET_MODE_BITSIZE (mode) >= BITS_PER_WORD
4096 	  || shift_cost (optimize_insn_for_speed_p (), mode, ushift)
4097 	     > COSTS_N_INSNS (1))
4098 	temp = expand_binop (mode, and_optab, temp, gen_int_mode (d - 1, mode),
4099 			     NULL_RTX, 0, OPTAB_LIB_WIDEN);
4100       else
4101 	temp = expand_shift (RSHIFT_EXPR, mode, temp,
4102 			     ushift, NULL_RTX, 1);
4103       temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
4104 			   0, OPTAB_LIB_WIDEN);
4105       return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
4106     }
4107 
4108   label = gen_label_rtx ();
4109   temp = copy_to_mode_reg (mode, op0);
4110   do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
4111   expand_inc (temp, gen_int_mode (d - 1, mode));
4112   emit_label (label);
4113   return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
4114 }
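
/* A host-side sketch of the branchless sequence above for D = 8 and
   32-bit int (illustrative only; like the RTL sequence it relies on an
   arithmetic right shift of negative values).  */
#if 0
static int
example_sdiv_by_8 (int x)
{
  int adj = x >> 31;				/* -1 if x < 0, else 0 */
  adj = (int) ((unsigned int) adj >> 29);	/*  7 if x < 0, else 0 */
  return (x + adj) >> 3;			/* truncating x / 8 */
}
#endif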
4115 
4116 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
4117    if that is convenient, and returning where the result is.
4118    You may request either the quotient or the remainder as the result;
4119    specify REM_FLAG nonzero to get the remainder.
4120 
4121    CODE is the expression code for which kind of division this is;
4122    it controls how rounding is done.  MODE is the machine mode to use.
4123    UNSIGNEDP nonzero means do unsigned division.  */
4124 
4125 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
4126    and then correct it by or'ing in missing high bits
4127    if result of ANDI is nonzero.
4128    For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
4129    This could optimize to a bfexts instruction.
4130    But C doesn't use these operations, so their optimizations are
4131    left for later.  */
4132 /* ??? For modulo, we don't actually need the highpart of the first product,
4133    the low part will do nicely.  And for small divisors, the second multiply
4134    can also be a low-part only multiply or even be completely left out.
4135    E.g. to calculate the remainder of a division by 3 with a 32 bit
4136    multiply, multiply with 0x55555556 and extract the upper two bits;
4137    the result is exact for inputs up to 0x1fffffff.
4138    The input range can be reduced by using cross-sum rules.
4139    For odd divisors >= 3, the following table gives right shift counts
4140    so that if a number is shifted by an integer multiple of the given
4141    amount, the remainder stays the same:
4142    2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
4143    14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
4144    0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
4145    20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
4146    0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
4147 
4148    Cross-sum rules for even numbers can be derived by leaving as many bits
4149    to the right alone as the divisor has zeros to the right.
4150    E.g. if x is an unsigned 32 bit number:
4151    (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
4152    */
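
/* A host-side sketch of the remainder-by-3 trick above, assuming 32-bit
   unsigned int (illustrative only): the upper two bits of the truncated
   product x * 0x55555556 give x % 3 exactly for x <= 0x1fffffff.  */
#if 0
static unsigned int
example_urem3_small (unsigned int x)
{
  return (x * 0x55555556u) >> 30;	/* exact for x <= 0x1fffffff */
}
#endif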
4153 
4154 rtx
4155 expand_divmod (int rem_flag, enum tree_code code, machine_mode mode,
4156 	       rtx op0, rtx op1, rtx target, int unsignedp)
4157 {
4158   machine_mode compute_mode;
4159   rtx tquotient;
4160   rtx quotient = 0, remainder = 0;
4161   rtx_insn *last;
4162   rtx_insn *insn;
4163   optab optab1, optab2;
4164   int op1_is_constant, op1_is_pow2 = 0;
4165   int max_cost, extra_cost;
4166   static HOST_WIDE_INT last_div_const = 0;
4167   bool speed = optimize_insn_for_speed_p ();
4168 
4169   op1_is_constant = CONST_INT_P (op1);
4170   if (op1_is_constant)
4171     {
4172       wide_int ext_op1 = rtx_mode_t (op1, mode);
4173       op1_is_pow2 = (wi::popcount (ext_op1) == 1
4174 		     || (! unsignedp
4175 			 && wi::popcount (wi::neg (ext_op1)) == 1));
4176     }
4177 
4178   /*
4179      This is the structure of expand_divmod:
4180 
4181      First comes code to fix up the operands so we can perform the operations
4182      correctly and efficiently.
4183 
4184      Second comes a switch statement with code specific for each rounding mode.
4185      For some special operands this code emits all RTL for the desired
4186      operation; in other cases it generates only a quotient and stores it in
4187      QUOTIENT.  The case for trunc division/remainder might leave quotient = 0,
4188      to indicate that it has not done anything.
4189 
4190      Last comes code that finishes the operation.  If QUOTIENT is set and
4191      REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1.  If
4192      QUOTIENT is not set, it is computed using trunc rounding.
4193 
4194      We try to generate special code for division and remainder when OP1 is a
4195      constant.  If |OP1| = 2**n we can use shifts and some other fast
4196      operations.  For other values of OP1, we compute a carefully selected
4197      fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
4198      by m.
4199 
4200      In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
4201      half of the product.  Different strategies for generating the product are
4202      implemented in expmed_mult_highpart.
4203 
4204      If what we actually want is the remainder, we generate that by another
4205      by-constant multiplication and a subtraction.  */
4206 
4207   /* We shouldn't be called with OP1 == const1_rtx, but some of the
4208      code below will malfunction if we are, so check here and handle
4209      the special case if so.  */
4210   if (op1 == const1_rtx)
4211     return rem_flag ? const0_rtx : op0;
4212 
4213   /* When dividing by -1, we could get an overflow.
4214      negv_optab can handle overflows.  */
4215   if (! unsignedp && op1 == constm1_rtx)
4216     {
4217       if (rem_flag)
4218 	return const0_rtx;
4219       return expand_unop (mode, flag_trapv && GET_MODE_CLASS (mode) == MODE_INT
4220 			  ? negv_optab : neg_optab, op0, target, 0);
4221     }
4222 
4223   if (target
4224       /* Don't use the function value register as a target
4225 	 since we have to read it as well as write it,
4226 	 and function-inlining gets confused by this.  */
4227       && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
4228 	  /* Don't clobber an operand while doing a multi-step calculation.  */
4229 	  || ((rem_flag || op1_is_constant)
4230 	      && (reg_mentioned_p (target, op0)
4231 		  || (MEM_P (op0) && MEM_P (target))))
4232 	  || reg_mentioned_p (target, op1)
4233 	  || (MEM_P (op1) && MEM_P (target))))
4234     target = 0;
4235 
4236   /* Get the mode in which to perform this computation.  Normally it will
4237      be MODE, but sometimes we can't do the desired operation in MODE.
4238      If so, pick a wider mode in which we can do the operation.  Convert
4239      to that mode at the start to avoid repeated conversions.
4240 
4241      First see what operations we need.  These depend on the expression
4242      we are evaluating.  (We assume that divxx3 insns exist under the
4243      same conditions as modxx3 insns and that these insns don't normally
4244      fail.  If these assumptions are not correct, we may generate less
4245      efficient code in some cases.)
4246 
4247      Then see if we find a mode in which we can open-code that operation
4248      (either a division, modulus, or shift).  Finally, check for the smallest
4249      mode for which we can do the operation with a library call.  */
4250 
4251   /* We might want to refine this now that we have division-by-constant
4252      optimization.  Since expmed_mult_highpart tries so many variants, it is
4253      not straightforward to generalize this.  Maybe we should make an array
4254      of possible modes in init_expmed?  Save this for GCC 2.7.  */
4255 
4256   optab1 = (op1_is_pow2
4257 	    ? (unsignedp ? lshr_optab : ashr_optab)
4258 	    : (unsignedp ? udiv_optab : sdiv_optab));
4259   optab2 = (op1_is_pow2 ? optab1
4260 	    : (unsignedp ? udivmod_optab : sdivmod_optab));
4261 
4262   FOR_EACH_MODE_FROM (compute_mode, mode)
4263     if (optab_handler (optab1, compute_mode) != CODE_FOR_nothing
4264 	|| optab_handler (optab2, compute_mode) != CODE_FOR_nothing)
4265       break;
4266 
4267   if (compute_mode == VOIDmode)
4268     FOR_EACH_MODE_FROM (compute_mode, mode)
4269       if (optab_libfunc (optab1, compute_mode)
4270 	  || optab_libfunc (optab2, compute_mode))
4271 	break;
4272 
4273   /* If we still couldn't find a mode, use MODE, but expand_binop will
4274      probably die.  */
4275   if (compute_mode == VOIDmode)
4276     compute_mode = mode;
4277 
4278   if (target && GET_MODE (target) == compute_mode)
4279     tquotient = target;
4280   else
4281     tquotient = gen_reg_rtx (compute_mode);
4282 
4283 #if 0
4284   /* It should be possible to restrict the precision to GET_MODE_BITSIZE
4285      (mode), and thereby get better code when OP1 is a constant.  Do that
4286      later.  It will require going over all usages of SIZE below.  */
4287   size = GET_MODE_BITSIZE (mode);
4288 #endif
4289 
4290   /* Only deduct something for a REM if the last divide done was
4291      for a different constant.   Then set the constant of the last
4292      divide.  */
4293   max_cost = (unsignedp
4294 	      ? udiv_cost (speed, compute_mode)
4295 	      : sdiv_cost (speed, compute_mode));
4296   if (rem_flag && ! (last_div_const != 0 && op1_is_constant
4297 		     && INTVAL (op1) == last_div_const))
4298     max_cost -= (mul_cost (speed, compute_mode)
4299 		 + add_cost (speed, compute_mode));
4300 
4301   last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
4302 
4303   /* Now convert to the best mode to use.  */
4304   if (compute_mode != mode)
4305     {
4306       op0 = convert_modes (compute_mode, mode, op0, unsignedp);
4307       op1 = convert_modes (compute_mode, mode, op1, unsignedp);
4308 
4309       /* convert_modes may have placed op1 into a register, so we
4310 	 must recompute the following.  */
4311       op1_is_constant = CONST_INT_P (op1);
4312       if (op1_is_constant)
4313 	{
4314 	  wide_int ext_op1 = rtx_mode_t (op1, compute_mode);
4315 	  op1_is_pow2 = (wi::popcount (ext_op1) == 1
4316 			 || (! unsignedp
4317 			     && wi::popcount (wi::neg (ext_op1)) == 1));
4318 	}
4319       else
4320 	op1_is_pow2 = 0;
4321     }
4322 
4323   /* If one of the operands is a volatile MEM, copy it into a register.  */
4324 
4325   if (MEM_P (op0) && MEM_VOLATILE_P (op0))
4326     op0 = force_reg (compute_mode, op0);
4327   if (MEM_P (op1) && MEM_VOLATILE_P (op1))
4328     op1 = force_reg (compute_mode, op1);
4329 
4330   /* If we need the remainder or if OP1 is constant, we need to
4331      put OP0 in a register in case it has any queued subexpressions.  */
4332   if (rem_flag || op1_is_constant)
4333     op0 = force_reg (compute_mode, op0);
4334 
4335   last = get_last_insn ();
4336 
4337   /* Promote floor rounding to trunc rounding for unsigned operations.  */
4338   if (unsignedp)
4339     {
4340       if (code == FLOOR_DIV_EXPR)
4341 	code = TRUNC_DIV_EXPR;
4342       if (code == FLOOR_MOD_EXPR)
4343 	code = TRUNC_MOD_EXPR;
4344       if (code == EXACT_DIV_EXPR && op1_is_pow2)
4345 	code = TRUNC_DIV_EXPR;
4346     }
4347 
4348   if (op1 != const0_rtx)
4349     switch (code)
4350       {
4351       case TRUNC_MOD_EXPR:
4352       case TRUNC_DIV_EXPR:
4353 	if (op1_is_constant)
4354 	  {
4355 	    scalar_int_mode int_mode = as_a <scalar_int_mode> (compute_mode);
4356 	    int size = GET_MODE_BITSIZE (int_mode);
4357 	    if (unsignedp)
4358 	      {
4359 		unsigned HOST_WIDE_INT mh, ml;
4360 		int pre_shift, post_shift;
4361 		int dummy;
4362 		wide_int wd = rtx_mode_t (op1, int_mode);
4363 		unsigned HOST_WIDE_INT d = wd.to_uhwi ();
4364 
4365 		if (wi::popcount (wd) == 1)
4366 		  {
4367 		    pre_shift = floor_log2 (d);
4368 		    if (rem_flag)
4369 		      {
4370 			unsigned HOST_WIDE_INT mask
4371 			  = (HOST_WIDE_INT_1U << pre_shift) - 1;
4372 			remainder
4373 			  = expand_binop (int_mode, and_optab, op0,
4374 					  gen_int_mode (mask, int_mode),
4375 					  remainder, 1,
4376 					  OPTAB_LIB_WIDEN);
4377 			if (remainder)
4378 			  return gen_lowpart (mode, remainder);
4379 		      }
4380 		    quotient = expand_shift (RSHIFT_EXPR, int_mode, op0,
4381 					     pre_shift, tquotient, 1);
4382 		  }
4383 		else if (size <= HOST_BITS_PER_WIDE_INT)
4384 		  {
4385 		    if (d >= (HOST_WIDE_INT_1U << (size - 1)))
4386 		      {
4387 			/* Most significant bit of divisor is set; emit an scc
4388 			   insn.  */
4389 			quotient = emit_store_flag_force (tquotient, GEU, op0, op1,
4390 							  int_mode, 1, 1);
4391 		      }
4392 		    else
4393 		      {
4394 			/* Find a suitable multiplier and right shift count
4395 			   instead of multiplying with D.  */
4396 
4397 			mh = choose_multiplier (d, size, size,
4398 						&ml, &post_shift, &dummy);
4399 
4400 			/* If the suggested multiplier is more than SIZE bits,
4401 			   we can do better for even divisors, using an
4402 			   initial right shift.  */
4403 			if (mh != 0 && (d & 1) == 0)
4404 			  {
4405 			    pre_shift = ctz_or_zero (d);
4406 			    mh = choose_multiplier (d >> pre_shift, size,
4407 						    size - pre_shift,
4408 						    &ml, &post_shift, &dummy);
4409 			    gcc_assert (!mh);
4410 			  }
4411 			else
4412 			  pre_shift = 0;
4413 
4414 			if (mh != 0)
4415 			  {
4416 			    rtx t1, t2, t3, t4;
4417 
4418 			    if (post_shift - 1 >= BITS_PER_WORD)
4419 			      goto fail1;
4420 
4421 			    extra_cost
4422 			      = (shift_cost (speed, int_mode, post_shift - 1)
4423 				 + shift_cost (speed, int_mode, 1)
4424 				 + 2 * add_cost (speed, int_mode));
4425 			    t1 = expmed_mult_highpart
4426 			      (int_mode, op0, gen_int_mode (ml, int_mode),
4427 			       NULL_RTX, 1, max_cost - extra_cost);
4428 			    if (t1 == 0)
4429 			      goto fail1;
4430 			    t2 = force_operand (gen_rtx_MINUS (int_mode,
4431 							       op0, t1),
4432 						NULL_RTX);
4433 			    t3 = expand_shift (RSHIFT_EXPR, int_mode,
4434 					       t2, 1, NULL_RTX, 1);
4435 			    t4 = force_operand (gen_rtx_PLUS (int_mode,
4436 							      t1, t3),
4437 						NULL_RTX);
4438 			    quotient = expand_shift
4439 			      (RSHIFT_EXPR, int_mode, t4,
4440 			       post_shift - 1, tquotient, 1);
4441 			  }
4442 			else
4443 			  {
4444 			    rtx t1, t2;
4445 
4446 			    if (pre_shift >= BITS_PER_WORD
4447 				|| post_shift >= BITS_PER_WORD)
4448 			      goto fail1;
4449 
4450 			    t1 = expand_shift
4451 			      (RSHIFT_EXPR, int_mode, op0,
4452 			       pre_shift, NULL_RTX, 1);
4453 			    extra_cost
4454 			      = (shift_cost (speed, int_mode, pre_shift)
4455 				 + shift_cost (speed, int_mode, post_shift));
4456 			    t2 = expmed_mult_highpart
4457 			      (int_mode, t1,
4458 			       gen_int_mode (ml, int_mode),
4459 			       NULL_RTX, 1, max_cost - extra_cost);
4460 			    if (t2 == 0)
4461 			      goto fail1;
4462 			    quotient = expand_shift
4463 			      (RSHIFT_EXPR, int_mode, t2,
4464 			       post_shift, tquotient, 1);
4465 			  }
4466 		      }
4467 		  }
4468 		else		/* Mode too wide to use the tricky code.  */
4469 		  break;
4470 
4471 		insn = get_last_insn ();
4472 		if (insn != last)
4473 		  set_dst_reg_note (insn, REG_EQUAL,
4474 				    gen_rtx_UDIV (int_mode, op0, op1),
4475 				    quotient);
4476 	      }
4477 	    else		/* TRUNC_DIV, signed */
4478 	      {
4479 		unsigned HOST_WIDE_INT ml;
4480 		int lgup, post_shift;
4481 		rtx mlr;
4482 		HOST_WIDE_INT d = INTVAL (op1);
4483 		unsigned HOST_WIDE_INT abs_d;
4484 
4485 		/* Not prepared to handle division/remainder by
4486 		   0xffffffffffffffff8000000000000000 etc.  */
4487 		if (d == HOST_WIDE_INT_MIN && size > HOST_BITS_PER_WIDE_INT)
4488 		  break;
4489 
4490 		/* Since d might be INT_MIN, we have to cast to
4491 		   unsigned HOST_WIDE_INT before negating to avoid
4492 		   undefined signed overflow.  */
4493 		abs_d = (d >= 0
4494 			 ? (unsigned HOST_WIDE_INT) d
4495 			 : - (unsigned HOST_WIDE_INT) d);
4496 
4497 		/* n rem d = n rem -d */
4498 		if (rem_flag && d < 0)
4499 		  {
4500 		    d = abs_d;
4501 		    op1 = gen_int_mode (abs_d, int_mode);
4502 		  }
4503 
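		/* Handle the easy divisors first: D == 1 is a copy,
		   D == -1 is a negation, and ABS_D == 2^(SIZE-1) (D is the
		   most negative value) can only produce a quotient of 1
		   (when OP0 == OP1) or 0, so an EQ store-flag suffices.  */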
4504 		if (d == 1)
4505 		  quotient = op0;
4506 		else if (d == -1)
4507 		  quotient = expand_unop (int_mode, neg_optab, op0,
4508 					  tquotient, 0);
4509 		else if (size <= HOST_BITS_PER_WIDE_INT
4510 			 && abs_d == HOST_WIDE_INT_1U << (size - 1))
4511 		  {
4512 		    /* This case is not handled correctly below.  */
4513 		    quotient = emit_store_flag (tquotient, EQ, op0, op1,
4514 						int_mode, 1, 1);
4515 		    if (quotient == 0)
4516 		      goto fail1;
4517 		  }
4518 		else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
4519 			 && (size <= HOST_BITS_PER_WIDE_INT || d >= 0)
4520 			 && (rem_flag
4521 			     ? smod_pow2_cheap (speed, int_mode)
4522 			     : sdiv_pow2_cheap (speed, int_mode))
4523 			 /* We assume the cheap metric holds if the
4524 			    optab has an expander for this mode.  */
4525 			 && ((optab_handler ((rem_flag ? smod_optab
4526 					      : sdiv_optab),
4527 					     int_mode)
4528 			      != CODE_FOR_nothing)
4529 			     || (optab_handler (sdivmod_optab, int_mode)
4530 				 != CODE_FOR_nothing)))
4531 		  ;
4532 		else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
4533 		  {
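		    /* |D| is a power of two, say 2^K.  A plain arithmetic
		       shift would round toward minus infinity, so
		       expand_sdiv_pow2 in effect computes
		       (OP0 + (OP0 < 0 ? 2^K - 1 : 0)) >> K, the truncating
		       quotient; expand_smod_pow2 computes the matching
		       remainder.  */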
4534 		    if (rem_flag)
4535 		      {
4536 			remainder = expand_smod_pow2 (int_mode, op0, d);
4537 			if (remainder)
4538 			  return gen_lowpart (mode, remainder);
4539 		      }
4540 
4541 		    if (sdiv_pow2_cheap (speed, int_mode)
4542 			&& ((optab_handler (sdiv_optab, int_mode)
4543 			     != CODE_FOR_nothing)
4544 			    || (optab_handler (sdivmod_optab, int_mode)
4545 				!= CODE_FOR_nothing)))
4546 		      quotient = expand_divmod (0, TRUNC_DIV_EXPR,
4547 						int_mode, op0,
4548 						gen_int_mode (abs_d,
4549 							      int_mode),
4550 						NULL_RTX, 0);
4551 		    else
4552 		      quotient = expand_sdiv_pow2 (int_mode, op0, abs_d);
4553 
4554 		    /* We have computed OP0 / abs(OP1).  If OP1 is negative,
4555 		       negate the quotient.  */
4556 		    if (d < 0)
4557 		      {
4558 			insn = get_last_insn ();
4559 			if (insn != last
4560 			    && abs_d < (HOST_WIDE_INT_1U
4561 					<< (HOST_BITS_PER_WIDE_INT - 1)))
4562 			  set_dst_reg_note (insn, REG_EQUAL,
4563 					    gen_rtx_DIV (int_mode, op0,
4564 							 gen_int_mode
4565 							   (abs_d,
4566 							    int_mode)),
4567 					    quotient);
4568 
4569 			quotient = expand_unop (int_mode, neg_optab,
4570 						quotient, quotient, 0);
4571 		      }
4572 		  }
4573 		else if (size <= HOST_BITS_PER_WIDE_INT)
4574 		  {
4575 		    choose_multiplier (abs_d, size, size - 1,
4576 				       &ml, &post_shift, &lgup);
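		    /* Signed multiply-by-reciprocal; SRA denotes an
		       arithmetic right shift and *h a signed highpart
		       multiply.  If ML fits in SIZE - 1 bits,
		       Q = SRA (OP0 *h ML, POST_SHIFT) - SRA (OP0, SIZE - 1);
		       subtracting the sign word (0 or -1) adds 1 for
		       negative OP0, giving truncation toward zero.  If ML
		       does not fit, it is used as ML - 2^SIZE and OP0 is
		       added back after the highpart multiply.  For D < 0
		       the final subtraction is reversed, negating the
		       quotient.  */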
4577 		    if (ml < HOST_WIDE_INT_1U << (size - 1))
4578 		      {
4579 			rtx t1, t2, t3;
4580 
4581 			if (post_shift >= BITS_PER_WORD
4582 			    || size - 1 >= BITS_PER_WORD)
4583 			  goto fail1;
4584 
4585 			extra_cost = (shift_cost (speed, int_mode, post_shift)
4586 				      + shift_cost (speed, int_mode, size - 1)
4587 				      + add_cost (speed, int_mode));
4588 			t1 = expmed_mult_highpart
4589 			  (int_mode, op0, gen_int_mode (ml, int_mode),
4590 			   NULL_RTX, 0, max_cost - extra_cost);
4591 			if (t1 == 0)
4592 			  goto fail1;
4593 			t2 = expand_shift
4594 			  (RSHIFT_EXPR, int_mode, t1,
4595 			   post_shift, NULL_RTX, 0);
4596 			t3 = expand_shift
4597 			  (RSHIFT_EXPR, int_mode, op0,
4598 			   size - 1, NULL_RTX, 0);
4599 			if (d < 0)
4600 			  quotient
4601 			    = force_operand (gen_rtx_MINUS (int_mode, t3, t2),
4602 					     tquotient);
4603 			else
4604 			  quotient
4605 			    = force_operand (gen_rtx_MINUS (int_mode, t2, t3),
4606 					     tquotient);
4607 		      }
4608 		    else
4609 		      {
4610 			rtx t1, t2, t3, t4;
4611 
4612 			if (post_shift >= BITS_PER_WORD
4613 			    || size - 1 >= BITS_PER_WORD)
4614 			  goto fail1;
4615 
4616 			ml |= HOST_WIDE_INT_M1U << (size - 1);
4617 			mlr = gen_int_mode (ml, int_mode);
4618 			extra_cost = (shift_cost (speed, int_mode, post_shift)
4619 				      + shift_cost (speed, int_mode, size - 1)
4620 				      + 2 * add_cost (speed, int_mode));
4621 			t1 = expmed_mult_highpart (int_mode, op0, mlr,
4622 						   NULL_RTX, 0,
4623 						   max_cost - extra_cost);
4624 			if (t1 == 0)
4625 			  goto fail1;
4626 			t2 = force_operand (gen_rtx_PLUS (int_mode, t1, op0),
4627 					    NULL_RTX);
4628 			t3 = expand_shift
4629 			  (RSHIFT_EXPR, int_mode, t2,
4630 			   post_shift, NULL_RTX, 0);
4631 			t4 = expand_shift
4632 			  (RSHIFT_EXPR, int_mode, op0,
4633 			   size - 1, NULL_RTX, 0);
4634 			if (d < 0)
4635 			  quotient
4636 			    = force_operand (gen_rtx_MINUS (int_mode, t4, t3),
4637 					     tquotient);
4638 			else
4639 			  quotient
4640 			    = force_operand (gen_rtx_MINUS (int_mode, t3, t4),
4641 					     tquotient);
4642 		      }
4643 		  }
4644 		else		/* Mode too wide to use the tricky code.  */
4645 		  break;
4646 
4647 		insn = get_last_insn ();
4648 		if (insn != last)
4649 		  set_dst_reg_note (insn, REG_EQUAL,
4650 				    gen_rtx_DIV (int_mode, op0, op1),
4651 				    quotient);
4652 	      }
4653 	    break;
4654 	  }
4655       fail1:
4656 	delete_insns_since (last);
4657 	break;
4658 
4659       case FLOOR_DIV_EXPR:
4660       case FLOOR_MOD_EXPR:
4661       /* We will come here only for signed operations.  */
4662 	if (op1_is_constant && HWI_COMPUTABLE_MODE_P (compute_mode))
4663 	  {
4664 	    scalar_int_mode int_mode = as_a <scalar_int_mode> (compute_mode);
4665 	    int size = GET_MODE_BITSIZE (int_mode);
4666 	    unsigned HOST_WIDE_INT mh, ml;
4667 	    int pre_shift, lgup, post_shift;
4668 	    HOST_WIDE_INT d = INTVAL (op1);
4669 
4670 	    if (d > 0)
4671 	      {
4672 		/* We could just as easily deal with negative constants here,
4673 		   but it does not seem worth the trouble for GCC 2.6.  */
4674 		if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4675 		  {
4676 		    pre_shift = floor_log2 (d);
4677 		    if (rem_flag)
4678 		      {
4679 			unsigned HOST_WIDE_INT mask
4680 			  = (HOST_WIDE_INT_1U << pre_shift) - 1;
4681 			remainder = expand_binop
4682 			  (int_mode, and_optab, op0,
4683 			   gen_int_mode (mask, int_mode),
4684 			   remainder, 0, OPTAB_LIB_WIDEN);
4685 			if (remainder)
4686 			  return gen_lowpart (mode, remainder);
4687 		      }
4688 		    quotient = expand_shift
4689 		      (RSHIFT_EXPR, int_mode, op0,
4690 		       pre_shift, tquotient, 0);
4691 		  }
4692 		else
4693 		  {
4694 		    rtx t1, t2, t3, t4;
4695 
4696 		    mh = choose_multiplier (d, size, size - 1,
4697 					    &ml, &post_shift, &lgup);
4698 		    gcc_assert (!mh);
4699 
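		    /* For D > 0 this relies on the identity
		       floor (N / D) = ~floor (~N / D) for N < 0: XORing
		       with the sign word T1 (0 or -1) is a conditional
		       one's complement, so a single unsigned reciprocal
		       multiply gives floor rounding for both signs.  */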
4700 		    if (post_shift < BITS_PER_WORD
4701 			&& size - 1 < BITS_PER_WORD)
4702 		      {
4703 			t1 = expand_shift
4704 			  (RSHIFT_EXPR, int_mode, op0,
4705 			   size - 1, NULL_RTX, 0);
4706 			t2 = expand_binop (int_mode, xor_optab, op0, t1,
4707 					   NULL_RTX, 0, OPTAB_WIDEN);
4708 			extra_cost = (shift_cost (speed, int_mode, post_shift)
4709 				      + shift_cost (speed, int_mode, size - 1)
4710 				      + 2 * add_cost (speed, int_mode));
4711 			t3 = expmed_mult_highpart
4712 			  (int_mode, t2, gen_int_mode (ml, int_mode),
4713 			   NULL_RTX, 1, max_cost - extra_cost);
4714 			if (t3 != 0)
4715 			  {
4716 			    t4 = expand_shift
4717 			      (RSHIFT_EXPR, int_mode, t3,
4718 			       post_shift, NULL_RTX, 1);
4719 			    quotient = expand_binop (int_mode, xor_optab,
4720 						     t4, t1, tquotient, 0,
4721 						     OPTAB_WIDEN);
4722 			  }
4723 		      }
4724 		  }
4725 	      }
4726 	    else
4727 	      {
4728 		rtx nsign, t1, t2, t3, t4;
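		/* D is a negative constant.  Floor and truncating division
		   agree when OP0 <= 0; for OP0 > 0,
		   floor (OP0 / D) = trunc ((OP0 - 1) / D) - 1.  NSIGN is -1
		   exactly when OP0 <= 0 (OP0 | (OP0 - 1) is negative then),
		   so T3 is OP0 or OP0 - 1 as appropriate and ~NSIGN supplies
		   the final 0 or -1 without a branch.  */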
4729 		t1 = force_operand (gen_rtx_PLUS (int_mode,
4730 						  op0, constm1_rtx), NULL_RTX);
4731 		t2 = expand_binop (int_mode, ior_optab, op0, t1, NULL_RTX,
4732 				   0, OPTAB_WIDEN);
4733 		nsign = expand_shift (RSHIFT_EXPR, int_mode, t2,
4734 				      size - 1, NULL_RTX, 0);
4735 		t3 = force_operand (gen_rtx_MINUS (int_mode, t1, nsign),
4736 				    NULL_RTX);
4737 		t4 = expand_divmod (0, TRUNC_DIV_EXPR, int_mode, t3, op1,
4738 				    NULL_RTX, 0);
4739 		if (t4)
4740 		  {
4741 		    rtx t5;
4742 		    t5 = expand_unop (int_mode, one_cmpl_optab, nsign,
4743 				      NULL_RTX, 0);
4744 		    quotient = force_operand (gen_rtx_PLUS (int_mode, t4, t5),
4745 					      tquotient);
4746 		  }
4747 	      }
4748 	  }
4749 
4750 	if (quotient != 0)
4751 	  break;
4752 	delete_insns_since (last);
4753 
4754 	/* Try using an instruction that produces both the quotient and
4755 	   remainder, using truncation.  We can easily compensate the quotient
4756 	   or remainder to get floor rounding, once we have the remainder.
4757 	   Notice that we also compute the final remainder value here
4758 	   and return the result right away.  */
4759 	if (target == 0 || GET_MODE (target) != compute_mode)
4760 	  target = gen_reg_rtx (compute_mode);
4761 
4762 	if (rem_flag)
4763 	  {
4764 	    remainder
4765 	      = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4766 	    quotient = gen_reg_rtx (compute_mode);
4767 	  }
4768 	else
4769 	  {
4770 	    quotient
4771 	      = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4772 	    remainder = gen_reg_rtx (compute_mode);
4773 	  }
4774 
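	/* With the truncating quotient and remainder in hand, floor
	   rounding differs only when the remainder is nonzero and OP0 and
	   OP1 have opposite signs (OP0 ^ OP1 is negative); in that case
	   decrement the quotient and add OP1 to the remainder.  */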
4775 	if (expand_twoval_binop (sdivmod_optab, op0, op1,
4776 				 quotient, remainder, 0))
4777 	  {
4778 	    /* This could be computed with a branch-less sequence.
4779 	       Save that for later.  */
4780 	    rtx tem;
4781 	    rtx_code_label *label = gen_label_rtx ();
4782 	    do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
4783 	    tem = expand_binop (compute_mode, xor_optab, op0, op1,
4784 				NULL_RTX, 0, OPTAB_WIDEN);
4785 	    do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
4786 	    expand_dec (quotient, const1_rtx);
4787 	    expand_inc (remainder, op1);
4788 	    emit_label (label);
4789 	    return gen_lowpart (mode, rem_flag ? remainder : quotient);
4790 	  }
4791 
4792 	/* No luck with division elimination or divmod.  Have to do it
4793 	   by conditionally adjusting op0 *and* the result.  */
4794 	{
4795 	  rtx_code_label *label1, *label2, *label3, *label4, *label5;
4796 	  rtx adjusted_op0;
4797 	  rtx tem;
4798 
4799 	  quotient = gen_reg_rtx (compute_mode);
4800 	  adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4801 	  label1 = gen_label_rtx ();
4802 	  label2 = gen_label_rtx ();
4803 	  label3 = gen_label_rtx ();
4804 	  label4 = gen_label_rtx ();
4805 	  label5 = gen_label_rtx ();
4806 	  do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4807 	  do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
4808 	  tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4809 			      quotient, 0, OPTAB_LIB_WIDEN);
4810 	  if (tem != quotient)
4811 	    emit_move_insn (quotient, tem);
4812 	  emit_jump_insn (targetm.gen_jump (label5));
4813 	  emit_barrier ();
4814 	  emit_label (label1);
4815 	  expand_inc (adjusted_op0, const1_rtx);
4816 	  emit_jump_insn (targetm.gen_jump (label4));
4817 	  emit_barrier ();
4818 	  emit_label (label2);
4819 	  do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
4820 	  tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4821 			      quotient, 0, OPTAB_LIB_WIDEN);
4822 	  if (tem != quotient)
4823 	    emit_move_insn (quotient, tem);
4824 	  emit_jump_insn (targetm.gen_jump (label5));
4825 	  emit_barrier ();
4826 	  emit_label (label3);
4827 	  expand_dec (adjusted_op0, const1_rtx);
4828 	  emit_label (label4);
4829 	  tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4830 			      quotient, 0, OPTAB_LIB_WIDEN);
4831 	  if (tem != quotient)
4832 	    emit_move_insn (quotient, tem);
4833 	  expand_dec (quotient, const1_rtx);
4834 	  emit_label (label5);
4835 	}
4836 	break;
4837 
4838       case CEIL_DIV_EXPR:
4839       case CEIL_MOD_EXPR:
4840 	if (unsignedp)
4841 	  {
4842 	    if (op1_is_constant
4843 		&& EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4844 		&& (HWI_COMPUTABLE_MODE_P (compute_mode)
4845 		    || INTVAL (op1) >= 0))
4846 	      {
4847 		scalar_int_mode int_mode
4848 		  = as_a <scalar_int_mode> (compute_mode);
4849 		rtx t1, t2, t3;
4850 		unsigned HOST_WIDE_INT d = INTVAL (op1);
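		/* D is a power of two, say 2^K, and
		   CEIL (OP0 / 2^K) = (OP0 >> K) + (OP0 & (2^K - 1) != 0):
		   shift, test the discarded low bits, and add the 0/1 flag
		   (or conditionally increment if there is no scc pattern).  */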
4851 		t1 = expand_shift (RSHIFT_EXPR, int_mode, op0,
4852 				   floor_log2 (d), tquotient, 1);
4853 		t2 = expand_binop (int_mode, and_optab, op0,
4854 				   gen_int_mode (d - 1, int_mode),
4855 				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
4856 		t3 = gen_reg_rtx (int_mode);
4857 		t3 = emit_store_flag (t3, NE, t2, const0_rtx, int_mode, 1, 1);
4858 		if (t3 == 0)
4859 		  {
4860 		    rtx_code_label *lab;
4861 		    lab = gen_label_rtx ();
4862 		    do_cmp_and_jump (t2, const0_rtx, EQ, int_mode, lab);
4863 		    expand_inc (t1, const1_rtx);
4864 		    emit_label (lab);
4865 		    quotient = t1;
4866 		  }
4867 		else
4868 		  quotient = force_operand (gen_rtx_PLUS (int_mode, t1, t3),
4869 					    tquotient);
4870 		break;
4871 	      }
4872 
4873 	    /* Try using an instruction that produces both the quotient and
4874 	       remainder, using truncation.  We can easily compensate the
4875 	       quotient or remainder to get ceiling rounding, once we have the
4876 	       remainder.  Notice that we also compute the final remainder
4877 	       value here and return the result right away.  */
4878 	    if (target == 0 || GET_MODE (target) != compute_mode)
4879 	      target = gen_reg_rtx (compute_mode);
4880 
4881 	    if (rem_flag)
4882 	      {
4883 		remainder = (REG_P (target)
4884 			     ? target : gen_reg_rtx (compute_mode));
4885 		quotient = gen_reg_rtx (compute_mode);
4886 	      }
4887 	    else
4888 	      {
4889 		quotient = (REG_P (target)
4890 			    ? target : gen_reg_rtx (compute_mode));
4891 		remainder = gen_reg_rtx (compute_mode);
4892 	      }
4893 
4894 	    if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
4895 				     remainder, 1))
4896 	      {
4897 		/* This could be computed with a branch-less sequence.
4898 		   Save that for later.  */
4899 		rtx_code_label *label = gen_label_rtx ();
4900 		do_cmp_and_jump (remainder, const0_rtx, EQ,
4901 				 compute_mode, label);
4902 		expand_inc (quotient, const1_rtx);
4903 		expand_dec (remainder, op1);
4904 		emit_label (label);
4905 		return gen_lowpart (mode, rem_flag ? remainder : quotient);
4906 	      }
4907 
4908 	    /* No luck with division elimination or divmod.  Have to do it
4909 	       by conditionally adjusting op0 *and* the result.  */
4910 	    {
4911 	      rtx_code_label *label1, *label2;
4912 	      rtx adjusted_op0, tem;
4913 
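	      /* Use the overflow-safe form
		 CEIL (N / D) = (N == 0 ? 0 : (N - 1) / D + 1), which never
		 needs to compute N + D - 1 and so cannot wrap around.  */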
4914 	      quotient = gen_reg_rtx (compute_mode);
4915 	      adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4916 	      label1 = gen_label_rtx ();
4917 	      label2 = gen_label_rtx ();
4918 	      do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
4919 			       compute_mode, label1);
4920 	      emit_move_insn (quotient, const0_rtx);
4921 	      emit_jump_insn (targetm.gen_jump (label2));
4922 	      emit_barrier ();
4923 	      emit_label (label1);
4924 	      expand_dec (adjusted_op0, const1_rtx);
4925 	      tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
4926 				  quotient, 1, OPTAB_LIB_WIDEN);
4927 	      if (tem != quotient)
4928 		emit_move_insn (quotient, tem);
4929 	      expand_inc (quotient, const1_rtx);
4930 	      emit_label (label2);
4931 	    }
4932 	  }
4933 	else /* signed */
4934 	  {
4935 	    if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4936 		&& INTVAL (op1) >= 0)
4937 	      {
4938 		/* This is extremely similar to the code for the unsigned case
4939 		   above.  For 2.7 we should merge these variants, but for
4940 		   2.6.1 I don't want to touch the code for unsigned since that
4941 		   gets used in C.  The signed case will only be used by other
4942 		   languages (Ada).  */
4943 
4944 		rtx t1, t2, t3;
4945 		unsigned HOST_WIDE_INT d = INTVAL (op1);
4946 		t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4947 				   floor_log2 (d), tquotient, 0);
4948 		t2 = expand_binop (compute_mode, and_optab, op0,
4949 				   gen_int_mode (d - 1, compute_mode),
4950 				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
4951 		t3 = gen_reg_rtx (compute_mode);
4952 		t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4953 				      compute_mode, 1, 1);
4954 		if (t3 == 0)
4955 		  {
4956 		    rtx_code_label *lab;
4957 		    lab = gen_label_rtx ();
4958 		    do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4959 		    expand_inc (t1, const1_rtx);
4960 		    emit_label (lab);
4961 		    quotient = t1;
4962 		  }
4963 		else
4964 		  quotient = force_operand (gen_rtx_PLUS (compute_mode,
4965 							  t1, t3),
4966 					    tquotient);
4967 		break;
4968 	      }
4969 
4970 	    /* Try using an instruction that produces both the quotient and
4971 	       remainder, using truncation.  We can easily compensate the
4972 	       quotient or remainder to get ceiling rounding, once we have the
4973 	       remainder.  Notice that we also compute the final remainder
4974 	       value here and return the result right away.  */
4975 	    if (target == 0 || GET_MODE (target) != compute_mode)
4976 	      target = gen_reg_rtx (compute_mode);
4977 	    if (rem_flag)
4978 	      {
4979 		remainder = (REG_P (target)
4980 			    ? target : gen_reg_rtx (compute_mode));
4981 		quotient = gen_reg_rtx (compute_mode);
4982 	      }
4983 	    else
4984 	      {
4985 		quotient = (REG_P (target)
4986 			    ? target : gen_reg_rtx (compute_mode));
4987 		remainder = gen_reg_rtx (compute_mode);
4988 	      }
4989 
4990 	    if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
4991 				     remainder, 0))
4992 	      {
4993 		/* This could be computed with a branch-less sequence.
4994 		   Save that for later.  */
4995 		rtx tem;
4996 		rtx_code_label *label = gen_label_rtx ();
4997 		do_cmp_and_jump (remainder, const0_rtx, EQ,
4998 				 compute_mode, label);
4999 		tem = expand_binop (compute_mode, xor_optab, op0, op1,
5000 				    NULL_RTX, 0, OPTAB_WIDEN);
5001 		do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
5002 		expand_inc (quotient, const1_rtx);
5003 		expand_dec (remainder, op1);
5004 		emit_label (label);
5005 		return gen_lowpart (mode, rem_flag ? remainder : quotient);
5006 	      }
5007 
5008 	    /* No luck with division elimination or divmod.  Have to do it
5009 	       by conditionally adjusting op0 *and* the result.  */
5010 	    {
5011 	      rtx_code_label *label1, *label2, *label3, *label4, *label5;
5012 	      rtx adjusted_op0;
5013 	      rtx tem;
5014 
5015 	      quotient = gen_reg_rtx (compute_mode);
5016 	      adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
5017 	      label1 = gen_label_rtx ();
5018 	      label2 = gen_label_rtx ();
5019 	      label3 = gen_label_rtx ();
5020 	      label4 = gen_label_rtx ();
5021 	      label5 = gen_label_rtx ();
5022 	      do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
5023 	      do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
5024 			       compute_mode, label1);
5025 	      tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
5026 				  quotient, 0, OPTAB_LIB_WIDEN);
5027 	      if (tem != quotient)
5028 		emit_move_insn (quotient, tem);
5029 	      emit_jump_insn (targetm.gen_jump (label5));
5030 	      emit_barrier ();
5031 	      emit_label (label1);
5032 	      expand_dec (adjusted_op0, const1_rtx);
5033 	      emit_jump_insn (targetm.gen_jump (label4));
5034 	      emit_barrier ();
5035 	      emit_label (label2);
5036 	      do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
5037 			       compute_mode, label3);
5038 	      tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
5039 				  quotient, 0, OPTAB_LIB_WIDEN);
5040 	      if (tem != quotient)
5041 		emit_move_insn (quotient, tem);
5042 	      emit_jump_insn (targetm.gen_jump (label5));
5043 	      emit_barrier ();
5044 	      emit_label (label3);
5045 	      expand_inc (adjusted_op0, const1_rtx);
5046 	      emit_label (label4);
5047 	      tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
5048 				  quotient, 0, OPTAB_LIB_WIDEN);
5049 	      if (tem != quotient)
5050 		emit_move_insn (quotient, tem);
5051 	      expand_inc (quotient, const1_rtx);
5052 	      emit_label (label5);
5053 	    }
5054 	  }
5055 	break;
5056 
5057       case EXACT_DIV_EXPR:
5058 	if (op1_is_constant && HWI_COMPUTABLE_MODE_P (compute_mode))
5059 	  {
5060 	    scalar_int_mode int_mode = as_a <scalar_int_mode> (compute_mode);
5061 	    int size = GET_MODE_BITSIZE (int_mode);
5062 	    HOST_WIDE_INT d = INTVAL (op1);
5063 	    unsigned HOST_WIDE_INT ml;
5064 	    int pre_shift;
5065 	    rtx t1;
5066 
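	    /* The division is known to be exact, so write D as
	       ODD * 2^PRE_SHIFT.  Shifting OP0 right by PRE_SHIFT divides
	       by the power of two, and multiplying by the inverse of ODD
	       modulo 2^SIZE (from invert_mod2n) divides by ODD: an exact
	       quotient is recovered by modular multiplication alone, with
	       no highpart multiply or correction step.  */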
5067 	    pre_shift = ctz_or_zero (d);
5068 	    ml = invert_mod2n (d >> pre_shift, size);
5069 	    t1 = expand_shift (RSHIFT_EXPR, int_mode, op0,
5070 			       pre_shift, NULL_RTX, unsignedp);
5071 	    quotient = expand_mult (int_mode, t1, gen_int_mode (ml, int_mode),
5072 				    NULL_RTX, 1);
5073 
5074 	    insn = get_last_insn ();
5075 	    set_dst_reg_note (insn, REG_EQUAL,
5076 			      gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5077 					      int_mode, op0, op1),
5078 			      quotient);
5079 	  }
5080 	break;
5081 
5082       case ROUND_DIV_EXPR:
5083       case ROUND_MOD_EXPR:
5084 	if (unsignedp)
5085 	  {
5086 	    scalar_int_mode int_mode = as_a <scalar_int_mode> (compute_mode);
5087 	    rtx tem;
5088 	    rtx_code_label *label;
5089 	    label = gen_label_rtx ();
5090 	    quotient = gen_reg_rtx (int_mode);
5091 	    remainder = gen_reg_rtx (int_mode);
5092 	    if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
5093 	      {
5094 		rtx tem;
5095 		quotient = expand_binop (int_mode, udiv_optab, op0, op1,
5096 					 quotient, 1, OPTAB_LIB_WIDEN);
5097 		tem = expand_mult (int_mode, quotient, op1, NULL_RTX, 1);
5098 		remainder = expand_binop (int_mode, sub_optab, op0, tem,
5099 					  remainder, 1, OPTAB_LIB_WIDEN);
5100 	      }
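	    /* Round to nearest: keep the truncating quotient when
	       REMAINDER <= (D - 1) / 2, otherwise bump it by one and
	       subtract D from the remainder; exact halves round upward.  */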
5101 	    tem = plus_constant (int_mode, op1, -1);
5102 	    tem = expand_shift (RSHIFT_EXPR, int_mode, tem, 1, NULL_RTX, 1);
5103 	    do_cmp_and_jump (remainder, tem, LEU, int_mode, label);
5104 	    expand_inc (quotient, const1_rtx);
5105 	    expand_dec (remainder, op1);
5106 	    emit_label (label);
5107 	  }
5108 	else
5109 	  {
5110 	    scalar_int_mode int_mode = as_a <scalar_int_mode> (compute_mode);
5111 	    int size = GET_MODE_BITSIZE (int_mode);
5112 	    rtx abs_rem, abs_op1, tem, mask;
5113 	    rtx_code_label *label;
5114 	    label = gen_label_rtx ();
5115 	    quotient = gen_reg_rtx (int_mode);
5116 	    remainder = gen_reg_rtx (int_mode);
5117 	    if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
5118 	      {
5119 		rtx tem;
5120 		quotient = expand_binop (int_mode, sdiv_optab, op0, op1,
5121 					 quotient, 0, OPTAB_LIB_WIDEN);
5122 		tem = expand_mult (int_mode, quotient, op1, NULL_RTX, 0);
5123 		remainder = expand_binop (int_mode, sub_optab, op0, tem,
5124 					  remainder, 0, OPTAB_LIB_WIDEN);
5125 	      }
5126 	    abs_rem = expand_abs (int_mode, remainder, NULL_RTX, 1, 0);
5127 	    abs_op1 = expand_abs (int_mode, op1, NULL_RTX, 1, 0);
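	    /* If 2 * |REMAINDER| >= |OP1|, round away from zero.  MASK is
	       -1 when OP0 and OP1 have opposite signs and 0 otherwise, so
	       (X ^ MASK) - MASK conditionally negates X: the quotient is
	       adjusted by +/-1 and the remainder by -/+OP1 without a
	       second branch.  */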
5128 	    tem = expand_shift (LSHIFT_EXPR, int_mode, abs_rem,
5129 				1, NULL_RTX, 1);
5130 	    do_cmp_and_jump (tem, abs_op1, LTU, int_mode, label);
5131 	    tem = expand_binop (int_mode, xor_optab, op0, op1,
5132 				NULL_RTX, 0, OPTAB_WIDEN);
5133 	    mask = expand_shift (RSHIFT_EXPR, int_mode, tem,
5134 				 size - 1, NULL_RTX, 0);
5135 	    tem = expand_binop (int_mode, xor_optab, mask, const1_rtx,
5136 				NULL_RTX, 0, OPTAB_WIDEN);
5137 	    tem = expand_binop (int_mode, sub_optab, tem, mask,
5138 				NULL_RTX, 0, OPTAB_WIDEN);
5139 	    expand_inc (quotient, tem);
5140 	    tem = expand_binop (int_mode, xor_optab, mask, op1,
5141 				NULL_RTX, 0, OPTAB_WIDEN);
5142 	    tem = expand_binop (int_mode, sub_optab, tem, mask,
5143 				NULL_RTX, 0, OPTAB_WIDEN);
5144 	    expand_dec (remainder, tem);
5145 	    emit_label (label);
5146 	  }
5147 	return gen_lowpart (mode, rem_flag ? remainder : quotient);
5148 
5149       default:
5150 	gcc_unreachable ();
5151       }
5152 
5153   if (quotient == 0)
5154     {
5155       if (target && GET_MODE (target) != compute_mode)
5156 	target = 0;
5157 
5158       if (rem_flag)
5159 	{
5160 	  /* Try to produce the remainder without producing the quotient.
5161 	     If we seem to have a divmod pattern that does not require widening,
5162 	     don't try widening here.  We should really have a WIDEN argument
5163 	     to expand_twoval_binop, since what we'd really like to do here is
5164 	     1) try a mod insn in compute_mode
5165 	     2) try a divmod insn in compute_mode
5166 	     3) try a div insn in compute_mode and multiply-subtract to get
5167 	        remainder
5168 	     4) try the same things with widening allowed.  */
5169 	  remainder
5170 	    = sign_expand_binop (compute_mode, umod_optab, smod_optab,
5171 				 op0, op1, target,
5172 				 unsignedp,
5173 				 ((optab_handler (optab2, compute_mode)
5174 				   != CODE_FOR_nothing)
5175 				  ? OPTAB_DIRECT : OPTAB_WIDEN));
5176 	  if (remainder == 0)
5177 	    {
5178 	      /* No luck there.  Can we do remainder and divide at once
5179 		 without a library call?  */
5180 	      remainder = gen_reg_rtx (compute_mode);
5181 	      if (! expand_twoval_binop ((unsignedp
5182 					  ? udivmod_optab
5183 					  : sdivmod_optab),
5184 					 op0, op1,
5185 					 NULL_RTX, remainder, unsignedp))
5186 		remainder = 0;
5187 	    }
5188 
5189 	  if (remainder)
5190 	    return gen_lowpart (mode, remainder);
5191 	}
5192 
5193       /* Produce the quotient.  Try a quotient insn, but not a library call.
5194 	 If we have a divmod in this mode, use it in preference to widening
5195 	 the div (for this test we assume it will not fail). Note that optab2
5196 	 is set to the one of the two optabs that the call below will use.  */
5197       quotient
5198 	= sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
5199 			     op0, op1, rem_flag ? NULL_RTX : target,
5200 			     unsignedp,
5201 			     ((optab_handler (optab2, compute_mode)
5202 			       != CODE_FOR_nothing)
5203 			      ? OPTAB_DIRECT : OPTAB_WIDEN));
5204 
5205       if (quotient == 0)
5206 	{
5207 	  /* No luck there.  Try a quotient-and-remainder insn,
5208 	     keeping the quotient alone.  */
5209 	  quotient = gen_reg_rtx (compute_mode);
5210 	  if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
5211 				     op0, op1,
5212 				     quotient, NULL_RTX, unsignedp))
5213 	    {
5214 	      quotient = 0;
5215 	      if (! rem_flag)
5216 		/* Still no luck.  If we are not computing the remainder,
5217 		   use a library call for the quotient.  */
5218 		quotient = sign_expand_binop (compute_mode,
5219 					      udiv_optab, sdiv_optab,
5220 					      op0, op1, target,
5221 					      unsignedp, OPTAB_LIB_WIDEN);
5222 	    }
5223 	}
5224     }
5225 
5226   if (rem_flag)
5227     {
5228       if (target && GET_MODE (target) != compute_mode)
5229 	target = 0;
5230 
5231       if (quotient == 0)
5232 	{
5233 	  /* No divide instruction either.  Use library for remainder.  */
5234 	  remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
5235 					 op0, op1, target,
5236 					 unsignedp, OPTAB_LIB_WIDEN);
5237 	  /* No remainder function.  Try a quotient-and-remainder
5238 	     function, keeping the remainder.  */
5239 	  if (!remainder)
5240 	    {
5241 	      remainder = gen_reg_rtx (compute_mode);
5242 	      if (!expand_twoval_binop_libfunc
5243 		  (unsignedp ? udivmod_optab : sdivmod_optab,
5244 		   op0, op1,
5245 		   NULL_RTX, remainder,
5246 		   unsignedp ? UMOD : MOD))
5247 		remainder = NULL_RTX;
5248 	    }
5249 	}
5250       else
5251 	{
5252 	  /* We divided.  Now finish doing X - Y * (X / Y).  */
5253 	  remainder = expand_mult (compute_mode, quotient, op1,
5254 				   NULL_RTX, unsignedp);
5255 	  remainder = expand_binop (compute_mode, sub_optab, op0,
5256 				    remainder, target, unsignedp,
5257 				    OPTAB_LIB_WIDEN);
5258 	}
5259     }
5260 
5261   return gen_lowpart (mode, rem_flag ? remainder : quotient);
5262 }
5263 
5264 /* Return a tree node with data type TYPE, describing the value of X.
5265    Usually this is a VAR_DECL, if there is no obvious better choice.
5266    X may be an expression; however, we only support those expressions
5267    generated by loop.c.  */
5268 
5269 tree
5270 make_tree (tree type, rtx x)
5271 {
5272   tree t;
5273 
5274   switch (GET_CODE (x))
5275     {
5276     case CONST_INT:
5277     case CONST_WIDE_INT:
5278       t = wide_int_to_tree (type, rtx_mode_t (x, TYPE_MODE (type)));
5279       return t;
5280 
5281     case CONST_DOUBLE:
5282       STATIC_ASSERT (HOST_BITS_PER_WIDE_INT * 2 <= MAX_BITSIZE_MODE_ANY_INT);
5283       if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (x) == VOIDmode)
5284 	t = wide_int_to_tree (type,
5285 			      wide_int::from_array (&CONST_DOUBLE_LOW (x), 2,
5286 						    HOST_BITS_PER_WIDE_INT * 2));
5287       else
5288 	t = build_real (type, *CONST_DOUBLE_REAL_VALUE (x));
5289 
5290       return t;
5291 
5292     case CONST_VECTOR:
5293       {
5294 	unsigned int npatterns = CONST_VECTOR_NPATTERNS (x);
5295 	unsigned int nelts_per_pattern = CONST_VECTOR_NELTS_PER_PATTERN (x);
5296 	tree itype = TREE_TYPE (type);
5297 
5298 	/* Build a tree with vector elements.  */
5299 	tree_vector_builder elts (type, npatterns, nelts_per_pattern);
5300 	unsigned int count = elts.encoded_nelts ();
5301 	for (unsigned int i = 0; i < count; ++i)
5302 	  {
5303 	    rtx elt = CONST_VECTOR_ELT (x, i);
5304 	    elts.quick_push (make_tree (itype, elt));
5305 	  }
5306 
5307 	return elts.build ();
5308       }
5309 
5310     case PLUS:
5311       return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
5312 			  make_tree (type, XEXP (x, 1)));
5313 
5314     case MINUS:
5315       return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
5316 			  make_tree (type, XEXP (x, 1)));
5317 
5318     case NEG:
5319       return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));
5320 
5321     case MULT:
5322       return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
5323 			  make_tree (type, XEXP (x, 1)));
5324 
5325     case ASHIFT:
5326       return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
5327 			  make_tree (type, XEXP (x, 1)));
5328 
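    /* Tree-level RSHIFT_EXPR takes its logical/arithmetic behaviour from
       the signedness of the operand type, so a logical LSHIFTRT is built
       on the corresponding unsigned type and an arithmetic ASHIFTRT on
       the signed type, converting back to TYPE afterwards.  */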
5329     case LSHIFTRT:
5330       t = unsigned_type_for (type);
5331       return fold_convert (type, build2 (RSHIFT_EXPR, t,
5332 			    		 make_tree (t, XEXP (x, 0)),
5333 				    	 make_tree (type, XEXP (x, 1))));
5334 
5335     case ASHIFTRT:
5336       t = signed_type_for (type);
5337       return fold_convert (type, build2 (RSHIFT_EXPR, t,
5338 					 make_tree (t, XEXP (x, 0)),
5339 				    	 make_tree (type, XEXP (x, 1))));
5340 
5341     case DIV:
5342       if (TREE_CODE (type) != REAL_TYPE)
5343 	t = signed_type_for (type);
5344       else
5345 	t = type;
5346 
5347       return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
5348 				    	 make_tree (t, XEXP (x, 0)),
5349 				    	 make_tree (t, XEXP (x, 1))));
5350     case UDIV:
5351       t = unsigned_type_for (type);
5352       return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
5353 				    	 make_tree (t, XEXP (x, 0)),
5354 				    	 make_tree (t, XEXP (x, 1))));
5355 
5356     case SIGN_EXTEND:
5357     case ZERO_EXTEND:
5358       t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
5359 					  GET_CODE (x) == ZERO_EXTEND);
5360       return fold_convert (type, make_tree (t, XEXP (x, 0)));
5361 
5362     case CONST:
5363       return make_tree (type, XEXP (x, 0));
5364 
5365     case SYMBOL_REF:
5366       t = SYMBOL_REF_DECL (x);
5367       if (t)
5368 	return fold_convert (type, build_fold_addr_expr (t));
5369       /* fall through.  */
5370 
5371     default:
5372       if (CONST_POLY_INT_P (x))
5373 	return wide_int_to_tree (type, const_poly_int_value (x));
5374 
5375       t = build_decl (RTL_LOCATION (x), VAR_DECL, NULL_TREE, type);
5376 
5377       /* If TYPE is a POINTER_TYPE, we might need to convert X from
5378 	 address mode to pointer mode.  */
5379       if (POINTER_TYPE_P (type))
5380 	x = convert_memory_address_addr_space
5381 	  (SCALAR_INT_TYPE_MODE (type), x, TYPE_ADDR_SPACE (TREE_TYPE (type)));
5382 
5383       /* Note that we do *not* use SET_DECL_RTL here, because we do not
5384 	 want set_decl_rtl to go adjusting REG_ATTRS for this temporary.  */
5385       t->decl_with_rtl.rtl = x;
5386 
5387       return t;
5388     }
5389 }
5390 
5391 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
5392    and returning TARGET.
5393 
5394    If TARGET is 0, a pseudo-register or constant is returned.  */
5395 
5396 rtx
5397 expand_and (machine_mode mode, rtx op0, rtx op1, rtx target)
5398 {
5399   rtx tem = 0;
5400 
5401   if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
5402     tem = simplify_binary_operation (AND, mode, op0, op1);
5403   if (tem == 0)
5404     tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
5405 
5406   if (target == 0)
5407     target = tem;
5408   else if (tem != target)
5409     emit_move_insn (target, tem);
5410   return target;
5411 }
5412 
5413 /* Helper function for emit_store_flag.  */
5414 rtx
5415 emit_cstore (rtx target, enum insn_code icode, enum rtx_code code,
5416 	     machine_mode mode, machine_mode compare_mode,
5417 	     int unsignedp, rtx x, rtx y, int normalizep,
5418 	     machine_mode target_mode)
5419 {
5420   struct expand_operand ops[4];
5421   rtx op0, comparison, subtarget;
5422   rtx_insn *last;
5423   scalar_int_mode result_mode = targetm.cstore_mode (icode);
5424   scalar_int_mode int_target_mode;
5425 
5426   last = get_last_insn ();
5427   x = prepare_operand (icode, x, 2, mode, compare_mode, unsignedp);
5428   y = prepare_operand (icode, y, 3, mode, compare_mode, unsignedp);
5429   if (!x || !y)
5430     {
5431       delete_insns_since (last);
5432       return NULL_RTX;
5433     }
5434 
5435   if (target_mode == VOIDmode)
5436     int_target_mode = result_mode;
5437   else
5438     int_target_mode = as_a <scalar_int_mode> (target_mode);
5439   if (!target)
5440     target = gen_reg_rtx (int_target_mode);
5441 
5442   comparison = gen_rtx_fmt_ee (code, result_mode, x, y);
5443 
5444   create_output_operand (&ops[0], optimize ? NULL_RTX : target, result_mode);
5445   create_fixed_operand (&ops[1], comparison);
5446   create_fixed_operand (&ops[2], x);
5447   create_fixed_operand (&ops[3], y);
5448   if (!maybe_expand_insn (icode, 4, ops))
5449     {
5450       delete_insns_since (last);
5451       return NULL_RTX;
5452     }
5453   subtarget = ops[0].value;
5454 
5455   /* If we are converting to a wider mode, first convert to
5456      INT_TARGET_MODE, then normalize.  This produces better combining
5457      opportunities on machines that have a SIGN_EXTRACT when we are
5458      testing a single bit.  This mostly benefits the 68k.
5459 
5460      If STORE_FLAG_VALUE does not have the sign bit set when
5461      interpreted in MODE, we can do this conversion as unsigned, which
5462      is usually more efficient.  */
5463   if (GET_MODE_SIZE (int_target_mode) > GET_MODE_SIZE (result_mode))
5464     {
5465       convert_move (target, subtarget,
5466 		    val_signbit_known_clear_p (result_mode,
5467 					       STORE_FLAG_VALUE));
5468       op0 = target;
5469       result_mode = int_target_mode;
5470     }
5471   else
5472     op0 = subtarget;
5473 
5474   /* If we want to keep subexpressions around, don't reuse our last
5475      target.  */
5476   if (optimize)
5477     subtarget = 0;
5478 
5479   /* Now normalize to the proper value in MODE.  Sometimes we don't
5480      have to do anything.  */
5481   if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
5482     ;
5483   /* STORE_FLAG_VALUE might be the most negative number, so write
5484      the comparison this way to avoid a compile-time warning.  */
5485   else if (- normalizep == STORE_FLAG_VALUE)
5486     op0 = expand_unop (result_mode, neg_optab, op0, subtarget, 0);
5487 
5488   /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes
5489      it hard to use a value of just the sign bit due to ANSI integer
5490      constant typing rules.  */
5491   else if (val_signbit_known_set_p (result_mode, STORE_FLAG_VALUE))
5492     op0 = expand_shift (RSHIFT_EXPR, result_mode, op0,
5493 			GET_MODE_BITSIZE (result_mode) - 1, subtarget,
5494 			normalizep == 1);
5495   else
5496     {
5497       gcc_assert (STORE_FLAG_VALUE & 1);
5498 
5499       op0 = expand_and (result_mode, op0, const1_rtx, subtarget);
5500       if (normalizep == -1)
5501 	op0 = expand_unop (result_mode, neg_optab, op0, op0, 0);
5502     }
5503 
5504   /* If we were converting to a smaller mode, do the conversion now.  */
5505   if (int_target_mode != result_mode)
5506     {
5507       convert_move (target, op0, 0);
5508       return target;
5509     }
5510   else
5511     return op0;
5512 }
5513 
5514 
5515 /* A subroutine of emit_store_flag only including "tricks" that do not
5516    need a recursive call.  These are kept separate to avoid infinite
5517    loops.  */
5518 
5519 static rtx
5520 emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1,
5521 		   machine_mode mode, int unsignedp, int normalizep,
5522 		   machine_mode target_mode)
5523 {
5524   rtx subtarget;
5525   enum insn_code icode;
5526   machine_mode compare_mode;
5527   enum mode_class mclass;
5528   enum rtx_code scode;
5529 
5530   if (unsignedp)
5531     code = unsigned_condition (code);
5532   scode = swap_condition (code);
5533 
5534   /* If one operand is constant, make it the second one.  Only do this
5535      if the other operand is not constant as well.  */
5536 
5537   if (swap_commutative_operands_p (op0, op1))
5538     {
5539       std::swap (op0, op1);
5540       code = swap_condition (code);
5541     }
5542 
5543   if (mode == VOIDmode)
5544     mode = GET_MODE (op0);
5545 
5546   /* For some comparisons with 1 and -1, we can convert this to
5547      comparisons with zero.  This will often produce more opportunities for
5548      store-flag insns.  */
5549 
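  /* For instance, X < 1 becomes X <= 0, and the unsigned X >= 1 (GEU)
     becomes X != 0.  */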
5550   switch (code)
5551     {
5552     case LT:
5553       if (op1 == const1_rtx)
5554 	op1 = const0_rtx, code = LE;
5555       break;
5556     case LE:
5557       if (op1 == constm1_rtx)
5558 	op1 = const0_rtx, code = LT;
5559       break;
5560     case GE:
5561       if (op1 == const1_rtx)
5562 	op1 = const0_rtx, code = GT;
5563       break;
5564     case GT:
5565       if (op1 == constm1_rtx)
5566 	op1 = const0_rtx, code = GE;
5567       break;
5568     case GEU:
5569       if (op1 == const1_rtx)
5570 	op1 = const0_rtx, code = NE;
5571       break;
5572     case LTU:
5573       if (op1 == const1_rtx)
5574 	op1 = const0_rtx, code = EQ;
5575       break;
5576     default:
5577       break;
5578     }
5579 
5580   /* If we are comparing a double-word integer with zero or -1, we can
5581      convert the comparison into one involving a single word.  */
5582   scalar_int_mode int_mode;
5583   if (is_int_mode (mode, &int_mode)
5584       && GET_MODE_BITSIZE (int_mode) == BITS_PER_WORD * 2
5585       && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
5586     {
5587       rtx tem;
5588       if ((code == EQ || code == NE)
5589 	  && (op1 == const0_rtx || op1 == constm1_rtx))
5590 	{
5591 	  rtx op00, op01;
5592 
5593 	  /* Do a logical OR or AND of the two words and compare the
5594 	     result.  */
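	  /* The double-word value is zero exactly when the inclusive OR of
	     its two words is zero, and -1 exactly when their AND is -1, so
	     the comparison collapses to a single word-mode store-flag.  */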
5595 	  op00 = simplify_gen_subreg (word_mode, op0, int_mode, 0);
5596 	  op01 = simplify_gen_subreg (word_mode, op0, int_mode, UNITS_PER_WORD);
5597 	  tem = expand_binop (word_mode,
5598 			      op1 == const0_rtx ? ior_optab : and_optab,
5599 			      op00, op01, NULL_RTX, unsignedp,
5600 			      OPTAB_DIRECT);
5601 
5602 	  if (tem != 0)
5603 	    tem = emit_store_flag (NULL_RTX, code, tem, op1, word_mode,
5604 				   unsignedp, normalizep);
5605 	}
5606       else if ((code == LT || code == GE) && op1 == const0_rtx)
5607 	{
5608 	  rtx op0h;
5609 
5610 	  /* If testing the sign bit, can just test on high word.  */
5611 	  op0h = simplify_gen_subreg (word_mode, op0, int_mode,
5612 				      subreg_highpart_offset (word_mode,
5613 							      int_mode));
5614 	  tem = emit_store_flag (NULL_RTX, code, op0h, op1, word_mode,
5615 				 unsignedp, normalizep);
5616 	}
5617       else
5618 	tem = NULL_RTX;
5619 
5620       if (tem)
5621 	{
5622 	  if (target_mode == VOIDmode || GET_MODE (tem) == target_mode)
5623 	    return tem;
5624 	  if (!target)
5625 	    target = gen_reg_rtx (target_mode);
5626 
5627 	  convert_move (target, tem,
5628 			!val_signbit_known_set_p (word_mode,
5629 						  (normalizep ? normalizep
5630 						   : STORE_FLAG_VALUE)));
5631 	  return target;
5632 	}
5633     }
5634 
5635   /* If this is A < 0 or A >= 0, we can do this by taking the ones
5636      complement of A (for GE) and shifting the sign bit to the low bit.  */
5637   if (op1 == const0_rtx && (code == LT || code == GE)
5638       && is_int_mode (mode, &int_mode)
5639       && (normalizep || STORE_FLAG_VALUE == 1
5640 	  || val_signbit_p (int_mode, STORE_FLAG_VALUE)))
5641     {
5642       scalar_int_mode int_target_mode;
5643       subtarget = target;
5644 
5645       if (!target)
5646 	int_target_mode = int_mode;
5647       else
5648 	{
5649 	  /* If the result is to be wider than OP0, it is best to convert it
5650 	     first.  If it is to be narrower, it is *incorrect* to convert it
5651 	     first.  */
5652 	  int_target_mode = as_a <scalar_int_mode> (target_mode);
5653 	  if (GET_MODE_SIZE (int_target_mode) > GET_MODE_SIZE (int_mode))
5654 	    {
5655 	      op0 = convert_modes (int_target_mode, int_mode, op0, 0);
5656 	      int_mode = int_target_mode;
5657 	    }
5658 	}
5659 
5660       if (int_target_mode != int_mode)
5661 	subtarget = 0;
5662 
5663       if (code == GE)
5664 	op0 = expand_unop (int_mode, one_cmpl_optab, op0,
5665 			   ((STORE_FLAG_VALUE == 1 || normalizep)
5666 			    ? 0 : subtarget), 0);
5667 
5668       if (STORE_FLAG_VALUE == 1 || normalizep)
5669 	/* If we are supposed to produce a 0/1 value, we want to do
5670 	   a logical shift from the sign bit to the low-order bit; for
5671 	   a -1/0 value, we do an arithmetic shift.  */
5672 	op0 = expand_shift (RSHIFT_EXPR, int_mode, op0,
5673 			    GET_MODE_BITSIZE (int_mode) - 1,
5674 			    subtarget, normalizep != -1);
5675 
5676       if (int_mode != int_target_mode)
5677 	op0 = convert_modes (int_target_mode, int_mode, op0, 0);
5678 
5679       return op0;
5680     }
5681 
5682   mclass = GET_MODE_CLASS (mode);
5683   FOR_EACH_MODE_FROM (compare_mode, mode)
5684     {
5685       machine_mode optab_mode = mclass == MODE_CC ? CCmode : compare_mode;
5686       icode = optab_handler (cstore_optab, optab_mode);
5687       if (icode != CODE_FOR_nothing)
5688 	{
5689 	  do_pending_stack_adjust ();
5690 	  rtx tem = emit_cstore (target, icode, code, mode, compare_mode,
5691 				 unsignedp, op0, op1, normalizep, target_mode);
5692 	  if (tem)
5693 	    return tem;
5694 
5695 	  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5696 	    {
5697 	      tem = emit_cstore (target, icode, scode, mode, compare_mode,
5698 				 unsignedp, op1, op0, normalizep, target_mode);
5699 	      if (tem)
5700 	        return tem;
5701 	    }
5702 	  break;
5703 	}
5704     }
5705 
5706   return 0;
5707 }
5708 
5709 /* Subroutine of emit_store_flag that handles cases in which the operands
5710    are scalar integers.  SUBTARGET is the target to use for temporary
5711    operations and TRUEVAL is the value to store when the condition is
5712    true.  All other arguments are as for emit_store_flag.  */
5713 
5714 rtx
5715 emit_store_flag_int (rtx target, rtx subtarget, enum rtx_code code, rtx op0,
5716 		     rtx op1, scalar_int_mode mode, int unsignedp,
5717 		     int normalizep, rtx trueval)
5718 {
5719   machine_mode target_mode = target ? GET_MODE (target) : VOIDmode;
5720   rtx_insn *last = get_last_insn ();
5721 
5722   /* If this is an equality comparison of integers, we can try to exclusive-or
5723      (or subtract) the two operands and use a recursive call to try the
5724      comparison with zero.  Don't do any of these cases if branches are
5725      very cheap.  */
5726 
5727   if ((code == EQ || code == NE) && op1 != const0_rtx)
5728     {
5729       rtx tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
5730 			      OPTAB_WIDEN);
5731 
5732       if (tem == 0)
5733 	tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
5734 			    OPTAB_WIDEN);
5735       if (tem != 0)
5736 	tem = emit_store_flag (target, code, tem, const0_rtx,
5737 			       mode, unsignedp, normalizep);
5738       if (tem != 0)
5739 	return tem;
5740 
5741       delete_insns_since (last);
5742     }
5743 
5744   /* For integer comparisons, try the reverse comparison.  However, for
5745      small X and if we'd have anyway to extend, implementing "X != 0"
5746      small X, and if we'd have to extend it anyway, implementing "X != 0"
5747   rtx_code rcode = reverse_condition (code);
5748   if (can_compare_p (rcode, mode, ccp_store_flag)
5749       && ! (optab_handler (cstore_optab, mode) == CODE_FOR_nothing
5750 	    && code == NE
5751 	    && GET_MODE_SIZE (mode) < UNITS_PER_WORD
5752 	    && op1 == const0_rtx))
5753     {
5754       int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
5755 		      || (STORE_FLAG_VALUE == -1 && normalizep == 1));
5756 
5757       /* Again, for the reverse comparison, use either an addition or a XOR.  */
5758       if (want_add
5759 	  && rtx_cost (GEN_INT (normalizep), mode, PLUS, 1,
5760 		       optimize_insn_for_speed_p ()) == 0)
5761 	{
5762 	  rtx tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5763 				       STORE_FLAG_VALUE, target_mode);
5764 	  if (tem != 0)
5765 	    tem = expand_binop (target_mode, add_optab, tem,
5766 				gen_int_mode (normalizep, target_mode),
5767 				target, 0, OPTAB_WIDEN);
5768 	  if (tem != 0)
5769 	    return tem;
5770 	}
5771       else if (!want_add
5772 	       && rtx_cost (trueval, mode, XOR, 1,
5773 			    optimize_insn_for_speed_p ()) == 0)
5774 	{
5775 	  rtx tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5776 				       normalizep, target_mode);
5777 	  if (tem != 0)
5778 	    tem = expand_binop (target_mode, xor_optab, tem, trueval, target,
5779 				INTVAL (trueval) >= 0, OPTAB_WIDEN);
5780 	  if (tem != 0)
5781 	    return tem;
5782 	}
5783 
5784       delete_insns_since (last);
5785     }
5786 
5787   /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
5788      the constant zero.  Reject all other comparisons at this point.  Only
5789      do LE and GT if branches are expensive since they are expensive on
5790      2-operand machines.  */
5791 
5792   if (op1 != const0_rtx
5793       || (code != EQ && code != NE
5794 	  && (BRANCH_COST (optimize_insn_for_speed_p (),
5795 			   false) <= 1 || (code != LE && code != GT))))
5796     return 0;
5797 
5798   /* Try to put the result of the comparison in the sign bit.  Assume we can't
5799      do the necessary operation below.  */
5800 
5801   rtx tem = 0;
5802 
5803   /* To see if A <= 0, compute (A | (A - 1)).  A <= 0 iff that result has
5804      the sign bit set.  */
5805 
5806   if (code == LE)
5807     {
5808       /* This is destructive, so SUBTARGET can't be OP0.  */
5809       if (rtx_equal_p (subtarget, op0))
5810 	subtarget = 0;
5811 
5812       tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
5813 			  OPTAB_WIDEN);
5814       if (tem)
5815 	tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
5816 			    OPTAB_WIDEN);
5817     }
5818 
5819   /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
5820      number of bits in the mode of OP0, minus one.  */
5821 
5822   if (code == GT)
5823     {
5824       if (rtx_equal_p (subtarget, op0))
5825 	subtarget = 0;
5826 
5827       tem = maybe_expand_shift (RSHIFT_EXPR, mode, op0,
5828 				GET_MODE_BITSIZE (mode) - 1,
5829 				subtarget, 0);
5830       if (tem)
5831 	tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
5832 			    OPTAB_WIDEN);
5833     }
5834 
5835   if (code == EQ || code == NE)
5836     {
5837       /* For EQ or NE, one way to do the comparison is to apply an operation
5838 	 that converts the operand into a positive number if it is nonzero
5839 	 or zero if it was originally zero.  Then, for EQ, we subtract 1 and
5840 	 for NE we negate.  This puts the result in the sign bit.  Then we
5841 	 normalize with a shift, if needed.
5842 
5843 	 Two operations that can do the above actions are ABS and FFS, so try
5844 	 them.  If that doesn't work, and MODE is smaller than a full word,
5845 	 we can use zero-extension to the wider mode (an unsigned conversion)
5846 	 as the operation.  */
5847 
5848       /* Note that ABS doesn't yield a positive number for INT_MIN, but
5849 	 that is compensated by the subsequent overflow when subtracting
5850 	 one / negating.  */
5851 
5852       if (optab_handler (abs_optab, mode) != CODE_FOR_nothing)
5853 	tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
5854       else if (optab_handler (ffs_optab, mode) != CODE_FOR_nothing)
5855 	tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
5856       else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5857 	{
5858 	  tem = convert_modes (word_mode, mode, op0, 1);
5859 	  mode = word_mode;
5860 	}
5861 
5862       if (tem != 0)
5863 	{
5864 	  if (code == EQ)
5865 	    tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
5866 				0, OPTAB_WIDEN);
5867 	  else
5868 	    tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
5869 	}
5870 
5871       /* If we couldn't do it that way, for NE we can "or" the two's complement
5872 	 of the value with itself.  For EQ, we take the one's complement of
5873 	 that "or", which is an extra insn, so we only handle EQ if branches
5874 	 are expensive.  */
5875 
5876       if (tem == 0
5877 	  && (code == NE
5878 	      || BRANCH_COST (optimize_insn_for_speed_p (),
5879 			      false) > 1))
5880 	{
5881 	  if (rtx_equal_p (subtarget, op0))
5882 	    subtarget = 0;
5883 
5884 	  tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
5885 	  tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
5886 			      OPTAB_WIDEN);
5887 
5888 	  if (tem && code == EQ)
5889 	    tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
5890 	}
5891     }
5892 
5893   if (tem && normalizep)
5894     tem = maybe_expand_shift (RSHIFT_EXPR, mode, tem,
5895 			      GET_MODE_BITSIZE (mode) - 1,
5896 			      subtarget, normalizep == 1);
5897 
5898   if (tem)
5899     {
5900       if (!target)
5901 	;
5902       else if (GET_MODE (tem) != target_mode)
5903 	{
5904 	  convert_move (target, tem, 0);
5905 	  tem = target;
5906 	}
5907       else if (!subtarget)
5908 	{
5909 	  emit_move_insn (target, tem);
5910 	  tem = target;
5911 	}
5912     }
5913   else
5914     delete_insns_since (last);
5915 
5916   return tem;
5917 }
5918 
5919 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
5920    and storing in TARGET.  Normally return TARGET.
5921    Return 0 if that cannot be done.
5922 
5923    MODE is the mode to use for OP0 and OP1 should they be CONST_INTs.  If
5924    it is VOIDmode, they cannot both be CONST_INT.
5925 
5926    UNSIGNEDP is for the case where we have to widen the operands
5927    to perform the operation.  It says to use zero-extension.
5928 
5929    NORMALIZEP is 1 if we should convert the result to be either zero
5930    or one.  NORMALIZEP is -1 if we should convert the result to be
5931    either zero or -1.  If NORMALIZEP is zero, the result will be left
5932    "raw" out of the scc insn.  */
5933 
5934 rtx
5935 emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
5936 		 machine_mode mode, int unsignedp, int normalizep)
5937 {
5938   machine_mode target_mode = target ? GET_MODE (target) : VOIDmode;
5939   enum rtx_code rcode;
5940   rtx subtarget;
5941   rtx tem, trueval;
5942   rtx_insn *last;
5943 
5944   /* If we compare constants, we shouldn't use a store-flag operation,
5945      but a constant load.  We can get there via the vanilla route that
5946      usually generates a compare-branch sequence, but will in this case
5947      fold the comparison to a constant, and thus elide the branch.  */
5948   if (CONSTANT_P (op0) && CONSTANT_P (op1))
5949     return NULL_RTX;
5950 
5951   tem = emit_store_flag_1 (target, code, op0, op1, mode, unsignedp, normalizep,
5952 			   target_mode);
5953   if (tem)
5954     return tem;
5955 
5956   /* If we reached here, we can't do this with a scc insn, however there
5957      are some comparisons that can be done in other ways.  Don't do any
5958      of these cases if branches are very cheap.  */
5959   if (BRANCH_COST (optimize_insn_for_speed_p (), false) == 0)
5960     return 0;
5961 
5962   /* See what we need to return.  We can only return a 1, -1, or the
5963      sign bit.  */
5964 
5965   if (normalizep == 0)
5966     {
5967       if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
5968 	normalizep = STORE_FLAG_VALUE;
5969 
5970       else if (val_signbit_p (mode, STORE_FLAG_VALUE))
5971 	;
5972       else
5973 	return 0;
5974     }
5975 
5976   last = get_last_insn ();
5977 
5978   /* If optimizing, use different pseudo registers for each insn, instead
5979      of reusing the same pseudo.  This leads to better CSE, but slows
5980      down the compiler, since there are more pseudos.  */
5981   subtarget = (!optimize
5982 	       && (target_mode == mode)) ? target : NULL_RTX;
5983   trueval = GEN_INT (normalizep ? normalizep : STORE_FLAG_VALUE);
5984 
5985   /* For floating-point comparisons, try the reverse comparison or try
5986      changing the "orderedness" of the comparison.  */
5987   if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5988     {
5989       enum rtx_code first_code;
5990       bool and_them;
5991 
5992       rcode = reverse_condition_maybe_unordered (code);
5993       if (can_compare_p (rcode, mode, ccp_store_flag)
5994 	  && (code == ORDERED || code == UNORDERED
5995 	      || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
5996 	      || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
5997 	{
5998 	  int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
5999 			  || (STORE_FLAG_VALUE == -1 && normalizep == 1));
6000 
6001 	  /* For the reverse comparison, use either an addition or a XOR.  */
6002 	  if (want_add
6003 	      && rtx_cost (GEN_INT (normalizep), mode, PLUS, 1,
6004 			   optimize_insn_for_speed_p ()) == 0)
6005 	    {
6006 	      tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
6007 				       STORE_FLAG_VALUE, target_mode);
6008 	      if (tem)
6009 		return expand_binop (target_mode, add_optab, tem,
6010 				     gen_int_mode (normalizep, target_mode),
6011 				     target, 0, OPTAB_WIDEN);
6012 	    }
6013 	  else if (!want_add
6014 		   && rtx_cost (trueval, mode, XOR, 1,
6015 				optimize_insn_for_speed_p ()) == 0)
6016 	    {
6017 	      tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
6018 				       normalizep, target_mode);
6019 	      if (tem)
6020 		return expand_binop (target_mode, xor_optab, tem, trueval,
6021 				     target, INTVAL (trueval) >= 0,
6022 				     OPTAB_WIDEN);
6023 	    }
6024 	}
6025 
6026       delete_insns_since (last);
6027 
6028       /* Cannot split ORDERED and UNORDERED, only try the above trick.  */
6029       if (code == ORDERED || code == UNORDERED)
6030 	return 0;
6031 
6032       and_them = split_comparison (code, mode, &first_code, &code);
6033 
6034       /* If there are no NaNs, the first comparison should always fall through.
6035 	 Effectively change the comparison to the other one.  */
6036       if (!HONOR_NANS (mode))
6037 	{
6038 	  gcc_assert (first_code == (and_them ? ORDERED : UNORDERED));
6039 	  return emit_store_flag_1 (target, code, op0, op1, mode, 0, normalizep,
6040 				    target_mode);
6041 	}
6042 
6043       if (!HAVE_conditional_move)
6044 	return 0;
6045 
6046       /* Do not turn a trapping comparison into a non-trapping one.  */
6047       if ((code != EQ && code != NE && code != UNEQ && code != LTGT)
6048 	  && flag_trapping_math)
6049 	return 0;
6050 
6051       /* Try using a setcc instruction for ORDERED/UNORDERED, followed by a
6052 	 conditional move.  */
6053       tem = emit_store_flag_1 (subtarget, first_code, op0, op1, mode, 0,
6054 			       normalizep, target_mode);
6055       if (tem == 0)
6056 	return 0;
6057 
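      /* Combine the two halves with a conditional move: when AND_THEM,
	 keep TEM (the FIRST_CODE result) only if the second comparison
	 also holds and use zero otherwise; when ORing, take TRUEVAL if
	 the second comparison holds and fall back to TEM.  */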
6058       if (and_them)
6059 	tem = emit_conditional_move (target, code, op0, op1, mode,
6060 				     tem, const0_rtx, GET_MODE (tem), 0);
6061       else
6062 	tem = emit_conditional_move (target, code, op0, op1, mode,
6063 				     trueval, tem, GET_MODE (tem), 0);
6064 
6065       if (tem == 0)
6066 	delete_insns_since (last);
6067       return tem;
6068     }
6069 
6070   /* The remaining tricks only apply to integer comparisons.  */
6071 
6072   scalar_int_mode int_mode;
6073   if (is_int_mode (mode, &int_mode))
6074     return emit_store_flag_int (target, subtarget, code, op0, op1, int_mode,
6075 				unsignedp, normalizep, trueval);
6076 
6077   return 0;
6078 }
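
/* Usage sketch (hypothetical caller, not part of this file): to expand
   "r = (a < b)" for signed SImode operands as a 0/1 value, a caller
   might try the store-flag expansion first and only then fall back to
   the always-succeeding variant defined below:

       rtx r = emit_store_flag (target, LT, a, b, SImode, 0, 1);
       if (r == 0)
	 r = emit_store_flag_force (target, LT, a, b, SImode, 0, 1);

   Here TARGET, A and B are pseudo registers supplied by the caller; a
   null return from emit_store_flag means no store-flag sequence was
   available or profitable, and the comparison should be expanded some
   other way (e.g. as a compare-and-branch).  */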
6079 
6080 /* Like emit_store_flag, but always succeeds: if no store-flag sequence
   can be emitted, fall back to a set/compare/jump/set sequence that sets
   TARGET explicitly.  */
6081 
6082 rtx
6083 emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
6084 		       machine_mode mode, int unsignedp, int normalizep)
6085 {
6086   rtx tem;
6087   rtx_code_label *label;
6088   rtx trueval, falseval;
6089 
6090   /* First see if emit_store_flag can do the job.  */
6091   tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
6092   if (tem != 0)
6093     return tem;
6094 
6095   /* If one operand is constant, make it the second one.  Only do this
6096      if the other operand is not constant as well.  */
6097   if (swap_commutative_operands_p (op0, op1))
6098     {
6099       std::swap (op0, op1);
6100       code = swap_condition (code);
6101     }
6102 
6103   if (mode == VOIDmode)
6104     mode = GET_MODE (op0);
6105 
6106   if (!target)
6107     target = gen_reg_rtx (word_mode);
6108 
6109   /* If emit_store_flag failed, we have to do this with set/compare/jump/set
6110      code.  For foo != 0, if foo is already in TARGET, just replace it with
       1 when it is nonzero.  */
6111   trueval = normalizep ? GEN_INT (normalizep) : const1_rtx;
6112   if (code == NE
6113       && GET_MODE_CLASS (mode) == MODE_INT
6114       && REG_P (target)
6115       && op0 == target
6116       && op1 == const0_rtx)
6117     {
6118       label = gen_label_rtx ();
6119       do_compare_rtx_and_jump (target, const0_rtx, EQ, unsignedp, mode,
6120 			       NULL_RTX, NULL, label,
6121 			       profile_probability::uninitialized ());
6122       emit_move_insn (target, trueval);
6123       emit_label (label);
6124       return target;
6125     }
6126 
6127   if (!REG_P (target)
6128       || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
6129     target = gen_reg_rtx (GET_MODE (target));
6130 
6131   /* Jump in the right direction if the target cannot implement CODE
6132      but can jump on its reverse condition.  */
6133   falseval = const0_rtx;
6134   if (! can_compare_p (code, mode, ccp_jump)
6135       && (! FLOAT_MODE_P (mode)
6136           || code == ORDERED || code == UNORDERED
6137           || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
6138           || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
6139     {
6140       enum rtx_code rcode;
6141       if (FLOAT_MODE_P (mode))
6142         rcode = reverse_condition_maybe_unordered (code);
6143       else
6144         rcode = reverse_condition (code);
6145 
6146       /* Canonicalize to UNORDERED for the libcall.  */
6147       if (can_compare_p (rcode, mode, ccp_jump)
6148           || (code == ORDERED && ! can_compare_p (ORDERED, mode, ccp_jump)))
6149 	{
6150 	  falseval = trueval;
6151 	  trueval = const0_rtx;
6152 	  code = rcode;
6153 	}
6154     }
6155 
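  /* Fall back to the set/compare/jump/set sequence: set TARGET to
     TRUEVAL, branch past the following move when OP0 CODE OP1 holds,
     and otherwise overwrite TARGET with FALSEVAL.  */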
6156   emit_move_insn (target, trueval);
6157   label = gen_label_rtx ();
6158   do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX, NULL,
6159 			   label, profile_probability::uninitialized ());
6160 
6161   emit_move_insn (target, falseval);
6162   emit_label (label);
6163 
6164   return target;
6165 }
6166 
6167 /* Perform a possibly multi-word comparison and conditional jump to LABEL
6168    if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.  This is
6169    now a thin wrapper around do_compare_rtx_and_jump.  */
6170 
6171 static void
6172 do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, machine_mode mode,
6173 		 rtx_code_label *label)
6174 {
6175   int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
6176   do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode, NULL_RTX,
6177 			   NULL, label, profile_probability::uninitialized ());
6178 }
6179