/* IR-agnostic target query functions relating to optabs
   Copyright (C) 1987-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "insn-codes.h"
#include "optabs-query.h"
#include "optabs-libfuncs.h"
#include "insn-config.h"
#include "rtl.h"
#include "recog.h"
#include "vec-perm-indices.h"

struct target_optabs default_target_optabs;
struct target_optabs *this_fn_optabs = &default_target_optabs;
#if SWITCHABLE_TARGET
struct target_optabs *this_target_optabs = &default_target_optabs;
#endif

/* Return the insn used to perform conversion OPTAB from mode FROM_MODE
   to mode TO_MODE; return CODE_FOR_nothing if the target does not have
   such an insn, or if it is unsuitable for optimization type OPT_TYPE.  */

insn_code
convert_optab_handler (convert_optab optab, machine_mode to_mode,
		       machine_mode from_mode, optimization_type opt_type)
{
  insn_code icode = convert_optab_handler (optab, to_mode, from_mode);
  if (icode == CODE_FOR_nothing
      || !targetm.optab_supported_p (optab, to_mode, from_mode, opt_type))
    return CODE_FOR_nothing;
  return icode;
}

/* Return the insn used to implement mode MODE of OPTAB; return
   CODE_FOR_nothing if the target does not have such an insn,
   or if it is unsuitable for optimization type OPT_TYPE.  */

insn_code
direct_optab_handler (direct_optab optab, machine_mode mode,
		      optimization_type opt_type)
{
  insn_code icode = direct_optab_handler (optab, mode);
  if (icode == CODE_FOR_nothing
      || !targetm.optab_supported_p (optab, mode, mode, opt_type))
    return CODE_FOR_nothing;
  return icode;
}
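
/* Illustrative sketch (not called anywhere in GCC): a pass that wants a
   conversion only when it is suitable for the current optimization goal
   might query the OPT_TYPE-aware handler like this; the modes chosen
   here are arbitrary examples:

     insn_code icode
       = convert_optab_handler (sext_optab, DImode, SImode,
				OPTIMIZE_FOR_SPEED);
     if (icode != CODE_FOR_nothing)
       ...emit the sign-extension pattern...

   OPTIMIZE_FOR_SPEED and OPTIMIZE_FOR_SIZE are the two values of
   optimization_type; targetm.optab_supported_p lets the target veto a
   handler for one of them.  */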

/* Enumerates the possible types of structure operand to an
   extraction_insn.  */
enum extraction_type { ET_unaligned_mem, ET_reg };

/* Check whether insv, extv or extzv pattern ICODE can be used for an
   insertion or extraction of type TYPE on a structure of mode MODE.
   Return true if so and fill in *INSN accordingly.  STRUCT_OP is the
   operand number of the structure (the first sign_extract or zero_extract
   operand) and FIELD_OP is the operand number of the field (the other
   side of the set from the sign_extract or zero_extract).  */

static bool
get_traditional_extraction_insn (extraction_insn *insn,
				 enum extraction_type type,
				 machine_mode mode,
				 enum insn_code icode,
				 int struct_op, int field_op)
{
  const struct insn_data_d *data = &insn_data[icode];

  machine_mode struct_mode = data->operand[struct_op].mode;
  if (struct_mode == VOIDmode)
    struct_mode = word_mode;
  if (mode != struct_mode)
    return false;

  machine_mode field_mode = data->operand[field_op].mode;
  if (field_mode == VOIDmode)
    field_mode = word_mode;

  machine_mode pos_mode = data->operand[struct_op + 2].mode;
  if (pos_mode == VOIDmode)
    pos_mode = word_mode;

  insn->icode = icode;
  insn->field_mode = as_a <scalar_int_mode> (field_mode);
  if (type == ET_unaligned_mem)
    insn->struct_mode = byte_mode;
  else if (struct_mode == BLKmode)
    insn->struct_mode = opt_scalar_int_mode ();
  else
    insn->struct_mode = as_a <scalar_int_mode> (struct_mode);
  insn->pos_mode = as_a <scalar_int_mode> (pos_mode);
  return true;
}

/* Return true if an optab exists to perform an insertion or extraction
   of type TYPE in mode MODE.  Describe the instruction in *INSN if so.

   REG_OPTAB is the optab to use for register structures and
   MISALIGN_OPTAB is the optab to use for misaligned memory structures.
   POS_OP is the operand number of the bit position.  */

static bool
get_optab_extraction_insn (struct extraction_insn *insn,
			   enum extraction_type type,
			   machine_mode mode, direct_optab reg_optab,
			   direct_optab misalign_optab, int pos_op)
{
  direct_optab optab = (type == ET_unaligned_mem ? misalign_optab : reg_optab);
  enum insn_code icode = direct_optab_handler (optab, mode);
  if (icode == CODE_FOR_nothing)
    return false;

  const struct insn_data_d *data = &insn_data[icode];

  machine_mode pos_mode = data->operand[pos_op].mode;
  if (pos_mode == VOIDmode)
    pos_mode = word_mode;

  insn->icode = icode;
  insn->field_mode = as_a <scalar_int_mode> (mode);
  if (type == ET_unaligned_mem)
    insn->struct_mode = opt_scalar_int_mode ();
  else
    insn->struct_mode = insn->field_mode;
  insn->pos_mode = as_a <scalar_int_mode> (pos_mode);
  return true;
}

/* Return true if an instruction exists to perform an insertion or
   extraction (PATTERN says which) of type TYPE in mode MODE.
   Describe the instruction in *INSN if so.  */

static bool
get_extraction_insn (extraction_insn *insn,
		     enum extraction_pattern pattern,
		     enum extraction_type type,
		     machine_mode mode)
{
  switch (pattern)
    {
    case EP_insv:
      if (targetm.have_insv ()
	  && get_traditional_extraction_insn (insn, type, mode,
					      targetm.code_for_insv, 0, 3))
	return true;
      return get_optab_extraction_insn (insn, type, mode, insv_optab,
					insvmisalign_optab, 2);

    case EP_extv:
      if (targetm.have_extv ()
	  && get_traditional_extraction_insn (insn, type, mode,
					      targetm.code_for_extv, 1, 0))
	return true;
      return get_optab_extraction_insn (insn, type, mode, extv_optab,
					extvmisalign_optab, 3);

    case EP_extzv:
      if (targetm.have_extzv ()
	  && get_traditional_extraction_insn (insn, type, mode,
					      targetm.code_for_extzv, 1, 0))
	return true;
      return get_optab_extraction_insn (insn, type, mode, extzv_optab,
					extzvmisalign_optab, 3);

    default:
      gcc_unreachable ();
    }
}

/* Return true if an instruction exists to access a field of mode
   FIELD_MODE in a structure that has STRUCT_BITS significant bits.
   Describe the "best" such instruction in *INSN if so.  PATTERN and
   TYPE describe the type of insertion or extraction we want to perform.

   For an insertion, the number of significant structure bits includes
   all bits of the target.  For an extraction, it need only include the
   most significant bit of the field.  Larger widths are acceptable
   in both cases.  */

static bool
get_best_extraction_insn (extraction_insn *insn,
			  enum extraction_pattern pattern,
			  enum extraction_type type,
			  unsigned HOST_WIDE_INT struct_bits,
			  machine_mode field_mode)
{
  opt_scalar_int_mode mode_iter;
  FOR_EACH_MODE_FROM (mode_iter, smallest_int_mode_for_size (struct_bits))
    {
      scalar_int_mode mode = mode_iter.require ();
      if (get_extraction_insn (insn, pattern, type, mode))
	{
	  FOR_EACH_MODE_FROM (mode_iter, mode)
	    {
	      mode = mode_iter.require ();
	      if (maybe_gt (GET_MODE_SIZE (mode), GET_MODE_SIZE (field_mode))
		  || TRULY_NOOP_TRUNCATION_MODES_P (insn->field_mode,
						    field_mode))
		break;
	      get_extraction_insn (insn, pattern, type, mode);
	    }
	  return true;
	}
    }
  return false;
}

/* Return true if an instruction exists to access a field of mode
   FIELD_MODE in a register structure that has STRUCT_BITS significant
   bits.  Describe the "best" such instruction in *INSN if so.  PATTERN
   describes the type of insertion or extraction we want to perform.

   For an insertion, the number of significant structure bits includes
   all bits of the target.  For an extraction, it need only include the
   most significant bit of the field.  Larger widths are acceptable
   in both cases.  */

bool
get_best_reg_extraction_insn (extraction_insn *insn,
			      enum extraction_pattern pattern,
			      unsigned HOST_WIDE_INT struct_bits,
			      machine_mode field_mode)
{
  return get_best_extraction_insn (insn, pattern, ET_reg, struct_bits,
				   field_mode);
}

/* Return true if an instruction exists to access a field of BITSIZE
   bits starting BITNUM bits into a memory structure.  Describe the
   "best" such instruction in *INSN if so.  PATTERN describes the type
   of insertion or extraction we want to perform and FIELD_MODE is the
   natural mode of the extracted field.

   The instructions considered here only access bytes that overlap
   the bitfield; they do not touch any surrounding bytes.  */

bool
get_best_mem_extraction_insn (extraction_insn *insn,
			      enum extraction_pattern pattern,
			      HOST_WIDE_INT bitsize, HOST_WIDE_INT bitnum,
			      machine_mode field_mode)
{
  unsigned HOST_WIDE_INT struct_bits = (bitnum % BITS_PER_UNIT
					+ bitsize
					+ BITS_PER_UNIT - 1);
  struct_bits -= struct_bits % BITS_PER_UNIT;
  return get_best_extraction_insn (insn, pattern, ET_unaligned_mem,
				   struct_bits, field_mode);
}
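
/* A worked example of the STRUCT_BITS computation above (illustrative
   only, assuming BITS_PER_UNIT == 8): for BITSIZE == 5 and BITNUM == 10
   the field occupies bits 2..6 of its first byte, so

     struct_bits = 10 % 8 + 5 + 7 = 14;  14 - 14 % 8 = 8

   i.e. the field fits in a single byte, and the byte-rounded width of
   the bytes overlapping the field is what gets passed down as
   STRUCT_BITS.  */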

/* Return the insn code used to extend FROM_MODE to TO_MODE.
   UNSIGNEDP specifies zero-extension instead of sign-extension; a
   negative UNSIGNEDP requests a pointer extension instead.  If no
   such operation exists, CODE_FOR_nothing will be returned.  */

enum insn_code
can_extend_p (machine_mode to_mode, machine_mode from_mode,
	      int unsignedp)
{
  if (unsignedp < 0 && targetm.have_ptr_extend ())
    return targetm.code_for_ptr_extend;

  convert_optab tab = unsignedp ? zext_optab : sext_optab;
  return convert_optab_handler (tab, to_mode, from_mode);
}
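
/* Illustrative sketch (not GCC code): a caller that wants to know
   whether the target can zero-extend SImode to DImode directly might
   write:

     if (can_extend_p (DImode, SImode, 1) != CODE_FOR_nothing)
       ...use the zero_extendsidi2 pattern...

   The pattern name shown is the conventional optab pattern name;
   whether it exists depends on the target.  */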

/* Return the insn code to convert fixed-point mode FIXMODE to floating-point
   mode FLTMODE, or CODE_FOR_nothing if no such instruction exists.
   UNSIGNEDP specifies whether FIXMODE is unsigned.  */

enum insn_code
can_float_p (machine_mode fltmode, machine_mode fixmode,
	     int unsignedp)
{
  convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
  return convert_optab_handler (tab, fltmode, fixmode);
}

/* Return the insn code to convert floating-point mode FLTMODE to fixed-point
   mode FIXMODE, or CODE_FOR_nothing if no such instruction exists.
   UNSIGNEDP specifies whether FIXMODE is unsigned.

   On a successful return, set *TRUNCP_PTR to true if it is necessary to
   output an explicit FTRUNC before the instruction, and to false
   otherwise.  */

enum insn_code
can_fix_p (machine_mode fixmode, machine_mode fltmode,
	   int unsignedp, bool *truncp_ptr)
{
  convert_optab tab;
  enum insn_code icode;

  tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
  icode = convert_optab_handler (tab, fixmode, fltmode);
  if (icode != CODE_FOR_nothing)
    {
      *truncp_ptr = false;
      return icode;
    }

  /* FIXME: This requires the port to define both FIX and FTRUNC patterns
     in order to work.  We need to rework the fix* and ftrunc* patterns
     and documentation.  */
  tab = unsignedp ? ufix_optab : sfix_optab;
  icode = convert_optab_handler (tab, fixmode, fltmode);
  if (icode != CODE_FOR_nothing
      && optab_handler (ftrunc_optab, fltmode) != CODE_FOR_nothing)
    {
      *truncp_ptr = true;
      return icode;
    }

  return CODE_FOR_nothing;
}
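
/* Illustrative sketch (not GCC code): a caller converting DFmode to
   signed SImode checks for a truncating fix and honors the
   *TRUNCP_PTR flag:

     bool must_trunc;
     enum insn_code icode = can_fix_p (SImode, DFmode, 0, &must_trunc);
     if (icode != CODE_FOR_nothing)
       {
	 if (must_trunc)
	   ...emit an FTRUNC on the input first...
	 ...then emit the fix pattern ICODE...
       }

   The real caller of this interface is expand_fix in optabs.c.  */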

/* Return true if a conditional move of mode MODE is supported.

   This function is for combine so it can tell whether an insn that looks
   like a conditional move is actually supported by the hardware.  If we
   guess wrong we lose a bit on optimization, but that's it.  */
/* ??? sparc64 supports conditionally moving integer values based on fp
   comparisons, and vice versa.  How do we handle them?  */

bool
can_conditionally_move_p (machine_mode mode)
{
  return direct_optab_handler (movcc_optab, mode) != CODE_FOR_nothing;
}

/* If a target doesn't implement a permute on a vector with multibyte
   elements, we can try to do the same permute on byte elements.
   If this makes sense for vector mode MODE then return the appropriate
   byte vector mode.  */

opt_machine_mode
qimode_for_vec_perm (machine_mode mode)
{
  machine_mode qimode;
  if (GET_MODE_INNER (mode) != QImode
      && mode_for_vector (QImode, GET_MODE_SIZE (mode)).exists (&qimode)
      && VECTOR_MODE_P (qimode))
    return qimode;
  return opt_machine_mode ();
}
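
/* For example (illustrative, target-dependent): on a target with
   16-byte vectors, qimode_for_vec_perm (V4SImode) yields V16QImode,
   since a permute of four 4-byte lanes can be rewritten as a permute
   of sixteen byte lanes.  For V16QImode itself it yields nothing,
   because the inner mode is already QImode.  */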

/* Return true if selector SEL can be represented in the integer
   equivalent of vector mode MODE.  */

bool
selector_fits_mode_p (machine_mode mode, const vec_perm_indices &sel)
{
  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (GET_MODE_INNER (mode));
  return (mask == HOST_WIDE_INT_M1U
	  || sel.all_in_range_p (0, mask + 1));
}
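
/* Worked example (illustrative): for a vector mode with QImode
   elements, GET_MODE_MASK gives 0xff, so a selector fits only if every
   index is in [0, 256).  This matters for the QImode fallback used by
   can_vec_perm_var_p below: an index into a two-vector byte permute
   must itself be representable in a byte.  */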

/* Return true if VEC_PERM_EXPRs with variable selector operands can be
   expanded using SIMD extensions of the CPU.  MODE is the mode of the
   vectors being permuted.  */

bool
can_vec_perm_var_p (machine_mode mode)
{
  /* If the target doesn't implement a vector mode for the vector type,
     then no operations are supported.  */
  if (!VECTOR_MODE_P (mode))
    return false;

  if (direct_optab_handler (vec_perm_optab, mode) != CODE_FOR_nothing)
    return true;

  /* We allow fallback to a QI vector mode, and adjust the mask.  */
  machine_mode qimode;
  if (!qimode_for_vec_perm (mode).exists (&qimode)
      || maybe_gt (GET_MODE_NUNITS (qimode), GET_MODE_MASK (QImode) + 1))
    return false;

  if (direct_optab_handler (vec_perm_optab, qimode) == CODE_FOR_nothing)
    return false;

  /* In order to support the lowering of variable permutations,
     we need to support shifts and adds.  */
  if (GET_MODE_UNIT_SIZE (mode) > 2
      && optab_handler (ashl_optab, mode) == CODE_FOR_nothing
      && optab_handler (vashl_optab, mode) == CODE_FOR_nothing)
    return false;
  if (optab_handler (add_optab, qimode) == CODE_FOR_nothing)
    return false;

  return true;
}

/* Return true if the target directly supports VEC_PERM_EXPRs on vectors
   of mode MODE using the selector SEL.  ALLOW_VARIABLE_P is true if it
   is acceptable to force the selector into a register and use a variable
   permute (if the target supports that).

   Note that additional permutations representing whole-vector shifts may
   also be handled via the vec_shr optab, but only where the second input
   vector is entirely constant zeroes; this case is not dealt with here.  */

bool
can_vec_perm_const_p (machine_mode mode, const vec_perm_indices &sel,
		      bool allow_variable_p)
{
  /* If the target doesn't implement a vector mode for the vector type,
     then no operations are supported.  */
  if (!VECTOR_MODE_P (mode))
    return false;

  /* It's probably cheaper to test for the variable case first.  */
  if (allow_variable_p && selector_fits_mode_p (mode, sel))
    {
      if (direct_optab_handler (vec_perm_optab, mode) != CODE_FOR_nothing)
	return true;

      /* Unlike can_vec_perm_var_p, we don't need to test for optabs
	 related to computing the QImode selector, since that happens
	 at compile time.  */
      machine_mode qimode;
      if (qimode_for_vec_perm (mode).exists (&qimode))
	{
	  vec_perm_indices qimode_indices;
	  qimode_indices.new_expanded_vector (sel, GET_MODE_UNIT_SIZE (mode));
	  if (selector_fits_mode_p (qimode, qimode_indices)
	      && (direct_optab_handler (vec_perm_optab, qimode)
		  != CODE_FOR_nothing))
	    return true;
	}
    }

  if (targetm.vectorize.vec_perm_const != NULL)
    {
      if (targetm.vectorize.vec_perm_const (mode, NULL_RTX, NULL_RTX,
					    NULL_RTX, sel))
	return true;

      /* ??? For completeness, we ought to check the QImode version of
	 vec_perm_const_optab.  But all users of this implicit lowering
	 feature implement the variable vec_perm_optab, and the ia64
	 port specifically doesn't want us to lower V2SF operations
	 into integer operations.  */
    }

  return false;
}
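
/* Illustrative sketch (not GCC code): testing whether the target can
   reverse a V4SImode vector with a constant permute.  The selector
   {3, 2, 1, 0} indexes into the (single) input vector:

     vec_perm_builder builder (4, 4, 1);
     for (unsigned int i = 0; i < 4; ++i)
       builder.quick_push (3 - i);
     vec_perm_indices indices (builder, 1, 4);
     bool ok = can_vec_perm_const_p (V4SImode, indices);

   The declaration in optabs-query.h defaults ALLOW_VARIABLE_P to true,
   so this also accepts targets that only provide a variable permute.  */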

/* Find a widening optab even if it doesn't widen as much as we want.
   E.g. if from_mode is HImode, and to_mode is DImode, and there is no
   direct HI->DI insn, then return the SI->DI insn, if that exists.  */

enum insn_code
find_widening_optab_handler_and_mode (optab op, machine_mode to_mode,
				      machine_mode from_mode,
				      machine_mode *found_mode)
{
  machine_mode limit_mode = to_mode;
  if (is_a <scalar_int_mode> (from_mode))
    {
      gcc_checking_assert (is_a <scalar_int_mode> (to_mode)
			   && known_lt (GET_MODE_PRECISION (from_mode),
					GET_MODE_PRECISION (to_mode)));
      /* The modes after FROM_MODE are all MODE_INT, so the only
	 MODE_PARTIAL_INT mode we consider is FROM_MODE itself.
	 If LIMIT_MODE is MODE_PARTIAL_INT, stop at the containing
	 MODE_INT.  */
      if (GET_MODE_CLASS (limit_mode) == MODE_PARTIAL_INT)
	limit_mode = GET_MODE_WIDER_MODE (limit_mode).require ();
    }
  else
    gcc_checking_assert (GET_MODE_CLASS (from_mode) == GET_MODE_CLASS (to_mode)
			 && from_mode < to_mode);
  FOR_EACH_MODE (from_mode, from_mode, limit_mode)
    {
      enum insn_code handler = convert_optab_handler (op, to_mode, from_mode);

      if (handler != CODE_FOR_nothing)
	{
	  if (found_mode)
	    *found_mode = from_mode;
	  return handler;
	}
    }

  return CODE_FOR_nothing;
}
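
/* Worked example (illustrative): suppose OP is smul_widen_optab,
   FROM_MODE is HImode and TO_MODE is DImode.  The loop first queries
   the HI->DI handler; if the target only defines mulsidi3, that query
   fails and the SI->DI handler is returned instead, with *FOUND_MODE
   set to SImode so that the caller knows to widen the inputs to SImode
   first.  */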

/* Return non-zero if a highpart multiply is supported or can be
   synthesized.  For the benefit of expand_mult_highpart, the return
   value is 1 for direct, 2 for even/odd widening, and 3 for hi/lo
   widening.  */

int
can_mult_highpart_p (machine_mode mode, bool uns_p)
{
  optab op;

  op = uns_p ? umul_highpart_optab : smul_highpart_optab;
  if (optab_handler (op, mode) != CODE_FOR_nothing)
    return 1;

  /* If the mode is an integral vector, synth from widening operations.  */
  if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
    return 0;

  poly_int64 nunits = GET_MODE_NUNITS (mode);

  op = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
  if (optab_handler (op, mode) != CODE_FOR_nothing)
    {
      op = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
      if (optab_handler (op, mode) != CODE_FOR_nothing)
	{
	  /* The encoding has 2 interleaved stepped patterns.  */
	  vec_perm_builder sel (nunits, 2, 3);
	  for (unsigned int i = 0; i < 6; ++i)
	    sel.quick_push (!BYTES_BIG_ENDIAN
			    + (i & ~1)
			    + ((i & 1) ? nunits : 0));
	  vec_perm_indices indices (sel, 2, nunits);
	  if (can_vec_perm_const_p (mode, indices))
	    return 2;
	}
    }

  op = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
  if (optab_handler (op, mode) != CODE_FOR_nothing)
    {
      op = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
      if (optab_handler (op, mode) != CODE_FOR_nothing)
	{
	  /* The encoding has a single stepped pattern.  */
	  vec_perm_builder sel (nunits, 1, 3);
	  for (unsigned int i = 0; i < 3; ++i)
	    sel.quick_push (2 * i + (BYTES_BIG_ENDIAN ? 0 : 1));
	  vec_perm_indices indices (sel, 2, nunits);
	  if (can_vec_perm_const_p (mode, indices))
	    return 3;
	}
    }

  return 0;
}
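
/* Worked example of the selectors above (illustrative, little-endian,
   V4SImode, so NUNITS == 4): the even/odd case expands to the selector
   {1, 5, 3, 7}, picking the high SImode half of each DImode product
   from the even-lane and odd-lane widening multiplies; the hi/lo case
   expands to {1, 3, 5, 7}, picking the high SImode half of each of the
   four products across the lo-part and hi-part widening results.
   expand_mult_highpart in optabs.c emits the corresponding permute.  */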

/* Return true if the target supports a masked vector load/store for MODE.  */

bool
can_vec_mask_load_store_p (machine_mode mode,
			   machine_mode mask_mode,
			   bool is_load)
{
  optab op = is_load ? maskload_optab : maskstore_optab;
  machine_mode vmode;

  /* If MODE is a vector mode, check it directly.  */
  if (VECTOR_MODE_P (mode))
    return convert_optab_handler (op, mode, mask_mode) != CODE_FOR_nothing;

  /* Otherwise, return true if there is some vector mode for which the
     masked load/store is supported.  */

  /* See if there is any chance the mask load or store might be
     vectorized.  If not, punt.  */
  scalar_mode smode;
  if (!is_a <scalar_mode> (mode, &smode))
    return false;

  vmode = targetm.vectorize.preferred_simd_mode (smode);
  if (!VECTOR_MODE_P (vmode))
    return false;

  if ((targetm.vectorize.get_mask_mode
       (GET_MODE_NUNITS (vmode), GET_MODE_SIZE (vmode)).exists (&mask_mode))
      && convert_optab_handler (op, vmode, mask_mode) != CODE_FOR_nothing)
    return true;

  auto_vector_sizes vector_sizes;
  targetm.vectorize.autovectorize_vector_sizes (&vector_sizes);
  for (unsigned int i = 0; i < vector_sizes.length (); ++i)
    {
      poly_uint64 cur = vector_sizes[i];
      poly_uint64 nunits;
      if (!multiple_p (cur, GET_MODE_SIZE (smode), &nunits))
	continue;
      if (mode_for_vector (smode, nunits).exists (&vmode)
	  && VECTOR_MODE_P (vmode)
	  && targetm.vectorize.get_mask_mode (nunits, cur).exists (&mask_mode)
	  && convert_optab_handler (op, vmode, mask_mode) != CODE_FOR_nothing)
	return true;
    }
  return false;
}
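
/* Illustrative sketch (not GCC code): a caller can ask both questions.
   For a concrete vector mode it passes the mode pair directly:

     if (can_vec_mask_load_store_p (V8SFmode, V8SImode, true))
       ...generate a masked load...

   For a scalar mode such as SFmode it instead asks whether *any*
   vector mode could carry a masked access, using the fallback search
   above.  The mode pair shown here is an arbitrary example; the mask
   mode a target actually wants comes from
   targetm.vectorize.get_mask_mode.  */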

/* Return true if there is a compare_and_swap pattern.  */

bool
can_compare_and_swap_p (machine_mode mode, bool allow_libcall)
{
  enum insn_code icode;

  /* Check for __atomic_compare_and_swap.  */
  icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    return true;

  /* Check for __sync_compare_and_swap.  */
  icode = optab_handler (sync_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    return true;
  if (allow_libcall && optab_libfunc (sync_compare_and_swap_optab, mode))
    return true;

  /* No inline compare and swap.  */
  return false;
}

/* Return true if an atomic exchange can be performed.  */

bool
can_atomic_exchange_p (machine_mode mode, bool allow_libcall)
{
  enum insn_code icode;

  /* Check for __atomic_exchange.  */
  icode = direct_optab_handler (atomic_exchange_optab, mode);
  if (icode != CODE_FOR_nothing)
    return true;

  /* Don't check __sync_test_and_set, as on some platforms that
     has reduced functionality.  Targets that really do support
     a proper exchange should simply be updated to the __atomics.  */

  return can_compare_and_swap_p (mode, allow_libcall);
}

/* Return true if an atomic load can be performed without falling back to
   a compare-and-swap.  */

bool
can_atomic_load_p (machine_mode mode)
{
  enum insn_code icode;

  /* Does the target support the load directly?  */
  icode = direct_optab_handler (atomic_load_optab, mode);
  if (icode != CODE_FOR_nothing)
    return true;

  /* If the size of the object is greater than word size on this target,
     then we assume that a load will not be atomic.  Also see
     expand_atomic_load.  */
  return known_le (GET_MODE_PRECISION (mode), BITS_PER_WORD);
}
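
/* For example (illustrative): on a typical 64-bit target with no
   TImode atomic_load pattern, can_atomic_load_p (TImode) is false
   because a 128-bit object exceeds BITS_PER_WORD, whereas
   can_atomic_load_p (DImode) is true on the assumption that
   word-sized aligned loads are atomic.  */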

/* Determine whether "1 << x" is relatively cheap in word_mode.  */

bool
lshift_cheap_p (bool speed_p)
{
  /* FIXME: This should be made target dependent via the "this_target"
     mechanism, similar to e.g. can_copy_init_p in gcse.c.  */
  static bool init[2] = { false, false };
  static bool cheap[2] = { true, true };

  /* If the target has no lshift in word_mode, the operation will most
     probably not be cheap.  ??? Does GCC even work for such targets?  */
  if (optab_handler (ashl_optab, word_mode) == CODE_FOR_nothing)
    return false;

  if (!init[speed_p])
    {
      rtx reg = gen_raw_REG (word_mode, 10000);
      int cost = set_src_cost (gen_rtx_ASHIFT (word_mode, const1_rtx, reg),
			       word_mode, speed_p);
      cheap[speed_p] = cost < COSTS_N_INSNS (3);
      init[speed_p] = true;
    }

  return cheap[speed_p];
}

/* Return true if optab OP supports at least one mode.  */

static bool
supports_at_least_one_mode_p (optab op)
{
  for (int i = 0; i < NUM_MACHINE_MODES; ++i)
    if (direct_optab_handler (op, (machine_mode) i) != CODE_FOR_nothing)
      return true;

  return false;
}

/* Return true if vec_gather_load is available for at least one vector
   mode.  */

bool
supports_vec_gather_load_p ()
{
  if (this_fn_optabs->supports_vec_gather_load_cached)
    return this_fn_optabs->supports_vec_gather_load;

  this_fn_optabs->supports_vec_gather_load_cached = true;

  this_fn_optabs->supports_vec_gather_load
    = supports_at_least_one_mode_p (gather_load_optab);

  return this_fn_optabs->supports_vec_gather_load;
}

/* Return true if vec_scatter_store is available for at least one vector
   mode.  */

bool
supports_vec_scatter_store_p ()
{
  if (this_fn_optabs->supports_vec_scatter_store_cached)
    return this_fn_optabs->supports_vec_scatter_store;

  this_fn_optabs->supports_vec_scatter_store_cached = true;

  this_fn_optabs->supports_vec_scatter_store
    = supports_at_least_one_mode_p (scatter_store_optab);

  return this_fn_optabs->supports_vec_scatter_store;
}
746