;; Predicate definitions for IA-32 and x86-64.
;; Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
;; Free Software Foundation, Inc.
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.

;; Return true if OP is either an i387 or an SSE fp register.
(define_predicate "any_fp_register_operand"
  (and (match_code "reg")
       (match_test "ANY_FP_REGNO_P (REGNO (op))")))

;; Return true if OP is an i387 fp register.
(define_predicate "fp_register_operand"
  (and (match_code "reg")
       (match_test "FP_REGNO_P (REGNO (op))")))

;; Return true if OP is a non-fp register_operand.
(define_predicate "register_and_not_any_fp_reg_operand"
  (and (match_code "reg")
       (not (match_test "ANY_FP_REGNO_P (REGNO (op))"))))

;; Return true if OP is a register operand other than an i387 fp register.
(define_predicate "register_and_not_fp_reg_operand"
  (and (match_code "reg")
       (not (match_test "FP_REGNO_P (REGNO (op))"))))

;; True if the operand is an MMX register.
(define_predicate "mmx_reg_operand"
  (and (match_code "reg")
       (match_test "MMX_REGNO_P (REGNO (op))")))

;; True if the operand is an SSE register.
(define_predicate "sse_reg_operand"
  (and (match_code "reg")
       (match_test "SSE_REGNO_P (REGNO (op))")))

;; True if the operand is a Q_REGS class register.
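;; For illustration (assuming the usual i386 register numbering): on
;; 32-bit targets only %eax, %ebx, %ecx and %edx have addressable QImode
;; parts among the hard registers, while in 64-bit mode any general
;; register qualifies.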
(define_predicate "q_regs_operand"
  (match_operand 0 "register_operand")
{
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  return ANY_QI_REG_P (op);
})

;; Match a DImode (on 64-bit targets), SImode or HImode register for a
;; zero_extract.
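;; For illustration: such an operand typically appears inside a
;; zero_extract that accesses an %ah-style high-byte register, e.g.
;; (zero_extract:SI (reg:SI 0) (const_int 8) (const_int 8)), where the
;; hard register number 0 is illustrative.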
(define_special_predicate "ext_register_operand"
  (match_operand 0 "register_operand")
{
  if ((!TARGET_64BIT || GET_MODE (op) != DImode)
      && GET_MODE (op) != SImode && GET_MODE (op) != HImode)
    return false;
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  /* Be careful to accept only registers having upper parts.  */
  return (REG_P (op)
	  && (REGNO (op) > LAST_VIRTUAL_REGISTER || REGNO (op) <= BX_REG));
})

;; Return true if op is the AX register.
(define_predicate "ax_reg_operand"
  (and (match_code "reg")
       (match_test "REGNO (op) == AX_REG")))

;; Return true if op is the flags register.
(define_predicate "flags_reg_operand"
  (and (match_code "reg")
       (match_test "REGNO (op) == FLAGS_REG")))

;; Return true if op is one of the QImode registers: %[abcd][hl].
(define_predicate "QIreg_operand"
  (match_test "QI_REG_P (op)"))

;; Return true if op is a QImode register operand other than
;; %[abcd][hl].
(define_predicate "ext_QIreg_operand"
  (and (match_code "reg")
       (match_test "TARGET_64BIT")
       (match_test "REGNO (op) > BX_REG")))

;; Return true if op is not the xmm0 register.
(define_predicate "reg_not_xmm0_operand"
  (match_operand 0 "register_operand")
{
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  return !REG_P (op) || REGNO (op) != FIRST_SSE_REG;
})

;; As above, but also allow memory operands.
(define_predicate "nonimm_not_xmm0_operand"
  (ior (match_operand 0 "memory_operand")
       (match_operand 0 "reg_not_xmm0_operand")))

;; Return true if op is not the xmm0 register; on AVX targets any
;; register is allowed.
(define_predicate "reg_not_xmm0_operand_maybe_avx"
  (if_then_else (match_test "TARGET_AVX")
    (match_operand 0 "register_operand")
    (match_operand 0 "reg_not_xmm0_operand")))

;; As above, but also allow memory operands.
(define_predicate "nonimm_not_xmm0_operand_maybe_avx"
  (if_then_else (match_test "TARGET_AVX")
    (match_operand 0 "nonimmediate_operand")
    (match_operand 0 "nonimm_not_xmm0_operand")))

;; Return true if OP can be stored in a sign-extended immediate field.
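;; For illustration: in 64-bit mode (const_int -1) and
;; (const_int 2147483647) satisfy this predicate, while
;; (const_int 2147483648) does not, since the latter is not the sign
;; extension of any 32-bit value.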
(define_predicate "x86_64_immediate_operand"
  (match_code "const_int,symbol_ref,label_ref,const")
{
  if (!TARGET_64BIT)
    return immediate_operand (op, mode);

  switch (GET_CODE (op))
    {
    case CONST_INT:
      /* CONST_DOUBLEs never match, since HOST_BITS_PER_WIDE_INT is known
	 to be at least 32 and thus all acceptable constants are
	 represented as CONST_INT.  */
      if (HOST_BITS_PER_WIDE_INT == 32)
	return true;
      else
	{
	  HOST_WIDE_INT val = trunc_int_for_mode (INTVAL (op), DImode);
	  return trunc_int_for_mode (val, SImode) == val;
	}
      break;

    case SYMBOL_REF:
      /* For certain code models, the symbolic references are known to fit.
	 In the CM_SMALL_PIC model we know it fits if it is local to the
	 shared library.  Don't count TLS SYMBOL_REFs here, since they
	 should fit only inside an UNSPEC, handled below.  */
      /* TLS symbols are not constant.  */
      if (SYMBOL_REF_TLS_MODEL (op))
	return false;
      return (ix86_cmodel == CM_SMALL || ix86_cmodel == CM_KERNEL
	      || (ix86_cmodel == CM_MEDIUM && !SYMBOL_REF_FAR_ADDR_P (op)));

    case LABEL_REF:
      /* For certain code models, the code is near as well.  */
      return (ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM
	      || ix86_cmodel == CM_KERNEL);

    case CONST:
      /* We may also accept offsetted memory references in certain
	 special cases.  */
      if (GET_CODE (XEXP (op, 0)) == UNSPEC)
	switch (XINT (XEXP (op, 0), 1))
	  {
	  case UNSPEC_GOTPCREL:
	  case UNSPEC_DTPOFF:
	  case UNSPEC_GOTNTPOFF:
	  case UNSPEC_NTPOFF:
	    return true;
	  default:
	    break;
	  }

      if (GET_CODE (XEXP (op, 0)) == PLUS)
	{
	  rtx op1 = XEXP (XEXP (op, 0), 0);
	  rtx op2 = XEXP (XEXP (op, 0), 1);
	  HOST_WIDE_INT offset;

	  if (ix86_cmodel == CM_LARGE)
	    return false;
	  if (!CONST_INT_P (op2))
	    return false;
	  offset = trunc_int_for_mode (INTVAL (op2), DImode);
	  switch (GET_CODE (op1))
	    {
	    case SYMBOL_REF:
	      /* TLS symbols are not constant.  */
	      if (SYMBOL_REF_TLS_MODEL (op1))
		return false;
	      /* For CM_SMALL assume that the latest object is 16MB below
		 the end of the 31-bit boundary.  We may also accept fairly
		 large negative constants, knowing that all objects are in
		 the positive half of the address space.  */
	      if ((ix86_cmodel == CM_SMALL
		   || (ix86_cmodel == CM_MEDIUM
		       && !SYMBOL_REF_FAR_ADDR_P (op1)))
		  && offset < 16*1024*1024
		  && trunc_int_for_mode (offset, SImode) == offset)
		return true;
	      /* For CM_KERNEL we know that all objects reside in the
		 negative half of the 32-bit address space.  We must not
		 accept negative offsets, since they may push the address
		 just past the boundary, but we may accept fairly large
		 positive ones.  */
	      if (ix86_cmodel == CM_KERNEL
		  && offset > 0
		  && trunc_int_for_mode (offset, SImode) == offset)
		return true;
	      break;

	    case LABEL_REF:
	      /* These conditions are similar to SYMBOL_REF ones, just the
		 constraints for code models differ.  */
	      if ((ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM)
		  && offset < 16*1024*1024
		  && trunc_int_for_mode (offset, SImode) == offset)
		return true;
	      if (ix86_cmodel == CM_KERNEL
		  && offset > 0
		  && trunc_int_for_mode (offset, SImode) == offset)
		return true;
	      break;

	    case UNSPEC:
	      switch (XINT (op1, 1))
		{
		case UNSPEC_DTPOFF:
		case UNSPEC_NTPOFF:
		  if (offset > 0
		      && trunc_int_for_mode (offset, SImode) == offset)
		    return true;
		}
	      break;

	    default:
	      break;
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  return false;
})

;; Return true if OP can be stored in a zero-extended immediate field.
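;; For illustration: in 64-bit mode (const_int 4294967295) satisfies
;; this predicate, since it fits an unsigned 32-bit field, while
;; (const_int -1) does not, since its 64-bit value has the upper 32
;; bits set.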
(define_predicate "x86_64_zext_immediate_operand"
  (match_code "const_double,const_int,symbol_ref,label_ref,const")
{
  switch (GET_CODE (op))
    {
    case CONST_DOUBLE:
      if (HOST_BITS_PER_WIDE_INT == 32)
	return (GET_MODE (op) == VOIDmode && !CONST_DOUBLE_HIGH (op));
      else
	return false;

    case CONST_INT:
      if (HOST_BITS_PER_WIDE_INT == 32)
	return INTVAL (op) >= 0;
      else
	return !(INTVAL (op) & ~(HOST_WIDE_INT) 0xffffffff);

    case SYMBOL_REF:
      /* For certain code models, the symbolic references are known to fit.  */
      /* TLS symbols are not constant.  */
      if (SYMBOL_REF_TLS_MODEL (op))
	return false;
      return (ix86_cmodel == CM_SMALL
	      || (ix86_cmodel == CM_MEDIUM
		  && !SYMBOL_REF_FAR_ADDR_P (op)));

    case LABEL_REF:
      /* For certain code models, the code is near as well.  */
      return ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM;

    case CONST:
      /* We may also accept offsetted memory references in certain
	 special cases.  */
      if (GET_CODE (XEXP (op, 0)) == PLUS)
	{
	  rtx op1 = XEXP (XEXP (op, 0), 0);
	  rtx op2 = XEXP (XEXP (op, 0), 1);

	  if (ix86_cmodel == CM_LARGE)
	    return false;
	  switch (GET_CODE (op1))
	    {
	    case SYMBOL_REF:
	      /* TLS symbols are not constant.  */
	      if (SYMBOL_REF_TLS_MODEL (op1))
		return false;
	      /* For the small code model we may accept fairly large
		 positive offsets, since one bit is available for free.
		 Negative offsets are limited by the size of the NULL
		 pointer area specified by the ABI.  */
	      if ((ix86_cmodel == CM_SMALL
		   || (ix86_cmodel == CM_MEDIUM
		       && !SYMBOL_REF_FAR_ADDR_P (op1)))
		  && CONST_INT_P (op2)
		  && trunc_int_for_mode (INTVAL (op2), DImode) > -0x10000
		  && trunc_int_for_mode (INTVAL (op2), SImode) == INTVAL (op2))
		return true;
	      /* ??? For the kernel, we may accept adjustment of
		 -0x10000000, since we know that it will just convert
		 negative address space to positive, but perhaps this
		 is not worthwhile.  */
	      break;

	    case LABEL_REF:
	      /* These conditions are similar to SYMBOL_REF ones, just the
		 constraints for code models differ.  */
	      if ((ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM)
		  && CONST_INT_P (op2)
		  && trunc_int_for_mode (INTVAL (op2), DImode) > -0x10000
		  && trunc_int_for_mode (INTVAL (op2), SImode) == INTVAL (op2))
		return true;
	      break;

	    default:
	      return false;
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }
  return false;
})

;; Return true if OP is a general operand representable on x86_64.
(define_predicate "x86_64_general_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "nonimmediate_operand")
	 (match_operand 0 "x86_64_immediate_operand"))
    (match_operand 0 "general_operand")))

;; Return true if OP is a general operand representable on x86_64
;; as either a sign-extended or zero-extended constant.
(define_predicate "x86_64_szext_general_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "nonimmediate_operand")
	 (match_operand 0 "x86_64_immediate_operand")
	 (match_operand 0 "x86_64_zext_immediate_operand"))
    (match_operand 0 "general_operand")))

;; Return true if OP is a nonmemory operand representable on x86_64.
(define_predicate "x86_64_nonmemory_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "register_operand")
	 (match_operand 0 "x86_64_immediate_operand"))
    (match_operand 0 "nonmemory_operand")))

;; Return true if OP is a nonmemory operand representable on x86_64
;; as either a sign-extended or zero-extended constant.
(define_predicate "x86_64_szext_nonmemory_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "register_operand")
	 (match_operand 0 "x86_64_immediate_operand")
	 (match_operand 0 "x86_64_zext_immediate_operand"))
    (match_operand 0 "nonmemory_operand")))

;; Return true when the operand is a PIC expression that can be computed
;; by an lea operation.
(define_predicate "pic_32bit_operand"
  (match_code "const,symbol_ref,label_ref")
{
  if (!flag_pic)
    return false;

  /* Rule out relocations that translate into 64bit constants.  */
  if (TARGET_64BIT && GET_CODE (op) == CONST)
    {
      op = XEXP (op, 0);
      if (GET_CODE (op) == PLUS && CONST_INT_P (XEXP (op, 1)))
	op = XEXP (op, 0);
      if (GET_CODE (op) == UNSPEC
	  && (XINT (op, 1) == UNSPEC_GOTOFF
	      || XINT (op, 1) == UNSPEC_GOT))
	return false;
    }

  return symbolic_operand (op, mode);
})

;; Return true if OP is a nonmemory operand acceptable by movabs patterns.
(define_predicate "x86_64_movabs_operand"
  (and (match_operand 0 "nonmemory_operand")
       (not (match_operand 0 "pic_32bit_operand"))))

;; Return true if OP is either a symbol reference or a sum of a symbol
;; reference and a constant.
(define_predicate "symbolic_operand"
  (match_code "symbol_ref,label_ref,const")
{
  switch (GET_CODE (op))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return true;

    case CONST:
      op = XEXP (op, 0);
      if (GET_CODE (op) == SYMBOL_REF
	  || GET_CODE (op) == LABEL_REF
	  || (GET_CODE (op) == UNSPEC
	      && (XINT (op, 1) == UNSPEC_GOT
		  || XINT (op, 1) == UNSPEC_GOTOFF
		  || XINT (op, 1) == UNSPEC_PCREL
		  || XINT (op, 1) == UNSPEC_GOTPCREL)))
	return true;
      if (GET_CODE (op) != PLUS
	  || !CONST_INT_P (XEXP (op, 1)))
	return false;

      op = XEXP (op, 0);
      if (GET_CODE (op) == SYMBOL_REF
	  || GET_CODE (op) == LABEL_REF)
	return true;
      /* Only @GOTOFF gets offsets.  */
      if (GET_CODE (op) != UNSPEC
	  || XINT (op, 1) != UNSPEC_GOTOFF)
	return false;

      op = XVECEXP (op, 0, 0);
      if (GET_CODE (op) == SYMBOL_REF
	  || GET_CODE (op) == LABEL_REF)
	return true;
      return false;

    default:
      gcc_unreachable ();
    }
})

;; Return true if OP is a symbolic operand that resolves locally.
(define_predicate "local_symbolic_operand"
  (match_code "const,label_ref,symbol_ref")
{
  if (GET_CODE (op) == CONST
      && GET_CODE (XEXP (op, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (op, 0), 1)))
    op = XEXP (XEXP (op, 0), 0);

  if (GET_CODE (op) == LABEL_REF)
    return true;

  if (GET_CODE (op) != SYMBOL_REF)
    return false;

  if (SYMBOL_REF_TLS_MODEL (op))
    return false;

  if (SYMBOL_REF_LOCAL_P (op))
    return true;

  /* There is, however, a not insubstantial body of code in the rest of
     the compiler that assumes it can just stick the results of
     ASM_GENERATE_INTERNAL_LABEL in a symbol_ref and have done.  */
  /* ??? This is a hack.  Should update the body of the compiler to
     always create a DECL and invoke targetm.encode_section_info.  */
  if (strncmp (XSTR (op, 0), internal_label_prefix,
	       internal_label_prefix_len) == 0)
    return true;

  return false;
})

;; Test for a legitimate @GOTOFF operand.
;;
;; VxWorks does not impose a fixed gap between segments; the run-time
;; gap can be different from the object-file gap.  We therefore can't
;; use @GOTOFF unless we are absolutely sure that the symbol is in the
;; same segment as the GOT.  Unfortunately, the flexibility of linker
;; scripts means that we can't be sure of that in general, so assume
;; that @GOTOFF is never valid on VxWorks.
(define_predicate "gotoff_operand"
  (and (not (match_test "TARGET_VXWORKS_RTP"))
       (match_operand 0 "local_symbolic_operand")))

;; Test for various thread-local symbols.
(define_predicate "tls_symbolic_operand"
  (and (match_code "symbol_ref")
       (match_test "SYMBOL_REF_TLS_MODEL (op)")))

(define_predicate "tls_modbase_operand"
  (and (match_code "symbol_ref")
       (match_test "op == ix86_tls_module_base ()")))

;; Test for a pc-relative call operand
(define_predicate "constant_call_address_operand"
  (match_code "symbol_ref")
{
  if (ix86_cmodel == CM_LARGE || ix86_cmodel == CM_LARGE_PIC)
    return false;
  if (TARGET_DLLIMPORT_DECL_ATTRIBUTES && SYMBOL_REF_DLLIMPORT_P (op))
    return false;
  return true;
})

;; P6 processors will jump to the address after the decrement when %esp
;; is used as a call operand, so they will execute the return address
;; as code.  See Pentium Pro erratum 70, Pentium II erratum A33 and
;; Pentium III erratum E17.

(define_predicate "call_register_no_elim_operand"
  (match_operand 0 "register_operand")
{
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  if (!TARGET_64BIT && op == stack_pointer_rtx)
    return false;

  return register_no_elim_operand (op, mode);
})

;; True for any non-virtual and non-eliminable register.  Used in places
;; where instantiation of such a register may cause the pattern to not be
;; recognized.
(define_predicate "register_no_elim_operand"
  (match_operand 0 "register_operand")
{
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  return !(op == arg_pointer_rtx
	   || op == frame_pointer_rtx
	   || IN_RANGE (REGNO (op),
			FIRST_PSEUDO_REGISTER, LAST_VIRTUAL_REGISTER));
})

;; Similarly, but additionally exclude the stack pointer: this is used
;; to prevent esp from being used as an index reg.
(define_predicate "index_register_operand"
  (match_operand 0 "register_operand")
{
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (reload_in_progress || reload_completed)
    return REG_OK_FOR_INDEX_STRICT_P (op);
  else
    return REG_OK_FOR_INDEX_NONSTRICT_P (op);
})

;; Return false if this is any eliminable register.  Otherwise general_operand.
(define_predicate "general_no_elim_operand"
  (if_then_else (match_code "reg,subreg")
    (match_operand 0 "register_no_elim_operand")
    (match_operand 0 "general_operand")))

;; Return false if this is any eliminable register.  Otherwise
;; register_operand or a constant.
(define_predicate "nonmemory_no_elim_operand"
  (ior (match_operand 0 "register_no_elim_operand")
       (match_operand 0 "immediate_operand")))

;; Test for a valid operand for indirect branch.
(define_predicate "indirect_branch_operand"
  (if_then_else (match_test "TARGET_X32")
    (match_operand 0 "register_operand")
    (match_operand 0 "nonimmediate_operand")))

;; Test for a valid operand for a call instruction.
(define_predicate "call_insn_operand"
  (ior (match_operand 0 "constant_call_address_operand")
       (match_operand 0 "call_register_no_elim_operand")
       (and (not (match_test "TARGET_X32"))
	    (match_operand 0 "memory_operand"))))

;; Similarly, but for tail calls, in which we cannot allow memory references.
(define_predicate "sibcall_insn_operand"
  (ior (match_operand 0 "constant_call_address_operand")
       (match_operand 0 "register_no_elim_operand")))

;; Match exactly zero.
(define_predicate "const0_operand"
  (match_code "const_int,const_double,const_vector")
{
  if (mode == VOIDmode)
    mode = GET_MODE (op);
  return op == CONST0_RTX (mode);
})

;; Match exactly one.
(define_predicate "const1_operand"
  (and (match_code "const_int")
       (match_test "op == const1_rtx")))

;; Match exactly eight.
(define_predicate "const8_operand"
  (and (match_code "const_int")
       (match_test "INTVAL (op) == 8")))

;; Match exactly 128.
(define_predicate "const128_operand"
  (and (match_code "const_int")
       (match_test "INTVAL (op) == 128")))

;; Match exactly 0xFFFFFFFF in anddi as a zero-extension operation.
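;; For illustration, this matches the mask in a zero-extension pattern
;; such as (and:DI (reg:DI 0) (const_int 4294967295)), where the
;; register number is illustrative.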
(define_predicate "const_32bit_mask"
  (and (match_code "const_int")
       (match_test "trunc_int_for_mode (INTVAL (op), DImode)
		    == (HOST_WIDE_INT) 0xffffffff")))

;; Match 2, 4, or 8.  Used for leal multiplicands.
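;; For illustration: these are the address scale factors encodable in
;; the SIB byte other than 1, e.g. the 4 in leal (%ebx,%eax,4), %ecx.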
(define_predicate "const248_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 2 || i == 4 || i == 8;
})

;; Match 1, 2, 4, or 8.
(define_predicate "const1248_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 1 || i == 2 || i == 4 || i == 8;
})

;; Match 3, 5, or 9.  Used for leal multiplicands.
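;; For illustration: a multiplication by 3, 5 or 9 can be done as
;; base + index * {2, 4, 8} in a single lea, e.g. 3*x as
;; leal (%eax,%eax,2), %edx (illustrative register choice).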
(define_predicate "const359_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 3 || i == 5 || i == 9;
})

;; Match 0 or 1.
(define_predicate "const_0_to_1_operand"
  (and (match_code "const_int")
       (ior (match_test "op == const0_rtx")
	    (match_test "op == const1_rtx"))))

;; Match 0 to 3.
(define_predicate "const_0_to_3_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 3)")))

;; Match 0 to 7.
(define_predicate "const_0_to_7_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 7)")))

;; Match 0 to 15.
(define_predicate "const_0_to_15_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 15)")))

;; Match 0 to 31.
(define_predicate "const_0_to_31_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 31)")))

;; Match 0 to 63.
(define_predicate "const_0_to_63_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 63)")))

;; Match 0 to 255.
(define_predicate "const_0_to_255_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 255)")))

;; Match (0 to 255) * 8.
(define_predicate "const_0_to_255_mul_8_operand"
  (match_code "const_int")
{
  unsigned HOST_WIDE_INT val = INTVAL (op);
  return val <= 255*8 && val % 8 == 0;
})

;; Return true if OP is CONST_INT >= 1 and <= 31 (a valid operand
;; for shift & compare patterns, as shifting by 0 does not change flags).
(define_predicate "const_1_to_31_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 1, 31)")))

;; Return true if OP is CONST_INT >= 1 and <= 63 (a valid operand
;; for 64bit shift & compare patterns, as shifting by 0 does not change flags).
(define_predicate "const_1_to_63_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 1, 63)")))

;; Match 2 or 3.
(define_predicate "const_2_to_3_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 2, 3)")))

;; Match 4 to 5.
(define_predicate "const_4_to_5_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 4, 5)")))

;; Match 4 to 7.
(define_predicate "const_4_to_7_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 4, 7)")))

;; Match 6 to 7.
(define_predicate "const_6_to_7_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 6, 7)")))

;; Match 8 to 11.
(define_predicate "const_8_to_11_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 8, 11)")))

;; Match 12 to 15.
(define_predicate "const_12_to_15_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 12, 15)")))

;; True if this is a constant appropriate for an increment or decrement.
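;; For illustration: when this predicate returns false, patterns fall
;; back to an add $1 / add $-1 form rather than an inc / dec form.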
(define_predicate "incdec_operand"
  (match_code "const_int")
{
  /* On Pentium 4, the inc and dec operations cause an extra dependency
     on the flags register, since the carry flag is not set.  */
  if (!TARGET_USE_INCDEC && !optimize_insn_for_size_p ())
    return false;
  return op == const1_rtx || op == constm1_rtx;
})

;; True for registers, or 1 or -1.  Used to optimize double-word shifts.
(define_predicate "reg_or_pm1_operand"
  (ior (match_operand 0 "register_operand")
       (and (match_code "const_int")
	    (ior (match_test "op == const1_rtx")
		 (match_test "op == constm1_rtx")))))

;; True if OP is acceptable as an operand of the DImode shift expander.
(define_predicate "shiftdi_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (match_operand 0 "nonimmediate_operand")
    (match_operand 0 "register_operand")))

(define_predicate "ashldi_input_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (match_operand 0 "nonimmediate_operand")
    (match_operand 0 "reg_or_pm1_operand")))

;; Return true if OP is a vector load from the constant pool in which
;; every element except the first is zero.
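;; For illustration (hedged): a pool constant such as
;; (const_vector:V4SF [x 0.0 0.0 0.0]) qualifies; such a load can be
;; done with a movss-style scalar load that zeroes the upper elements.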
(define_predicate "zero_extended_scalar_load_operand"
  (match_code "mem")
{
  unsigned n_elts;
  op = maybe_get_pool_constant (op);

  if (!(op && GET_CODE (op) == CONST_VECTOR))
    return false;

  n_elts = CONST_VECTOR_NUNITS (op);

  for (n_elts--; n_elts > 0; n_elts--)
    {
      rtx elt = CONST_VECTOR_ELT (op, n_elts);
      if (elt != CONST0_RTX (GET_MODE_INNER (GET_MODE (op))))
	return false;
    }
  return true;
})

;; Return true if OP is a vector constant that is all ones.
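;; For illustration: (const_vector:V4SI [-1 -1 -1 -1]) matches; such a
;; constant is typically materialized by comparing a register with
;; itself (pcmpeqd) rather than loading it from memory.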
(define_predicate "vector_all_ones_operand"
  (match_code "const_vector")
{
  int nunits = GET_MODE_NUNITS (mode);

  if (GET_CODE (op) == CONST_VECTOR
      && CONST_VECTOR_NUNITS (op) == nunits)
    {
      int i;
      for (i = 0; i < nunits; ++i)
	{
	  rtx x = CONST_VECTOR_ELT (op, i);
	  if (x != constm1_rtx)
	    return false;
	}
      return true;
    }

  return false;
})

;; Return true when OP is an operand acceptable for a standard SSE move.
(define_predicate "vector_move_operand"
  (ior (match_operand 0 "nonimmediate_operand")
       (match_operand 0 "const0_operand")))

;; Return true when OP is a nonimmediate operand or a standard SSE constant.
(define_predicate "nonimmediate_or_sse_const_operand"
  (match_operand 0 "general_operand")
{
  if (nonimmediate_operand (op, mode))
    return true;
  if (standard_sse_constant_p (op) > 0)
    return true;
  return false;
})

;; Return true if OP is a register or a zero.
(define_predicate "reg_or_0_operand"
  (ior (match_operand 0 "register_operand")
       (match_operand 0 "const0_operand")))

;; Return true if op is a valid address for LEA and does not contain
;; a segment override.  Defined as a special predicate so that mode-less
;; const_int operands can be passed to address_operand.
(define_special_predicate "lea_address_operand"
  (match_operand 0 "address_operand")
{
  struct ix86_address parts;
  int ok;

  ok = ix86_decompose_address (op, &parts);
  gcc_assert (ok);
  return parts.seg == SEG_DEFAULT;
})

;; Return true for RTX codes that force SImode address.
(define_predicate "SImode_address_operand"
  (match_code "subreg,zero_extend,and"))

;; Return true if op is a valid base register, displacement, or sum of
;; a base register and displacement for VSIB addressing.
(define_predicate "vsib_address_operand"
  (match_operand 0 "address_operand")
{
  struct ix86_address parts;
  int ok;
  rtx disp;

  ok = ix86_decompose_address (op, &parts);
  gcc_assert (ok);
  if (parts.index || parts.seg != SEG_DEFAULT)
    return false;

  /* VSIB addressing doesn't support (%rip).  */
  if (parts.disp)
    {
      disp = parts.disp;
      if (GET_CODE (disp) == CONST)
	{
	  disp = XEXP (disp, 0);
	  if (GET_CODE (disp) == PLUS)
	    disp = XEXP (disp, 0);
	  if (GET_CODE (disp) == UNSPEC)
	    switch (XINT (disp, 1))
	      {
	      case UNSPEC_GOTPCREL:
	      case UNSPEC_PCREL:
	      case UNSPEC_GOTNTPOFF:
		return false;
	      }
	}
      if (TARGET_64BIT
	  && flag_pic
	  && (GET_CODE (disp) == SYMBOL_REF
	      || GET_CODE (disp) == LABEL_REF))
	return false;
    }

  return true;
})

(define_predicate "vsib_mem_operator"
  (match_code "mem"))

;; Return true if the rtx is known to be at least 32 bits aligned.
(define_predicate "aligned_operand"
  (match_operand 0 "general_operand")
{
  struct ix86_address parts;
  int ok;

  /* Registers and immediate operands are always "aligned".  */
  if (!MEM_P (op))
    return true;

  /* All patterns using aligned_operand on memory operands end up
     promoting the memory operand to 64 bits, thus causing a memory
     mismatch stall.  */
  if (TARGET_MEMORY_MISMATCH_STALL && !optimize_insn_for_size_p ())
    return false;

  /* Don't even try to do any aligned optimizations with volatiles.  */
  if (MEM_VOLATILE_P (op))
    return false;

  if (MEM_ALIGN (op) >= 32)
    return true;

  op = XEXP (op, 0);

  /* Pushes and pops are only valid on the stack pointer.  */
  if (GET_CODE (op) == PRE_DEC
      || GET_CODE (op) == POST_INC)
    return true;

  /* Decode the address.  */
  ok = ix86_decompose_address (op, &parts);
  gcc_assert (ok);

  if (parts.base && GET_CODE (parts.base) == SUBREG)
    parts.base = SUBREG_REG (parts.base);
  if (parts.index && GET_CODE (parts.index) == SUBREG)
    parts.index = SUBREG_REG (parts.index);

  /* Look for some component that isn't known to be aligned.  */
  if (parts.index)
    {
      if (REGNO_POINTER_ALIGN (REGNO (parts.index)) * parts.scale < 32)
	return false;
    }
  if (parts.base)
    {
      if (REGNO_POINTER_ALIGN (REGNO (parts.base)) < 32)
	return false;
    }
  if (parts.disp)
    {
      if (!CONST_INT_P (parts.disp)
	  || (INTVAL (parts.disp) & 3))
	return false;
    }

  /* Didn't find one -- this must be an aligned address.  */
  return true;
})

;; Return true if OP is a memory operand with a displacement.
(define_predicate "memory_displacement_operand"
  (match_operand 0 "memory_operand")
{
  struct ix86_address parts;
  int ok;

  ok = ix86_decompose_address (XEXP (op, 0), &parts);
  gcc_assert (ok);
  return parts.disp != NULL_RTX;
})

;; Return true if OP is a memory operand with a displacement only.
(define_predicate "memory_displacement_only_operand"
  (match_operand 0 "memory_operand")
{
  struct ix86_address parts;
  int ok;

  if (TARGET_64BIT)
    return false;

  ok = ix86_decompose_address (XEXP (op, 0), &parts);
  gcc_assert (ok);

  if (parts.base || parts.index)
    return false;

  return parts.disp != NULL_RTX;
})

;; Return true if OP is a memory operand that needs at most one
;; register, not counting the stack pointer or frame pointer.
(define_predicate "cmpxchg8b_pic_memory_operand"
  (match_operand 0 "memory_operand")
{
  struct ix86_address parts;
  int ok;

  if (TARGET_64BIT || !flag_pic)
    return true;

  ok = ix86_decompose_address (XEXP (op, 0), &parts);
  gcc_assert (ok);

  if (parts.base && GET_CODE (parts.base) == SUBREG)
    parts.base = SUBREG_REG (parts.base);
  if (parts.index && GET_CODE (parts.index) == SUBREG)
    parts.index = SUBREG_REG (parts.index);

  if (parts.base == NULL_RTX
      || parts.base == arg_pointer_rtx
      || parts.base == frame_pointer_rtx
      || parts.base == hard_frame_pointer_rtx
      || parts.base == stack_pointer_rtx)
    return true;

  if (parts.index == NULL_RTX
      || parts.index == arg_pointer_rtx
      || parts.index == frame_pointer_rtx
      || parts.index == hard_frame_pointer_rtx
      || parts.index == stack_pointer_rtx)
    return true;

  return false;
})

;; Return true if OP is a memory operand that cannot be represented
;; by the modRM array.
(define_predicate "long_memory_operand"
  (and (match_operand 0 "memory_operand")
       (match_test "memory_address_length (op, false)")))

;; Return true if OP is a comparison operator that can be issued by fcmov.
(define_predicate "fcmov_comparison_operator"
  (match_operand 0 "comparison_operator")
{
  enum machine_mode inmode = GET_MODE (XEXP (op, 0));
  enum rtx_code code = GET_CODE (op);

  if (inmode == CCFPmode || inmode == CCFPUmode)
    {
      if (!ix86_trivial_fp_comparison_operator (op, mode))
	return false;
      code = ix86_fp_compare_code_to_integer (code);
    }
  /* The i387 supports only a limited set of condition codes.  */
  switch (code)
    {
    case LTU: case GTU: case LEU: case GEU:
      if (inmode == CCmode || inmode == CCFPmode || inmode == CCFPUmode
	  || inmode == CCCmode)
	return true;
      return false;
    case ORDERED: case UNORDERED:
    case EQ: case NE:
      return true;
    default:
      return false;
    }
})

;; Return true if OP is a comparison that can be used in the CMPSS/CMPPS
;; insns.  The first set are supported directly; the second set can't be
;; done with full IEEE semantics, i.e. in the presence of NaNs.

(define_predicate "sse_comparison_operator"
  (ior (match_code "eq,ne,lt,le,unordered,unge,ungt,ordered")
       (and (match_test "TARGET_AVX")
	    (match_code "ge,gt,uneq,unle,unlt,ltgt"))))

(define_predicate "ix86_comparison_int_operator"
  (match_code "ne,eq,ge,gt,le,lt"))

(define_predicate "ix86_comparison_uns_operator"
  (match_code "ne,eq,geu,gtu,leu,ltu"))

(define_predicate "bt_comparison_operator"
  (match_code "ne,eq"))

;; Return true if OP is a valid comparison operator in a valid mode.
(define_predicate "ix86_comparison_operator"
  (match_operand 0 "comparison_operator")
{
  enum machine_mode inmode = GET_MODE (XEXP (op, 0));
  enum rtx_code code = GET_CODE (op);

  if (inmode == CCFPmode || inmode == CCFPUmode)
    return ix86_trivial_fp_comparison_operator (op, mode);

  switch (code)
    {
    case EQ: case NE:
      return true;
    case LT: case GE:
      if (inmode == CCmode || inmode == CCGCmode
	  || inmode == CCGOCmode || inmode == CCNOmode)
	return true;
      return false;
    case LTU: case GTU: case LEU: case GEU:
      if (inmode == CCmode || inmode == CCCmode)
	return true;
      return false;
    case ORDERED: case UNORDERED:
      if (inmode == CCmode)
	return true;
      return false;
    case GT: case LE:
      if (inmode == CCmode || inmode == CCGCmode || inmode == CCNOmode)
	return true;
      return false;
    default:
      return false;
    }
})

;; Return true if OP is a valid comparison operator
;; that tests whether the carry flag is set.
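;; For illustration: (ltu (reg:CC FLAGS_REG) (const_int 0)) satisfies
;; this predicate; such comparisons feed sbb/adc-style patterns.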
(define_predicate "ix86_carry_flag_operator"
  (match_code "ltu,lt,unlt,gtu,gt,ungt,le,unle,ge,unge,ltgt,uneq")
{
  enum machine_mode inmode = GET_MODE (XEXP (op, 0));
  enum rtx_code code = GET_CODE (op);

  if (inmode == CCFPmode || inmode == CCFPUmode)
    {
      if (!ix86_trivial_fp_comparison_operator (op, mode))
	return false;
      code = ix86_fp_compare_code_to_integer (code);
    }
  else if (inmode == CCCmode)
    return code == LTU || code == GTU;
  else if (inmode != CCmode)
    return false;

  return code == LTU;
})

;; Return true if this comparison only requires testing one flag bit.
(define_predicate "ix86_trivial_fp_comparison_operator"
  (match_code "gt,ge,unlt,unle,uneq,ltgt,ordered,unordered"))

;; Return true if we know how to do this comparison.  Others require
;; testing more than one flag bit, and we let the generic middle-end
;; code do that.
(define_predicate "ix86_fp_comparison_operator"
  (if_then_else (match_test "ix86_fp_comparison_strategy (GET_CODE (op))
			     == IX86_FPCMP_ARITH")
    (match_operand 0 "comparison_operator")
    (match_operand 0 "ix86_trivial_fp_comparison_operator")))

;; Same as above, but for the swapped comparison used in fp_jcc_4_387.
(define_predicate "ix86_swapped_fp_comparison_operator"
  (match_operand 0 "comparison_operator")
{
  enum rtx_code code = GET_CODE (op);
  bool ret;

  PUT_CODE (op, swap_condition (code));
  ret = ix86_fp_comparison_operator (op, mode);
  PUT_CODE (op, code);
  return ret;
})

;; Nearly general operand, but accept any const_double, since we wish
;; to be able to drop them into memory rather than have them get pulled
;; into registers.
(define_predicate "cmp_fp_expander_operand"
  (ior (match_code "const_double")
       (match_operand 0 "general_operand")))

;; Return true if this is a valid binary floating-point operation.
(define_predicate "binary_fp_operator"
  (match_code "plus,minus,mult,div"))

;; Return true if this is a multiply operation.
(define_predicate "mult_operator"
  (match_code "mult"))

;; Return true if this is a division operation.
(define_predicate "div_operator"
  (match_code "div"))

;; Return true if this is a plus, minus, and, ior or xor operation.
(define_predicate "plusminuslogic_operator"
  (match_code "plus,minus,and,ior,xor"))

;; Return true if this is a float extend operation.
(define_predicate "float_operator"
  (match_code "float"))

;; Return true for ARITHMETIC_P.
(define_predicate "arith_or_logical_operator"
  (match_code "plus,mult,and,ior,xor,smin,smax,umin,umax,compare,minus,div,
	       mod,udiv,umod,ashift,rotate,ashiftrt,lshiftrt,rotatert"))

;; Return true for COMMUTATIVE_P.
(define_predicate "commutative_operator"
  (match_code "plus,mult,and,ior,xor,smin,smax,umin,umax"))

;; Return true if OP is a binary operator that can be promoted to a wider mode.
(define_predicate "promotable_binary_operator"
  (ior (match_code "plus,minus,and,ior,xor,ashift")
       (and (match_code "mult")
	    (match_test "TARGET_TUNE_PROMOTE_HIMODE_IMUL"))))

(define_predicate "compare_operator"
  (match_code "compare"))

(define_predicate "absneg_operator"
  (match_code "abs,neg"))

;; Return true if OP is a misaligned memory operand.
(define_predicate "misaligned_operand"
  (and (match_code "mem")
       (match_test "MEM_ALIGN (op) < GET_MODE_ALIGNMENT (mode)")))

;; Return true if OP is an emms operation, known to be a PARALLEL.
(define_predicate "emms_operation"
  (match_code "parallel")
{
  unsigned i;

  if (XVECLEN (op, 0) != 17)
    return false;

  for (i = 0; i < 8; i++)
    {
      rtx elt = XVECEXP (op, 0, i+1);

      if (GET_CODE (elt) != CLOBBER
	  || GET_CODE (SET_DEST (elt)) != REG
	  || GET_MODE (SET_DEST (elt)) != XFmode
	  || REGNO (SET_DEST (elt)) != FIRST_STACK_REG + i)
	return false;

      elt = XVECEXP (op, 0, i+9);

      if (GET_CODE (elt) != CLOBBER
	  || GET_CODE (SET_DEST (elt)) != REG
	  || GET_MODE (SET_DEST (elt)) != DImode
	  || REGNO (SET_DEST (elt)) != FIRST_MMX_REG + i)
	return false;
    }
  return true;
})

;; Return true if OP is a vzeroall operation, known to be a PARALLEL.
(define_predicate "vzeroall_operation"
  (match_code "parallel")
{
  unsigned i, nregs = TARGET_64BIT ? 16 : 8;

  if ((unsigned) XVECLEN (op, 0) != 1 + nregs)
    return false;

  for (i = 0; i < nregs; i++)
    {
      rtx elt = XVECEXP (op, 0, i+1);

      if (GET_CODE (elt) != SET
	  || GET_CODE (SET_DEST (elt)) != REG
	  || GET_MODE (SET_DEST (elt)) != V8SImode
	  || REGNO (SET_DEST (elt)) != SSE_REGNO (i)
	  || SET_SRC (elt) != CONST0_RTX (V8SImode))
	return false;
    }
  return true;
})

;; Return true if OP is a parallel for a vbroadcast permute.
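;; For illustration: a parallel whose elements are all the identical
;; const_int, e.g. (parallel [(const_int 2) (const_int 2) (const_int 2)
;; (const_int 2)]), replicating one source element into every lane.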

(define_predicate "avx_vbroadcast_operand"
  (and (match_code "parallel")
       (match_code "const_int" "a"))
{
  rtx elt = XVECEXP (op, 0, 0);
  int i, nelt = XVECLEN (op, 0);

  /* Don't bother checking there are the right number of operands,
     merely that they're all identical.  */
  for (i = 1; i < nelt; ++i)
    if (XVECEXP (op, 0, i) != elt)
      return false;
  return true;
})

;; Return true if OP is a proper third operand to vpblendw256.
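;; For illustration: the 16-bit immediate must be its own low byte
;; replicated, so e.g. 0xa5a5 qualifies while 0x12a5 does not.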
(define_predicate "avx2_pblendw_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT val = INTVAL (op);
  HOST_WIDE_INT low = val & 0xff;
  return val == ((low << 8) | low);
})
