;; Predicate definitions for IA-32 and x86-64.
;; Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
;; Free Software Foundation, Inc.
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.

;; Return true if OP is either an i387 or an SSE fp register.
(define_predicate "any_fp_register_operand"
  (and (match_code "reg")
       (match_test "ANY_FP_REGNO_P (REGNO (op))")))

;; Return true if OP is an i387 fp register.
(define_predicate "fp_register_operand"
  (and (match_code "reg")
       (match_test "FP_REGNO_P (REGNO (op))")))

;; Return true if OP is a non-fp register_operand.
(define_predicate "register_and_not_any_fp_reg_operand"
  (and (match_code "reg")
       (not (match_test "ANY_FP_REGNO_P (REGNO (op))"))))

;; Return true if OP is a register operand other than an i387 fp register.
(define_predicate "register_and_not_fp_reg_operand"
  (and (match_code "reg")
       (not (match_test "FP_REGNO_P (REGNO (op))"))))

;; True if the operand is an MMX register.
(define_predicate "mmx_reg_operand"
  (and (match_code "reg")
       (match_test "MMX_REGNO_P (REGNO (op))")))

;; True if the operand is an SSE register.
(define_predicate "sse_reg_operand"
  (and (match_code "reg")
       (match_test "SSE_REGNO_P (REGNO (op))")))

;; True if the operand is a Q_REGS class register.
(define_predicate "q_regs_operand"
  (match_operand 0 "register_operand")
{
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  return ANY_QI_REG_P (op);
})

;; Match a DImode (on 64-bit targets), SImode or HImode register for a
;; zero_extract.
(define_special_predicate "ext_register_operand"
  (match_operand 0 "register_operand")
{
  if ((!TARGET_64BIT || GET_MODE (op) != DImode)
      && GET_MODE (op) != SImode && GET_MODE (op) != HImode)
    return false;
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  /* Be careful to accept only registers having upper parts.  */
  return (REG_P (op)
	  && (REGNO (op) > LAST_VIRTUAL_REGISTER || REGNO (op) <= BX_REG));
})
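
;; As an illustrative sketch (not an actual pattern from this port), a
;; zero_extract addressing the high byte of such a register could look
;; like:
;;
;;   (zero_extract:SI (match_operand 0 "ext_register_operand" "Q")
;;                    (const_int 8)
;;                    (const_int 8))
;;
;; Only %rax/%rbx/%rcx/%rdx have an addressable high byte (%ah, %bh,
;; %ch, %dh), hence the REGNO check above.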

;; Return true if op is the AX register.
(define_predicate "ax_reg_operand"
  (and (match_code "reg")
       (match_test "REGNO (op) == AX_REG")))

;; Return true if op is the flags register.
(define_predicate "flags_reg_operand"
  (and (match_code "reg")
       (match_test "REGNO (op) == FLAGS_REG")))

;; Return true if op is one of the QImode registers %[abcd][hl].
(define_predicate "QIreg_operand"
  (match_test "QI_REG_P (op)"))

;; Return true if op is a QImode register operand other than
;; %[abcd][hl].
(define_predicate "ext_QIreg_operand"
  (and (match_code "reg")
       (match_test "TARGET_64BIT")
       (match_test "REGNO (op) > BX_REG")))

;; Return true if op is not the xmm0 register.
(define_predicate "reg_not_xmm0_operand"
  (match_operand 0 "register_operand")
{
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  return !REG_P (op) || REGNO (op) != FIRST_SSE_REG;
})

;; As above, but also allow memory operands.
(define_predicate "nonimm_not_xmm0_operand"
  (ior (match_operand 0 "memory_operand")
       (match_operand 0 "reg_not_xmm0_operand")))

;; Return true if op is not the xmm0 register; the restriction applies
;; only to non-AVX targets.
(define_predicate "reg_not_xmm0_operand_maybe_avx"
  (if_then_else (match_test "TARGET_AVX")
    (match_operand 0 "register_operand")
    (match_operand 0 "reg_not_xmm0_operand")))

;; As above, but also allow memory operands.
(define_predicate "nonimm_not_xmm0_operand_maybe_avx"
  (if_then_else (match_test "TARGET_AVX")
    (match_operand 0 "nonimmediate_operand")
    (match_operand 0 "nonimm_not_xmm0_operand")))

;; Return true if VALUE can be stored in a sign-extended immediate field.
(define_predicate "x86_64_immediate_operand"
  (match_code "const_int,symbol_ref,label_ref,const")
{
  if (!TARGET_64BIT)
    return immediate_operand (op, mode);

  switch (GET_CODE (op))
    {
    case CONST_INT:
      /* CONST_DOUBLEs never match, since HOST_BITS_PER_WIDE_INT is known
         to be at least 32 and thus all acceptable constants are
	 represented as CONST_INT.  */
      if (HOST_BITS_PER_WIDE_INT == 32)
	return true;
      else
	{
	  HOST_WIDE_INT val = trunc_int_for_mode (INTVAL (op), DImode);
	  return trunc_int_for_mode (val, SImode) == val;
	}
      break;

    case SYMBOL_REF:
      /* For certain code models, the symbolic references are known to fit:
	 in the CM_SMALL_PIC model we know a reference fits if it is local
	 to the shared library.  Don't count TLS SYMBOL_REFs here, since
	 they should fit only when wrapped in one of the UNSPECs handled
	 below.  */
      /* TLS symbols are not constant.  */
      if (SYMBOL_REF_TLS_MODEL (op))
	return false;
      return (ix86_cmodel == CM_SMALL || ix86_cmodel == CM_KERNEL
	      || (ix86_cmodel == CM_MEDIUM && !SYMBOL_REF_FAR_ADDR_P (op)));

    case LABEL_REF:
      /* For certain code models, the code is near as well.  */
      return (ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM
	      || ix86_cmodel == CM_KERNEL);

    case CONST:
      /* We may also accept offsetted memory references in certain
	 special cases.  */
      if (GET_CODE (XEXP (op, 0)) == UNSPEC)
	switch (XINT (XEXP (op, 0), 1))
	  {
	  case UNSPEC_GOTPCREL:
	  case UNSPEC_DTPOFF:
	  case UNSPEC_GOTNTPOFF:
	  case UNSPEC_NTPOFF:
	    return true;
	  default:
	    break;
	  }

      if (GET_CODE (XEXP (op, 0)) == PLUS)
	{
	  rtx op1 = XEXP (XEXP (op, 0), 0);
	  rtx op2 = XEXP (XEXP (op, 0), 1);
	  HOST_WIDE_INT offset;

	  if (ix86_cmodel == CM_LARGE)
	    return false;
	  if (!CONST_INT_P (op2))
	    return false;
	  offset = trunc_int_for_mode (INTVAL (op2), DImode);
	  switch (GET_CODE (op1))
	    {
	    case SYMBOL_REF:
	      /* TLS symbols are not constant.  */
	      if (SYMBOL_REF_TLS_MODEL (op1))
		return false;
	      /* For CM_SMALL assume that the latest object is 16MB below
		 the end of the 31-bit boundary.  We may also accept fairly
		 large negative constants, knowing that all objects are in
		 the positive half of the address space.  */
	      if ((ix86_cmodel == CM_SMALL
		   || (ix86_cmodel == CM_MEDIUM
		       && !SYMBOL_REF_FAR_ADDR_P (op1)))
		  && offset < 16*1024*1024
		  && trunc_int_for_mode (offset, SImode) == offset)
		return true;
	      /* For CM_KERNEL we know that all objects reside in the
		 negative half of the 32-bit address space.  We may not
		 accept negative offsets, since they may take the address
		 just out of range, but we may accept fairly large positive
		 ones.  */
	      if (ix86_cmodel == CM_KERNEL
		  && offset > 0
		  && trunc_int_for_mode (offset, SImode) == offset)
		return true;
	      break;

	    case LABEL_REF:
	      /* These conditions are similar to SYMBOL_REF ones, just the
		 constraints for code models differ.  */
	      if ((ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM)
		  && offset < 16*1024*1024
		  && trunc_int_for_mode (offset, SImode) == offset)
		return true;
	      if (ix86_cmodel == CM_KERNEL
		  && offset > 0
		  && trunc_int_for_mode (offset, SImode) == offset)
		return true;
	      break;

	    case UNSPEC:
	      switch (XINT (op1, 1))
		{
		case UNSPEC_DTPOFF:
		case UNSPEC_NTPOFF:
		  if (offset > 0
		      && trunc_int_for_mode (offset, SImode) == offset)
		    return true;
		}
	      break;

	    default:
	      break;
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  return false;
})
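
;; For example, (const_int 0x7fffffff) satisfies this predicate in 64-bit
;; mode, while (const_int 0x100000000) does not, since the latter cannot
;; be represented as a sign-extended 32-bit immediate.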

;; Return true if VALUE can be stored in a zero-extended immediate field.
(define_predicate "x86_64_zext_immediate_operand"
  (match_code "const_double,const_int,symbol_ref,label_ref,const")
{
  switch (GET_CODE (op))
    {
    case CONST_DOUBLE:
      if (HOST_BITS_PER_WIDE_INT == 32)
	return (GET_MODE (op) == VOIDmode && !CONST_DOUBLE_HIGH (op));
      else
	return false;

    case CONST_INT:
      if (HOST_BITS_PER_WIDE_INT == 32)
	return INTVAL (op) >= 0;
      else
	return !(INTVAL (op) & ~(HOST_WIDE_INT) 0xffffffff);

    case SYMBOL_REF:
      /* For certain code models, the symbolic references are known to fit.  */
      /* TLS symbols are not constant.  */
      if (SYMBOL_REF_TLS_MODEL (op))
	return false;
      return (ix86_cmodel == CM_SMALL
	      || (ix86_cmodel == CM_MEDIUM
		  && !SYMBOL_REF_FAR_ADDR_P (op)));

    case LABEL_REF:
      /* For certain code models, the code is near as well.  */
      return ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM;

    case CONST:
      /* We may also accept offsetted memory references in certain
	 special cases.  */
      if (GET_CODE (XEXP (op, 0)) == PLUS)
	{
	  rtx op1 = XEXP (XEXP (op, 0), 0);
	  rtx op2 = XEXP (XEXP (op, 0), 1);

	  if (ix86_cmodel == CM_LARGE)
	    return false;
	  switch (GET_CODE (op1))
	    {
	    case SYMBOL_REF:
	      /* TLS symbols are not constant.  */
	      if (SYMBOL_REF_TLS_MODEL (op1))
		return false;
	      /* For the small code model we may accept fairly large positive
		 offsets, since one bit is available for free.  Negative
		 offsets are limited by the size of the NULL pointer area
		 specified by the ABI.  */
	      if ((ix86_cmodel == CM_SMALL
		   || (ix86_cmodel == CM_MEDIUM
		       && !SYMBOL_REF_FAR_ADDR_P (op1)))
		  && CONST_INT_P (op2)
		  && trunc_int_for_mode (INTVAL (op2), DImode) > -0x10000
		  && trunc_int_for_mode (INTVAL (op2), SImode) == INTVAL (op2))
		return true;
	      /* ??? For the kernel, we may accept adjustment of
		 -0x10000000, since we know that it will just convert
		 negative address space to positive, but perhaps this
		 is not worthwhile.  */
	      break;

	    case LABEL_REF:
	      /* These conditions are similar to SYMBOL_REF ones, just the
		 constraints for code models differ.  */
	      if ((ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM)
		  && CONST_INT_P (op2)
		  && trunc_int_for_mode (INTVAL (op2), DImode) > -0x10000
		  && trunc_int_for_mode (INTVAL (op2), SImode) == INTVAL (op2))
		return true;
	      break;

	    default:
	      return false;
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }
  return false;
})
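
;; For example, (const_int 0xffffffff) satisfies this predicate (the
;; value zero-extends from 32 bits) but not x86_64_immediate_operand
;; above, since sign-extending its low 32 bits would yield -1.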

;; Return true if OP is a general operand representable on x86_64.
(define_predicate "x86_64_general_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "nonimmediate_operand")
	 (match_operand 0 "x86_64_immediate_operand"))
    (match_operand 0 "general_operand")))

;; Return true if OP is a general operand representable on x86_64
;; with either a sign-extended or a zero-extended constant.
(define_predicate "x86_64_szext_general_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "nonimmediate_operand")
	 (match_operand 0 "x86_64_immediate_operand")
	 (match_operand 0 "x86_64_zext_immediate_operand"))
    (match_operand 0 "general_operand")))

;; Return true if OP is a nonmemory operand representable on x86_64.
(define_predicate "x86_64_nonmemory_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "register_operand")
	 (match_operand 0 "x86_64_immediate_operand"))
    (match_operand 0 "nonmemory_operand")))

;; Return true if OP is a nonmemory operand representable on x86_64
;; with either a sign-extended or a zero-extended constant.
(define_predicate "x86_64_szext_nonmemory_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "register_operand")
	 (match_operand 0 "x86_64_immediate_operand")
	 (match_operand 0 "x86_64_zext_immediate_operand"))
    (match_operand 0 "nonmemory_operand")))

;; Return true when the operand is a PIC expression that can be computed
;; by the lea operation.
(define_predicate "pic_32bit_operand"
  (match_code "const,symbol_ref,label_ref")
{
  if (!flag_pic)
    return false;

  /* Rule out relocations that translate into 64-bit constants.  */
  if (TARGET_64BIT && GET_CODE (op) == CONST)
    {
      op = XEXP (op, 0);
      if (GET_CODE (op) == PLUS && CONST_INT_P (XEXP (op, 1)))
	op = XEXP (op, 0);
      if (GET_CODE (op) == UNSPEC
	  && (XINT (op, 1) == UNSPEC_GOTOFF
	      || XINT (op, 1) == UNSPEC_GOT))
	return false;
    }

  return symbolic_operand (op, mode);
})

;; Return true if OP is a nonmemory operand acceptable to the movabs patterns.
(define_predicate "x86_64_movabs_operand"
  (and (match_operand 0 "nonmemory_operand")
       (not (match_operand 0 "pic_32bit_operand"))))
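
;; Illustrative note (assembly example, not taken from this file): movabs
;; can materialize a full 64-bit immediate, e.g.
;;
;;   movabsq $0x123456789abcdef0, %rax
;;
;; so any nonmemory operand is acceptable here except a PIC expression,
;; which must instead be computed via lea.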

;; Return true if OP is either a symbol reference or a sum of a symbol
;; reference and a constant.
(define_predicate "symbolic_operand"
  (match_code "symbol_ref,label_ref,const")
{
  switch (GET_CODE (op))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return true;

    case CONST:
      op = XEXP (op, 0);
      if (GET_CODE (op) == SYMBOL_REF
	  || GET_CODE (op) == LABEL_REF
	  || (GET_CODE (op) == UNSPEC
	      && (XINT (op, 1) == UNSPEC_GOT
		  || XINT (op, 1) == UNSPEC_GOTOFF
		  || XINT (op, 1) == UNSPEC_PCREL
		  || XINT (op, 1) == UNSPEC_GOTPCREL)))
	return true;
      if (GET_CODE (op) != PLUS
	  || !CONST_INT_P (XEXP (op, 1)))
	return false;

      op = XEXP (op, 0);
      if (GET_CODE (op) == SYMBOL_REF
	  || GET_CODE (op) == LABEL_REF)
	return true;
      /* Only @GOTOFF gets offsets.  */
      if (GET_CODE (op) != UNSPEC
	  || XINT (op, 1) != UNSPEC_GOTOFF)
	return false;

      op = XVECEXP (op, 0, 0);
      if (GET_CODE (op) == SYMBOL_REF
	  || GET_CODE (op) == LABEL_REF)
	return true;
      return false;

    default:
      gcc_unreachable ();
    }
})

;; Return true if OP is a symbolic operand that resolves locally.
(define_predicate "local_symbolic_operand"
  (match_code "const,label_ref,symbol_ref")
{
  if (GET_CODE (op) == CONST
      && GET_CODE (XEXP (op, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (op, 0), 1)))
    op = XEXP (XEXP (op, 0), 0);

  if (GET_CODE (op) == LABEL_REF)
    return true;

  if (GET_CODE (op) != SYMBOL_REF)
    return false;

  if (SYMBOL_REF_TLS_MODEL (op))
    return false;

  if (SYMBOL_REF_LOCAL_P (op))
    return true;

  /* There is, however, a not insubstantial body of code in the rest of
     the compiler that assumes it can just stick the results of
     ASM_GENERATE_INTERNAL_LABEL in a symbol_ref and have done.  */
  /* ??? This is a hack.  Should update the body of the compiler to
     always create a DECL and invoke targetm.encode_section_info.  */
  if (strncmp (XSTR (op, 0), internal_label_prefix,
	       internal_label_prefix_len) == 0)
    return true;

  return false;
})

;; Test for a legitimate @GOTOFF operand.
;;
;; VxWorks does not impose a fixed gap between segments; the run-time
;; gap can be different from the object-file gap.  We therefore can't
;; use @GOTOFF unless we are absolutely sure that the symbol is in the
;; same segment as the GOT.  Unfortunately, the flexibility of linker
;; scripts means that we can't be sure of that in general, so assume
;; that @GOTOFF is never valid on VxWorks.
(define_predicate "gotoff_operand"
  (and (not (match_test "TARGET_VXWORKS_RTP"))
       (match_operand 0 "local_symbolic_operand")))

;; Test for various thread-local symbols.
(define_predicate "tls_symbolic_operand"
  (and (match_code "symbol_ref")
       (match_test "SYMBOL_REF_TLS_MODEL (op)")))

(define_predicate "tls_modbase_operand"
  (and (match_code "symbol_ref")
       (match_test "op == ix86_tls_module_base ()")))

;; Test for a pc-relative call operand.
(define_predicate "constant_call_address_operand"
  (match_code "symbol_ref")
{
  if (ix86_cmodel == CM_LARGE || ix86_cmodel == CM_LARGE_PIC)
    return false;
  if (TARGET_DLLIMPORT_DECL_ATTRIBUTES && SYMBOL_REF_DLLIMPORT_P (op))
    return false;
  return true;
})

;; P6 processors will jump to the address after the decrement when %esp
;; is used as a call operand, so they will execute the return address
;; as code.  See Pentium Pro erratum 70, Pentium II erratum A33 and
;; Pentium III erratum E17.

(define_predicate "call_register_no_elim_operand"
  (match_operand 0 "register_operand")
{
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  if (!TARGET_64BIT && op == stack_pointer_rtx)
    return false;

  return register_no_elim_operand (op, mode);
})

;; True for any non-virtual or eliminable register.  Used in places where
;; instantiation of such a register may cause the pattern to not be recognized.
(define_predicate "register_no_elim_operand"
  (match_operand 0 "register_operand")
{
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  return !(op == arg_pointer_rtx
	   || op == frame_pointer_rtx
	   || IN_RANGE (REGNO (op),
			FIRST_PSEUDO_REGISTER, LAST_VIRTUAL_REGISTER));
})

;; Similarly, but include the stack pointer.  This is used to prevent esp
;; from being used as an index reg.
(define_predicate "index_register_operand"
  (match_operand 0 "register_operand")
{
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (reload_in_progress || reload_completed)
    return REG_OK_FOR_INDEX_STRICT_P (op);
  else
    return REG_OK_FOR_INDEX_NONSTRICT_P (op);
})

;; Return false if this is any eliminable register.  Otherwise general_operand.
(define_predicate "general_no_elim_operand"
  (if_then_else (match_code "reg,subreg")
    (match_operand 0 "register_no_elim_operand")
    (match_operand 0 "general_operand")))

;; Return false if this is any eliminable register.  Otherwise
;; register_operand or a constant.
(define_predicate "nonmemory_no_elim_operand"
  (ior (match_operand 0 "register_no_elim_operand")
       (match_operand 0 "immediate_operand")))

;; Test for a valid operand for indirect branch.
(define_predicate "indirect_branch_operand"
  (if_then_else (match_test "TARGET_X32")
    (match_operand 0 "register_operand")
    (match_operand 0 "nonimmediate_operand")))

;; Test for a valid operand for a call instruction.
(define_predicate "call_insn_operand"
  (ior (match_operand 0 "constant_call_address_operand")
       (match_operand 0 "call_register_no_elim_operand")
       (and (not (match_test "TARGET_X32"))
	    (match_operand 0 "memory_operand"))))

;; Similarly, but for tail calls, in which we cannot allow memory references.
(define_predicate "sibcall_insn_operand"
  (ior (match_operand 0 "constant_call_address_operand")
       (match_operand 0 "register_no_elim_operand")))

;; Match exactly zero.
(define_predicate "const0_operand"
  (match_code "const_int,const_double,const_vector")
{
  if (mode == VOIDmode)
    mode = GET_MODE (op);
  return op == CONST0_RTX (mode);
})

;; Match exactly one.
(define_predicate "const1_operand"
  (and (match_code "const_int")
       (match_test "op == const1_rtx")))

;; Match exactly eight.
(define_predicate "const8_operand"
  (and (match_code "const_int")
       (match_test "INTVAL (op) == 8")))

;; Match exactly 128.
(define_predicate "const128_operand"
  (and (match_code "const_int")
       (match_test "INTVAL (op) == 128")))

;; Match exactly 0xFFFFFFFF, used in anddi as a zero-extension operation.
(define_predicate "const_32bit_mask"
  (and (match_code "const_int")
       (match_test "trunc_int_for_mode (INTVAL (op), DImode)
		    == (HOST_WIDE_INT) 0xffffffff")))
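
;; As a sketch of the intended use: (and:DI (reg) (const_int 0xffffffff))
;; can be implemented as a plain 32-bit move, since writing the low half
;; of a register zero-extends into the high half on x86-64.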

;; Match 2, 4, or 8.  Used for leal multiplicands.
(define_predicate "const248_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 2 || i == 4 || i == 8;
})
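
;; For example (illustrative assembly), a scale of 4 appears as
;;
;;   leal (%esi,%eax,4), %edx
;;
;; computing %esi + %eax*4 in a single instruction.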

;; Match 1, 2, 4, or 8.
(define_predicate "const1248_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 1 || i == 2 || i == 4 || i == 8;
})

;; Match 3, 5, or 9.  Used for leal multiplicands.
(define_predicate "const359_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 3 || i == 5 || i == 9;
})
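
;; For example (illustrative assembly), multiplication by 5 becomes
;;
;;   leal (%eax,%eax,4), %eax
;;
;; i.e. %eax + %eax*4; only 3, 5 and 9 (1 + {2,4,8}) can be formed
;; this way.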

;; Match 0 or 1.
(define_predicate "const_0_to_1_operand"
  (and (match_code "const_int")
       (ior (match_test "op == const0_rtx")
	    (match_test "op == const1_rtx"))))

;; Match 0 to 3.
(define_predicate "const_0_to_3_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 3)")))

;; Match 0 to 7.
(define_predicate "const_0_to_7_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 7)")))

;; Match 0 to 15.
(define_predicate "const_0_to_15_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 15)")))

;; Match 0 to 31.
(define_predicate "const_0_to_31_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 31)")))

;; Match 0 to 63.
(define_predicate "const_0_to_63_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 63)")))

;; Match 0 to 255.
(define_predicate "const_0_to_255_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 255)")))

;; Match (0 to 255) * 8.
(define_predicate "const_0_to_255_mul_8_operand"
  (match_code "const_int")
{
  unsigned HOST_WIDE_INT val = INTVAL (op);
  return val <= 255*8 && val % 8 == 0;
})

;; Return true if OP is a CONST_INT >= 1 and <= 31 (a valid operand
;; for shift & compare patterns, as shifting by 0 does not change flags).
(define_predicate "const_1_to_31_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 1, 31)")))

;; Return true if OP is a CONST_INT >= 1 and <= 63 (a valid operand
;; for 64bit shift & compare patterns, as shifting by 0 does not change flags).
(define_predicate "const_1_to_63_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 1, 63)")))

;; Match 2 or 3.
(define_predicate "const_2_to_3_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 2, 3)")))

;; Match 4 to 5.
(define_predicate "const_4_to_5_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 4, 5)")))

;; Match 4 to 7.
(define_predicate "const_4_to_7_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 4, 7)")))

;; Match 6 to 7.
(define_predicate "const_6_to_7_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 6, 7)")))

;; Match 8 to 11.
(define_predicate "const_8_to_11_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 8, 11)")))

;; Match 12 to 15.
(define_predicate "const_12_to_15_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 12, 15)")))

;; True if this is a constant appropriate for an increment or decrement.
(define_predicate "incdec_operand"
  (match_code "const_int")
{
  /* On Pentium4, the inc and dec operations cause an extra dependency on
     the flags register, since the carry flag is not set.  */
  if (!TARGET_USE_INCDEC && !optimize_insn_for_size_p ())
    return false;
  return op == const1_rtx || op == constm1_rtx;
})
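
;; For example, on such targets the compiler prefers "addl $1, %eax"
;; over "incl %eax" when optimizing for speed, since add rewrites all
;; arithmetic flags while inc leaves the carry flag untouched.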

;; True for registers, or 1 or -1.  Used to optimize double-word shifts.
(define_predicate "reg_or_pm1_operand"
  (ior (match_operand 0 "register_operand")
       (and (match_code "const_int")
	    (ior (match_test "op == const1_rtx")
		 (match_test "op == constm1_rtx")))))

;; True if OP is acceptable as operand of DImode shift expander.
(define_predicate "shiftdi_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (match_operand 0 "nonimmediate_operand")
    (match_operand 0 "register_operand")))

(define_predicate "ashldi_input_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (match_operand 0 "nonimmediate_operand")
    (match_operand 0 "reg_or_pm1_operand")))

;; Return true if OP is a vector load from the constant pool with just
;; the first element nonzero.
(define_predicate "zero_extended_scalar_load_operand"
  (match_code "mem")
{
  unsigned n_elts;
  op = maybe_get_pool_constant (op);

  if (!(op && GET_CODE (op) == CONST_VECTOR))
    return false;

  n_elts = CONST_VECTOR_NUNITS (op);

  for (n_elts--; n_elts > 0; n_elts--)
    {
      rtx elt = CONST_VECTOR_ELT (op, n_elts);
      if (elt != CONST0_RTX (GET_MODE_INNER (GET_MODE (op))))
	return false;
    }
  return true;
})

;; Return true if operand is a vector constant that is all ones.
(define_predicate "vector_all_ones_operand"
  (match_code "const_vector")
{
  int nunits = GET_MODE_NUNITS (mode);

  if (GET_CODE (op) == CONST_VECTOR
      && CONST_VECTOR_NUNITS (op) == nunits)
    {
      int i;
      for (i = 0; i < nunits; ++i)
        {
          rtx x = CONST_VECTOR_ELT (op, i);
          if (x != constm1_rtx)
            return false;
        }
      return true;
    }

  return false;
})

;; Return true when OP is an operand acceptable for a standard SSE move.
(define_predicate "vector_move_operand"
  (ior (match_operand 0 "nonimmediate_operand")
       (match_operand 0 "const0_operand")))

;; Return true when OP is a nonimmediate operand or a standard SSE constant.
(define_predicate "nonimmediate_or_sse_const_operand"
  (match_operand 0 "general_operand")
{
  if (nonimmediate_operand (op, mode))
    return true;
  if (standard_sse_constant_p (op) > 0)
    return true;
  return false;
})

;; Return true if OP is a register or a zero.
(define_predicate "reg_or_0_operand"
  (ior (match_operand 0 "register_operand")
       (match_operand 0 "const0_operand")))

;; Return true if op is a valid address for LEA and does not contain
;; a segment override.  Defined as a special predicate so that mode-less
;; const_int operands can pass to address_operand.
(define_special_predicate "lea_address_operand"
  (match_operand 0 "address_operand")
{
  struct ix86_address parts;
  int ok;

  ok = ix86_decompose_address (op, &parts);
  gcc_assert (ok);
  return parts.seg == SEG_DEFAULT;
})

;; Return true for RTX codes that force SImode address.
(define_predicate "SImode_address_operand"
  (match_code "subreg,zero_extend,and"))

;; Return true if op is a valid base register, a displacement, or a
;; sum of base register and displacement for VSIB addressing.
(define_predicate "vsib_address_operand"
  (match_operand 0 "address_operand")
{
  struct ix86_address parts;
  int ok;
  rtx disp;

  ok = ix86_decompose_address (op, &parts);
  gcc_assert (ok);
  if (parts.index || parts.seg != SEG_DEFAULT)
    return false;

  /* VSIB addressing doesn't support (%rip).  */
  if (parts.disp && GET_CODE (parts.disp) == CONST)
    {
      disp = XEXP (parts.disp, 0);
      if (GET_CODE (disp) == PLUS)
	disp = XEXP (disp, 0);
      if (GET_CODE (disp) == UNSPEC)
	switch (XINT (disp, 1))
	  {
	  case UNSPEC_GOTPCREL:
	  case UNSPEC_PCREL:
	  case UNSPEC_GOTNTPOFF:
	    return false;
	  }
    }

  return true;
})

(define_predicate "vsib_mem_operator"
  (match_code "mem"))

;; Return true if the rtx is known to be at least 32 bits aligned.
(define_predicate "aligned_operand"
  (match_operand 0 "general_operand")
{
  struct ix86_address parts;
  int ok;

  /* Registers and immediate operands are always "aligned".  */
  if (!MEM_P (op))
    return true;

  /* All patterns using aligned_operand on memory operands end up
     promoting the memory operand to 64 bits and thus causing a memory
     mismatch stall.  */
  if (TARGET_MEMORY_MISMATCH_STALL && !optimize_insn_for_size_p ())
    return false;

  /* Don't even try to do any aligned optimizations with volatiles.  */
  if (MEM_VOLATILE_P (op))
    return false;

  if (MEM_ALIGN (op) >= 32)
    return true;

  op = XEXP (op, 0);

  /* Pushes and pops are only valid on the stack pointer.  */
  if (GET_CODE (op) == PRE_DEC
      || GET_CODE (op) == POST_INC)
    return true;

  /* Decode the address.  */
  ok = ix86_decompose_address (op, &parts);
  gcc_assert (ok);

  if (parts.base && GET_CODE (parts.base) == SUBREG)
    parts.base = SUBREG_REG (parts.base);
  if (parts.index && GET_CODE (parts.index) == SUBREG)
    parts.index = SUBREG_REG (parts.index);

  /* Look for some component that isn't known to be aligned.  */
  if (parts.index)
    {
      if (REGNO_POINTER_ALIGN (REGNO (parts.index)) * parts.scale < 32)
	return false;
    }
  if (parts.base)
    {
      if (REGNO_POINTER_ALIGN (REGNO (parts.base)) < 32)
	return false;
    }
  if (parts.disp)
    {
      if (!CONST_INT_P (parts.disp)
	  || (INTVAL (parts.disp) & 3))
	return false;
    }

  /* Didn't find one -- this must be an aligned address.  */
  return true;
})

;; Return true if OP is a memory operand with a displacement.
(define_predicate "memory_displacement_operand"
  (match_operand 0 "memory_operand")
{
  struct ix86_address parts;
  int ok;

  ok = ix86_decompose_address (XEXP (op, 0), &parts);
  gcc_assert (ok);
  return parts.disp != NULL_RTX;
})

;; Return true if OP is a memory operand with a displacement only.
(define_predicate "memory_displacement_only_operand"
  (match_operand 0 "memory_operand")
{
  struct ix86_address parts;
  int ok;

  if (TARGET_64BIT)
    return false;

  ok = ix86_decompose_address (XEXP (op, 0), &parts);
  gcc_assert (ok);

  if (parts.base || parts.index)
    return false;

  return parts.disp != NULL_RTX;
})

;; Return true if OP is a memory operand that needs at most one register,
;; not counting stack pointer or frame pointer.
(define_predicate "cmpxchg8b_pic_memory_operand"
  (match_operand 0 "memory_operand")
{
  struct ix86_address parts;
  int ok;

  if (TARGET_64BIT || !flag_pic)
    return true;

  ok = ix86_decompose_address (XEXP (op, 0), &parts);
  gcc_assert (ok);

  if (parts.base && GET_CODE (parts.base) == SUBREG)
    parts.base = SUBREG_REG (parts.base);
  if (parts.index && GET_CODE (parts.index) == SUBREG)
    parts.index = SUBREG_REG (parts.index);

  if (parts.base == NULL_RTX
      || parts.base == arg_pointer_rtx
      || parts.base == frame_pointer_rtx
      || parts.base == hard_frame_pointer_rtx
      || parts.base == stack_pointer_rtx)
    return true;

  if (parts.index == NULL_RTX
      || parts.index == arg_pointer_rtx
      || parts.index == frame_pointer_rtx
      || parts.index == hard_frame_pointer_rtx
      || parts.index == stack_pointer_rtx)
    return true;

  return false;
})
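
;; Informal background for the check above: cmpxchg8b implicitly uses
;; EDX:EAX and ECX:EBX, and with -fpic EBX also holds the GOT pointer,
;; so the address must be expressible with at most one remaining free
;; register.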

;; Return true if OP is a memory operand that cannot be represented
;; by the modRM array.
(define_predicate "long_memory_operand"
  (and (match_operand 0 "memory_operand")
       (match_test "memory_address_length (op, false)")))

;; Return true if OP is a comparison operator that can be issued by fcmov.
(define_predicate "fcmov_comparison_operator"
  (match_operand 0 "comparison_operator")
{
  enum machine_mode inmode = GET_MODE (XEXP (op, 0));
  enum rtx_code code = GET_CODE (op);

  if (inmode == CCFPmode || inmode == CCFPUmode)
    {
      if (!ix86_trivial_fp_comparison_operator (op, mode))
	return false;
      code = ix86_fp_compare_code_to_integer (code);
    }
  /* The i387 supports just a limited set of condition codes.  */
  switch (code)
    {
    case LTU: case GTU: case LEU: case GEU:
      if (inmode == CCmode || inmode == CCFPmode || inmode == CCFPUmode
	  || inmode == CCCmode)
	return true;
      return false;
    case ORDERED: case UNORDERED:
    case EQ: case NE:
      return true;
    default:
      return false;
    }
})

;; Return true if OP is a comparison that can be used in the CMPSS/CMPPS insns.
;; The first set are supported directly; the second set can't be done with
;; full IEEE support, i.e. NaNs.

(define_predicate "sse_comparison_operator"
  (ior (match_code "eq,ne,lt,le,unordered,unge,ungt,ordered")
       (and (match_test "TARGET_AVX")
	    (match_code "ge,gt,uneq,unle,unlt,ltgt"))))

(define_predicate "ix86_comparison_int_operator"
  (match_code "ne,eq,ge,gt,le,lt"))

(define_predicate "ix86_comparison_uns_operator"
  (match_code "ne,eq,geu,gtu,leu,ltu"))

(define_predicate "bt_comparison_operator"
  (match_code "ne,eq"))

;; Return true if OP is a valid comparison operator in a valid mode.
(define_predicate "ix86_comparison_operator"
  (match_operand 0 "comparison_operator")
{
  enum machine_mode inmode = GET_MODE (XEXP (op, 0));
  enum rtx_code code = GET_CODE (op);

  if (inmode == CCFPmode || inmode == CCFPUmode)
    return ix86_trivial_fp_comparison_operator (op, mode);

  switch (code)
    {
    case EQ: case NE:
      return true;
    case LT: case GE:
      if (inmode == CCmode || inmode == CCGCmode
	  || inmode == CCGOCmode || inmode == CCNOmode)
	return true;
      return false;
    case LTU: case GTU: case LEU: case GEU:
      if (inmode == CCmode || inmode == CCCmode)
	return true;
      return false;
    case ORDERED: case UNORDERED:
      if (inmode == CCmode)
	return true;
      return false;
    case GT: case LE:
      if (inmode == CCmode || inmode == CCGCmode || inmode == CCNOmode)
	return true;
      return false;
    default:
      return false;
    }
})

;; Return true if OP is a valid comparison operator that tests whether
;; the carry flag is set.
(define_predicate "ix86_carry_flag_operator"
  (match_code "ltu,lt,unlt,gtu,gt,ungt,le,unle,ge,unge,ltgt,uneq")
{
  enum machine_mode inmode = GET_MODE (XEXP (op, 0));
  enum rtx_code code = GET_CODE (op);

  if (inmode == CCFPmode || inmode == CCFPUmode)
    {
      if (!ix86_trivial_fp_comparison_operator (op, mode))
	return false;
      code = ix86_fp_compare_code_to_integer (code);
    }
  else if (inmode == CCCmode)
    return code == LTU || code == GTU;
  else if (inmode != CCmode)
    return false;

  return code == LTU;
})
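
;; Illustrative use: in CCmode, LTU is precisely the "carry set"
;; condition, so for instance an sbb-based pattern can match
;;
;;   (ltu:SI (reg:CC FLAGS_REG) (const_int 0))
;;
;; to consume the carry produced by a preceding comparison.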

;; Return true if this comparison only requires testing one flag bit.
(define_predicate "ix86_trivial_fp_comparison_operator"
  (match_code "gt,ge,unlt,unle,uneq,ltgt,ordered,unordered"))

;; Return true if we know how to do this comparison.  Others require
;; testing more than one flag bit, and we let the generic middle-end
;; code do that.
(define_predicate "ix86_fp_comparison_operator"
  (if_then_else (match_test "ix86_fp_comparison_strategy (GET_CODE (op))
                             == IX86_FPCMP_ARITH")
               (match_operand 0 "comparison_operator")
               (match_operand 0 "ix86_trivial_fp_comparison_operator")))

;; Same as above, but for the swapped comparison used in fp_jcc_4_387.
(define_predicate "ix86_swapped_fp_comparison_operator"
  (match_operand 0 "comparison_operator")
{
  enum rtx_code code = GET_CODE (op);
  bool ret;

  PUT_CODE (op, swap_condition (code));
  ret = ix86_fp_comparison_operator (op, mode);
  PUT_CODE (op, code);
  return ret;
})

;; Nearly general operand, but accept any const_double, since we wish
;; to be able to drop them into memory rather than have them get pulled
;; into registers.
(define_predicate "cmp_fp_expander_operand"
  (ior (match_code "const_double")
       (match_operand 0 "general_operand")))

;; Return true if this is a valid binary floating-point operation.
(define_predicate "binary_fp_operator"
  (match_code "plus,minus,mult,div"))

;; Return true if this is a multiply operation.
(define_predicate "mult_operator"
  (match_code "mult"))

;; Return true if this is a division operation.
(define_predicate "div_operator"
  (match_code "div"))

;; Return true if this is a plus, minus, and, ior, or xor operation.
(define_predicate "plusminuslogic_operator"
  (match_code "plus,minus,and,ior,xor"))

;; Return true if this is a float extend operation.
(define_predicate "float_operator"
  (match_code "float"))

;; Return true for ARITHMETIC_P.
(define_predicate "arith_or_logical_operator"
  (match_code "plus,mult,and,ior,xor,smin,smax,umin,umax,compare,minus,div,
	       mod,udiv,umod,ashift,rotate,ashiftrt,lshiftrt,rotatert"))

;; Return true for COMMUTATIVE_P.
(define_predicate "commutative_operator"
  (match_code "plus,mult,and,ior,xor,smin,smax,umin,umax"))

;; Return true if OP is a binary operator that can be promoted to wider mode.
(define_predicate "promotable_binary_operator"
  (ior (match_code "plus,minus,and,ior,xor,ashift")
       (and (match_code "mult")
	    (match_test "TARGET_TUNE_PROMOTE_HIMODE_IMUL"))))

(define_predicate "compare_operator"
  (match_code "compare"))

(define_predicate "absneg_operator"
  (match_code "abs,neg"))

;; Return true if OP is a misaligned memory operand.
(define_predicate "misaligned_operand"
  (and (match_code "mem")
       (match_test "MEM_ALIGN (op) < GET_MODE_ALIGNMENT (mode)")))

;; Return true if OP is an emms operation, known to be a PARALLEL.
(define_predicate "emms_operation"
  (match_code "parallel")
{
  unsigned i;

  if (XVECLEN (op, 0) != 17)
    return false;

  for (i = 0; i < 8; i++)
    {
      rtx elt = XVECEXP (op, 0, i+1);

      if (GET_CODE (elt) != CLOBBER
	  || GET_CODE (SET_DEST (elt)) != REG
	  || GET_MODE (SET_DEST (elt)) != XFmode
	  || REGNO (SET_DEST (elt)) != FIRST_STACK_REG + i)
        return false;

      elt = XVECEXP (op, 0, i+9);

      if (GET_CODE (elt) != CLOBBER
	  || GET_CODE (SET_DEST (elt)) != REG
	  || GET_MODE (SET_DEST (elt)) != DImode
	  || REGNO (SET_DEST (elt)) != FIRST_MMX_REG + i)
	return false;
    }
  return true;
})

;; Return true if OP is a vzeroall operation, known to be a PARALLEL.
(define_predicate "vzeroall_operation"
  (match_code "parallel")
{
  unsigned i, nregs = TARGET_64BIT ? 16 : 8;

  if ((unsigned) XVECLEN (op, 0) != 1 + nregs)
    return false;

  for (i = 0; i < nregs; i++)
    {
      rtx elt = XVECEXP (op, 0, i+1);

      if (GET_CODE (elt) != SET
	  || GET_CODE (SET_DEST (elt)) != REG
	  || GET_MODE (SET_DEST (elt)) != V8SImode
	  || REGNO (SET_DEST (elt)) != SSE_REGNO (i)
	  || SET_SRC (elt) != CONST0_RTX (V8SImode))
	return false;
    }
  return true;
})

;; Return true if OP is a parallel for a vbroadcast permute.

(define_predicate "avx_vbroadcast_operand"
  (and (match_code "parallel")
       (match_code "const_int" "a"))
{
  rtx elt = XVECEXP (op, 0, 0);
  int i, nelt = XVECLEN (op, 0);

  /* Don't bother checking there are the right number of operands,
     merely that they're all identical.  */
  for (i = 1; i < nelt; ++i)
    if (XVECEXP (op, 0, i) != elt)
      return false;
  return true;
})

;; Return true if OP is a proper third operand to vpblendw256.
(define_predicate "avx2_pblendw_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT val = INTVAL (op);
  HOST_WIDE_INT low = val & 0xff;
  return val == ((low << 8) | low);
})