;; Predicate definitions for IA-32 and x86-64.
;; Copyright (C) 2004-2018 Free Software Foundation, Inc.
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.

;; Return true if OP is either an i387 or an SSE fp register.
(define_predicate "any_fp_register_operand"
  (and (match_code "reg")
       (match_test "ANY_FP_REGNO_P (REGNO (op))")))

;; Return true if OP is an i387 fp register.
(define_predicate "fp_register_operand"
  (and (match_code "reg")
       (match_test "STACK_REGNO_P (REGNO (op))")))

;; True if the operand is a GENERAL class register.
(define_predicate "general_reg_operand"
  (and (match_code "reg")
       (match_test "GENERAL_REGNO_P (REGNO (op))")))

;; True if the operand is a nonimmediate operand whose register,
;; if any, is a GENERAL class register.
(define_predicate "nonimmediate_gr_operand"
  (if_then_else (match_code "reg")
    (match_test "GENERAL_REGNO_P (REGNO (op))")
    (match_operand 0 "nonimmediate_operand")))

;; True if the operand is a general operand whose register,
;; if any, is a GENERAL class register.
(define_predicate "general_gr_operand"
  (if_then_else (match_code "reg")
    (match_test "GENERAL_REGNO_P (REGNO (op))")
    (match_operand 0 "general_operand")))

;; True if the operand is an MMX register.
(define_predicate "mmx_reg_operand"
  (and (match_code "reg")
       (match_test "MMX_REGNO_P (REGNO (op))")))

;; True if the operand is an SSE register.
(define_predicate "sse_reg_operand"
  (and (match_code "reg")
       (match_test "SSE_REGNO_P (REGNO (op))")))

;; True if the operand is one of the SSE registers added by AVX-512.
(define_predicate "ext_sse_reg_operand"
  (and (match_code "reg")
       (match_test "EXT_REX_SSE_REGNO_P (REGNO (op))")))

;; Return true if OP is a QImode register.
(define_predicate "any_QIreg_operand"
  (and (match_code "reg")
       (match_test "ANY_QI_REGNO_P (REGNO (op))")))

;; Return true if OP is one of the QImode registers %[abcd][hl].
(define_predicate "QIreg_operand"
  (and (match_code "reg")
       (match_test "QI_REGNO_P (REGNO (op))")))

;; Return true if OP is a QImode register operand other than %[abcd][hl].
(define_predicate "ext_QIreg_operand"
  (and (match_test "TARGET_64BIT")
       (match_code "reg")
       (not (match_test "QI_REGNO_P (REGNO (op))"))))

;; Return true if OP is the AX register.
(define_predicate "ax_reg_operand"
  (and (match_code "reg")
       (match_test "REGNO (op) == AX_REG")))

;; Return true if OP is the flags register.
(define_predicate "flags_reg_operand"
  (and (match_code "reg")
       (match_test "REGNO (op) == FLAGS_REG")))

;; Match a DImode, SImode or HImode register for a zero_extract.
(define_special_predicate "ext_register_operand"
  (and (match_operand 0 "register_operand")
       (ior (and (match_test "TARGET_64BIT")
		 (match_test "GET_MODE (op) == DImode"))
	    (match_test "GET_MODE (op) == SImode")
	    (match_test "GET_MODE (op) == HImode"))))
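
;; For illustration (an assumed example, not from the original sources):
;; in an access to the high byte of a register, e.g.
;;   (zero_extract:SI (reg:SI ax) (const_int 8) (const_int 8))
;; the extracted operand must satisfy ext_register_operand: a DImode
;; (64-bit targets only), SImode or HImode register.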

;; Match register operands, but include memory operands for TARGET_SSE_MATH.
(define_predicate "register_ssemem_operand"
  (if_then_else
    (match_test "SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH")
    (match_operand 0 "nonimmediate_operand")
    (match_operand 0 "register_operand")))

;; Match nonimmediate operands, but exclude memory operands
;; for TARGET_SSE_MATH if TARGET_MIX_SSE_I387 is not enabled.
(define_predicate "nonimm_ssenomem_operand"
  (if_then_else
    (and (match_test "SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH")
	 (not (match_test "TARGET_MIX_SSE_I387")))
    (match_operand 0 "register_operand")
    (match_operand 0 "nonimmediate_operand")))

;; The above predicate, suitable for x87 arithmetic operators.
(define_predicate "x87nonimm_ssenomem_operand"
  (if_then_else
    (and (match_test "SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH")
	 (not (match_test "TARGET_MIX_SSE_I387 && X87_ENABLE_ARITH (mode)")))
    (match_operand 0 "register_operand")
    (match_operand 0 "nonimmediate_operand")))

;; Match register operands, but include memory operands for TARGET_SSE4_1.
(define_predicate "register_sse4nonimm_operand"
  (if_then_else (match_test "TARGET_SSE4_1")
    (match_operand 0 "nonimmediate_operand")
    (match_operand 0 "register_operand")))

;; Return true if VALUE is a symbol reference.
(define_predicate "symbol_operand"
  (match_code "symbol_ref"))

;; Return true if VALUE can be stored in a sign extended immediate field.
(define_predicate "x86_64_immediate_operand"
  (match_code "const_int,symbol_ref,label_ref,const")
{
  if (!TARGET_64BIT)
    return immediate_operand (op, mode);

  switch (GET_CODE (op))
    {
    case CONST_INT:
      {
        HOST_WIDE_INT val = INTVAL (op);
        return trunc_int_for_mode (val, SImode) == val;
      }
    case SYMBOL_REF:
      /* TLS symbols are not constant.  */
      if (SYMBOL_REF_TLS_MODEL (op))
	return false;

      /* Load the external function address via the GOT slot.  */
      if (ix86_force_load_from_GOT_p (op))
	return false;

      /* For certain code models, the symbolic references are known to fit.
	 In the CM_SMALL_PIC model we know it fits if it is local to the
	 shared library.  Don't count TLS SYMBOL_REFs here, since they
	 should fit only if inside of UNSPEC handled below.  */
      return (ix86_cmodel == CM_SMALL || ix86_cmodel == CM_KERNEL
	      || (ix86_cmodel == CM_MEDIUM && !SYMBOL_REF_FAR_ADDR_P (op)));

    case LABEL_REF:
      /* For certain code models, the code is near as well.  */
      return (ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM
	      || ix86_cmodel == CM_KERNEL);

    case CONST:
      /* We also may accept the offsetted memory references in certain
	 special cases.  */
      if (GET_CODE (XEXP (op, 0)) == UNSPEC)
	switch (XINT (XEXP (op, 0), 1))
	  {
	  case UNSPEC_GOTPCREL:
	  case UNSPEC_DTPOFF:
	  case UNSPEC_GOTNTPOFF:
	  case UNSPEC_NTPOFF:
	    return true;
	  default:
	    break;
	  }

      if (GET_CODE (XEXP (op, 0)) == PLUS)
	{
	  rtx op1 = XEXP (XEXP (op, 0), 0);
	  rtx op2 = XEXP (XEXP (op, 0), 1);

	  if (ix86_cmodel == CM_LARGE && GET_CODE (op1) != UNSPEC)
	    return false;
	  if (!CONST_INT_P (op2))
	    return false;

	  HOST_WIDE_INT offset = INTVAL (op2);
	  if (trunc_int_for_mode (offset, SImode) != offset)
	    return false;

	  switch (GET_CODE (op1))
	    {
	    case SYMBOL_REF:
	      /* TLS symbols are not constant.  */
	      if (SYMBOL_REF_TLS_MODEL (op1))
		return false;

	      /* Load the external function address via the GOT slot.  */
	      if (ix86_force_load_from_GOT_p (op1))
	        return false;

	      /* For CM_SMALL assume that the latest object is 16MB below
		 the end of the 31-bit boundary.  We may also accept pretty
		 large negative constants, knowing that all objects are in
		 the positive half of the address space.  */
	      if ((ix86_cmodel == CM_SMALL
		   || (ix86_cmodel == CM_MEDIUM
		       && !SYMBOL_REF_FAR_ADDR_P (op1)))
		  && offset < 16*1024*1024)
		return true;
	      /* For CM_KERNEL we know that all objects reside in the
		 negative half of the 32-bit address space.  We must not
		 accept negative offsets, since they may be just out of
		 range, but we may accept pretty large positive ones.  */
	      if (ix86_cmodel == CM_KERNEL
		  && offset > 0)
		return true;
	      break;

	    case LABEL_REF:
	      /* These conditions are similar to the SYMBOL_REF ones, just
		 the constraints for code models differ.  */
	      if ((ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM)
		  && offset < 16*1024*1024)
		return true;
	      if (ix86_cmodel == CM_KERNEL
		  && offset > 0)
		return true;
	      break;

	    case UNSPEC:
	      switch (XINT (op1, 1))
		{
		case UNSPEC_DTPOFF:
		case UNSPEC_NTPOFF:
		  return true;
		}
	      break;

	    default:
	      break;
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  return false;
})
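
;; A minimal sketch of what x86_64_immediate_operand accepts on 64-bit
;; targets (illustrative values, not from the original sources):
;;   (const_int 42)          -> true; fits in a sign-extended 32-bit field
;;   (const_int 0x7fffffff)  -> true
;;   (const_int 0x80000000)  -> false; sign-extension changes the value
;;   (symbol_ref "foo")      -> true for CM_SMALL/CM_KERNEL, unless TLS
;;                              or forced to load through the GOT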

;; Return true if VALUE can be stored in the zero extended immediate field.
(define_predicate "x86_64_zext_immediate_operand"
  (match_code "const_int,symbol_ref,label_ref,const")
{
  switch (GET_CODE (op))
    {
    case CONST_INT:
      return !(INTVAL (op) & ~(HOST_WIDE_INT) 0xffffffff);

    case SYMBOL_REF:
      /* TLS symbols are not constant.  */
      if (SYMBOL_REF_TLS_MODEL (op))
	return false;

      /* Load the external function address via the GOT slot.  */
      if (ix86_force_load_from_GOT_p (op))
	return false;

      /* For certain code models, the symbolic references are known to fit.  */
      return (ix86_cmodel == CM_SMALL
	      || (ix86_cmodel == CM_MEDIUM
		  && !SYMBOL_REF_FAR_ADDR_P (op)));

    case LABEL_REF:
      /* For certain code models, the code is near as well.  */
      return ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM;

    case CONST:
      /* We also may accept the offsetted memory references in certain
	 special cases.  */
      if (GET_CODE (XEXP (op, 0)) == PLUS)
	{
	  rtx op1 = XEXP (XEXP (op, 0), 0);
	  rtx op2 = XEXP (XEXP (op, 0), 1);

	  if (ix86_cmodel == CM_LARGE)
	    return false;
	  if (!CONST_INT_P (op2))
	    return false;

	  HOST_WIDE_INT offset = INTVAL (op2);
	  if (trunc_int_for_mode (offset, SImode) != offset)
	    return false;

	  switch (GET_CODE (op1))
	    {
	    case SYMBOL_REF:
	      /* TLS symbols are not constant.  */
	      if (SYMBOL_REF_TLS_MODEL (op1))
		return false;

	      /* Load the external function address via the GOT slot.  */
	      if (ix86_force_load_from_GOT_p (op1))
	        return false;

	      /* For the small code model we may accept pretty large positive
		 offsets, since one bit is available for free.  Negative
		 offsets are limited by the size of the NULL pointer area
		 specified by the ABI.  */
	      if ((ix86_cmodel == CM_SMALL
		   || (ix86_cmodel == CM_MEDIUM
		       && !SYMBOL_REF_FAR_ADDR_P (op1)))
		  && offset > -0x10000)
		return true;
	      /* ??? For the kernel, we may accept adjustment of
		 -0x10000000, since we know that it will just convert
		 negative address space to positive, but perhaps this
		 is not worthwhile.  */
	      break;

	    case LABEL_REF:
	      /* These conditions are similar to the SYMBOL_REF ones, just
		 the constraints for code models differ.  */
	      if ((ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM)
		  && offset > -0x10000)
		return true;
	      break;

	    default:
	      return false;
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }
  return false;
})
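
;; Illustrative values for x86_64_zext_immediate_operand (assumed, not
;; from the original sources):
;;   (const_int 0xffffffff) -> true; no bits above bit 31 are set
;;   (const_int -1)         -> false; all upper bits are set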

;; Return true if VALUE is a constant integer whose low and high words satisfy
;; x86_64_immediate_operand.
(define_predicate "x86_64_hilo_int_operand"
  (match_code "const_int,const_wide_int")
{
  switch (GET_CODE (op))
    {
    case CONST_INT:
      return x86_64_immediate_operand (op, mode);

    case CONST_WIDE_INT:
      gcc_assert (CONST_WIDE_INT_NUNITS (op) == 2);
      return (x86_64_immediate_operand (GEN_INT (CONST_WIDE_INT_ELT (op, 0)),
					DImode)
	      && x86_64_immediate_operand (GEN_INT (CONST_WIDE_INT_ELT (op,
									1)),
					   DImode));

    default:
      gcc_unreachable ();
    }
})

;; Return true if VALUE is a constant integer whose value is
;; an x86_64_immediate_operand value zero extended from word mode to mode.
(define_predicate "x86_64_dwzext_immediate_operand"
  (match_code "const_int,const_wide_int")
{
  switch (GET_CODE (op))
    {
    case CONST_INT:
      if (!TARGET_64BIT)
	return UINTVAL (op) <= HOST_WIDE_INT_UC (0xffffffff);
      return UINTVAL (op) <= HOST_WIDE_INT_UC (0x7fffffff);

    case CONST_WIDE_INT:
      if (!TARGET_64BIT)
	return false;
      return (CONST_WIDE_INT_NUNITS (op) == 2
	      && CONST_WIDE_INT_ELT (op, 1) == 0
	      && (trunc_int_for_mode (CONST_WIDE_INT_ELT (op, 0), SImode)
		  == (HOST_WIDE_INT) CONST_WIDE_INT_ELT (op, 0)));

    default:
      gcc_unreachable ();
    }
})

;; Return true if the size of VALUE can be stored in a sign
;; extended immediate field.
(define_predicate "x86_64_immediate_size_operand"
  (and (match_code "symbol_ref")
       (ior (not (match_test "TARGET_64BIT"))
	    (match_test "ix86_cmodel == CM_SMALL")
	    (match_test "ix86_cmodel == CM_KERNEL"))))

;; Return true if OP is a general operand representable on x86_64.
(define_predicate "x86_64_general_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "nonimmediate_operand")
	 (match_operand 0 "x86_64_immediate_operand"))
    (match_operand 0 "general_operand")))

;; Return true if both words of OP are general operands representable
;; on x86_64.
(define_predicate "x86_64_hilo_general_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "nonimmediate_operand")
	 (match_operand 0 "x86_64_hilo_int_operand"))
    (match_operand 0 "general_operand")))

;; Return true if OP is a non-VOIDmode general operand representable
;; on x86_64.  This predicate is used in sign-extending conversion
;; operations that require non-VOIDmode immediate operands.
(define_predicate "x86_64_sext_operand"
  (and (match_test "GET_MODE (op) != VOIDmode")
       (match_operand 0 "x86_64_general_operand")))

;; Return true if OP is a non-VOIDmode general operand.  This predicate
;; is used in sign-extending conversion operations that require
;; non-VOIDmode immediate operands.
(define_predicate "sext_operand"
  (and (match_test "GET_MODE (op) != VOIDmode")
       (match_operand 0 "general_operand")))

;; Return true if OP is representable on x86_64 as a zero-extended operand.
;; This predicate is used in zero-extending conversion operations that
;; require non-VOIDmode immediate operands.
(define_predicate "x86_64_zext_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "nonimmediate_operand")
	 (and (match_operand 0 "x86_64_zext_immediate_operand")
	      (match_test "GET_MODE (op) != VOIDmode")))
    (match_operand 0 "nonimmediate_operand")))

;; Return true if OP is a general operand representable on x86_64
;; as either a sign-extended or a zero-extended constant.
(define_predicate "x86_64_szext_general_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "nonimmediate_operand")
	 (match_operand 0 "x86_64_immediate_operand")
	 (match_operand 0 "x86_64_zext_immediate_operand"))
    (match_operand 0 "general_operand")))

;; Return true if OP is a nonmemory operand representable on x86_64.
(define_predicate "x86_64_nonmemory_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "register_operand")
	 (match_operand 0 "x86_64_immediate_operand"))
    (match_operand 0 "nonmemory_operand")))

;; Return true if OP is a nonmemory operand representable on x86_64
;; as either a sign-extended or a zero-extended constant.
(define_predicate "x86_64_szext_nonmemory_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "register_operand")
	 (match_operand 0 "x86_64_immediate_operand")
	 (match_operand 0 "x86_64_zext_immediate_operand"))
    (match_operand 0 "nonmemory_operand")))

;; Return true when the operand is a PIC expression that can be computed
;; by the lea operation.
(define_predicate "pic_32bit_operand"
  (match_code "const,symbol_ref,label_ref")
{
  if (!flag_pic)
    return false;

  /* Rule out relocations that translate into 64bit constants.  */
  if (TARGET_64BIT && GET_CODE (op) == CONST)
    {
      op = XEXP (op, 0);
      if (GET_CODE (op) == PLUS && CONST_INT_P (XEXP (op, 1)))
	op = XEXP (op, 0);
      if (GET_CODE (op) == UNSPEC
	  && (XINT (op, 1) == UNSPEC_GOTOFF
	      || XINT (op, 1) == UNSPEC_GOT))
	return false;
    }

  return symbolic_operand (op, mode);
})

;; Return true if OP is a nonmemory operand acceptable by the movabs patterns.
(define_predicate "x86_64_movabs_operand"
  (and (match_operand 0 "nonmemory_operand")
       (not (match_operand 0 "pic_32bit_operand"))))

;; Return true if OP is either a symbol reference or a sum of a symbol
;; reference and a constant.
(define_predicate "symbolic_operand"
  (match_code "symbol_ref,label_ref,const")
{
  switch (GET_CODE (op))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return true;

    case CONST:
      op = XEXP (op, 0);
      if (GET_CODE (op) == SYMBOL_REF
	  || GET_CODE (op) == LABEL_REF
	  || (GET_CODE (op) == UNSPEC
	      && (XINT (op, 1) == UNSPEC_GOT
		  || XINT (op, 1) == UNSPEC_GOTOFF
		  || XINT (op, 1) == UNSPEC_PCREL
		  || XINT (op, 1) == UNSPEC_GOTPCREL)))
	return true;
      if (GET_CODE (op) != PLUS
	  || !CONST_INT_P (XEXP (op, 1)))
	return false;

      op = XEXP (op, 0);
      if (GET_CODE (op) == SYMBOL_REF
	  || GET_CODE (op) == LABEL_REF)
	return true;
      /* Only @GOTOFF gets offsets.  */
      if (GET_CODE (op) != UNSPEC
	  || XINT (op, 1) != UNSPEC_GOTOFF)
	return false;

      op = XVECEXP (op, 0, 0);
      if (GET_CODE (op) == SYMBOL_REF
	  || GET_CODE (op) == LABEL_REF)
	return true;
      return false;

    default:
      gcc_unreachable ();
    }
})
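
;; For illustration (assumed examples, not from the original sources),
;; symbolic_operand accepts forms such as
;;   (symbol_ref "foo")
;;   (const (plus (symbol_ref "foo") (const_int 8)))
;; and, since only @GOTOFF gets offsets, also
;;   (const (plus (unspec [(symbol_ref "foo")] UNSPEC_GOTOFF) (const_int 8)))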

;; Return true if OP is a symbolic operand that resolves locally.
(define_predicate "local_symbolic_operand"
  (match_code "const,label_ref,symbol_ref")
{
  if (GET_CODE (op) == CONST
      && GET_CODE (XEXP (op, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (op, 0), 1)))
    op = XEXP (XEXP (op, 0), 0);

  if (GET_CODE (op) == LABEL_REF)
    return true;

  if (GET_CODE (op) != SYMBOL_REF)
    return false;

  if (SYMBOL_REF_TLS_MODEL (op))
    return false;

  /* Dll-imported symbols are always external.  */
  if (TARGET_DLLIMPORT_DECL_ATTRIBUTES && SYMBOL_REF_DLLIMPORT_P (op))
    return false;
  if (SYMBOL_REF_LOCAL_P (op))
    return true;

  /* There is, however, a not insubstantial body of code in the rest of
     the compiler that assumes it can just stick the results of
     ASM_GENERATE_INTERNAL_LABEL in a symbol_ref and have done.  */
  /* ??? This is a hack.  Should update the body of the compiler to
     always create a DECL and invoke targetm.encode_section_info.  */
  if (strncmp (XSTR (op, 0), internal_label_prefix,
	       internal_label_prefix_len) == 0)
    return true;

  return false;
})

;; Test for a legitimate @GOTOFF operand.
;;
;; VxWorks does not impose a fixed gap between segments; the run-time
;; gap can be different from the object-file gap.  We therefore can't
;; use @GOTOFF unless we are absolutely sure that the symbol is in the
;; same segment as the GOT.  Unfortunately, the flexibility of linker
;; scripts means that we can't be sure of that in general, so assume
;; that @GOTOFF is never valid on VxWorks.
(define_predicate "gotoff_operand"
  (and (not (match_test "TARGET_VXWORKS_RTP"))
       (match_operand 0 "local_symbolic_operand")))

;; Test for various thread-local symbols.
(define_special_predicate "tls_symbolic_operand"
  (and (match_code "symbol_ref")
       (match_test "SYMBOL_REF_TLS_MODEL (op)")))

(define_special_predicate "tls_modbase_operand"
  (and (match_code "symbol_ref")
       (match_test "op == ix86_tls_module_base ()")))

(define_predicate "tls_address_pattern"
  (and (match_code "set,parallel,unspec,unspec_volatile")
       (match_test "ix86_tls_address_pattern_p (op)")))

;; Test for a pc-relative call operand.
(define_predicate "constant_call_address_operand"
  (match_code "symbol_ref")
{
  if (ix86_cmodel == CM_LARGE || ix86_cmodel == CM_LARGE_PIC
      || flag_force_indirect_call)
    return false;
  if (TARGET_DLLIMPORT_DECL_ATTRIBUTES && SYMBOL_REF_DLLIMPORT_P (op))
    return false;
  return true;
})

;; P6 processors will jump to the address after the decrement when %esp
;; is used as a call operand, so they will execute the return address
;; as code.  See Pentium Pro errata 70, Pentium 2 errata A33 and
;; Pentium 3 errata E17.

(define_predicate "call_register_no_elim_operand"
  (match_operand 0 "register_operand")
{
  if (SUBREG_P (op))
    op = SUBREG_REG (op);

  if (!TARGET_64BIT && op == stack_pointer_rtx)
    return false;

  return register_no_elim_operand (op, mode);
})

;; True for any register that is neither virtual nor eliminable.  Used in
;; places where instantiation of such a register may cause the pattern to
;; not be recognized.
(define_predicate "register_no_elim_operand"
  (match_operand 0 "register_operand")
{
  if (SUBREG_P (op))
    op = SUBREG_REG (op);
  return !(op == arg_pointer_rtx
	   || op == frame_pointer_rtx
	   || IN_RANGE (REGNO (op),
			FIRST_PSEUDO_REGISTER, LAST_VIRTUAL_REGISTER));
})

;; Similarly, but include the stack pointer.  This is used to prevent esp
;; from being used as an index reg.
(define_predicate "index_register_operand"
  (match_operand 0 "register_operand")
{
  if (SUBREG_P (op))
    op = SUBREG_REG (op);
  if (reload_completed)
    return REG_OK_FOR_INDEX_STRICT_P (op);
  else
    return REG_OK_FOR_INDEX_NONSTRICT_P (op);
})
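
;; For illustration (an assumed example): in an address such as
;;   (plus:SI (reg:SI bx) (mult:SI (reg:SI si) (const_int 4)))
;; the multiplied register goes through index_register_operand, which
;; is what keeps %esp out of the index slot, since the SIB byte has no
;; encoding for %esp as an index.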

;; Return false if this is any eliminable register.  Otherwise general_operand.
(define_predicate "general_no_elim_operand"
  (if_then_else (match_code "reg,subreg")
    (match_operand 0 "register_no_elim_operand")
    (match_operand 0 "general_operand")))

;; Return false if this is any eliminable register.  Otherwise
;; register_operand or a constant.
(define_predicate "nonmemory_no_elim_operand"
  (ior (match_operand 0 "register_no_elim_operand")
       (match_operand 0 "immediate_operand")))

;; Test for a valid operand for an indirect branch.
(define_predicate "indirect_branch_operand"
  (ior (match_operand 0 "register_operand")
       (and (not (match_test "TARGET_INDIRECT_BRANCH_REGISTER"))
	    (not (match_test "TARGET_X32"))
	    (match_operand 0 "memory_operand"))))

;; Return true if OP is a memory operand that can be used in sibcalls.
;; Since a sibcall never returns, we can only use call-clobbered registers
;; as the GOT base.  Allow a GOT slot here only with a pseudo register as
;; the GOT base.  Sibcalls over a GOT slot are handled properly by the
;; *sibcall_GOT_32 and *sibcall_value_GOT_32 patterns.
(define_predicate "sibcall_memory_operand"
  (match_operand 0 "memory_operand")
{
  op = XEXP (op, 0);
  if (CONSTANT_P (op))
    return true;
  if (GET_CODE (op) == PLUS && REG_P (XEXP (op, 0)))
    {
      int regno = REGNO (XEXP (op, 0));
      if (!HARD_REGISTER_NUM_P (regno) || call_used_regs[regno])
	{
	  op = XEXP (op, 1);
	  if (GOT32_symbol_operand (op, VOIDmode))
	    return true;
	}
    }
  return false;
})

;; Return true if OP is a GOT memory operand.
(define_predicate "GOT_memory_operand"
  (match_operand 0 "memory_operand")
{
  op = XEXP (op, 0);
  return (GET_CODE (op) == CONST
	  && GET_CODE (XEXP (op, 0)) == UNSPEC
	  && XINT (XEXP (op, 0), 1) == UNSPEC_GOTPCREL);
})

;; Test for a valid operand for a call instruction.
;; Allow constant call address operands in Pmode only.
(define_special_predicate "call_insn_operand"
  (ior (match_test "constant_call_address_operand
		     (op, mode == VOIDmode ? mode : Pmode)")
       (match_operand 0 "call_register_no_elim_operand")
       (and (not (match_test "TARGET_INDIRECT_BRANCH_REGISTER"))
	    (ior (and (not (match_test "TARGET_X32"))
		      (match_operand 0 "memory_operand"))
		 (and (match_test "TARGET_X32 && Pmode == DImode")
		      (match_operand 0 "GOT_memory_operand"))))))

;; Similarly, but for tail calls, in which we cannot allow memory references.
(define_special_predicate "sibcall_insn_operand"
  (ior (match_test "constant_call_address_operand
		     (op, mode == VOIDmode ? mode : Pmode)")
       (match_operand 0 "register_no_elim_operand")
       (and (not (match_test "TARGET_INDIRECT_BRANCH_REGISTER"))
	    (ior (and (not (match_test "TARGET_X32"))
		      (match_operand 0 "sibcall_memory_operand"))
		 (and (match_test "TARGET_X32 && Pmode == DImode")
		      (match_operand 0 "GOT_memory_operand"))))))

;; Return true if OP is a 32-bit GOT symbol operand.
(define_predicate "GOT32_symbol_operand"
  (match_test "GET_CODE (op) == CONST
               && GET_CODE (XEXP (op, 0)) == UNSPEC
               && XINT (XEXP (op, 0), 1) == UNSPEC_GOT"))
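
;; For illustration (an assumed example, not from the original sources):
;; GOT32_symbol_operand matches address pieces of the form
;;   (const (unspec [(symbol_ref "foo")] UNSPEC_GOT))
;; i.e. a 32-bit foo@GOT reference.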

;; Match exactly zero.
(define_predicate "const0_operand"
  (match_code "const_int,const_double,const_vector")
{
  if (mode == VOIDmode)
    mode = GET_MODE (op);
  return op == CONST0_RTX (mode);
})

;; Match one or a vector with all elements equal to one.
(define_predicate "const1_operand"
  (match_code "const_int,const_double,const_vector")
{
  if (mode == VOIDmode)
    mode = GET_MODE (op);
  return op == CONST1_RTX (mode);
})

;; Match exactly -1.
(define_predicate "constm1_operand"
  (and (match_code "const_int")
       (match_test "op == constm1_rtx")))

;; Match exactly eight.
(define_predicate "const8_operand"
  (and (match_code "const_int")
       (match_test "INTVAL (op) == 8")))

;; Match exactly 128.
(define_predicate "const128_operand"
  (and (match_code "const_int")
       (match_test "INTVAL (op) == 128")))

;; Match exactly 0xffffffff, used in anddi as a zero-extension operation.
(define_predicate "const_32bit_mask"
  (and (match_code "const_int")
       (match_test "trunc_int_for_mode (INTVAL (op), DImode)
		    == (HOST_WIDE_INT) 0xffffffff")))

;; Match 2, 4, or 8.  Used for leal multiplicands.
(define_predicate "const248_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 2 || i == 4 || i == 8;
})
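
;; For example (illustrative): with const248_operand as the multiplicand,
;;   (mult:SI (reg:SI ax) (const_int 4))
;; can be encoded as the scaled-index part of an address, as in
;;   leal (,%eax,4), %edx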

;; Match 1, 2, or 3.  Used for lea shift amounts.
(define_predicate "const123_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 1 || i == 2 || i == 3;
})

;; Match 2, 3, 6, or 7.
(define_predicate "const2367_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 2 || i == 3 || i == 6 || i == 7;
})

;; Match 1, 2, 4, or 8.
(define_predicate "const1248_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 1 || i == 2 || i == 4 || i == 8;
})

;; Match 3, 5, or 9.  Used for leal multiplicands.
(define_predicate "const359_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 3 || i == 5 || i == 9;
})
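
;; For example (illustrative): multiplication by 5 can use the same
;; register as both base and index,
;;   leal (%eax,%eax,4), %eax
;; so const359_operand values are one more than the 2/4/8 scales.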

;; Match 4, or 8 to 11.  Used for embedded rounding.
(define_predicate "const_4_or_8_to_11_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 4 || (i >= 8 && i <= 11);
})

;; Match 4 or 8.  Used for SAE.
(define_predicate "const48_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 4 || i == 8;
})

;; Match 0 or 1.
(define_predicate "const_0_to_1_operand"
  (and (match_code "const_int")
       (ior (match_test "op == const0_rtx")
	    (match_test "op == const1_rtx"))))

;; Match 0 to 3.
(define_predicate "const_0_to_3_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 3)")))

;; Match 0 to 4.
(define_predicate "const_0_to_4_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 4)")))

;; Match 0 to 5.
(define_predicate "const_0_to_5_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 5)")))

;; Match 0 to 7.
(define_predicate "const_0_to_7_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 7)")))

;; Match 0 to 15.
(define_predicate "const_0_to_15_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 15)")))

;; Match 0 to 31.
(define_predicate "const_0_to_31_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 31)")))

;; Match 0 to 63.
(define_predicate "const_0_to_63_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 63)")))

;; Match 0 to 255.
(define_predicate "const_0_to_255_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 255)")))

;; Match (0 to 255) * 8.
(define_predicate "const_0_to_255_mul_8_operand"
  (match_code "const_int")
{
  unsigned HOST_WIDE_INT val = INTVAL (op);
  return val <= 255*8 && val % 8 == 0;
})

;; Return true if OP is a CONST_INT >= 1 and <= 31 (a valid operand
;; for shift & compare patterns, as shifting by 0 does not change flags).
(define_predicate "const_1_to_31_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 1, 31)")))

;; Return true if OP is a CONST_INT >= 1 and <= 63 (a valid operand
;; for 64bit shift & compare patterns, as shifting by 0 does not change flags).
(define_predicate "const_1_to_63_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 1, 63)")))

;; Match 2 or 3.
(define_predicate "const_2_to_3_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 2, 3)")))

;; Match 4 to 5.
(define_predicate "const_4_to_5_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 4, 5)")))

;; Match 4 to 7.
(define_predicate "const_4_to_7_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 4, 7)")))

;; Match 6 to 7.
(define_predicate "const_6_to_7_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 6, 7)")))

;; Match 8 to 9.
(define_predicate "const_8_to_9_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 8, 9)")))

;; Match 8 to 11.
(define_predicate "const_8_to_11_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 8, 11)")))

;; Match 8 to 15.
(define_predicate "const_8_to_15_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 8, 15)")))

;; Match 10 to 11.
(define_predicate "const_10_to_11_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 10, 11)")))

;; Match 12 to 13.
(define_predicate "const_12_to_13_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 12, 13)")))

;; Match 12 to 15.
(define_predicate "const_12_to_15_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 12, 15)")))

;; Match 14 to 15.
(define_predicate "const_14_to_15_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 14, 15)")))

;; Match 16 to 19.
(define_predicate "const_16_to_19_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 16, 19)")))

;; Match 16 to 31.
(define_predicate "const_16_to_31_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 16, 31)")))

;; Match 20 to 23.
(define_predicate "const_20_to_23_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 20, 23)")))

;; Match 24 to 27.
(define_predicate "const_24_to_27_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 24, 27)")))

;; Match 28 to 31.
(define_predicate "const_28_to_31_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 28, 31)")))

;; True if this is a constant appropriate for an increment or decrement.
(define_predicate "incdec_operand"
  (match_code "const_int")
{
  /* On Pentium4, the inc and dec operations cause an extra dependency on
     the flags register, since the carry flag is not set.  */
  if (!TARGET_USE_INCDEC && !optimize_insn_for_size_p ())
    return false;
  return op == const1_rtx || op == constm1_rtx;
})
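
;; Illustrative sketch (assumed): with incdec_operand,
;;   (plus:SI (reg:SI ax) (const_int 1))
;; may be emitted as "incl %eax" rather than "addl $1, %eax" when
;; TARGET_USE_INCDEC holds or when optimizing for size.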

;; True for registers, or 1 or -1.  Used to optimize double-word shifts.
(define_predicate "reg_or_pm1_operand"
  (ior (match_operand 0 "register_operand")
       (and (match_code "const_int")
	    (ior (match_test "op == const1_rtx")
		 (match_test "op == constm1_rtx")))))

;; True if OP is acceptable as an operand of the DImode shift expander.
(define_predicate "shiftdi_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (match_operand 0 "nonimmediate_operand")
    (match_operand 0 "register_operand")))

;; True if OP is acceptable as an input of the DImode ashift expander.
(define_predicate "ashldi_input_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (match_operand 0 "nonimmediate_operand")
    (match_operand 0 "reg_or_pm1_operand")))

;; Return true if OP is a vector load from the constant pool with just
;; the first element nonzero.
(define_predicate "zero_extended_scalar_load_operand"
  (match_code "mem")
{
  unsigned n_elts;
  op = avoid_constant_pool_reference (op);

  if (GET_CODE (op) != CONST_VECTOR)
    return false;

  n_elts = CONST_VECTOR_NUNITS (op);

  for (n_elts--; n_elts > 0; n_elts--)
    {
      rtx elt = CONST_VECTOR_ELT (op, n_elts);
      if (elt != CONST0_RTX (GET_MODE_INNER (GET_MODE (op))))
	return false;
    }
  return true;
})
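
;; For illustration (an assumed example): a constant-pool load of
;;   (const_vector:V4SF [1.0 0.0 0.0 0.0])
;; satisfies zero_extended_scalar_load_operand; any nonzero element
;; past the first rejects it.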

;; Return true if the operand is a vector constant that is all ones.
(define_predicate "vector_all_ones_operand"
  (and (match_code "const_vector")
       (match_test "INTEGRAL_MODE_P (GET_MODE (op))")
       (match_test "op == CONSTM1_RTX (GET_MODE (op))")))

;; Return true when OP is an operand acceptable as a vector memory operand.
;; Only AVX can have a misaligned memory operand.
(define_predicate "vector_memory_operand"
  (and (match_operand 0 "memory_operand")
       (ior (match_test "TARGET_AVX")
	    (match_test "MEM_ALIGN (op) >= GET_MODE_ALIGNMENT (mode)"))))

;; Return true when OP is a register_operand or a vector_memory_operand.
(define_predicate "vector_operand"
  (ior (match_operand 0 "register_operand")
       (match_operand 0 "vector_memory_operand")))

;; Return true when OP is an operand acceptable for a standard SSE move.
(define_predicate "vector_move_operand"
  (ior (match_operand 0 "nonimmediate_operand")
       (match_operand 0 "const0_operand")))

;; Return true when OP is either a nonimmediate operand, or any
;; CONST_VECTOR.
(define_predicate "nonimmediate_or_const_vector_operand"
  (ior (match_operand 0 "nonimmediate_operand")
       (match_code "const_vector")))

;; Return true when OP is nonimmediate or a standard SSE constant.
(define_predicate "nonimmediate_or_sse_const_operand"
  (ior (match_operand 0 "nonimmediate_operand")
       (match_test "standard_sse_constant_p (op, mode)")))

;; Return true if OP is a register or a zero.
(define_predicate "reg_or_0_operand"
  (ior (match_operand 0 "register_operand")
       (match_operand 0 "const0_operand")))

;; Return true if OP is a memory operand that does not mention any
;; extended (REX-only) register.
(define_predicate "norex_memory_operand"
  (and (match_operand 0 "memory_operand")
       (not (match_test "x86_extended_reg_mentioned_p (op)"))))

;; Return true for RTX codes that force SImode address.
(define_predicate "SImode_address_operand"
  (match_code "subreg,zero_extend,and"))

;; Return true if OP is a valid address for LEA, and does not contain
;; a segment override.  Defined as a special predicate so that mode-less
;; const_int operands can be passed to address_operand.
(define_special_predicate "address_no_seg_operand"
  (match_test "address_operand (op, VOIDmode)")
{
  struct ix86_address parts;
  int ok;

  if (!CONST_INT_P (op)
      && mode != VOIDmode
      && GET_MODE (op) != mode)
    return false;

  ok = ix86_decompose_address (op, &parts);
  gcc_assert (ok);
  return parts.seg == ADDR_SPACE_GENERIC;
})
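
;; For illustration (an assumed example): address_no_seg_operand accepts
;;   (plus:SI (reg:SI bx) (const_int 8))
;; but rejects the same address in a non-generic (%fs/%gs) address space,
;; since lea computes only the offset part and would drop the segment.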

;; Return true if OP is a valid base register, displacement or
;; sum of a base register and a displacement for VSIB addressing.
(define_predicate "vsib_address_operand"
  (match_test "address_operand (op, VOIDmode)")
{
  struct ix86_address parts;
  int ok;
  rtx disp;

  ok = ix86_decompose_address (op, &parts);
  gcc_assert (ok);
  if (parts.index || parts.seg != ADDR_SPACE_GENERIC)
    return false;

  /* VSIB addressing doesn't support (%rip).  */
  if (parts.disp)
    {
      disp = parts.disp;
      if (GET_CODE (disp) == CONST)
	{
	  disp = XEXP (disp, 0);
	  if (GET_CODE (disp) == PLUS)
	    disp = XEXP (disp, 0);
	  if (GET_CODE (disp) == UNSPEC)
	    switch (XINT (disp, 1))
	      {
	      case UNSPEC_GOTPCREL:
	      case UNSPEC_PCREL:
	      case UNSPEC_GOTNTPOFF:
		return false;
	      }
	}
      if (TARGET_64BIT
	  && flag_pic
	  && (GET_CODE (disp) == SYMBOL_REF
	      || GET_CODE (disp) == LABEL_REF))
	return false;
    }

  return true;
})

;; Return true if OP is a valid MPX address operand without a base.
(define_predicate "address_mpx_no_base_operand"
  (match_test "address_operand (op, VOIDmode)")
{
  struct ix86_address parts;
  int ok;

  ok = ix86_decompose_address (op, &parts);
  gcc_assert (ok);

  if (parts.index && parts.base)
    return false;

  if (parts.seg != ADDR_SPACE_GENERIC)
    return false;

  /* Do not support (%rip).  */
  if (parts.disp && flag_pic && TARGET_64BIT
      && SYMBOLIC_CONST (parts.disp))
    {
      if (GET_CODE (parts.disp) != CONST
	  || GET_CODE (XEXP (parts.disp, 0)) != PLUS
	  || GET_CODE (XEXP (XEXP (parts.disp, 0), 0)) != UNSPEC
	  || !CONST_INT_P (XEXP (XEXP (parts.disp, 0), 1))
	  || (XINT (XEXP (XEXP (parts.disp, 0), 0), 1) != UNSPEC_DTPOFF
	      && XINT (XEXP (XEXP (parts.disp, 0), 0), 1) != UNSPEC_NTPOFF))
	return false;
    }

  return true;
})

;; Return true if OP is a valid MPX address operand without an index.
(define_predicate "address_mpx_no_index_operand"
  (match_test "address_operand (op, VOIDmode)")
{
  struct ix86_address parts;
  int ok;

  ok = ix86_decompose_address (op, &parts);
  gcc_assert (ok);

  if (parts.index)
    return false;

  if (parts.seg != ADDR_SPACE_GENERIC)
    return false;

  /* Do not support (%rip).  */
  if (parts.disp && flag_pic && TARGET_64BIT
      && SYMBOLIC_CONST (parts.disp)
      && (GET_CODE (parts.disp) != CONST
	  || GET_CODE (XEXP (parts.disp, 0)) != PLUS
	  || GET_CODE (XEXP (XEXP (parts.disp, 0), 0)) != UNSPEC
	  || !CONST_INT_P (XEXP (XEXP (parts.disp, 0), 1))
	  || (XINT (XEXP (XEXP (parts.disp, 0), 0), 1) != UNSPEC_DTPOFF
	      && XINT (XEXP (XEXP (parts.disp, 0), 0), 1) != UNSPEC_NTPOFF)))
    return false;

  return true;
})

;; Match any MEM; used as the memory operator in VSIB patterns.
(define_predicate "vsib_mem_operator"
  (match_code "mem"))

;; Match any MEM; used as the memory operator in MPX bnd patterns.
(define_predicate "bnd_mem_operator"
  (match_code "mem"))

;; Return true if the rtx is known to be at least 32 bits aligned.
(define_predicate "aligned_operand"
  (match_operand 0 "general_operand")
{
  struct ix86_address parts;
  int ok;

  /* Registers and immediate operands are always "aligned".  */
  if (!MEM_P (op))
    return true;

  /* All patterns using aligned_operand on memory operands end up
     promoting the memory operand to 64 bits and thus causing a memory
     mismatch.  */
  if (TARGET_MEMORY_MISMATCH_STALL && !optimize_insn_for_size_p ())
    return false;

  /* Don't even try to do any aligned optimizations with volatiles.  */
  if (MEM_VOLATILE_P (op))
    return false;

  if (MEM_ALIGN (op) >= 32)
    return true;

  op = XEXP (op, 0);

  /* Pushes and pops are only valid on the stack pointer.  */
  if (GET_CODE (op) == PRE_DEC
      || GET_CODE (op) == POST_INC)
    return true;

  /* Decode the address.  */
  ok = ix86_decompose_address (op, &parts);
  gcc_assert (ok);

  if (parts.base && SUBREG_P (parts.base))
    parts.base = SUBREG_REG (parts.base);
  if (parts.index && SUBREG_P (parts.index))
    parts.index = SUBREG_REG (parts.index);

  /* Look for some component that isn't known to be aligned.  */
  if (parts.index)
    {
      if (REGNO_POINTER_ALIGN (REGNO (parts.index)) * parts.scale < 32)
	return false;
    }
  if (parts.base)
    {
      if (REGNO_POINTER_ALIGN (REGNO (parts.base)) < 32)
	return false;
    }
  if (parts.disp)
    {
      if (!CONST_INT_P (parts.disp)
	  || (INTVAL (parts.disp) & 3))
	return false;
    }

  /* Didn't find one -- this must be an aligned address.  */
  return true;
})

;; Return true if OP is a memory operand with a displacement.
(define_predicate "memory_displacement_operand"
  (match_operand 0 "memory_operand")
{
  struct ix86_address parts;
  int ok;

  ok = ix86_decompose_address (XEXP (op, 0), &parts);
  gcc_assert (ok);
  return parts.disp != NULL_RTX;
})
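
;; For illustration (assumed examples):
;;   (mem:SI (plus:SI (reg:SI bx) (const_int 4)))  -> has a displacement
;;   (mem:SI (reg:SI bx))                          -> does not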

;; Return true if OP is a memory operand with a displacement only.
(define_predicate "memory_displacement_only_operand"
  (match_operand 0 "memory_operand")
{
  struct ix86_address parts;
  int ok;

  if (TARGET_64BIT)
    return false;

  ok = ix86_decompose_address (XEXP (op, 0), &parts);
  gcc_assert (ok);

  if (parts.base || parts.index)
    return false;

  return parts.disp != NULL_RTX;
})

;; Return true if OP is a memory operand that cannot be represented
;; by the modRM array.
(define_predicate "long_memory_operand"
  (and (match_operand 0 "memory_operand")
       (match_test "memory_address_length (op, false)")))

;; Return true if OP is a comparison operator that can be issued by fcmov.
(define_predicate "fcmov_comparison_operator"
  (match_operand 0 "comparison_operator")
{
  machine_mode inmode = GET_MODE (XEXP (op, 0));
  enum rtx_code code = GET_CODE (op);

  if (inmode == CCFPmode)
    {
      if (!ix86_trivial_fp_comparison_operator (op, mode))
	return false;
      code = ix86_fp_compare_code_to_integer (code);
    }
  /* The i387 supports just a limited number of condition codes.  */
  switch (code)
    {
    case LTU: case GTU: case LEU: case GEU:
      if (inmode == CCmode || inmode == CCFPmode || inmode == CCCmode)
	return true;
      return false;
    case ORDERED: case UNORDERED:
    case EQ: case NE:
      return true;
    default:
      return false;
    }
})

;; Return true if OP is a comparison that can be used in the CMPSS/CMPPS insns.
;; The first set are supported directly; the second set can't be done with
;; full IEEE support, i.e. with NaNs.

(define_predicate "sse_comparison_operator"
  (ior (match_code "eq,ne,lt,le,unordered,unge,ungt,ordered")
       (and (match_test "TARGET_AVX")
	    (match_code "ge,gt,uneq,unle,unlt,ltgt"))))

(define_predicate "ix86_comparison_int_operator"
  (match_code "ne,eq,ge,gt,le,lt"))

(define_predicate "ix86_comparison_uns_operator"
  (match_code "ne,eq,geu,gtu,leu,ltu"))

(define_predicate "bt_comparison_operator"
  (match_code "ne,eq"))

;; Return true if OP is a valid comparison operator in a valid mode.
(define_predicate "ix86_comparison_operator"
  (match_operand 0 "comparison_operator")
{
  machine_mode inmode = GET_MODE (XEXP (op, 0));
  enum rtx_code code = GET_CODE (op);

  if (inmode == CCFPmode)
    return ix86_trivial_fp_comparison_operator (op, mode);

  switch (code)
    {
    case EQ: case NE:
      if (inmode == CCGZmode)
	return false;
      return true;
    case GE: case LT:
      if (inmode == CCmode || inmode == CCGCmode
	  || inmode == CCGOCmode || inmode == CCNOmode || inmode == CCGZmode)
	return true;
      return false;
    case GEU: case LTU:
      if (inmode == CCGZmode)
	return true;
      /* FALLTHRU */
    case GTU: case LEU:
      if (inmode == CCmode || inmode == CCCmode || inmode == CCGZmode)
	return true;
      return false;
    case ORDERED: case UNORDERED:
      if (inmode == CCmode)
	return true;
      return false;
    case GT: case LE:
      if (inmode == CCmode || inmode == CCGCmode || inmode == CCNOmode)
	return true;
      return false;
    default:
      return false;
    }
})

;; Return true if OP is a valid comparison operator
;; testing the carry flag to be set.
(define_predicate "ix86_carry_flag_operator"
  (match_code "ltu,lt,unlt,gtu,gt,ungt,le,unle,ge,unge,ltgt,uneq")
{
  machine_mode inmode = GET_MODE (XEXP (op, 0));
  enum rtx_code code = GET_CODE (op);

  if (inmode == CCFPmode)
    {
      if (!ix86_trivial_fp_comparison_operator (op, mode))
	return false;
      code = ix86_fp_compare_code_to_integer (code);
    }
  else if (inmode == CCCmode)
   return code == LTU || code == GTU;
  else if (inmode != CCmode)
    return false;

  return code == LTU;
})

;; Return true if this comparison only requires testing one flag bit.
(define_predicate "ix86_trivial_fp_comparison_operator"
  (match_code "gt,ge,unlt,unle,uneq,ltgt,ordered,unordered"))

;; Return true if we know how to do this comparison.  Others require
;; testing more than one flag bit, and we let the generic middle-end
;; code do that.
(define_predicate "ix86_fp_comparison_operator"
  (if_then_else (match_test "ix86_fp_comparison_strategy (GET_CODE (op))
                             == IX86_FPCMP_ARITH")
               (match_operand 0 "comparison_operator")
               (match_operand 0 "ix86_trivial_fp_comparison_operator")))

;; Nearly general operand, but accept any const_double, since we wish
;; to be able to drop them into memory rather than have them get pulled
;; into registers.
(define_predicate "cmp_fp_expander_operand"
  (ior (match_code "const_double")
       (match_operand 0 "general_operand")))

;; Return true if this is a valid binary floating-point operation.
(define_predicate "binary_fp_operator"
  (match_code "plus,minus,mult,div"))

;; Return true if this is a multiply operation.
(define_predicate "mult_operator"
  (match_code "mult"))

;; Return true if this is a division operation.
(define_predicate "div_operator"
  (match_code "div"))

;; Return true if this is a plus, minus, and, ior or xor operation.
(define_predicate "plusminuslogic_operator"
  (match_code "plus,minus,and,ior,xor"))

;; Return true for ARITHMETIC_P.
(define_predicate "arith_or_logical_operator"
  (match_code "plus,mult,and,ior,xor,smin,smax,umin,umax,compare,minus,div,
	       mod,udiv,umod,ashift,rotate,ashiftrt,lshiftrt,rotatert"))

;; Return true for COMMUTATIVE_P.
(define_predicate "commutative_operator"
  (match_code "plus,mult,and,ior,xor,smin,smax,umin,umax"))

;; Return true if OP is a binary operator that can be promoted to a wider mode.
(define_predicate "promotable_binary_operator"
  (ior (match_code "plus,minus,and,ior,xor,ashift")
       (and (match_code "mult")
	    (match_test "TARGET_TUNE_PROMOTE_HIMODE_IMUL"))))

(define_predicate "compare_operator"
  (match_code "compare"))

(define_predicate "absneg_operator"
  (match_code "abs,neg"))

;; Return true if OP is a memory operand, aligned to
;; less than its natural alignment.
(define_predicate "misaligned_operand"
  (and (match_code "mem")
       (match_test "MEM_ALIGN (op) < GET_MODE_BITSIZE (mode)")))

;; Return true if OP is an emms operation, known to be a PARALLEL.
(define_predicate "emms_operation"
  (match_code "parallel")
{
  unsigned i;

  if (XVECLEN (op, 0) != 17)
    return false;

  for (i = 0; i < 8; i++)
    {
      rtx elt = XVECEXP (op, 0, i+1);

      if (GET_CODE (elt) != CLOBBER
	  || GET_CODE (SET_DEST (elt)) != REG
	  || GET_MODE (SET_DEST (elt)) != XFmode
	  || REGNO (SET_DEST (elt)) != FIRST_STACK_REG + i)
        return false;

      elt = XVECEXP (op, 0, i+9);

      if (GET_CODE (elt) != CLOBBER
	  || GET_CODE (SET_DEST (elt)) != REG
	  || GET_MODE (SET_DEST (elt)) != DImode
	  || REGNO (SET_DEST (elt)) != FIRST_MMX_REG + i)
	return false;
    }
  return true;
})

;; Return true if OP is a vzeroall operation, known to be a PARALLEL.
(define_predicate "vzeroall_operation"
  (match_code "parallel")
{
  unsigned i, nregs = TARGET_64BIT ? 16 : 8;

  if ((unsigned) XVECLEN (op, 0) != 1 + nregs)
    return false;

  for (i = 0; i < nregs; i++)
    {
      rtx elt = XVECEXP (op, 0, i+1);

      if (GET_CODE (elt) != SET
	  || GET_CODE (SET_DEST (elt)) != REG
	  || GET_MODE (SET_DEST (elt)) != V8SImode
	  || REGNO (SET_DEST (elt)) != GET_SSE_REGNO (i)
	  || SET_SRC (elt) != CONST0_RTX (V8SImode))
	return false;
    }
  return true;
})

;; Return true if OP is a vzeroupper operation.
(define_predicate "vzeroupper_operation"
  (and (match_code "unspec_volatile")
       (match_test "XINT (op, 1) == UNSPECV_VZEROUPPER")))

;; Return true if OP is an addsub vec_merge operation.
(define_predicate "addsub_vm_operator"
  (match_code "vec_merge")
{
  rtx op0, op1;
  int swapped;
  HOST_WIDE_INT mask;
  int nunits, elt;

  op0 = XEXP (op, 0);
  op1 = XEXP (op, 1);

  /* Sanity check.  */
  if (GET_CODE (op0) == MINUS && GET_CODE (op1) == PLUS)
    swapped = 0;
  else if (GET_CODE (op0) == PLUS && GET_CODE (op1) == MINUS)
    swapped = 1;
  else
    gcc_unreachable ();

  mask = INTVAL (XEXP (op, 2));
  nunits = GET_MODE_NUNITS (mode);

  for (elt = 0; elt < nunits; elt++)
    {
      /* bit clear: take from op0, set: take from op1  */
      int bit = !(mask & (HOST_WIDE_INT_1U << elt));

      if (bit != ((elt & 1) ^ swapped))
	return false;
    }

  return true;
})

;; Return true if OP is an addsub vec_select/vec_concat operation.
(define_predicate "addsub_vs_operator"
  (and (match_code "vec_select")
       (match_code "vec_concat" "0"))
{
  rtx op0, op1;
  bool swapped;
  int nunits, elt;

  op0 = XEXP (XEXP (op, 0), 0);
  op1 = XEXP (XEXP (op, 0), 1);

  /* Sanity check.  */
  if (GET_CODE (op0) == MINUS && GET_CODE (op1) == PLUS)
    swapped = false;
  else if (GET_CODE (op0) == PLUS && GET_CODE (op1) == MINUS)
    swapped = true;
  else
    gcc_unreachable ();

  nunits = GET_MODE_NUNITS (mode);
  if (XVECLEN (XEXP (op, 1), 0) != nunits)
    return false;

  /* We already checked that the permutation is suitable for addsub,
     so only look at the first element of the parallel.  */
  elt = INTVAL (XVECEXP (XEXP (op, 1), 0, 0));

  return elt == (swapped ? nunits : 0);
})

;; Return true if OP is a parallel for an addsub vec_select.
(define_predicate "addsub_vs_parallel"
  (and (match_code "parallel")
       (match_code "const_int" "a"))
{
  int nelt = XVECLEN (op, 0);
  int elt, i;

  if (nelt < 2)
    return false;

  /* Check that the permutation is suitable for addsub.
     For example, { 0 9 2 11 4 13 6 15 } or { 8 1 10 3 12 5 14 7 }.  */
  elt = INTVAL (XVECEXP (op, 0, 0));
  if (elt == 0)
    {
      for (i = 1; i < nelt; ++i)
	if (INTVAL (XVECEXP (op, 0, i)) != (i + (i & 1) * nelt))
	  return false;
    }
  else if (elt == nelt)
    {
      for (i = 1; i < nelt; ++i)
	if (INTVAL (XVECEXP (op, 0, i)) != (elt + i - (i & 1) * nelt))
	  return false;
    }
  else
    return false;

  return true;
})

;; Return true if OP is a parallel for a vbroadcast permute.
(define_predicate "avx_vbroadcast_operand"
  (and (match_code "parallel")
       (match_code "const_int" "a"))
{
  rtx elt = XVECEXP (op, 0, 0);
  int i, nelt = XVECLEN (op, 0);

  /* Don't bother checking there are the right number of operands,
     merely that they're all identical.  */
  for (i = 1; i < nelt; ++i)
    if (XVECEXP (op, 0, i) != elt)
      return false;
  return true;
})

;; Return true if OP is a parallel for a palignr permute.
(define_predicate "palignr_operand"
  (and (match_code "parallel")
       (match_code "const_int" "a"))
{
  int elt = INTVAL (XVECEXP (op, 0, 0));
  int i, nelt = XVECLEN (op, 0);

  /* Check that the order in the permutation is suitable for palignr.
     For example, {5 6 7 0 1 2 3 4} is "palignr 5, xmm, xmm".  */
  for (i = 1; i < nelt; ++i)
    if (INTVAL (XVECEXP (op, 0, i)) != ((elt + i) % nelt))
      return false;
  return true;
})

;; Return true if OP is a proper third operand to vpblendw256.
(define_predicate "avx2_pblendw_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT val = INTVAL (op);
  HOST_WIDE_INT low = val & 0xff;
  return val == ((low << 8) | low);
})
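
;; For example (illustrative): 0x5a5a is accepted because its high byte
;; repeats its low byte (vpblendw on 256-bit vectors applies the same
;; 8-bit mask to both 128-bit lanes), while 0x00ff is rejected.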

;; Return true if OP is vector_operand or CONST_VECTOR.
(define_predicate "general_vector_operand"
  (ior (match_operand 0 "vector_operand")
       (match_code "const_vector")))

;; Return true if OP is either the constant -1 or stored in a register.
(define_predicate "register_or_constm1_operand"
  (ior (match_operand 0 "register_operand")
       (and (match_code "const_int")
	    (match_test "op == constm1_rtx"))))

;; Return true if the vector ends with between 12 and 18 register saves using
;; RAX as the base address.
(define_predicate "save_multiple"
  (match_code "parallel")
{
  const unsigned len = XVECLEN (op, 0);
  unsigned i;

  /* Starting from the end of the vector, count register saves.  */
  for (i = 0; i < len; ++i)
    {
      rtx src, dest, addr;
      rtx e = XVECEXP (op, 0, len - 1 - i);

      if (GET_CODE (e) != SET)
	break;

      src  = SET_SRC (e);
      dest = SET_DEST (e);

      if (!REG_P (src) || !MEM_P (dest))
	break;

      addr = XEXP (dest, 0);

      /* Good if the dest address is in RAX.  */
      if (REG_P (addr) && REGNO (addr) == AX_REG)
	continue;

      /* Good if the dest address is an offset of RAX.  */
      if (GET_CODE (addr) == PLUS
	  && REG_P (XEXP (addr, 0))
	  && REGNO (XEXP (addr, 0)) == AX_REG)
	continue;

      break;
    }
  return (i >= 12 && i <= 18);
})


;; Return true if the vector ends with between 12 and 18 register loads using
;; RSI as the base address.
(define_predicate "restore_multiple"
  (match_code "parallel")
{
  const unsigned len = XVECLEN (op, 0);
  unsigned i;

  /* Starting from the end of the vector, count register restores.  */
  for (i = 0; i < len; ++i)
    {
      rtx src, dest, addr;
      rtx e = XVECEXP (op, 0, len - 1 - i);

      if (GET_CODE (e) != SET)
	break;

      src  = SET_SRC (e);
      dest = SET_DEST (e);

      if (!MEM_P (src) || !REG_P (dest))
	break;

      addr = XEXP (src, 0);

      /* Good if the src address is in RSI.  */
      if (REG_P (addr) && REGNO (addr) == SI_REG)
	continue;

      /* Good if the src address is an offset of RSI.  */
      if (GET_CODE (addr) == PLUS
	  && REG_P (XEXP (addr, 0))
	  && REGNO (XEXP (addr, 0)) == SI_REG)
	continue;

      break;
    }
  return (i >= 12 && i <= 18);
})