1;;  Machine Description for Renesas RL78 processors
2;;  Copyright (C) 2011-2016 Free Software Foundation, Inc.
3;;  Contributed by Red Hat.
4
5;; This file is part of GCC.
6
7;; GCC is free software; you can redistribute it and/or modify
8;; it under the terms of the GNU General Public License as published by
9;; the Free Software Foundation; either version 3, or (at your option)
10;; any later version.
11
12;; GCC is distributed in the hope that it will be useful,
13;; but WITHOUT ANY WARRANTY; without even the implied warranty of
14;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15;; GNU General Public License for more details.
16
17;; You should have received a copy of the GNU General Public License
18;; along with GCC; see the file COPYING3.  If not see
19;; <http://www.gnu.org/licenses/>.
20
21;; In this MD file, we define those insn patterns that involve
22;; registers, where such registers are virtual until allocated to a
23;; physical register.  All of these insns need to be conditional on
24;; rl78_virt_insns_ok () being true.
25
26;; This tells the physical register allocator what method to use to
27;; allocate registers.  Basically, this defines the template of the
28;; instruction - op1 is of the form "a = op(b)", op2 is "a = b op c"
29;; etc.
30
;; Allocation-template classes (see the comment above): "op1" is the
;; one-input form "a = op(b)", "op2" the two-input form "a = b op c"
;; (the default); the remaining values name special-case templates
;; (read-only operand, compare, multiply, multiply-accumulate/AX,
;; and the two divide widths).
(define_attr "valloc" "op1,op2,ro1,cmp,umul,macax,divhi,divsi"
  (const_string "op2"))
33
34;;---------- Moving ------------------------
35
;; QImode move where both source and destination are near memory
;; (constraint "Y"); kept as a virtual insn ("v.mov") until the
;; devirtualization pass rewrites it.
(define_insn "*movqi_virt_mm"
  [(set (match_operand:QI 0 "rl78_near_mem_operand" "=Y")
	(match_operand    1 "rl78_near_mem_operand" "Y"))]
  "rl78_virt_insns_ok ()"
  "v.mov %0, %1"
  [(set_attr "valloc" "op1")]
)
43
;; General QImode move: virtual registers, immediates, near memory
;; ("Y") and far memory ("Wfr") in various pairings, including the
;; far-to-far alternative in the last column.
(define_insn "*movqi_virt"
  [(set (match_operand:QI 0 "nonimmediate_operand" "=vY,v,*Wfr,Y,*Wfr,*Wfr")
	(match_operand    1 "general_operand" "vInt8JY,*Wfr,vInt8J,*Wfr,Y,*Wfr"))]
  "rl78_virt_insns_ok ()"
  "v.mov %0, %1"
  [(set_attr "valloc" "op1")]
)
51
;; HImode near-memory to near-memory move, the word-sized counterpart
;; of *movqi_virt_mm.
(define_insn "*movhi_virt_mm"
  [(set (match_operand:HI 0 "rl78_near_mem_operand" "=Y")
	(match_operand:HI 1 "rl78_near_mem_operand" "Y"))]
  "rl78_virt_insns_ok ()"
  "v.movw %0, %1"
  [(set_attr "valloc" "op1")]
)
59
;; General HImode move.  Unlike the QI version there is no
;; far-to-far alternative; far memory ("Wfr") appears on only one
;; side of each alternative.
(define_insn "*movhi_virt"
  [(set (match_operand:HI 0 "nonimmediate_operand" "=vS,  Y,   v,   *Wfr")
	(match_operand:HI 1 "general_operand"      "viYS, viS, *Wfr, vi"))]
  "rl78_virt_insns_ok ()"
  "v.movw %0, %1"
  [(set_attr "valloc" "op1")]
)
67
68;;---------- Conversions ------------------------
69
;; Zero-extend QI -> HI.  rl78_one_far_p permits at most one far
;; ("Wfr") operand between destination and source.
(define_insn "*zero_extendqihi2_virt"
  [(set (match_operand:HI                 0 "rl78_nonfar_nonimm_operand" "=vY,*Wfr")
	(zero_extend:HI (match_operand:QI 1 "general_operand" "vim,viY")))]
  "rl78_virt_insns_ok () && rl78_one_far_p (operands, 2)"
  "v.zero_extend\t%0, %1"
  [(set_attr "valloc" "op1")]
  )
77
;; Sign-extend QI -> HI; same operand/far-memory rules as the
;; zero-extend pattern above it.
(define_insn "*extendqihi2_virt"
  [(set (match_operand:HI                 0 "rl78_nonfar_nonimm_operand" "=vY,*Wfr")
	(sign_extend:HI (match_operand:QI 1 "general_operand" "vim,viY")))]
  "rl78_virt_insns_ok () && rl78_one_far_p (operands, 2)"
  "v.sign_extend\t%0, %1"
  [(set_attr "valloc" "op1")]
  )
85
86;;---------- Arithmetic ------------------------
87
;; In-place increment/decrement (QI or HI).  Operand 1 is tied to the
;; destination ("0"); operand 2 is restricted by rl78_1_2_operand
;; (constraints K/L/N/O -- presumably the +/-1 and +/-2 constants the
;; hardware inc/dec forms accept; confirm against rl78.md constraints).
(define_insn "*inc<mode>3_virt"
  [(set (match_operand:QHI           0 "rl78_incdec_memory_operand" "=vm")
	(plus:QHI (match_operand:QHI 1 "rl78_incdec_memory_operand" "0")
		  (match_operand:QHI 2 "rl78_1_2_operand" "KLNO")))
   ]
  "rl78_virt_insns_ok ()"
  "v.inc\t%0, %1, %2"
)
96
;; Addition (QI or HI).  "%" marks operands 1 and 2 as commutative;
;; rl78_one_far_p allows at most one far ("Wfr") operand among the
;; three.
(define_insn "*add<mode>3_virt"
  [(set (match_operand:QHI           0 "rl78_nonimmediate_operand" "=vY,  S, *Wfr,  vY")
	(plus:QHI (match_operand:QHI 1 "rl78_general_operand"      "%viY, 0, 0viY, *Wfr")
		  (match_operand:QHI 2 "rl78_general_operand"       "vim, i, viY,  viY")))
   ]
  "rl78_virt_insns_ok () && rl78_one_far_p (operands, 3)"
  "v.add\t%0, %1, %2"
)
105
;; Subtraction (QI or HI).  Same shape as the add pattern but without
;; the "%" commutativity marker, since minus is not commutative.
(define_insn "*sub<mode>3_virt"
  [(set (match_operand:QHI            0 "rl78_nonimmediate_operand" "=vY, S, *Wfr,  vY")
	(minus:QHI (match_operand:QHI 1 "rl78_general_operand"      "viY, 0, 0viY, *Wfr")
		   (match_operand:QHI 2 "rl78_general_operand"      "vim, i, viY,  viY")))
   ]
  "rl78_virt_insns_ok () && rl78_one_far_p (operands, 3)"
  "v.sub\t%0, %1, %2"
)
114
;; HImode multiply whose second operand is limited by rl78_24_operand
;; (constraint "Ni").  The "_shift" name suggests these are the
;; multiplier values implementable as shifts (presumably 2 and 4 --
;; confirm against the rl78_24_operand predicate definition).
(define_insn "*umulhi3_shift_virt"
  [(set (match_operand:HI          0 "register_operand" "=v")
        (mult:HI (match_operand:HI 1 "rl78_nonfar_operand" "%vim")
                 (match_operand:HI 2 "rl78_24_operand" "Ni")))]
  "rl78_virt_insns_ok ()"
  "v.mulu\t%0, %1, %2"
  [(set_attr "valloc" "umul")]
)
123
;; Widening unsigned multiply: QI x QI -> HI.
;; NOTE(review): the template prints only %0 and %2; presumably the
;; "umul" valloc class makes the devirtualizer place operand 1 in the
;; register the hardware MULU implicitly reads -- confirm in rl78.c.
(define_insn "*umulqihi3_virt"
  [(set (match_operand:HI                          0 "register_operand" "=v")
        (mult:HI (zero_extend:HI (match_operand:QI 1 "rl78_nonfar_operand" "%vim"))
                 (zero_extend:HI (match_operand:QI 2 "general_operand" "vim"))))]
  "rl78_virt_insns_ok ()"
  "v.mulu\t%0, %2"
  [(set_attr "valloc" "umul")]
)
132
;; Bitwise AND (QImode).  Commutative inputs ("%"); at most one far
;; ("Wfr") operand among the three (rl78_one_far_p).
(define_insn "*andqi3_virt"
  [(set (match_operand:QI         0 "rl78_nonimmediate_operand" "=vm,  *Wfr,  vY")
	(and:QI (match_operand:QI 1 "rl78_general_operand"      "%vim, 0viY, *Wfr")
		(match_operand:QI 2 "rl78_general_operand"      "vim,  viY,  viY")))
   ]
  "rl78_virt_insns_ok () && rl78_one_far_p (operands, 3)"
  "v.and\t%0, %1, %2"
)
141
;; Bitwise inclusive OR (QImode); same operand rules as *andqi3_virt.
(define_insn "*iorqi3_virt"
  [(set (match_operand:QI         0 "rl78_nonimmediate_operand" "=vm,  *Wfr,  vY")
	(ior:QI (match_operand:QI 1 "rl78_general_operand"      "%vim, 0viY, *Wfr")
		(match_operand:QI 2 "rl78_general_operand"      "vim,  viY,  viY")))
   ]
  "rl78_virt_insns_ok () && rl78_one_far_p (operands, 3)"
  "v.or\t%0, %1, %2"
)
150
;; Bitwise exclusive OR (QImode); same operand rules as *andqi3_virt
;; and *iorqi3_virt.  Operand 2 carries an explicit :QI mode for
;; consistency with those sibling patterns (it was the only modeless
;; match_operand of the three, which matches any mode).
(define_insn "*xorqi3_virt"
  [(set (match_operand:QI         0 "rl78_nonimmediate_operand" "=vm,  *Wfr,  vY")
	(xor:QI (match_operand:QI 1 "rl78_general_operand"      "%vim, 0viY, *Wfr")
		(match_operand:QI 2 "rl78_general_operand"      "vim,  viY,  viY")))
   ]
  "rl78_virt_insns_ok () && rl78_one_far_p (operands, 3)"
  "v.xor\t%0, %1, %2"
)
159
160;;---------- Shifts ------------------------
161
;; Arithmetic/logical left shift (QI or HI); shift count may be a
;; register, memory or immediate.  Far operands are excluded by the
;; nonfar predicates.
(define_insn "*ashl<mode>3_virt"
  [(set (match_operand:QHI             0 "rl78_nonfar_nonimm_operand" "=vm")
	(ashift:QHI (match_operand:QHI 1 "rl78_nonfar_operand" "vim")
		    (match_operand:QI  2 "general_operand" "vim")))
   ]
  "rl78_virt_insns_ok ()"
  "v.shl\t%0, %1, %2"
)
170
;; Arithmetic right shift (QI or HI), sign-filling; same operand
;; rules as the left-shift pattern above.
(define_insn "*ashr<mode>3_virt"
  [(set (match_operand:QHI               0 "rl78_nonfar_nonimm_operand" "=vm")
	(ashiftrt:QHI (match_operand:QHI 1 "rl78_nonfar_operand" "vim")
		      (match_operand:QI  2 "general_operand" "vim")))
   ]
  "rl78_virt_insns_ok ()"
  "v.sar\t%0, %1, %2"
)
179
;; Logical right shift (QI or HI), zero-filling; same operand rules
;; as the other virtual shift patterns.
(define_insn "*lshr<mode>3_virt"
  [(set (match_operand:QHI               0 "rl78_nonfar_nonimm_operand" "=vm")
	(lshiftrt:QHI (match_operand:QHI 1 "rl78_nonfar_operand" "vim")
		      (match_operand:QI  2 "general_operand" "vim")))
   ]
  "rl78_virt_insns_ok ()"
  "v.shr\t%0, %1, %2"
)
188
189;; This is complex mostly because the RL78 has no SImode operations,
190;; and very limited HImode operations, and no variable shifts.  This
191;; pattern is optimized for each constant shift count and operand
192;; types, so as to use a hand-optimized pattern.  For readability, the
193;; usual \t\; syntax is not used here.  Also, there's no easy way to
194;; constrain to avoid partial overlaps, hence the duplication.
;; SImode arithmetic right shift.  The trailing comment on the first
;; line maps each constraint alternative to a shift-count range;
;; consecutive alternatives that differ only in constraints (e.g.
;; overlapping vs. non-overlapping operands) share an identical
;; hand-written template.  The final alternative ("iv") handles a
;; variable count with a run-time loop using B as the counter, and
;; the count-0 alternative emits only an assembler comment.
(define_insn "ashrsi3_virt"                                  ;;   0  1      2-7            8         9-15           16   17-23     24   25-31 var
  [(set (match_operand:SI               0 "nonimmediate_operand" "=v,vU,&vU,v,  &vU,  &vU, v,  &vU,  v,  &vU, &vU,  vU,  v,&vU,    vU,  vU,   vU")
	(ashiftrt:SI (match_operand:SI  1 "nonimmediate_operand" "0, 0,  vU,0,   vWab, U,  0,   vU,  0,   vWab,U,   vU,  0, vU,    vU,  vU,   0")
		      (match_operand:SI 2 "nonmemory_operand"    "M, K,  K, Int3,Int3,Int3,Iv08,Iv08,Is09,Is09,Is09,Iv16,Is17,Is17,Iv24,Is25, iv")))
   (clobber (reg:HI X_REG))
    ]
   ""
   "@
    ; ashrsi %0, 0

   movw ax,%H1 \; sarw ax,1 \; movw %H0,ax \; mov a,%Q1 \; rorc a,1 \; mov %Q0,a \; mov a,%q1 \; rorc a,1 \; mov %q0,a
   movw ax,%H1 \; sarw ax,1 \; movw %H0,ax \; mov a,%Q1 \; rorc a,1 \; mov %Q0,a \; mov a,%q1 \; rorc a,1 \; mov %q0,a

   movw ax,%1 \; shlw ax,%r2 \; mov %0,a             \; mov x,%Q1 \; mov a,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; sarw ax,%u2 \; movw %H0,ax
   movw ax,%1 \; shlw ax,%r2 \; mov %0,a             \; mov x,%Q1 \; mov a,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; sarw ax,%u2 \; movw %H0,ax
   movw ax,%1 \; shlw ax,%r2 \; mov %0,a \; mov a,%Q1 \; mov x,a   \; mov a,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; sarw ax,%u2 \; movw %H0,ax

   mov x,%Q1            \; mov a,%H1 \; movw %0,ax \; movw ax,%H1 \; sarw ax,8 \; movw %H0,ax
   mov a,%Q1 \; mov x, a \; mov a,%H1 \; movw %0,ax \; movw ax,%H1 \; sarw ax,8 \; movw %H0,ax

   mov x,%Q1           \; mov a,%H1 \; shlw ax,%r2 \; mov %0,a \; movw ax,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; sarw ax,%u2 \; movw %H0,ax
   mov x,%Q1           \; mov a,%H1 \; shlw ax,%r2 \; mov %0,a \; movw ax,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; sarw ax,%u2 \; movw %H0,ax
   mov a,%Q1 \; mov x,a \; mov a,%H1 \; shlw ax,%r2 \; mov %0,a \; movw ax,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; sarw ax,%u2 \; movw %H0,ax

   movw ax,%H1 \; movw %0,ax \; sarw ax,15 \; movw %H0,ax

   movw ax,%H1 \; sarw ax,%S2 \; movw %0,ax \; sarw ax,15 \; movw %H0,ax
   movw ax,%H1 \; sarw ax,%S2 \; movw %0,ax \; sarw ax,15 \; movw %H0,ax

   movw ax,%H1 \; mov %0,a \; sarw ax,15 \; movw %H0,ax \; mov %Q0,a

   movw ax,%H1 \; sar a,%s2 \; mov %0,a \; sarw ax,15 \; movw %H0,ax \; mov %Q0,a

   mov b,%2 \; cmp0 b \; bz $2f \; 1: \; movw ax,%H1 \; sarw ax,1 \; movw %H0,ax \; mov a,%Q1 \; rorc a,1 \; mov %Q0,a \; mov a,%q1 \; rorc a,1 \; mov %q0,a \; dec b \; bnz $1b \; 2:"
  [(set_attr "valloc" "macax")]
)
231
232;; Likewise.
;; SImode logical right shift.  Structured exactly like ashrsi3_virt
;; above (see its comment for how the alternatives are grouped); the
;; difference is shrw/zero-fill ("movw ax,#0") in place of sarw/
;; sign-fill for the upper bits.
(define_insn "lshrsi3_virt"                                  ;;   0  1      2-7            8         9-15           16   17-23     24   25-31 var
  [(set (match_operand:SI               0 "nonimmediate_operand" "=v,vU,&vU,v,  &vU,  &vU, v,  &vU,  v,  &vU, &vU,  vU,  v,&vU,    vU,  vU,   vU")
	(lshiftrt:SI (match_operand:SI  1 "nonimmediate_operand" "0, 0,  vU,0,   vWab, U,  0,   vU,  0,   vWab,U,   vU,  0, vU,    vU,  vU,   0")
		      (match_operand:SI 2 "nonmemory_operand"    "M, K,  K, Int3,Int3,Int3,Iv08,Iv08,Is09,Is09,Is09,Iv16,Is17,Is17,Iv24,Is25, iv")))
   (clobber (reg:HI X_REG))
   ]
  ""
  "@
   ; lshrsi %0, 0

   movw ax,%H1 \; shrw ax,1 \; movw %H0,ax \; mov a,%Q1 \; rorc a,1 \; mov %Q0,a \; mov a,%q1 \; rorc a,1 \; mov %q0,a
   movw ax,%H1 \; shrw ax,1 \; movw %H0,ax \; mov a,%Q1 \; rorc a,1 \; mov %Q0,a \; mov a,%q1 \; rorc a,1 \; mov %q0,a

   movw ax,%1 \; shlw ax,%r2 \; mov %0,a             \; mov x,%Q1 \; mov a,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; shrw ax,%u2 \; movw %H0,ax
   movw ax,%1 \; shlw ax,%r2 \; mov %0,a             \; mov x,%Q1 \; mov a,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; shrw ax,%u2 \; movw %H0,ax
   movw ax,%1 \; shlw ax,%r2 \; mov %0,a \; mov a,%Q1 \; mov x,a   \; mov a,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; shrw ax,%u2 \; movw %H0,ax

   mov x,%Q1            \; mov a,%H1 \; movw %0,ax \; movw ax,%H1 \; shrw ax,8 \; movw %H0,ax
   mov a,%Q1 \; mov x, a \; mov a,%H1 \; movw %0,ax \; movw ax,%H1 \; shrw ax,8 \; movw %H0,ax

   mov x,%Q1           \; mov a,%H1 \; shlw ax,%r2 \; mov %0,a \; movw ax,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; shrw ax,%u2 \; movw %H0,ax
   mov x,%Q1           \; mov a,%H1 \; shlw ax,%r2 \; mov %0,a \; movw ax,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; shrw ax,%u2 \; movw %H0,ax
   mov a,%Q1 \; mov x,a \; mov a,%H1 \; shlw ax,%r2 \; mov %0,a \; movw ax,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; shrw ax,%u2 \; movw %H0,ax

   movw ax,%H1 \; movw %0,ax \; movw ax,#0 \; movw %H0,ax

   movw ax,%H1 \; shrw ax,%S2 \; movw %0,ax \; movw ax,#0 \; movw %H0,ax
   movw ax,%H1 \; shrw ax,%S2 \; movw %0,ax \; movw ax,#0 \; movw %H0,ax

   movw ax,%H1 \; mov %0,a \; movw ax,#0 \; movw %H0,ax \; mov %Q0,a

   movw ax,%H1 \; shr a,%s2 \; mov %0,a \; movw ax,#0 \; movw %H0,ax \; mov %Q0,a

   mov b,%2 \; cmp0 b \; bz $2f \; 1: \; movw ax,%H1 \; shrw ax,1 \; movw %H0,ax \; mov a,%Q1 \; rorc a,1 \; mov %Q0,a \; mov a,%q1 \; rorc a,1 \; mov %q0,a \; dec b \; bnz $1b \; 2:"
  [(set_attr "valloc" "macax")]
)
269
270;; Likewise.
;; SImode left shift, structured like ashrsi3_virt/lshrsi3_virt but
;; with extra alternatives distinguishing overlapping from
;; non-overlapping source/destination.  Fixes vs. the previous
;; revision: the count-0 alternative's assembler comment said
;; "; lshrsi" (copy-paste from lshrsi3_virt) and two templates used
;; "movW" instead of the lower-case "movw" used everywhere else.
(define_insn "ashlsi3_virt"                                ;;   0  1      2-7            8         9-15           16        17-23     24        25-31     var
  [(set (match_operand:SI             0 "nonimmediate_operand" "=v,vU,&vU,v,  &vU,  &vU, v,  &vU,  v,  &vU, &vU,  v,   U,   v,&vU,    v,   U,   v,   U,   vWab,vU,  vU")
	(ashift:SI (match_operand:SI  1 "nonimmediate_operand" "0, 0,  vU,0,   vWab, U,  0,   vU,  0,   vWab,U,   vU,  vU,  0, vU,    vU,  vU,  vU,  vU,  0,   vWab,U")
		    (match_operand:SI 2 "nonmemory_operand"    "M, K,  K, Int3,Int3,Int3,Iv08,Iv08,Is09,Is09,Is09,Iv16,Iv16,Is17,Is17,Iv24,Iv24,Is25,Is25,iv,  iv,  iv")))
   (clobber (reg:HI X_REG))
   ]
  ""
  "@
   ; ashlsi %0, 0

   movw ax,%1 \; shlw ax,1 \; movw %0,ax \; movw ax,%H1 \; rolwc ax,1 \; movw %H0,ax
   movw ax,%1 \; shlw ax,1 \; movw %0,ax \; movw ax,%H1 \; rolwc ax,1 \; movw %H0,ax

   movw ax,%H1 \; shlw ax,%u2 \; mov %E0,a \; mov x,%Q1           \; mov a, %H1 \; shlw ax,%S2 \; mov %H0,a \; movw ax,%1 \; shlw ax,%u2 \; movw %0,ax
   movw ax,%H1 \; shlw ax,%u2 \; mov %E0,a \; mov x,%Q1           \; mov a, %H1 \; shlw ax,%S2 \; mov %H0,a \; movw ax,%1 \; shlw ax,%u2 \; movw %0,ax
   movw ax,%H1 \; shlw ax,%u2 \; mov %E0,a \; mov a,%Q1 \; mov x,a \; mov a, %H1 \; shlw ax,%S2 \; mov %H0,a \; movw ax,%1 \; shlw ax,%u2 \; movw %0,ax

   mov x,%Q1           \; mov a,%H1 \; movw %H0,ax \; movw ax,%1 \; shlw ax,8 \; movw %0,ax
   mov a,%Q1 \; mov x,a \; mov a,%H1 \; movw %H0,ax \; movw ax,%1 \; shlw ax,8 \; movw %0,ax

   mov x,%Q1           \; mov a,%H1 \; shlw ax,%s2 \; movw %H0,ax \; movw ax,%1 \; shlw ax,%s2 \; mov %H0,a \; movw ax,%1 \; shlw ax,%u2 \; movw %0,ax
   mov x,%Q1           \; mov a,%H1 \; shlw ax,%s2 \; movw %H0,ax \; movw ax,%1 \; shlw ax,%s2 \; mov %H0,a \; movw ax,%1 \; shlw ax,%u2 \; movw %0,ax
   mov a,%Q1 \; mov x,a \; mov a,%H1 \; shlw ax,%s2 \; movw %H0,ax \; movw ax,%1 \; shlw ax,%s2 \; mov %H0,a \; movw ax,%1 \; shlw ax,%u2 \; movw %0,ax

   movw ax,%1 \; movw %H0,ax \; movw %0,#0
   movw ax,%1 \; movw %H0,ax \; movw ax,#0 \; movw %0,ax

   movw ax,%1 \; shlw ax,%S2 \; movw %H0,ax \; movw %0,#0
   movw ax,%1 \; shlw ax,%S2 \; movw %H0,ax \; movw ax,#0 \; movw %0,ax

   mov a,%1 \; movw %H0,ax \; mov %H0,#0 \; movw %0,#0
   mov a,%1 \; movw %H0,ax \; movw ax,#0 \; mov %H0,a \; movw %0,ax

   mov a,%1 \; shl a,%s2 \; movw %H0,ax \; mov %H0,#0 \; movw %0,#0
   mov a,%1 \; shl a,%s2 \; movw %H0,ax \; movw ax,#0 \; mov %H0,a \; movw %0,ax

   mov a,%2 \; cmp0 a \; bz $2f \; mov d,a \; movw ax,%H1 \; movw bc,%1 \; 1: \; shlw bc,1 \; rolwc ax,1 \; dec d \; bnz $1b \; movw %H0,ax \; movw ax,bc \; movw %0,ax \; 2:
   mov a,%2 \; mov d,a \; movw ax,%H1 \; movw bc,%1 \; cmp0 0xFFEFD \; bz $2f \; 1: \; shlw bc,1 \; rolwc ax,1 \; dec d \; bnz $1b \; 2: \; movw %H0,ax \; movw ax,bc \; movw %0,ax
   mov a,%2 \; mov d,a \; movw ax,%1 \; movw bc,ax \; movw ax,%H1 \; cmp0 0xFFEFD \; bz $2f \; 1: \; shlw bc,1 \; rolwc ax,1 \; dec d \; bnz $1b \; 2: \; movw %H0,ax \; movw ax,bc \; movw %0,ax"
   [(set_attr "valloc" "macax")]
 )
312
313;;---------- Branching ------------------------
314
;; Indirect jump through a virtual register or memory word.
(define_insn "*indirect_jump_virt"
  [(set (pc)
	(match_operand:HI 0 "nonimmediate_operand" "vm"))]
  "rl78_virt_insns_ok ()"
  "v.br\t%0"
  [(set_attr "valloc" "ro1")]
)
322
;; Call with no return value; the target is a memory operand
;; (constraints Wab/Wcv select the addressing forms a call accepts).
(define_insn "*call_virt"
  [(call (match_operand:HI 0 "memory_operand" "Wab,Wcv")
	 (match_operand 1 "" ""))]
  "rl78_virt_insns_ok ()"
  "v.call\t%0"
  [(set_attr "valloc" "ro1")]
  )
330
;; Call whose return value lands in a virtual register (operand 0).
;; Note the valloc class is "op1" here, vs. "ro1" for the void call
;; above, since the destination is written.
(define_insn "*call_value_virt"
  [(set (match_operand 0 "register_operand" "=v,v")
	(call (match_operand:HI 1 "memory_operand" "Wab,Wcv")
	      (match_operand 2 "" "")))]
  "rl78_virt_insns_ok ()"
  "v.call\t%1"
  [(set_attr "valloc" "op1")]
  )
339
;; QImode compare-and-branch for signed comparison operators.
;; Operand 2 must be non-memory here, unlike the unsigned/"real"
;; variant below.  %C0 prints the branch condition for operator 0.
(define_insn "*cbranchqi4_virt_signed"
  [(set (pc) (if_then_else
	      (match_operator 0 "rl78_cmp_operator_signed"
			      [(match_operand:QI 1 "general_operand" "vim")
			       (match_operand:QI 2 "nonmemory_operand" "vi")])
              (label_ref (match_operand 3 "" ""))
	      (pc)))]
  "rl78_virt_insns_ok ()"
  "v.cmp\t%1, %2\\n\tv.b%C0\t%3"
  [(set_attr "valloc" "cmp")]
  )
351
;; QImode compare-and-branch for the comparisons the hardware
;; supports directly (rl78_cmp_operator_real); both operands may be
;; memory.
(define_insn "*cbranchqi4_virt"
  [(set (pc) (if_then_else
	      (match_operator 0 "rl78_cmp_operator_real"
			      [(match_operand:QI 1 "rl78_general_operand" "vim")
			       (match_operand:QI 2 "rl78_general_operand" "vim")])
              (label_ref (match_operand 3 "" ""))
	      (pc)))]
  "rl78_virt_insns_ok ()"
  "v.cmp\t%1, %2\\n\tv.b%C0\t%3"
  [(set_attr "valloc" "cmp")]
  )
363
;; HImode signed compare-and-branch; word-sized counterpart of the
;; QImode signed pattern above (v.cmpw instead of v.cmp).
(define_insn "*cbranchhi4_virt_signed"
  [(set (pc) (if_then_else
	      (match_operator 0 "rl78_cmp_operator_signed"
			      [(match_operand:HI 1 "general_operand" "vim")
			       (match_operand:HI 2 "nonmemory_operand" "vi")])
              (label_ref (match_operand 3 "" ""))
	      (pc)))]
  "rl78_virt_insns_ok ()"
  "v.cmpw\t%1, %2\\n\tv.b%C0\t%3"
  [(set_attr "valloc" "cmp")]
  )
375
;; HImode compare-and-branch for hardware-supported comparisons;
;; word-sized counterpart of *cbranchqi4_virt.
(define_insn "*cbranchhi4_virt"
  [(set (pc) (if_then_else
	      (match_operator 0 "rl78_cmp_operator_real"
			      [(match_operand:HI 1 "rl78_general_operand" "vim")
			       (match_operand:HI 2 "rl78_general_operand" "vim")])
              (label_ref (match_operand 3 "" ""))
	      (pc)))]
  "rl78_virt_insns_ok ()"
  "v.cmpw\t%1, %2\\n\tv.b%C0\t%3"
  [(set_attr "valloc" "cmp")]
  )
387
;; SImode compare-and-branch (any rl78_cmp_operator).  The RL78 has
;; no SImode compare, so the expansion clobbers AX (hence the
;; explicit clobber and the "macax" valloc class).
(define_insn "*cbranchsi4_virt"
  [(set (pc) (if_then_else
	      (match_operator 0 "rl78_cmp_operator"
			      [(match_operand:SI 1 "general_operand" "vim")
			       (match_operand:SI 2 "nonmemory_operand" "vi")])
              (label_ref (match_operand 3 "" ""))
	      (pc)))
   (clobber (reg:HI AX_REG))
   ]
  "rl78_virt_insns_ok ()"
  "v.cmpd\t%1, %2\\n\tv.b%C0\t%3"
  [(set_attr "valloc" "macax")]
  )
401
402;;---------- Peepholes ------------------------
403
;; Combine two adjacent QImode moves into a single HImode move when
;; rl78_peep_movhi_p decides the source/destination pairs form a
;; contiguous word; rl78_setup_peep_movhi constructs the new HImode
;; operands 4 and 5.
(define_peephole2
  [(set (match_operand:QI 0 "" "")
	(match_operand:QI 1 "" ""))
   (set (match_operand:QI 2 "" "")
	(match_operand:QI 3 "" ""))]
  "rl78_peep_movhi_p (operands)"
  [(set (match_dup 4)
	(match_dup 5))]
  "rl78_setup_peep_movhi (operands);"
  )
414
;; Same idea as the peephole above, but for the four-insn sequence
;; that moves two bytes through the A register: rewrite it as one
;; word move through AX (again gated by rl78_peep_movhi_p, with
;; operands 4/5 built by rl78_setup_peep_movhi).
(define_peephole2
  [(set (reg:QI A_REG)
	(match_operand:QI 1 "" ""))
   (set (match_operand:QI 0 "" "")
	(reg:QI A_REG))
   (set (reg:QI A_REG)
	(match_operand:QI 3 "" ""))
   (set (match_operand:QI 2 "" "")
	(reg:QI A_REG))
   ]
  "rl78_peep_movhi_p (operands)"
  [(set (reg:HI AX_REG)
	(match_dup 5))
   (set (match_dup 4)
	(reg:HI AX_REG))
   ]
  "rl78_setup_peep_movhi (operands);"
  )
433
;; dest = (-op1) & mask, with op1 tied to the destination register
;; and the mask an immediate; emitted as the virtual "v.nand" insn.
(define_insn "*negandhi3_virt"
  [(set (match_operand:HI                 0 "register_operand" "=v")
	(and:HI (neg:HI (match_operand:HI 1 "register_operand"  "0"))
 		(match_operand:HI         2 "immediate_operand" "n")))
   ]
  "rl78_virt_insns_ok ()"
  "v.nand\t%0, %1, %2"
)
442