1;;  Machine Description for Renesas RL78 processors
2;;  Copyright (C) 2011-2020 Free Software Foundation, Inc.
3;;  Contributed by Red Hat.
4
5;; This file is part of GCC.
6
7;; GCC is free software; you can redistribute it and/or modify
8;; it under the terms of the GNU General Public License as published by
9;; the Free Software Foundation; either version 3, or (at your option)
10;; any later version.
11
12;; GCC is distributed in the hope that it will be useful,
13;; but WITHOUT ANY WARRANTY; without even the implied warranty of
14;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15;; GNU General Public License for more details.
16
17;; You should have received a copy of the GNU General Public License
18;; along with GCC; see the file COPYING3.  If not see
19;; <http://www.gnu.org/licenses/>.
20
21;; In this MD file, we define those insn patterns that involve
22;; registers, where such registers are virtual until allocated to a
23;; physical register.  All of these insns need to be conditional on
24;; rl78_virt_insns_ok () being true.
25
26;; This tells the physical register allocator what method to use to
27;; allocate registers.  Basically, this defines the template of the
28;; instruction - op1 is of the form "a = op(b)", op2 is "a = b op c"
29;; etc.
30
;; Per-insn register-allocation template used by the RL78
;; devirtualization pass; "op2" ("a = b op c") is the default,
;; insns override it with set_attr where needed.
(define_attr "valloc" "op1,op2,ro1,cmp,umul,macax,divhi,divsi"
  (const_string "op2"))
33
34;;---------- Moving ------------------------
35
;; Virtual QImode move where both source and destination are near
;; memory ("Y"); kept as a virtual insn until the devirtualizer
;; assigns physical registers.
(define_insn "*movqi_virt_mm"
  [(set (match_operand:QI 0 "rl78_near_mem_operand" "=Y")
	(match_operand    1 "rl78_near_mem_operand" "Y"))]
  "rl78_virt_insns_ok ()"
  "v.mov %0, %1"
  [(set_attr "valloc" "op1")]
)
43
;; General virtual QImode move.  The alternatives cover virtual
;; registers, near memory (Y) and far memory (*Wfr) in the
;; combinations the devirtualizer can handle.
(define_insn "*movqi_virt"
  [(set (match_operand:QI 0 "nonimmediate_operand" "=vY,v,*Wfr,Y,*Wfr,*Wfr")
	(match_operand    1 "general_operand" "vInt8JY,*Wfr,vInt8J,*Wfr,Y,*Wfr"))]
  "rl78_virt_insns_ok ()"
  "v.mov %0, %1"
  [(set_attr "valloc" "op1")]
)
51
;; Virtual HImode move, near memory to near memory (HI counterpart of
;; *movqi_virt_mm above).
(define_insn "*movhi_virt_mm"
  [(set (match_operand:HI 0 "rl78_near_mem_operand" "=Y")
	(match_operand:HI 1 "rl78_near_mem_operand" "Y"))]
  "rl78_virt_insns_ok ()"
  "v.movw %0, %1"
  [(set_attr "valloc" "op1")]
)
59
;; General virtual HImode move across virtual registers, near (Y/S)
;; and far (*Wfr) memory.
(define_insn "*movhi_virt"
  [(set (match_operand:HI 0 "nonimmediate_operand" "=vS,  Y,   v,   *Wfr")
	(match_operand:HI 1 "general_operand"      "viYS, viS, *Wfr, vi"))]
  "rl78_virt_insns_ok ()"
  "v.movw %0, %1"
  [(set_attr "valloc" "op1")]
)
67
;; Virtual byte-swap of a 16-bit value (bswap:HI); devirtualized into
;; real RL78 insns later.
(define_insn "*bswaphi2_virt"
  [(set (match_operand:HI           0 "rl78_nonfar_nonimm_operand" "=vm")
        (bswap:HI (match_operand:HI 1 "general_operand"  "vim")))]
  "rl78_virt_insns_ok ()"
  "v.bswaphi\t%0, %1"
  [(set_attr "valloc" "op1")]
)
75
76;;---------- Conversions ------------------------
77
;; Virtual zero-extension QI -> HI.  rl78_one_far_p limits the pattern
;; to at most one far-memory operand among the first two operands.
(define_insn "*zero_extendqihi2_virt"
  [(set (match_operand:HI                 0 "rl78_nonfar_nonimm_operand" "=vY,*Wfr")
	(zero_extend:HI (match_operand:QI 1 "general_operand" "vim,viY")))]
  "rl78_virt_insns_ok () && rl78_one_far_p (operands, 2)"
  "v.zero_extend\t%0, %1"
  [(set_attr "valloc" "op1")]
  )
85
;; Virtual sign-extension QI -> HI; same far-operand restriction as
;; the zero-extend pattern above.
(define_insn "*extendqihi2_virt"
  [(set (match_operand:HI                 0 "rl78_nonfar_nonimm_operand" "=vY,*Wfr")
	(sign_extend:HI (match_operand:QI 1 "general_operand" "vim,viY")))]
  "rl78_virt_insns_ok () && rl78_one_far_p (operands, 2)"
  "v.sign_extend\t%0, %1"
  [(set_attr "valloc" "op1")]
  )
93
94;;---------- Arithmetic ------------------------
95
;; Virtual increment/decrement: add a small constant (rl78_1_2_operand,
;; constraints KLNO — presumably +/-1 and +/-2; confirm in rl78.md) to a
;; QI or HI memory/register operand, in place (operand 1 tied to 0).
;; No valloc override, so the default "op2" template applies.
(define_insn "*inc<mode>3_virt"
  [(set (match_operand:QHI           0 "rl78_incdec_memory_operand" "=vm")
	(plus:QHI (match_operand:QHI 1 "rl78_incdec_memory_operand" "0")
		  (match_operand:QHI 2 "rl78_1_2_operand" "KLNO")))
   ]
  "rl78_virt_insns_ok ()"
  "v.inc\t%0, %1, %2"
)
104
;; Virtual QI/HI addition.  "%" on operand 1 marks plus as commutative
;; for constraint matching; rl78_one_far_p allows at most one
;; far-memory operand among the three.
(define_insn "*add<mode>3_virt"
  [(set (match_operand:QHI           0 "rl78_nonimmediate_operand" "=vY,  S, *Wfr,  vY")
	(plus:QHI (match_operand:QHI 1 "rl78_general_operand"      "%viY, 0, 0viY, *Wfr")
		  (match_operand:QHI 2 "rl78_general_operand"       "vim, i, viY,  viY")))
   ]
  "rl78_virt_insns_ok () && rl78_one_far_p (operands, 3)"
  "v.add\t%0, %1, %2"
)
113
;; Virtual QI/HI subtraction; like the add pattern above but without
;; the commutative "%" marker (minus is not commutative).
(define_insn "*sub<mode>3_virt"
  [(set (match_operand:QHI            0 "rl78_nonimmediate_operand" "=vY, S, *Wfr,  vY")
	(minus:QHI (match_operand:QHI 1 "rl78_general_operand"      "viY, 0, 0viY, *Wfr")
		   (match_operand:QHI 2 "rl78_general_operand"      "vim, i, viY,  viY")))
   ]
  "rl78_virt_insns_ok () && rl78_one_far_p (operands, 3)"
  "v.sub\t%0, %1, %2"
)
122
;; Virtual HImode multiply by a constant accepted by rl78_24_operand
;; (constraint "N" — presumably small powers of two realizable as
;; shifts; confirm in rl78.md).  Uses the "umul" allocation template.
(define_insn "*umulhi3_shift_virt"
  [(set (match_operand:HI          0 "register_operand" "=v")
        (mult:HI (match_operand:HI 1 "rl78_nonfar_operand" "%vim")
                 (match_operand:HI 2 "rl78_24_operand" "Ni")))]
  "rl78_virt_insns_ok ()"
  "v.mulu\t%0, %1, %2"
  [(set_attr "valloc" "umul")]
)
131
;; Virtual widening unsigned multiply: QI x QI -> HI.
;; NOTE(review): the output template references only %0 and %2, not %1
;; — presumably the "umul" devirtualization template implies operand 1's
;; location; confirm against rl78.c before changing.
(define_insn "*umulqihi3_virt"
  [(set (match_operand:HI                          0 "register_operand" "=v")
        (mult:HI (zero_extend:HI (match_operand:QI 1 "rl78_nonfar_operand" "%vim"))
                 (zero_extend:HI (match_operand:QI 2 "general_operand" "vim"))))]
  "rl78_virt_insns_ok ()"
  "v.mulu\t%0, %2"
  [(set_attr "valloc" "umul")]
)
140
;; Virtual QImode bitwise AND; commutative ("%"), at most one far
;; operand (rl78_one_far_p).
(define_insn "*andqi3_virt"
  [(set (match_operand:QI         0 "rl78_nonimmediate_operand" "=vm,  *Wfr,  vY")
	(and:QI (match_operand:QI 1 "rl78_general_operand"      "%vim, 0viY, *Wfr")
		(match_operand:QI 2 "rl78_general_operand"      "vim,  viY,  viY")))
   ]
  "rl78_virt_insns_ok () && rl78_one_far_p (operands, 3)"
  "v.and\t%0, %1, %2"
)
149
;; Virtual QImode bitwise OR; same shape and restrictions as the AND
;; pattern above.
(define_insn "*iorqi3_virt"
  [(set (match_operand:QI         0 "rl78_nonimmediate_operand" "=vm,  *Wfr,  vY")
	(ior:QI (match_operand:QI 1 "rl78_general_operand"      "%vim, 0viY, *Wfr")
		(match_operand:QI 2 "rl78_general_operand"      "vim,  viY,  viY")))
   ]
  "rl78_virt_insns_ok () && rl78_one_far_p (operands, 3)"
  "v.or\t%0, %1, %2"
)
158
;; Virtual QImode bitwise XOR; same shape and restrictions as the AND
;; and IOR patterns above.  Operand 2 now carries an explicit :QI mode
;; for consistency with those patterns (a modeless match_operand would
;; match operands of any mode).
(define_insn "*xorqi3_virt"
  [(set (match_operand:QI         0 "rl78_nonimmediate_operand" "=vm,  *Wfr,  vY")
	(xor:QI (match_operand:QI 1 "rl78_general_operand"      "%vim, 0viY, *Wfr")
		(match_operand:QI 2 "rl78_general_operand"      "vim,  viY,  viY")))
   ]
  "rl78_virt_insns_ok () && rl78_one_far_p (operands, 3)"
  "v.xor\t%0, %1, %2"
)
167
168;;---------- Shifts ------------------------
169
;; Virtual QI/HI left shift; the shift count (operand 2) is always
;; QImode, and far memory is excluded from operands 0/1.
(define_insn "*ashl<mode>3_virt"
  [(set (match_operand:QHI             0 "rl78_nonfar_nonimm_operand" "=vm")
	(ashift:QHI (match_operand:QHI 1 "rl78_nonfar_operand" "vim")
		    (match_operand:QI  2 "general_operand" "vim")))
   ]
  "rl78_virt_insns_ok ()"
  "v.shl\t%0, %1, %2"
)
178
;; Virtual QI/HI arithmetic (sign-propagating) right shift.
(define_insn "*ashr<mode>3_virt"
  [(set (match_operand:QHI               0 "rl78_nonfar_nonimm_operand" "=vm")
	(ashiftrt:QHI (match_operand:QHI 1 "rl78_nonfar_operand" "vim")
		      (match_operand:QI  2 "general_operand" "vim")))
   ]
  "rl78_virt_insns_ok ()"
  "v.sar\t%0, %1, %2"
)
187
;; Virtual QI/HI logical (zero-filling) right shift.
(define_insn "*lshr<mode>3_virt"
  [(set (match_operand:QHI               0 "rl78_nonfar_nonimm_operand" "=vm")
	(lshiftrt:QHI (match_operand:QHI 1 "rl78_nonfar_operand" "vim")
		      (match_operand:QI  2 "general_operand" "vim")))
   ]
  "rl78_virt_insns_ok ()"
  "v.shr\t%0, %1, %2"
)
196
197;; This is complex mostly because the RL78 has no SImode operations,
198;; and very limited HImode operations, and no variable shifts.  This
199;; pattern is optimized for each constant shift count and operand
200;; types, so as to use a hand-optimized pattern.  For readability, the
201;; usual \t\; syntax is not used here.  Also, there's no easy way to
202;; constrain to avoid partial overlaps, hence the duplication.
;; SImode arithmetic right shift, emitted as real insns (no "v."
;; prefix, no rl78_virt_insns_ok condition).  One hand-written asm
;; alternative per shift-count class; the header row below maps shift
;; counts to alternatives.  Alternative 0 (count 0) emits only a
;; comment; the last alternative handles a variable count with a
;; decrement-and-branch loop.  Clobbers X_REG (AX is used throughout).
(define_insn "ashrsi3_virt"                                  ;;   0  1      2-7            8         9-15           16   17-23     24   25-31 var
  [(set (match_operand:SI               0 "nonimmediate_operand" "=v,vU,&vU,v,  &vU,  &vU, v,  &vU,  v,  &vU, &vU,  vU,  v,&vU,    vU,  vU,   vU")
	(ashiftrt:SI (match_operand:SI  1 "nonimmediate_operand" "0, 0,  vU,0,   vWab, U,  0,   vU,  0,   vWab,U,   vU,  0, vU,    vU,  vU,   0")
		      (match_operand:SI 2 "nonmemory_operand"    "M, K,  K, Int3,Int3,Int3,Iv08,Iv08,Is09,Is09,Is09,Iv16,Is17,Is17,Iv24,Is25, iv")))
   (clobber (reg:HI X_REG))
    ]
   ""
   "@
    ; ashrsi %0, 0

   movw ax,%H1 \; sarw ax,1 \; movw %H0,ax \; mov a,%Q1 \; rorc a,1 \; mov %Q0,a \; mov a,%q1 \; rorc a,1 \; mov %q0,a
   movw ax,%H1 \; sarw ax,1 \; movw %H0,ax \; mov a,%Q1 \; rorc a,1 \; mov %Q0,a \; mov a,%q1 \; rorc a,1 \; mov %q0,a

   movw ax,%1 \; shlw ax,%r2 \; mov %0,a             \; mov x,%Q1 \; mov a,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; sarw ax,%u2 \; movw %H0,ax
   movw ax,%1 \; shlw ax,%r2 \; mov %0,a             \; mov x,%Q1 \; mov a,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; sarw ax,%u2 \; movw %H0,ax
   movw ax,%1 \; shlw ax,%r2 \; mov %0,a \; mov a,%Q1 \; mov x,a   \; mov a,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; sarw ax,%u2 \; movw %H0,ax

   mov x,%Q1            \; mov a,%H1 \; movw %0,ax \; movw ax,%H1 \; sarw ax,8 \; movw %H0,ax
   mov a,%Q1 \; mov x, a \; mov a,%H1 \; movw %0,ax \; movw ax,%H1 \; sarw ax,8 \; movw %H0,ax

   mov x,%Q1           \; mov a,%H1 \; shlw ax,%r2 \; mov %0,a \; movw ax,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; sarw ax,%u2 \; movw %H0,ax
   mov x,%Q1           \; mov a,%H1 \; shlw ax,%r2 \; mov %0,a \; movw ax,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; sarw ax,%u2 \; movw %H0,ax
   mov a,%Q1 \; mov x,a \; mov a,%H1 \; shlw ax,%r2 \; mov %0,a \; movw ax,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; sarw ax,%u2 \; movw %H0,ax

   movw ax,%H1 \; movw %0,ax \; sarw ax,15 \; movw %H0,ax

   movw ax,%H1 \; sarw ax,%S2 \; movw %0,ax \; sarw ax,15 \; movw %H0,ax
   movw ax,%H1 \; sarw ax,%S2 \; movw %0,ax \; sarw ax,15 \; movw %H0,ax

   movw ax,%H1 \; mov %0,a \; sarw ax,15 \; movw %H0,ax \; mov %Q0,a

   movw ax,%H1 \; sar a,%s2 \; mov %0,a \; sarw ax,15 \; movw %H0,ax \; mov %Q0,a

   mov b,%2 \; cmp0 b \; bz $2f \; 1: \; movw ax,%H1 \; sarw ax,1 \; movw %H0,ax \; mov a,%Q1 \; rorc a,1 \; mov %Q0,a \; mov a,%q1 \; rorc a,1 \; mov %q0,a \; dec b \; bnz $1b \; 2:"
  [(set_attr "valloc" "macax")]
)
239
240;; Likewise.
;; SImode logical right shift — same per-count-class structure as
;; ashrsi3_virt above, but zero-filling (shrw / movw ax,#0 in place of
;; sarw sign propagation).
(define_insn "lshrsi3_virt"                                  ;;   0  1      2-7            8         9-15           16   17-23     24   25-31 var
  [(set (match_operand:SI               0 "nonimmediate_operand" "=v,vU,&vU,v,  &vU,  &vU, v,  &vU,  v,  &vU, &vU,  vU,  v,&vU,    vU,  vU,   vU")
	(lshiftrt:SI (match_operand:SI  1 "nonimmediate_operand" "0, 0,  vU,0,   vWab, U,  0,   vU,  0,   vWab,U,   vU,  0, vU,    vU,  vU,   0")
		      (match_operand:SI 2 "nonmemory_operand"    "M, K,  K, Int3,Int3,Int3,Iv08,Iv08,Is09,Is09,Is09,Iv16,Is17,Is17,Iv24,Is25, iv")))
   (clobber (reg:HI X_REG))
   ]
  ""
  "@
   ; lshrsi %0, 0

   movw ax,%H1 \; shrw ax,1 \; movw %H0,ax \; mov a,%Q1 \; rorc a,1 \; mov %Q0,a \; mov a,%q1 \; rorc a,1 \; mov %q0,a
   movw ax,%H1 \; shrw ax,1 \; movw %H0,ax \; mov a,%Q1 \; rorc a,1 \; mov %Q0,a \; mov a,%q1 \; rorc a,1 \; mov %q0,a

   movw ax,%1 \; shlw ax,%r2 \; mov %0,a             \; mov x,%Q1 \; mov a,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; shrw ax,%u2 \; movw %H0,ax
   movw ax,%1 \; shlw ax,%r2 \; mov %0,a             \; mov x,%Q1 \; mov a,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; shrw ax,%u2 \; movw %H0,ax
   movw ax,%1 \; shlw ax,%r2 \; mov %0,a \; mov a,%Q1 \; mov x,a   \; mov a,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; shrw ax,%u2 \; movw %H0,ax

   mov x,%Q1            \; mov a,%H1 \; movw %0,ax \; movw ax,%H1 \; shrw ax,8 \; movw %H0,ax
   mov a,%Q1 \; mov x, a \; mov a,%H1 \; movw %0,ax \; movw ax,%H1 \; shrw ax,8 \; movw %H0,ax

   mov x,%Q1           \; mov a,%H1 \; shlw ax,%r2 \; mov %0,a \; movw ax,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; shrw ax,%u2 \; movw %H0,ax
   mov x,%Q1           \; mov a,%H1 \; shlw ax,%r2 \; mov %0,a \; movw ax,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; shrw ax,%u2 \; movw %H0,ax
   mov a,%Q1 \; mov x,a \; mov a,%H1 \; shlw ax,%r2 \; mov %0,a \; movw ax,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; shrw ax,%u2 \; movw %H0,ax

   movw ax,%H1 \; movw %0,ax \; movw ax,#0 \; movw %H0,ax

   movw ax,%H1 \; shrw ax,%S2 \; movw %0,ax \; movw ax,#0 \; movw %H0,ax
   movw ax,%H1 \; shrw ax,%S2 \; movw %0,ax \; movw ax,#0 \; movw %H0,ax

   movw ax,%H1 \; mov %0,a \; movw ax,#0 \; movw %H0,ax \; mov %Q0,a

   movw ax,%H1 \; shr a,%s2 \; mov %0,a \; movw ax,#0 \; movw %H0,ax \; mov %Q0,a

   mov b,%2 \; cmp0 b \; bz $2f \; 1: \; movw ax,%H1 \; shrw ax,1 \; movw %H0,ax \; mov a,%Q1 \; rorc a,1 \; mov %Q0,a \; mov a,%q1 \; rorc a,1 \; mov %q0,a \; dec b \; bnz $1b \; 2:"
  [(set_attr "valloc" "macax")]
)
277
278;; Likewise.
;; SImode left shift — same per-count-class structure as the right
;; shifts above, with extra alternatives distinguishing register vs.
;; near-memory destinations.  Fixes relative to the previous revision:
;; the count-0 alternative's emitted comment now says "ashlsi" (it was
;; copy-pasted from lshrsi3_virt), and the two "movW" mnemonics are
;; normalized to lowercase "movw" like the rest of the file.
(define_insn "ashlsi3_virt"                                ;;   0  1      2-7            8         9-15           16        17-23     24        25-31     var
  [(set (match_operand:SI             0 "nonimmediate_operand" "=v,vU,&vU,v,  &vU,  &vU, v,  &vU,  v,  &vU, &vU,  v,   U,   v,&vU,    v,   U,   v,   U,   vWab,vU,  vU")
	(ashift:SI (match_operand:SI  1 "nonimmediate_operand" "0, 0,  vU,0,   vWab, U,  0,   vU,  0,   vWab,U,   vU,  vU,  0, vU,    vU,  vU,  vU,  vU,  0,   vWab,U")
		    (match_operand:SI 2 "nonmemory_operand"    "M, K,  K, Int3,Int3,Int3,Iv08,Iv08,Is09,Is09,Is09,Iv16,Iv16,Is17,Is17,Iv24,Iv24,Is25,Is25,iv,  iv,  iv")))
   (clobber (reg:HI X_REG))
   ]
  ""
  "@
   ; ashlsi %0, 0

   movw ax,%1 \; shlw ax,1 \; movw %0,ax \; movw ax,%H1 \; rolwc ax,1 \; movw %H0,ax
   movw ax,%1 \; shlw ax,1 \; movw %0,ax \; movw ax,%H1 \; rolwc ax,1 \; movw %H0,ax

   movw ax,%H1 \; shlw ax,%u2 \; mov %E0,a \; mov x,%Q1           \; mov a, %H1 \; shlw ax,%S2 \; mov %H0,a \; movw ax,%1 \; shlw ax,%u2 \; movw %0,ax
   movw ax,%H1 \; shlw ax,%u2 \; mov %E0,a \; mov x,%Q1           \; mov a, %H1 \; shlw ax,%S2 \; mov %H0,a \; movw ax,%1 \; shlw ax,%u2 \; movw %0,ax
   movw ax,%H1 \; shlw ax,%u2 \; mov %E0,a \; mov a,%Q1 \; mov x,a \; mov a, %H1 \; shlw ax,%S2 \; mov %H0,a \; movw ax,%1 \; shlw ax,%u2 \; movw %0,ax

   mov x,%Q1           \; mov a,%H1 \; movw %H0,ax \; movw ax,%1 \; shlw ax,8 \; movw %0,ax
   mov a,%Q1 \; mov x,a \; mov a,%H1 \; movw %H0,ax \; movw ax,%1 \; shlw ax,8 \; movw %0,ax

   mov x,%Q1           \; mov a,%H1 \; shlw ax,%s2 \; movw %H0,ax \; movw ax,%1 \; shlw ax,%s2 \; mov %H0,a \; movw ax,%1 \; shlw ax,%u2 \; movw %0,ax
   mov x,%Q1           \; mov a,%H1 \; shlw ax,%s2 \; movw %H0,ax \; movw ax,%1 \; shlw ax,%s2 \; mov %H0,a \; movw ax,%1 \; shlw ax,%u2 \; movw %0,ax
   mov a,%Q1 \; mov x,a \; mov a,%H1 \; shlw ax,%s2 \; movw %H0,ax \; movw ax,%1 \; shlw ax,%s2 \; mov %H0,a \; movw ax,%1 \; shlw ax,%u2 \; movw %0,ax

   movw ax,%1 \; movw %H0,ax \; movw %0,#0
   movw ax,%1 \; movw %H0,ax \; movw ax,#0 \; movw %0,ax

   movw ax,%1 \; shlw ax,%S2 \; movw %H0,ax \; movw %0,#0
   movw ax,%1 \; shlw ax,%S2 \; movw %H0,ax \; movw ax,#0 \; movw %0,ax

   mov a,%1 \; movw %H0,ax \; mov %H0,#0 \; movw %0,#0
   mov a,%1 \; movw %H0,ax \; movw ax,#0 \; mov %H0,a \; movw %0,ax

   mov a,%1 \; shl a,%s2 \; movw %H0,ax \; mov %H0,#0 \; movw %0,#0
   mov a,%1 \; shl a,%s2 \; movw %H0,ax \; movw ax,#0 \; mov %H0,a \; movw %0,ax

   mov a,%2 \; cmp0 a \; bz $2f \; mov d,a \; movw ax,%H1 \; movw bc,%1 \; 1: \; shlw bc,1 \; rolwc ax,1 \; dec d \; bnz $1b \; movw %H0,ax \; movw ax,bc \; movw %0,ax \; 2:
   mov a,%2 \; mov d,a \; movw ax,%H1 \; movw bc,%1 \; cmp0 0xFFEFD \; bz $2f \; 1: \; shlw bc,1 \; rolwc ax,1 \; dec d \; bnz $1b \; 2: \; movw %H0,ax \; movw ax,bc \; movw %0,ax
   mov a,%2 \; mov d,a \; movw ax,%1 \; movw bc,ax \; movw ax,%H1 \; cmp0 0xFFEFD \; bz $2f \; 1: \; shlw bc,1 \; rolwc ax,1 \; dec d \; bnz $1b \; 2: \; movw %H0,ax \; movw ax,bc \; movw %0,ax"
   [(set_attr "valloc" "macax")]
 )
320
321;;---------- Branching ------------------------
322
;; Virtual indirect jump through a HImode register or memory operand.
(define_insn "*indirect_jump_virt"
  [(set (pc)
	(match_operand:HI 0 "nonimmediate_operand" "vm"))]
  "rl78_virt_insns_ok ()"
  "v.br\t%0"
  [(set_attr "valloc" "ro1")]
)
330
;; Virtual call with no return value; target is a memory operand
;; (constraints Wab/Wcv).  Operand 1 is the usual call-arguments RTX.
(define_insn "*call_virt"
  [(call (match_operand:HI 0 "memory_operand" "Wab,Wcv")
	 (match_operand 1 "" ""))]
  "rl78_virt_insns_ok ()"
  "v.call\t%0"
  [(set_attr "valloc" "ro1")]
  )
338
;; Virtual call whose return value lands in a virtual register.
;; NOTE(review): valloc is "op1" here while *call_virt uses "ro1" —
;; presumably because operand 0 is written; confirm against the
;; devirtualizer before unifying.
(define_insn "*call_value_virt"
  [(set (match_operand 0 "register_operand" "=v,v")
	(call (match_operand:HI 1 "memory_operand" "Wab,Wcv")
	      (match_operand 2 "" "")))]
  "rl78_virt_insns_ok ()"
  "v.call\t%1"
  [(set_attr "valloc" "op1")]
  )
347
;; Virtual QImode compare-and-branch for signed comparison operators;
;; emits a virtual compare followed by a conditional branch (%C0
;; prints the condition).
(define_insn "*cbranchqi4_virt_signed"
  [(set (pc) (if_then_else
	      (match_operator 0 "rl78_cmp_operator_signed"
			      [(match_operand:QI 1 "general_operand" "vim")
			       (match_operand:QI 2 "nonmemory_operand" "vi")])
              (label_ref (match_operand 3 "" ""))
	      (pc)))]
  "rl78_virt_insns_ok ()"
  "v.cmp\t%1, %2\\n\tv.b%C0\t%3"
  [(set_attr "valloc" "cmp")]
  )
359
;; Virtual QImode compare-and-branch for the comparisons the hardware
;; supports directly (rl78_cmp_operator_real); unlike the signed
;; variant, operand 2 may also be memory.
(define_insn "*cbranchqi4_virt"
  [(set (pc) (if_then_else
	      (match_operator 0 "rl78_cmp_operator_real"
			      [(match_operand:QI 1 "rl78_general_operand" "vim")
			       (match_operand:QI 2 "rl78_general_operand" "vim")])
              (label_ref (match_operand 3 "" ""))
	      (pc)))]
  "rl78_virt_insns_ok ()"
  "v.cmp\t%1, %2\\n\tv.b%C0\t%3"
  [(set_attr "valloc" "cmp")]
  )
371
;; Virtual HImode compare-and-branch, signed operators (word form of
;; *cbranchqi4_virt_signed).
(define_insn "*cbranchhi4_virt_signed"
  [(set (pc) (if_then_else
	      (match_operator 0 "rl78_cmp_operator_signed"
			      [(match_operand:HI 1 "general_operand" "vim")
			       (match_operand:HI 2 "nonmemory_operand" "vi")])
              (label_ref (match_operand 3 "" ""))
	      (pc)))]
  "rl78_virt_insns_ok ()"
  "v.cmpw\t%1, %2\\n\tv.b%C0\t%3"
  [(set_attr "valloc" "cmp")]
  )
383
;; Virtual HImode compare-and-branch, hardware-supported operators
;; (word form of *cbranchqi4_virt).
(define_insn "*cbranchhi4_virt"
  [(set (pc) (if_then_else
	      (match_operator 0 "rl78_cmp_operator_real"
			      [(match_operand:HI 1 "rl78_general_operand" "vim")
			       (match_operand:HI 2 "rl78_general_operand" "vim")])
              (label_ref (match_operand 3 "" ""))
	      (pc)))]
  "rl78_virt_insns_ok ()"
  "v.cmpw\t%1, %2\\n\tv.b%C0\t%3"
  [(set_attr "valloc" "cmp")]
  )
395
;; Virtual SImode compare-and-branch (any rl78_cmp_operator).  The
;; 32-bit compare is synthesized, hence the AX_REG clobber and the
;; "macax" allocation template.
(define_insn "*cbranchsi4_virt"
  [(set (pc) (if_then_else
	      (match_operator 0 "rl78_cmp_operator"
			      [(match_operand:SI 1 "general_operand" "vim")
			       (match_operand:SI 2 "nonmemory_operand" "vi")])
              (label_ref (match_operand 3 "" ""))
	      (pc)))
   (clobber (reg:HI AX_REG))
   ]
  "rl78_virt_insns_ok ()"
  "v.cmpd\t%1, %2\\n\tv.b%C0\t%3"
  [(set_attr "valloc" "macax")]
  )
409
410;;---------- Peepholes ------------------------
411
;; Peephole: merge two adjacent QImode moves into one HImode move when
;; rl78_peep_movhi_p says the operands form a contiguous pair;
;; rl78_setup_peep_movhi builds the HI operands 4 and 5.
(define_peephole2
  [(set (match_operand:QI 0 "" "")
	(match_operand:QI 1 "" ""))
   (set (match_operand:QI 2 "" "")
	(match_operand:QI 3 "" ""))]
  "rl78_peep_movhi_p (operands)"
  [(set (match_dup 4)
	(match_dup 5))]
  "rl78_setup_peep_movhi (operands);"
  )
422
;; Peephole: two QImode moves routed through the A register become one
;; HImode move through AX, under the same rl78_peep_movhi_p /
;; rl78_setup_peep_movhi helpers as the pattern above.
(define_peephole2
  [(set (reg:QI A_REG)
	(match_operand:QI 1 "" ""))
   (set (match_operand:QI 0 "" "")
	(reg:QI A_REG))
   (set (reg:QI A_REG)
	(match_operand:QI 3 "" ""))
   (set (match_operand:QI 2 "" "")
	(reg:QI A_REG))
   ]
  "rl78_peep_movhi_p (operands)"
  [(set (reg:HI AX_REG)
	(match_dup 5))
   (set (match_dup 4)
	(reg:HI AX_REG))
   ]
  "rl78_setup_peep_movhi (operands);"
  )
441
;; Virtual combined negate-and-mask: operand 0 = (-operand 1) & mask,
;; in place (operand 1 tied to 0), mask a constant integer.
(define_insn "*negandhi3_virt"
  [(set (match_operand:HI                 0 "register_operand" "=v")
	(and:HI (neg:HI (match_operand:HI 1 "register_operand"  "0"))
 		(match_operand:HI         2 "immediate_operand" "n")))
   ]
  "rl78_virt_insns_ok ()"
  "v.nand\t%0, %1, %2"
)
450