1//===-- RISCVInstrInfoA.td - RISC-V 'A' instructions -------*- tablegen -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file describes the RISC-V instructions from the standard 'A', Atomic
10// Instructions extension as well as the experimental 'Zacas' (Atomic
11// Compare-and-Swap) extension.
12//
13//===----------------------------------------------------------------------===//
14
15//===----------------------------------------------------------------------===//
16// Instruction class templates
17//===----------------------------------------------------------------------===//
18
19let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in
20class LR_r<bit aq, bit rl, bits<3> funct3, string opcodestr>
21    : RVInstRAtomic<0b00010, aq, rl, funct3, OPC_AMO,
22                    (outs GPR:$rd), (ins GPRMemZeroOffset:$rs1),
23                    opcodestr, "$rd, $rs1"> {
24  let rs2 = 0;
25}
26
27multiclass LR_r_aq_rl<bits<3> funct3, string opcodestr> {
28  def ""     : LR_r<0, 0, funct3, opcodestr>;
29  def _AQ    : LR_r<1, 0, funct3, opcodestr # ".aq">;
30  def _RL    : LR_r<0, 1, funct3, opcodestr # ".rl">;
31  def _AQ_RL : LR_r<1, 1, funct3, opcodestr # ".aqrl">;
32}
33
34let hasSideEffects = 0, mayLoad = 1, mayStore = 1 in
35class AMO_rr<bits<5> funct5, bit aq, bit rl, bits<3> funct3, string opcodestr>
36    : RVInstRAtomic<funct5, aq, rl, funct3, OPC_AMO,
37                    (outs GPR:$rd), (ins GPRMemZeroOffset:$rs1, GPR:$rs2),
38                    opcodestr, "$rd, $rs2, $rs1">;
39
40multiclass AMO_rr_aq_rl<bits<5> funct5, bits<3> funct3, string opcodestr> {
41  def ""     : AMO_rr<funct5, 0, 0, funct3, opcodestr>;
42  def _AQ    : AMO_rr<funct5, 1, 0, funct3, opcodestr # ".aq">;
43  def _RL    : AMO_rr<funct5, 0, 1, funct3, opcodestr # ".rl">;
44  def _AQ_RL : AMO_rr<funct5, 1, 1, funct3, opcodestr # ".aqrl">;
45}
46
47class AtomicStPat<PatFrag StoreOp, RVInst Inst, RegisterClass StTy,
48                  ValueType vt = XLenVT>
49    : Pat<(StoreOp (AddrRegImm (XLenVT GPR:$rs1), simm12:$imm12),
50                   (vt StTy:$rs2)),
51          (Inst StTy:$rs2, GPR:$rs1, simm12:$imm12)>;
52
53//===----------------------------------------------------------------------===//
54// Instructions
55//===----------------------------------------------------------------------===//
56
57let Predicates = [HasStdExtA] in {
58defm LR_W       : LR_r_aq_rl<0b010, "lr.w">, Sched<[WriteAtomicLDW, ReadAtomicLDW]>;
59defm SC_W       : AMO_rr_aq_rl<0b00011, 0b010, "sc.w">,
60                  Sched<[WriteAtomicSTW, ReadAtomicSTW, ReadAtomicSTW]>;
61defm AMOSWAP_W  : AMO_rr_aq_rl<0b00001, 0b010, "amoswap.w">,
62                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
63defm AMOADD_W   : AMO_rr_aq_rl<0b00000, 0b010, "amoadd.w">,
64                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
65defm AMOXOR_W   : AMO_rr_aq_rl<0b00100, 0b010, "amoxor.w">,
66                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
67defm AMOAND_W   : AMO_rr_aq_rl<0b01100, 0b010, "amoand.w">,
68                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
69defm AMOOR_W    : AMO_rr_aq_rl<0b01000, 0b010, "amoor.w">,
70                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
71defm AMOMIN_W   : AMO_rr_aq_rl<0b10000, 0b010, "amomin.w">,
72                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
73defm AMOMAX_W   : AMO_rr_aq_rl<0b10100, 0b010, "amomax.w">,
74                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
75defm AMOMINU_W  : AMO_rr_aq_rl<0b11000, 0b010, "amominu.w">,
76                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
77defm AMOMAXU_W  : AMO_rr_aq_rl<0b11100, 0b010, "amomaxu.w">,
78                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
79} // Predicates = [HasStdExtA]
80
81let Predicates = [HasStdExtA, IsRV64] in {
82defm LR_D       : LR_r_aq_rl<0b011, "lr.d">, Sched<[WriteAtomicLDD, ReadAtomicLDD]>;
83defm SC_D       : AMO_rr_aq_rl<0b00011, 0b011, "sc.d">,
84                  Sched<[WriteAtomicSTD, ReadAtomicSTD, ReadAtomicSTD]>;
85defm AMOSWAP_D  : AMO_rr_aq_rl<0b00001, 0b011, "amoswap.d">,
86                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
87defm AMOADD_D   : AMO_rr_aq_rl<0b00000, 0b011, "amoadd.d">,
88                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
89defm AMOXOR_D   : AMO_rr_aq_rl<0b00100, 0b011, "amoxor.d">,
90                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
91defm AMOAND_D   : AMO_rr_aq_rl<0b01100, 0b011, "amoand.d">,
92                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
93defm AMOOR_D    : AMO_rr_aq_rl<0b01000, 0b011, "amoor.d">,
94                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
95defm AMOMIN_D   : AMO_rr_aq_rl<0b10000, 0b011, "amomin.d">,
96                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
97defm AMOMAX_D   : AMO_rr_aq_rl<0b10100, 0b011, "amomax.d">,
98                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
99defm AMOMINU_D  : AMO_rr_aq_rl<0b11000, 0b011, "amominu.d">,
100                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
101defm AMOMAXU_D  : AMO_rr_aq_rl<0b11100, 0b011, "amomaxu.d">,
102                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
103} // Predicates = [HasStdExtA, IsRV64]
104
105let Predicates = [HasStdExtZacas] in {
106defm AMOCAS_W : AMO_rr_aq_rl<0b00101, 0b010, "amocas.w">;
107defm AMOCAS_D : AMO_rr_aq_rl<0b00101, 0b011, "amocas.d">;
108} // Predicates = [HasStdExtZacas]
109
110let Predicates = [HasStdExtZacas, IsRV64] in {
111defm AMOCAS_Q : AMO_rr_aq_rl<0b00101, 0b100, "amocas.q">;
112} // Predicates = [HasStdExtZacas, IsRV64]
113
114//===----------------------------------------------------------------------===//
115// Pseudo-instructions and codegen patterns
116//===----------------------------------------------------------------------===//
117
118// Atomic load/store are available under both +a and +force-atomics.
119// Fences will be inserted for atomic load/stores according to the logic in
120// RISCVTargetLowering::{emitLeadingFence,emitTrailingFence}.
121let Predicates = [HasAtomicLdSt] in {
122  def : LdPat<atomic_load_8,  LB>;
123  def : LdPat<atomic_load_16, LH>;
124  def : LdPat<atomic_load_32, LW>;
125
126  def : AtomicStPat<atomic_store_8,  SB, GPR>;
127  def : AtomicStPat<atomic_store_16, SH, GPR>;
128  def : AtomicStPat<atomic_store_32, SW, GPR>;
129}
130
131let Predicates = [HasAtomicLdSt, IsRV64] in {
132  def : LdPat<atomic_load_64, LD, i64>;
133  def : AtomicStPat<atomic_store_64, SD, GPR, i64>;
134}
135
136let Predicates = [HasStdExtA] in {
137
138/// AMOs
139
140multiclass AMOPat<string AtomicOp, string BaseInst, ValueType vt = XLenVT> {
141  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_monotonic"),
142                  !cast<RVInst>(BaseInst), vt>;
143  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acquire"),
144                  !cast<RVInst>(BaseInst#"_AQ"), vt>;
145  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_release"),
146                  !cast<RVInst>(BaseInst#"_RL"), vt>;
147  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acq_rel"),
148                  !cast<RVInst>(BaseInst#"_AQ_RL"), vt>;
149  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_seq_cst"),
150                  !cast<RVInst>(BaseInst#"_AQ_RL"), vt>;
151}
152
153defm : AMOPat<"atomic_swap_32", "AMOSWAP_W">;
154defm : AMOPat<"atomic_load_add_32", "AMOADD_W">;
155defm : AMOPat<"atomic_load_and_32", "AMOAND_W">;
156defm : AMOPat<"atomic_load_or_32", "AMOOR_W">;
157defm : AMOPat<"atomic_load_xor_32", "AMOXOR_W">;
158defm : AMOPat<"atomic_load_max_32", "AMOMAX_W">;
159defm : AMOPat<"atomic_load_min_32", "AMOMIN_W">;
160defm : AMOPat<"atomic_load_umax_32", "AMOMAXU_W">;
161defm : AMOPat<"atomic_load_umin_32", "AMOMINU_W">;
162
163def : Pat<(XLenVT (atomic_load_sub_32_monotonic GPR:$addr, GPR:$incr)),
164          (AMOADD_W GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
165def : Pat<(XLenVT (atomic_load_sub_32_acquire GPR:$addr, GPR:$incr)),
166          (AMOADD_W_AQ GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
167def : Pat<(XLenVT (atomic_load_sub_32_release GPR:$addr, GPR:$incr)),
168          (AMOADD_W_RL GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
169def : Pat<(XLenVT (atomic_load_sub_32_acq_rel GPR:$addr, GPR:$incr)),
170          (AMOADD_W_AQ_RL GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
171def : Pat<(XLenVT (atomic_load_sub_32_seq_cst GPR:$addr, GPR:$incr)),
172          (AMOADD_W_AQ_RL GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
173
174/// Pseudo AMOs
175
176class PseudoAMO : Pseudo<(outs GPR:$res, GPR:$scratch),
177                         (ins GPR:$addr, GPR:$incr, ixlenimm:$ordering), []> {
178  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
179  let mayLoad = 1;
180  let mayStore = 1;
181  let hasSideEffects = 0;
182}
183
184let Size = 20 in
185def PseudoAtomicLoadNand32 : PseudoAMO;
186// Ordering constants must be kept in sync with the AtomicOrdering enum in
187// AtomicOrdering.h.
188def : Pat<(XLenVT (atomic_load_nand_32_monotonic GPR:$addr, GPR:$incr)),
189          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 2)>;
190def : Pat<(XLenVT (atomic_load_nand_32_acquire GPR:$addr, GPR:$incr)),
191          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 4)>;
192def : Pat<(XLenVT (atomic_load_nand_32_release GPR:$addr, GPR:$incr)),
193          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 5)>;
194def : Pat<(XLenVT (atomic_load_nand_32_acq_rel GPR:$addr, GPR:$incr)),
195          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 6)>;
196def : Pat<(XLenVT (atomic_load_nand_32_seq_cst GPR:$addr, GPR:$incr)),
197          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 7)>;
198
199class PseudoMaskedAMO
200    : Pseudo<(outs GPR:$res, GPR:$scratch),
201             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$ordering), []> {
202  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
203  let mayLoad = 1;
204  let mayStore = 1;
205  let hasSideEffects = 0;
206}
207
208class PseudoMaskedAMOMinMax
209    : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
210             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$sextshamt,
211              ixlenimm:$ordering), []> {
212  let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
213                    "@earlyclobber $scratch2";
214  let mayLoad = 1;
215  let mayStore = 1;
216  let hasSideEffects = 0;
217}
218
219class PseudoMaskedAMOUMinUMax
220    : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
221             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$ordering), []> {
222  let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
223                    "@earlyclobber $scratch2";
224  let mayLoad = 1;
225  let mayStore = 1;
226  let hasSideEffects = 0;
227}
228
229class PseudoMaskedAMOPat<Intrinsic intrin, Pseudo AMOInst>
230    : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, timm:$ordering),
231          (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, timm:$ordering)>;
232
233class PseudoMaskedAMOMinMaxPat<Intrinsic intrin, Pseudo AMOInst>
234    : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
235           timm:$ordering),
236          (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
237           timm:$ordering)>;
238
239let Size = 28 in
240def PseudoMaskedAtomicSwap32 : PseudoMaskedAMO;
241def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i32,
242                         PseudoMaskedAtomicSwap32>;
243let Size = 28 in
244def PseudoMaskedAtomicLoadAdd32 : PseudoMaskedAMO;
245def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i32,
246                         PseudoMaskedAtomicLoadAdd32>;
247let Size = 28 in
248def PseudoMaskedAtomicLoadSub32 : PseudoMaskedAMO;
249def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i32,
250                         PseudoMaskedAtomicLoadSub32>;
251let Size = 32 in
252def PseudoMaskedAtomicLoadNand32 : PseudoMaskedAMO;
253def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i32,
254                         PseudoMaskedAtomicLoadNand32>;
255let Size = 44 in
256def PseudoMaskedAtomicLoadMax32 : PseudoMaskedAMOMinMax;
257def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i32,
258                               PseudoMaskedAtomicLoadMax32>;
259let Size = 44 in
260def PseudoMaskedAtomicLoadMin32 : PseudoMaskedAMOMinMax;
261def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i32,
262                               PseudoMaskedAtomicLoadMin32>;
263let Size = 36 in
264def PseudoMaskedAtomicLoadUMax32 : PseudoMaskedAMOUMinUMax;
265def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i32,
266                         PseudoMaskedAtomicLoadUMax32>;
267let Size = 36 in
268def PseudoMaskedAtomicLoadUMin32 : PseudoMaskedAMOUMinUMax;
269def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i32,
270                         PseudoMaskedAtomicLoadUMin32>;
271
272/// Compare and exchange
273
274class PseudoCmpXchg
275    : Pseudo<(outs GPR:$res, GPR:$scratch),
276             (ins GPR:$addr, GPR:$cmpval, GPR:$newval, ixlenimm:$ordering), []> {
277  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
278  let mayLoad = 1;
279  let mayStore = 1;
280  let hasSideEffects = 0;
281  let Size = 16;
282}
283
284// Ordering constants must be kept in sync with the AtomicOrdering enum in
285// AtomicOrdering.h.
286multiclass PseudoCmpXchgPat<string Op, Pseudo CmpXchgInst,
287                            ValueType vt = XLenVT> {
288  def : Pat<(vt (!cast<PatFrag>(Op#"_monotonic") GPR:$addr, GPR:$cmp, GPR:$new)),
289            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 2)>;
290  def : Pat<(vt (!cast<PatFrag>(Op#"_acquire") GPR:$addr, GPR:$cmp, GPR:$new)),
291            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 4)>;
292  def : Pat<(vt (!cast<PatFrag>(Op#"_release") GPR:$addr, GPR:$cmp, GPR:$new)),
293            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 5)>;
294  def : Pat<(vt (!cast<PatFrag>(Op#"_acq_rel") GPR:$addr, GPR:$cmp, GPR:$new)),
295            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 6)>;
296  def : Pat<(vt (!cast<PatFrag>(Op#"_seq_cst") GPR:$addr, GPR:$cmp, GPR:$new)),
297            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 7)>;
298}
299
300def PseudoCmpXchg32 : PseudoCmpXchg;
301defm : PseudoCmpXchgPat<"atomic_cmp_swap_32", PseudoCmpXchg32>;
302
303def PseudoMaskedCmpXchg32
304    : Pseudo<(outs GPR:$res, GPR:$scratch),
305             (ins GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask,
306              ixlenimm:$ordering), []> {
307  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
308  let mayLoad = 1;
309  let mayStore = 1;
310  let hasSideEffects = 0;
311  let Size = 32;
312}
313
314def : Pat<(int_riscv_masked_cmpxchg_i32
315            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering),
316          (PseudoMaskedCmpXchg32
317            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering)>;
318
319} // Predicates = [HasStdExtA]
320
321let Predicates = [HasStdExtA, IsRV64] in {
322
323defm : AMOPat<"atomic_swap_64", "AMOSWAP_D", i64>;
324defm : AMOPat<"atomic_load_add_64", "AMOADD_D", i64>;
325defm : AMOPat<"atomic_load_and_64", "AMOAND_D", i64>;
326defm : AMOPat<"atomic_load_or_64", "AMOOR_D", i64>;
327defm : AMOPat<"atomic_load_xor_64", "AMOXOR_D", i64>;
328defm : AMOPat<"atomic_load_max_64", "AMOMAX_D", i64>;
329defm : AMOPat<"atomic_load_min_64", "AMOMIN_D", i64>;
330defm : AMOPat<"atomic_load_umax_64", "AMOMAXU_D", i64>;
331defm : AMOPat<"atomic_load_umin_64", "AMOMINU_D", i64>;
332
333/// 64-bit AMOs
334
335def : Pat<(i64 (atomic_load_sub_64_monotonic GPR:$addr, GPR:$incr)),
336          (AMOADD_D GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
337def : Pat<(i64 (atomic_load_sub_64_acquire GPR:$addr, GPR:$incr)),
338          (AMOADD_D_AQ GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
339def : Pat<(i64 (atomic_load_sub_64_release GPR:$addr, GPR:$incr)),
340          (AMOADD_D_RL GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
341def : Pat<(i64 (atomic_load_sub_64_acq_rel GPR:$addr, GPR:$incr)),
342          (AMOADD_D_AQ_RL GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
343def : Pat<(i64 (atomic_load_sub_64_seq_cst GPR:$addr, GPR:$incr)),
344          (AMOADD_D_AQ_RL GPR:$addr, (SUB (XLenVT X0), GPR:$incr))>;
345
346/// 64-bit pseudo AMOs
347
348let Size = 20 in
349def PseudoAtomicLoadNand64 : PseudoAMO;
350// Ordering constants must be kept in sync with the AtomicOrdering enum in
351// AtomicOrdering.h.
352def : Pat<(i64 (atomic_load_nand_64_monotonic GPR:$addr, GPR:$incr)),
353          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 2)>;
354def : Pat<(i64 (atomic_load_nand_64_acquire GPR:$addr, GPR:$incr)),
355          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 4)>;
356def : Pat<(i64 (atomic_load_nand_64_release GPR:$addr, GPR:$incr)),
357          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 5)>;
358def : Pat<(i64 (atomic_load_nand_64_acq_rel GPR:$addr, GPR:$incr)),
359          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 6)>;
360def : Pat<(i64 (atomic_load_nand_64_seq_cst GPR:$addr, GPR:$incr)),
361          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 7)>;
362
363def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i64,
364                         PseudoMaskedAtomicSwap32>;
365def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i64,
366                         PseudoMaskedAtomicLoadAdd32>;
367def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i64,
368                         PseudoMaskedAtomicLoadSub32>;
369def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i64,
370                         PseudoMaskedAtomicLoadNand32>;
371def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i64,
372                               PseudoMaskedAtomicLoadMax32>;
373def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i64,
374                               PseudoMaskedAtomicLoadMin32>;
375def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i64,
376                         PseudoMaskedAtomicLoadUMax32>;
377def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i64,
378                         PseudoMaskedAtomicLoadUMin32>;
379
380/// 64-bit compare and exchange
381
382def PseudoCmpXchg64 : PseudoCmpXchg;
383defm : PseudoCmpXchgPat<"atomic_cmp_swap_64", PseudoCmpXchg64, i64>;
384
385def : Pat<(int_riscv_masked_cmpxchg_i64
386            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering),
387          (PseudoMaskedCmpXchg32
388            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering)>;
389} // Predicates = [HasStdExtA, IsRV64]
390