//===-- RISCVInstrInfoA.td - RISC-V 'A' instructions -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the RISC-V instructions from the standard 'A', Atomic
// Instructions extension.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Instruction class templates
//===----------------------------------------------------------------------===//

let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in
class LR_r<bit aq, bit rl, bits<3> funct3, string opcodestr>
    : RVInstRAtomic<0b00010, aq, rl, funct3, OPC_AMO,
                    (outs GPR:$rd), (ins GPRMemZeroOffset:$rs1),
                    opcodestr, "$rd, $rs1"> {
  let rs2 = 0;
}

multiclass LR_r_aq_rl<bits<3> funct3, string opcodestr> {
  def ""     : LR_r<0, 0, funct3, opcodestr>;
  def _AQ    : LR_r<1, 0, funct3, opcodestr # ".aq">;
  def _RL    : LR_r<0, 1, funct3, opcodestr # ".rl">;
  def _AQ_RL : LR_r<1, 1, funct3, opcodestr # ".aqrl">;
}
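// For example, LR_r_aq_rl<0b010, "lr.w"> (used for LR_W below) yields LR_W,
// LR_W_AQ, LR_W_RL and LR_W_AQ_RL, printed as lr.w, lr.w.aq, lr.w.rl and
// lr.w.aqrl respectively.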

let hasSideEffects = 0, mayLoad = 1, mayStore = 1 in
class AMO_rr<bits<5> funct5, bit aq, bit rl, bits<3> funct3, string opcodestr>
    : RVInstRAtomic<funct5, aq, rl, funct3, OPC_AMO,
                    (outs GPR:$rd), (ins GPRMemZeroOffset:$rs1, GPR:$rs2),
                    opcodestr, "$rd, $rs2, $rs1">;

multiclass AMO_rr_aq_rl<bits<5> funct5, bits<3> funct3, string opcodestr> {
  def ""     : AMO_rr<funct5, 0, 0, funct3, opcodestr>;
  def _AQ    : AMO_rr<funct5, 1, 0, funct3, opcodestr # ".aq">;
  def _RL    : AMO_rr<funct5, 0, 1, funct3, opcodestr # ".rl">;
  def _AQ_RL : AMO_rr<funct5, 1, 1, funct3, opcodestr # ".aqrl">;
}
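// AMO_rr_aq_rl likewise produces a base definition plus _AQ, _RL and _AQ_RL
// variants. Note the assembly operand order is "$rd, $rs2, $rs1": result
// register, data register, then the address register.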

multiclass AtomicStPat<PatFrag StoreOp, RVInst Inst, RegisterClass StTy,
                       ValueType vt = XLenVT> {
  def : Pat<(StoreOp (AddrRegImm GPR:$rs1, simm12:$imm12), (vt StTy:$rs2)),
            (Inst StTy:$rs2, GPR:$rs1, simm12:$imm12)>;
}
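// AtomicStPat matches a store of StTy:$rs2 to a reg+imm address; note that
// the stored value is the last operand of the DAG pattern but the first
// operand of the emitted store instruction.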

//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtA] in {
defm LR_W       : LR_r_aq_rl<0b010, "lr.w">, Sched<[WriteAtomicLDW, ReadAtomicLDW]>;
defm SC_W       : AMO_rr_aq_rl<0b00011, 0b010, "sc.w">,
                  Sched<[WriteAtomicSTW, ReadAtomicSTW, ReadAtomicSTW]>;
defm AMOSWAP_W  : AMO_rr_aq_rl<0b00001, 0b010, "amoswap.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOADD_W   : AMO_rr_aq_rl<0b00000, 0b010, "amoadd.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOXOR_W   : AMO_rr_aq_rl<0b00100, 0b010, "amoxor.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOAND_W   : AMO_rr_aq_rl<0b01100, 0b010, "amoand.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOOR_W    : AMO_rr_aq_rl<0b01000, 0b010, "amoor.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOMIN_W   : AMO_rr_aq_rl<0b10000, 0b010, "amomin.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOMAX_W   : AMO_rr_aq_rl<0b10100, 0b010, "amomax.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOMINU_W  : AMO_rr_aq_rl<0b11000, 0b010, "amominu.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOMAXU_W  : AMO_rr_aq_rl<0b11100, 0b010, "amomaxu.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
} // Predicates = [HasStdExtA]

let Predicates = [HasStdExtA, IsRV64] in {
defm LR_D       : LR_r_aq_rl<0b011, "lr.d">, Sched<[WriteAtomicLDD, ReadAtomicLDD]>;
defm SC_D       : AMO_rr_aq_rl<0b00011, 0b011, "sc.d">,
                  Sched<[WriteAtomicSTD, ReadAtomicSTD, ReadAtomicSTD]>;
defm AMOSWAP_D  : AMO_rr_aq_rl<0b00001, 0b011, "amoswap.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOADD_D   : AMO_rr_aq_rl<0b00000, 0b011, "amoadd.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOXOR_D   : AMO_rr_aq_rl<0b00100, 0b011, "amoxor.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOAND_D   : AMO_rr_aq_rl<0b01100, 0b011, "amoand.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOOR_D    : AMO_rr_aq_rl<0b01000, 0b011, "amoor.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOMIN_D   : AMO_rr_aq_rl<0b10000, 0b011, "amomin.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOMAX_D   : AMO_rr_aq_rl<0b10100, 0b011, "amomax.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOMINU_D  : AMO_rr_aq_rl<0b11000, 0b011, "amominu.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOMAXU_D  : AMO_rr_aq_rl<0b11100, 0b011, "amomaxu.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
} // Predicates = [HasStdExtA, IsRV64]

//===----------------------------------------------------------------------===//
// Pseudo-instructions and codegen patterns
//===----------------------------------------------------------------------===//

// Atomic load/store are available under both +a and +force-atomics.
// Fences will be inserted for atomic load/stores according to the logic in
// RISCVTargetLowering::{emitLeadingFence,emitTrailingFence}.
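// For example, under that scheme a release (or stronger) store is emitted as
// "fence rw, w" followed by a plain store, and an acquire load as a plain
// load followed by "fence r, rw".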
let Predicates = [HasAtomicLdSt] in {
  defm : LdPat<atomic_load_8,  LB>;
  defm : LdPat<atomic_load_16, LH>;
  defm : LdPat<atomic_load_32, LW>;

  defm : AtomicStPat<atomic_store_8,  SB, GPR>;
  defm : AtomicStPat<atomic_store_16, SH, GPR>;
  defm : AtomicStPat<atomic_store_32, SW, GPR>;
}

let Predicates = [HasAtomicLdSt, IsRV64] in {
  defm : LdPat<atomic_load_64, LD, i64>;
  defm : AtomicStPat<atomic_store_64, SD, GPR, i64>;
}

let Predicates = [HasStdExtA] in {

/// AMOs

multiclass AMOPat<string AtomicOp, string BaseInst> {
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_monotonic"),
                  !cast<RVInst>(BaseInst)>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acquire"),
                  !cast<RVInst>(BaseInst#"_AQ")>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_release"),
                  !cast<RVInst>(BaseInst#"_RL")>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acq_rel"),
                  !cast<RVInst>(BaseInst#"_AQ_RL")>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_seq_cst"),
                  !cast<RVInst>(BaseInst#"_AQ_RL")>;
}
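// Both acq_rel and seq_cst select the _AQ_RL (".aqrl") form: an AMO with both
// the aq and rl bits set is fully ordered, which is sufficient for seq_cst.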

defm : AMOPat<"atomic_swap_32", "AMOSWAP_W">;
defm : AMOPat<"atomic_load_add_32", "AMOADD_W">;
defm : AMOPat<"atomic_load_and_32", "AMOAND_W">;
defm : AMOPat<"atomic_load_or_32", "AMOOR_W">;
defm : AMOPat<"atomic_load_xor_32", "AMOXOR_W">;
defm : AMOPat<"atomic_load_max_32", "AMOMAX_W">;
defm : AMOPat<"atomic_load_min_32", "AMOMIN_W">;
defm : AMOPat<"atomic_load_umax_32", "AMOMAXU_W">;
defm : AMOPat<"atomic_load_umin_32", "AMOMINU_W">;

def : Pat<(atomic_load_sub_32_monotonic GPR:$addr, GPR:$incr),
          (AMOADD_W GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_32_acquire GPR:$addr, GPR:$incr),
          (AMOADD_W_AQ GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_32_release GPR:$addr, GPR:$incr),
          (AMOADD_W_RL GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_32_acq_rel GPR:$addr, GPR:$incr),
          (AMOADD_W_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_32_seq_cst GPR:$addr, GPR:$incr),
          (AMOADD_W_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
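// The 'A' extension has no amosub instruction, so atomic subtraction is
// selected as an amoadd of the negated increment (SUB X0, $incr).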

/// Pseudo AMOs

class PseudoAMO : Pseudo<(outs GPR:$res, GPR:$scratch),
                         (ins GPR:$addr, GPR:$incr, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

let Size = 20 in
def PseudoAtomicLoadNand32 : PseudoAMO;
// Ordering constants must be kept in sync with the AtomicOrdering enum in
// AtomicOrdering.h.
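// (Monotonic = 2, Acquire = 4, Release = 5, AcquireRelease = 6,
// SequentiallyConsistent = 7.)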
def : Pat<(atomic_load_nand_32_monotonic GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 2)>;
def : Pat<(atomic_load_nand_32_acquire GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 4)>;
def : Pat<(atomic_load_nand_32_release GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 5)>;
def : Pat<(atomic_load_nand_32_acq_rel GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 6)>;
def : Pat<(atomic_load_nand_32_seq_cst GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 7)>;

class PseudoMaskedAMO
    : Pseudo<(outs GPR:$res, GPR:$scratch),
             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

class PseudoMaskedAMOMinMax
    : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$sextshamt,
              ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
                    "@earlyclobber $scratch2";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}
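// The signed min/max pseudos take an additional shift-amount operand
// ($sextshamt) so the expansion can sign-extend the extracted sub-word before
// comparing, and use a second scratch register.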

class PseudoMaskedAMOUMinUMax
    : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
                    "@earlyclobber $scratch2";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

class PseudoMaskedAMOPat<Intrinsic intrin, Pseudo AMOInst>
    : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, timm:$ordering),
          (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, timm:$ordering)>;

class PseudoMaskedAMOMinMaxPat<Intrinsic intrin, Pseudo AMOInst>
    : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
           timm:$ordering),
          (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
           timm:$ordering)>;

let Size = 28 in
def PseudoMaskedAtomicSwap32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i32,
                         PseudoMaskedAtomicSwap32>;
let Size = 28 in
def PseudoMaskedAtomicLoadAdd32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i32,
                         PseudoMaskedAtomicLoadAdd32>;
let Size = 28 in
def PseudoMaskedAtomicLoadSub32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i32,
                         PseudoMaskedAtomicLoadSub32>;
let Size = 32 in
def PseudoMaskedAtomicLoadNand32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i32,
                         PseudoMaskedAtomicLoadNand32>;
let Size = 44 in
def PseudoMaskedAtomicLoadMax32 : PseudoMaskedAMOMinMax;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i32,
                               PseudoMaskedAtomicLoadMax32>;
let Size = 44 in
def PseudoMaskedAtomicLoadMin32 : PseudoMaskedAMOMinMax;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i32,
                               PseudoMaskedAtomicLoadMin32>;
let Size = 36 in
def PseudoMaskedAtomicLoadUMax32 : PseudoMaskedAMOUMinUMax;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i32,
                         PseudoMaskedAtomicLoadUMax32>;
let Size = 36 in
def PseudoMaskedAtomicLoadUMin32 : PseudoMaskedAMOUMinUMax;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i32,
                         PseudoMaskedAtomicLoadUMin32>;

/// Compare and exchange

class PseudoCmpXchg
    : Pseudo<(outs GPR:$res, GPR:$scratch),
             (ins GPR:$addr, GPR:$cmpval, GPR:$newval, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
  let Size = 16;
}

// Ordering constants must be kept in sync with the AtomicOrdering enum in
// AtomicOrdering.h.
multiclass PseudoCmpXchgPat<string Op, Pseudo CmpXchgInst> {
  def : Pat<(!cast<PatFrag>(Op#"_monotonic") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 2)>;
  def : Pat<(!cast<PatFrag>(Op#"_acquire") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 4)>;
  def : Pat<(!cast<PatFrag>(Op#"_release") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 5)>;
  def : Pat<(!cast<PatFrag>(Op#"_acq_rel") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 6)>;
  def : Pat<(!cast<PatFrag>(Op#"_seq_cst") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 7)>;
}
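// Like the pseudo AMOs above, the cmpxchg pseudos below are expanded into
// LR/SC loops after register allocation (see RISCVExpandAtomicPseudoInsts);
// the Size fields give the byte size of the expanded sequence.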

def PseudoCmpXchg32 : PseudoCmpXchg;
defm : PseudoCmpXchgPat<"atomic_cmp_swap_32", PseudoCmpXchg32>;

def PseudoMaskedCmpXchg32
    : Pseudo<(outs GPR:$res, GPR:$scratch),
             (ins GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask,
              ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
  let Size = 32;
}

def : Pat<(int_riscv_masked_cmpxchg_i32
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering),
          (PseudoMaskedCmpXchg32
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering)>;

} // Predicates = [HasStdExtA]

let Predicates = [HasStdExtA, IsRV64] in {

defm : AMOPat<"atomic_swap_64", "AMOSWAP_D">;
defm : AMOPat<"atomic_load_add_64", "AMOADD_D">;
defm : AMOPat<"atomic_load_and_64", "AMOAND_D">;
defm : AMOPat<"atomic_load_or_64", "AMOOR_D">;
defm : AMOPat<"atomic_load_xor_64", "AMOXOR_D">;
defm : AMOPat<"atomic_load_max_64", "AMOMAX_D">;
defm : AMOPat<"atomic_load_min_64", "AMOMIN_D">;
defm : AMOPat<"atomic_load_umax_64", "AMOMAXU_D">;
defm : AMOPat<"atomic_load_umin_64", "AMOMINU_D">;

/// 64-bit AMOs

def : Pat<(i64 (atomic_load_sub_64_monotonic GPR:$addr, GPR:$incr)),
          (AMOADD_D GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(i64 (atomic_load_sub_64_acquire GPR:$addr, GPR:$incr)),
          (AMOADD_D_AQ GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(i64 (atomic_load_sub_64_release GPR:$addr, GPR:$incr)),
          (AMOADD_D_RL GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(i64 (atomic_load_sub_64_acq_rel GPR:$addr, GPR:$incr)),
          (AMOADD_D_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(i64 (atomic_load_sub_64_seq_cst GPR:$addr, GPR:$incr)),
          (AMOADD_D_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;

/// 64-bit pseudo AMOs

let Size = 20 in
def PseudoAtomicLoadNand64 : PseudoAMO;
// Ordering constants must be kept in sync with the AtomicOrdering enum in
// AtomicOrdering.h.
def : Pat<(i64 (atomic_load_nand_64_monotonic GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 2)>;
def : Pat<(i64 (atomic_load_nand_64_acquire GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 4)>;
def : Pat<(i64 (atomic_load_nand_64_release GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 5)>;
def : Pat<(i64 (atomic_load_nand_64_acq_rel GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 6)>;
def : Pat<(i64 (atomic_load_nand_64_seq_cst GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 7)>;

def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i64,
                         PseudoMaskedAtomicSwap32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i64,
                         PseudoMaskedAtomicLoadAdd32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i64,
                         PseudoMaskedAtomicLoadSub32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i64,
                         PseudoMaskedAtomicLoadNand32>;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i64,
                               PseudoMaskedAtomicLoadMax32>;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i64,
                               PseudoMaskedAtomicLoadMin32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i64,
                         PseudoMaskedAtomicLoadUMax32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i64,
                         PseudoMaskedAtomicLoadUMin32>;
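// The i64-typed masked atomic intrinsics reuse the "...32" pseudos defined
// above rather than having separate 64-bit definitions, since masked
// (partial-word) atomics never touch more than 32 bits of data.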

/// 64-bit compare and exchange

def PseudoCmpXchg64 : PseudoCmpXchg;
defm : PseudoCmpXchgPat<"atomic_cmp_swap_64", PseudoCmpXchg64>;

def : Pat<(int_riscv_masked_cmpxchg_i64
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering),
          (PseudoMaskedCmpXchg32
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering)>;
} // Predicates = [HasStdExtA, IsRV64]