// WebAssemblyInstrAtomics.td-WebAssembly Atomic codegen support-*- tablegen -*-
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// WebAssembly Atomic operand code-gen constructs.
///
//===----------------------------------------------------------------------===//

let UseNamedOperandTable = 1 in
multiclass ATOMIC_I<dag oops_r, dag iops_r, dag oops_s, dag iops_s,
                    list<dag> pattern_r, string asmstr_r,
                    string asmstr_s, bits<32> atomic_op,
                    bit is64 = false> {
  defm "" : I<oops_r, iops_r, oops_s, iops_s, pattern_r, asmstr_r, asmstr_s,
              !or(0xfe00, !and(0xff, atomic_op)), is64>,
            Requires<[HasAtomics]>;
}

multiclass ATOMIC_NRI<dag oops, dag iops, list<dag> pattern, string asmstr = "",
                      bits<32> atomic_op = -1> {
  defm "" : NRI<oops, iops, pattern, asmstr,
                !or(0xfe00, !and(0xff, atomic_op))>,
            Requires<[HasAtomics]>;
}
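
// All atomic instructions live under wasm's 0xFE "threads" opcode prefix, so
// the multiclasses above fold the sub-opcode into a single 32-bit encoding.
// A hedged example of the arithmetic (values taken from the defs below):
//   atomic_op = 0x10 (i32.atomic.load)      -> !or(0xfe00, 0x10) = 0xfe10
//   atomic_op = 0x00 (memory.atomic.notify) -> !or(0xfe00, 0x00) = 0xfe00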

//===----------------------------------------------------------------------===//
// Atomic wait / notify
//===----------------------------------------------------------------------===//

let hasSideEffects = 1 in {
defm MEMORY_ATOMIC_NOTIFY_A32 :
  ATOMIC_I<(outs I32:$dst),
           (ins P2Align:$p2align, offset32_op:$off, I32:$addr, I32:$count),
           (outs), (ins P2Align:$p2align, offset32_op:$off), [],
           "memory.atomic.notify \t$dst, ${off}(${addr})${p2align}, $count",
           "memory.atomic.notify \t${off}${p2align}", 0x00, false>;
defm MEMORY_ATOMIC_NOTIFY_A64 :
  ATOMIC_I<(outs I32:$dst),
           (ins P2Align:$p2align, offset64_op:$off, I64:$addr, I32:$count),
           (outs), (ins P2Align:$p2align, offset64_op:$off), [],
           "memory.atomic.notify \t$dst, ${off}(${addr})${p2align}, $count",
           "memory.atomic.notify \t${off}${p2align}", 0x00, true>;
let mayLoad = 1 in {
defm MEMORY_ATOMIC_WAIT32_A32 :
  ATOMIC_I<(outs I32:$dst),
           (ins P2Align:$p2align, offset32_op:$off, I32:$addr, I32:$exp,
                I64:$timeout),
           (outs), (ins P2Align:$p2align, offset32_op:$off), [],
           "memory.atomic.wait32 \t$dst, ${off}(${addr})${p2align}, $exp, $timeout",
           "memory.atomic.wait32 \t${off}${p2align}", 0x01, false>;
defm MEMORY_ATOMIC_WAIT32_A64 :
  ATOMIC_I<(outs I32:$dst),
           (ins P2Align:$p2align, offset64_op:$off, I64:$addr, I32:$exp,
                I64:$timeout),
           (outs), (ins P2Align:$p2align, offset64_op:$off), [],
           "memory.atomic.wait32 \t$dst, ${off}(${addr})${p2align}, $exp, $timeout",
           "memory.atomic.wait32 \t${off}${p2align}", 0x01, true>;
defm MEMORY_ATOMIC_WAIT64_A32 :
  ATOMIC_I<(outs I32:$dst),
           (ins P2Align:$p2align, offset32_op:$off, I32:$addr, I64:$exp,
                I64:$timeout),
           (outs), (ins P2Align:$p2align, offset32_op:$off), [],
           "memory.atomic.wait64 \t$dst, ${off}(${addr})${p2align}, $exp, $timeout",
           "memory.atomic.wait64 \t${off}${p2align}", 0x02, false>;
defm MEMORY_ATOMIC_WAIT64_A64 :
  ATOMIC_I<(outs I32:$dst),
           (ins P2Align:$p2align, offset64_op:$off, I64:$addr, I64:$exp,
                I64:$timeout),
           (outs), (ins P2Align:$p2align, offset64_op:$off), [],
           "memory.atomic.wait64 \t$dst, ${off}(${addr})${p2align}, $exp, $timeout",
           "memory.atomic.wait64 \t${off}${p2align}", 0x02, true>;
} // mayLoad = 1
} // hasSideEffects = 1

def NotifyPat_A32 :
  Pat<(i32 (int_wasm_memory_atomic_notify (AddrOps32 offset32_op:$offset, I32:$addr), I32:$count)),
      (MEMORY_ATOMIC_NOTIFY_A32 0, $offset, $addr, $count)>,
  Requires<[HasAddr32, HasAtomics]>;
def NotifyPat_A64 :
  Pat<(i32 (int_wasm_memory_atomic_notify (AddrOps64 offset64_op:$offset, I64:$addr), I32:$count)),
      (MEMORY_ATOMIC_NOTIFY_A64 0, $offset, $addr, $count)>,
  Requires<[HasAddr64, HasAtomics]>;

multiclass WaitPat<ValueType ty, Intrinsic kind, string inst> {
  def WaitPat_A32 :
    Pat<(i32 (kind (AddrOps32 offset32_op:$offset, I32:$addr), ty:$exp, I64:$timeout)),
        (!cast<NI>(inst#"_A32") 0, $offset, $addr, $exp, $timeout)>,
    Requires<[HasAddr32, HasAtomics]>;
  def WaitPat_A64 :
    Pat<(i32 (kind (AddrOps64 offset64_op:$offset, I64:$addr), ty:$exp, I64:$timeout)),
        (!cast<NI>(inst#"_A64") 0, $offset, $addr, $exp, $timeout)>,
    Requires<[HasAddr64, HasAtomics]>;
}

defm : WaitPat<i32, int_wasm_memory_atomic_wait32, "MEMORY_ATOMIC_WAIT32">;
defm : WaitPat<i64, int_wasm_memory_atomic_wait64, "MEMORY_ATOMIC_WAIT64">;
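
// Illustrative sketch (the IR-level intrinsic name is inferred from the
// int_wasm_* defs above): a call such as
//   %r = call i32 @llvm.wasm.memory.atomic.wait32(ptr %p, i32 %exp, i64 %tmo)
// matches WaitPat_A32 under HasAddr32 and selects, roughly,
//   memory.atomic.wait32 $r, 0($p), $exp, $tmo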

//===----------------------------------------------------------------------===//
// Atomic fences
//===----------------------------------------------------------------------===//

// A compiler fence instruction that prevents reordering of instructions.
let Defs = [ARGUMENTS] in {
let isPseudo = 1, hasSideEffects = 1 in
defm COMPILER_FENCE : ATOMIC_NRI<(outs), (ins), [], "compiler_fence">;
let hasSideEffects = 1 in
defm ATOMIC_FENCE : ATOMIC_NRI<(outs), (ins i8imm:$flags), [], "atomic.fence",
                               0x03>;
} // Defs = [ARGUMENTS]
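
// Rough example of when each fence is used (an assumption about the lowering,
// which lives outside this file): a single-thread fence such as
//   fence syncscope("singlethread") seq_cst
// only needs to constrain the compiler and can become COMPILER_FENCE, which
// emits no code, while a cross-thread "fence seq_cst" becomes atomic.fence.
// The $flags immediate of atomic.fence is reserved and currently zero.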

//===----------------------------------------------------------------------===//
// Atomic loads
//===----------------------------------------------------------------------===//

multiclass AtomicLoad<WebAssemblyRegClass rc, string name, int atomic_op> {
  defm "" : WebAssemblyLoad<rc, name, !or(0xfe00, !and(0xff, atomic_op)),
                            [HasAtomics]>;
}

defm ATOMIC_LOAD_I32 : AtomicLoad<I32, "i32.atomic.load", 0x10>;
defm ATOMIC_LOAD_I64 : AtomicLoad<I64, "i64.atomic.load", 0x11>;

// Select loads
defm : LoadPat<i32, atomic_load_32, "ATOMIC_LOAD_I32">;
defm : LoadPat<i64, atomic_load_64, "ATOMIC_LOAD_I64">;

// Extending loads. Note that there are only zero-extending atomic loads, no
// sign-extending loads.
defm ATOMIC_LOAD8_U_I32 : AtomicLoad<I32, "i32.atomic.load8_u", 0x12>;
defm ATOMIC_LOAD16_U_I32 : AtomicLoad<I32, "i32.atomic.load16_u", 0x13>;
defm ATOMIC_LOAD8_U_I64 : AtomicLoad<I64, "i64.atomic.load8_u", 0x14>;
defm ATOMIC_LOAD16_U_I64 : AtomicLoad<I64, "i64.atomic.load16_u", 0x15>;
defm ATOMIC_LOAD32_U_I64 : AtomicLoad<I64, "i64.atomic.load32_u", 0x16>;

// Fragments for extending loads. These are different from regular loads because
// the SDNodes are derived from AtomicSDNode rather than LoadSDNode and
// therefore don't have the extension type field. So instead of matching that,
// we match the patterns that the type legalizer expands them to.

// Unlike regular loads, extension to i64 is handled differently than i32.
// i64 (zext (i8 (atomic_load_8))) gets legalized to
// i64 (and (i64 (anyext (i32 (atomic_load_8)))), 255)
// Extension to i32 is elided by SelectionDAG as our atomic loads are
// zero-extending.
def zext_aload_8_64 :
  PatFrag<(ops node:$addr),
          (i64 (zext (i32 (atomic_load_8 node:$addr))))>;
def zext_aload_16_64 :
  PatFrag<(ops node:$addr),
          (i64 (zext (i32 (atomic_load_16 node:$addr))))>;
def zext_aload_32_64 :
  PatFrag<(ops node:$addr),
          (i64 (zext (i32 (atomic_load_32 node:$addr))))>;
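
// A hedged example of these fragments in action: for IR like
//   %w = load atomic i32, ptr %p monotonic, align 4
//   %z = zext i32 %w to i64
// the DAG is (i64 (zext (i32 (atomic_load_32 %p)))), which zext_aload_32_64
// matches, so the whole tree selects to a single i64.atomic.load32_u.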

// We don't have single sext atomic load instructions. So for sext loads, we
// match bare subword loads (for 32-bit results) and anyext loads (for 64-bit
// results) and select a zext load; the next instruction will be sext_inreg
// which is selected by itself.
def sext_aload_8_64 :
  PatFrag<(ops node:$addr), (anyext (i32 (atomic_load_8 node:$addr)))>;
def sext_aload_16_64 :
  PatFrag<(ops node:$addr), (anyext (i32 (atomic_load_16 node:$addr)))>;
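
// Sketch of the sext path (assuming the sign-extension operators are what
// sext_inreg selects to): an i64 sext of an atomic i8 load becomes
// (sext_inreg (anyext (atomic_load_8 %p)), i8); sext_aload_8_64 selects the
// load to i64.atomic.load8_u, and the sext_inreg is selected separately,
// e.g. to i64.extend8_s.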

// Select zero-extending loads
defm : LoadPat<i64, zext_aload_8_64, "ATOMIC_LOAD8_U_I64">;
defm : LoadPat<i64, zext_aload_16_64, "ATOMIC_LOAD16_U_I64">;
defm : LoadPat<i64, zext_aload_32_64, "ATOMIC_LOAD32_U_I64">;

// Select sign-extending loads
defm : LoadPat<i32, atomic_load_8, "ATOMIC_LOAD8_U_I32">;
defm : LoadPat<i32, atomic_load_16, "ATOMIC_LOAD16_U_I32">;
defm : LoadPat<i64, sext_aload_8_64, "ATOMIC_LOAD8_U_I64">;
defm : LoadPat<i64, sext_aload_16_64, "ATOMIC_LOAD16_U_I64">;
// 32->64 sext load gets selected as i32.atomic.load, i64.extend_i32_s

//===----------------------------------------------------------------------===//
// Atomic stores
//===----------------------------------------------------------------------===//

multiclass AtomicStore<WebAssemblyRegClass rc, string name, int atomic_op> {
  defm "" : WebAssemblyStore<rc, name, !or(0xfe00, !and(0xff, atomic_op)),
                             [HasAtomics]>;
}

defm ATOMIC_STORE_I32 : AtomicStore<I32, "i32.atomic.store", 0x17>;
defm ATOMIC_STORE_I64 : AtomicStore<I64, "i64.atomic.store", 0x18>;

// We used to need an 'atomic' version of store patterns because store and
// atomic_store nodes had different operand orders.
//
// TODO: This is no longer true, so the atomic_store and store patterns can be
// unified.
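
// For reference, both node kinds now take the value operand first in
// patterns, e.g.
//   (atomic_store_32 I32:$val, (AddrOps32 offset32_op:$offset, I32:$addr))
// as matched by AStorePat below.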

multiclass AStorePat<ValueType ty, PatFrag kind, string inst> {
  def : Pat<(kind ty:$val, (AddrOps32 offset32_op:$offset, I32:$addr)),
            (!cast<NI>(inst#"_A32") 0, $offset, $addr, $val)>,
        Requires<[HasAddr32, HasAtomics]>;
  def : Pat<(kind ty:$val, (AddrOps64 offset64_op:$offset, I64:$addr)),
            (!cast<NI>(inst#"_A64") 0, $offset, $addr, $val)>,
        Requires<[HasAddr64, HasAtomics]>;
}
defm : AStorePat<i32, atomic_store_32, "ATOMIC_STORE_I32">;
defm : AStorePat<i64, atomic_store_64, "ATOMIC_STORE_I64">;

// Truncating stores.
defm ATOMIC_STORE8_I32 : AtomicStore<I32, "i32.atomic.store8", 0x19>;
defm ATOMIC_STORE16_I32 : AtomicStore<I32, "i32.atomic.store16", 0x1a>;
defm ATOMIC_STORE8_I64 : AtomicStore<I64, "i64.atomic.store8", 0x1b>;
defm ATOMIC_STORE16_I64 : AtomicStore<I64, "i64.atomic.store16", 0x1c>;
defm ATOMIC_STORE32_I64 : AtomicStore<I64, "i64.atomic.store32", 0x1d>;

// Fragments for truncating stores.

// We don't have single truncating atomic store instructions. For 32-bit
// instructions, we just need to match bare atomic stores. Truncating stores
// from i64 values, on the other hand, are first truncated to i32.
class trunc_astore_64<PatFrag kind> :
  PatFrag<(ops node:$val, node:$addr),
          (kind (i32 (trunc (i64 node:$val))), node:$addr)>;
def trunc_astore_8_64 : trunc_astore_64<atomic_store_8>;
def trunc_astore_16_64 : trunc_astore_64<atomic_store_16>;
def trunc_astore_32_64 : trunc_astore_64<atomic_store_32>;
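
// Hedged example: IR such as
//   %t = trunc i64 %v to i8
//   store atomic i8 %t, ptr %p monotonic, align 1
// reaches instruction selection as (atomic_store_8 (i32 (trunc (i64 %v))), %p)
// after type legalization, matches trunc_astore_8_64, and selects a single
// i64.atomic.store8.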

// Select truncating stores.
defm : AStorePat<i32, atomic_store_8, "ATOMIC_STORE8_I32">;
defm : AStorePat<i32, atomic_store_16, "ATOMIC_STORE16_I32">;
defm : AStorePat<i64, trunc_astore_8_64, "ATOMIC_STORE8_I64">;
defm : AStorePat<i64, trunc_astore_16_64, "ATOMIC_STORE16_I64">;
defm : AStorePat<i64, trunc_astore_32_64, "ATOMIC_STORE32_I64">;

//===----------------------------------------------------------------------===//
// Atomic binary read-modify-writes
//===----------------------------------------------------------------------===//

multiclass WebAssemblyBinRMW<WebAssemblyRegClass rc, string name,
                             int atomic_op> {
  defm "_A32" :
    ATOMIC_I<(outs rc:$dst),
             (ins P2Align:$p2align, offset32_op:$off, I32:$addr, rc:$val),
             (outs), (ins P2Align:$p2align, offset32_op:$off), [],
             !strconcat(name, "\t$dst, ${off}(${addr})${p2align}, $val"),
             !strconcat(name, "\t${off}${p2align}"), atomic_op, false>;
  defm "_A64" :
    ATOMIC_I<(outs rc:$dst),
             (ins P2Align:$p2align, offset64_op:$off, I64:$addr, rc:$val),
             (outs), (ins P2Align:$p2align, offset64_op:$off), [],
             !strconcat(name, "\t$dst, ${off}(${addr})${p2align}, $val"),
             !strconcat(name, "\t${off}${p2align}"), atomic_op, true>;
}

defm ATOMIC_RMW_ADD_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.add", 0x1e>;
defm ATOMIC_RMW_ADD_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.add", 0x1f>;
defm ATOMIC_RMW8_U_ADD_I32 :
  WebAssemblyBinRMW<I32, "i32.atomic.rmw8.add_u", 0x20>;
defm ATOMIC_RMW16_U_ADD_I32 :
  WebAssemblyBinRMW<I32, "i32.atomic.rmw16.add_u", 0x21>;
defm ATOMIC_RMW8_U_ADD_I64 :
  WebAssemblyBinRMW<I64, "i64.atomic.rmw8.add_u", 0x22>;
defm ATOMIC_RMW16_U_ADD_I64 :
  WebAssemblyBinRMW<I64, "i64.atomic.rmw16.add_u", 0x23>;
defm ATOMIC_RMW32_U_ADD_I64 :
  WebAssemblyBinRMW<I64, "i64.atomic.rmw32.add_u", 0x24>;

defm ATOMIC_RMW_SUB_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.sub", 0x25>;
defm ATOMIC_RMW_SUB_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.sub", 0x26>;
defm ATOMIC_RMW8_U_SUB_I32 :
  WebAssemblyBinRMW<I32, "i32.atomic.rmw8.sub_u", 0x27>;
defm ATOMIC_RMW16_U_SUB_I32 :
  WebAssemblyBinRMW<I32, "i32.atomic.rmw16.sub_u", 0x28>;
defm ATOMIC_RMW8_U_SUB_I64 :
  WebAssemblyBinRMW<I64, "i64.atomic.rmw8.sub_u", 0x29>;
defm ATOMIC_RMW16_U_SUB_I64 :
  WebAssemblyBinRMW<I64, "i64.atomic.rmw16.sub_u", 0x2a>;
defm ATOMIC_RMW32_U_SUB_I64 :
  WebAssemblyBinRMW<I64, "i64.atomic.rmw32.sub_u", 0x2b>;

defm ATOMIC_RMW_AND_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.and", 0x2c>;
defm ATOMIC_RMW_AND_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.and", 0x2d>;
defm ATOMIC_RMW8_U_AND_I32 :
  WebAssemblyBinRMW<I32, "i32.atomic.rmw8.and_u", 0x2e>;
defm ATOMIC_RMW16_U_AND_I32 :
  WebAssemblyBinRMW<I32, "i32.atomic.rmw16.and_u", 0x2f>;
defm ATOMIC_RMW8_U_AND_I64 :
  WebAssemblyBinRMW<I64, "i64.atomic.rmw8.and_u", 0x30>;
defm ATOMIC_RMW16_U_AND_I64 :
  WebAssemblyBinRMW<I64, "i64.atomic.rmw16.and_u", 0x31>;
defm ATOMIC_RMW32_U_AND_I64 :
  WebAssemblyBinRMW<I64, "i64.atomic.rmw32.and_u", 0x32>;

defm ATOMIC_RMW_OR_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.or", 0x33>;
defm ATOMIC_RMW_OR_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.or", 0x34>;
defm ATOMIC_RMW8_U_OR_I32 :
  WebAssemblyBinRMW<I32, "i32.atomic.rmw8.or_u", 0x35>;
defm ATOMIC_RMW16_U_OR_I32 :
  WebAssemblyBinRMW<I32, "i32.atomic.rmw16.or_u", 0x36>;
defm ATOMIC_RMW8_U_OR_I64 :
  WebAssemblyBinRMW<I64, "i64.atomic.rmw8.or_u", 0x37>;
defm ATOMIC_RMW16_U_OR_I64 :
  WebAssemblyBinRMW<I64, "i64.atomic.rmw16.or_u", 0x38>;
defm ATOMIC_RMW32_U_OR_I64 :
  WebAssemblyBinRMW<I64, "i64.atomic.rmw32.or_u", 0x39>;

defm ATOMIC_RMW_XOR_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.xor", 0x3a>;
defm ATOMIC_RMW_XOR_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.xor", 0x3b>;
defm ATOMIC_RMW8_U_XOR_I32 :
  WebAssemblyBinRMW<I32, "i32.atomic.rmw8.xor_u", 0x3c>;
defm ATOMIC_RMW16_U_XOR_I32 :
  WebAssemblyBinRMW<I32, "i32.atomic.rmw16.xor_u", 0x3d>;
defm ATOMIC_RMW8_U_XOR_I64 :
  WebAssemblyBinRMW<I64, "i64.atomic.rmw8.xor_u", 0x3e>;
defm ATOMIC_RMW16_U_XOR_I64 :
  WebAssemblyBinRMW<I64, "i64.atomic.rmw16.xor_u", 0x3f>;
defm ATOMIC_RMW32_U_XOR_I64 :
  WebAssemblyBinRMW<I64, "i64.atomic.rmw32.xor_u", 0x40>;

defm ATOMIC_RMW_XCHG_I32 :
  WebAssemblyBinRMW<I32, "i32.atomic.rmw.xchg", 0x41>;
defm ATOMIC_RMW_XCHG_I64 :
  WebAssemblyBinRMW<I64, "i64.atomic.rmw.xchg", 0x42>;
defm ATOMIC_RMW8_U_XCHG_I32 :
  WebAssemblyBinRMW<I32, "i32.atomic.rmw8.xchg_u", 0x43>;
defm ATOMIC_RMW16_U_XCHG_I32 :
  WebAssemblyBinRMW<I32, "i32.atomic.rmw16.xchg_u", 0x44>;
defm ATOMIC_RMW8_U_XCHG_I64 :
  WebAssemblyBinRMW<I64, "i64.atomic.rmw8.xchg_u", 0x45>;
defm ATOMIC_RMW16_U_XCHG_I64 :
  WebAssemblyBinRMW<I64, "i64.atomic.rmw16.xchg_u", 0x46>;
defm ATOMIC_RMW32_U_XCHG_I64 :
  WebAssemblyBinRMW<I64, "i64.atomic.rmw32.xchg_u", 0x47>;

multiclass BinRMWPat<ValueType ty, PatFrag kind, string inst> {
  def : Pat<(ty (kind (AddrOps32 offset32_op:$offset, I32:$addr), ty:$val)),
            (!cast<NI>(inst#"_A32") 0, $offset, $addr, $val)>,
        Requires<[HasAddr32, HasAtomics]>;
  def : Pat<(ty (kind (AddrOps64 offset64_op:$offset, I64:$addr), ty:$val)),
            (!cast<NI>(inst#"_A64") 0, $offset, $addr, $val)>,
        Requires<[HasAddr64, HasAtomics]>;
}

// Patterns for various addressing modes.
multiclass BinRMWPattern<PatFrag rmw_32, PatFrag rmw_64, string inst_32,
                         string inst_64> {
  defm : BinRMWPat<i32, rmw_32, inst_32>;
  defm : BinRMWPat<i64, rmw_64, inst_64>;
}

defm : BinRMWPattern<atomic_load_add_32, atomic_load_add_64,
                     "ATOMIC_RMW_ADD_I32", "ATOMIC_RMW_ADD_I64">;
defm : BinRMWPattern<atomic_load_sub_32, atomic_load_sub_64,
                     "ATOMIC_RMW_SUB_I32", "ATOMIC_RMW_SUB_I64">;
defm : BinRMWPattern<atomic_load_and_32, atomic_load_and_64,
                     "ATOMIC_RMW_AND_I32", "ATOMIC_RMW_AND_I64">;
defm : BinRMWPattern<atomic_load_or_32, atomic_load_or_64,
                     "ATOMIC_RMW_OR_I32", "ATOMIC_RMW_OR_I64">;
defm : BinRMWPattern<atomic_load_xor_32, atomic_load_xor_64,
                     "ATOMIC_RMW_XOR_I32", "ATOMIC_RMW_XOR_I64">;
defm : BinRMWPattern<atomic_swap_32, atomic_swap_64,
                     "ATOMIC_RMW_XCHG_I32", "ATOMIC_RMW_XCHG_I64">;

// Truncating & zero-extending binary RMW patterns.
// These are combined patterns of truncating store patterns and zero-extending
// load patterns above.
class zext_bin_rmw_8_32<PatFrag kind> :
  PatFrag<(ops node:$addr, node:$val), (i32 (kind node:$addr, node:$val))>;
class zext_bin_rmw_16_32<PatFrag kind> : zext_bin_rmw_8_32<kind>;
class zext_bin_rmw_8_64<PatFrag kind> :
  PatFrag<(ops node:$addr, node:$val),
          (zext (i32 (kind node:$addr, (i32 (trunc (i64 node:$val))))))>;
class zext_bin_rmw_16_64<PatFrag kind> : zext_bin_rmw_8_64<kind>;
class zext_bin_rmw_32_64<PatFrag kind> : zext_bin_rmw_8_64<kind>;
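
// Hedged example of the zext path: after type legalization,
//   %old = atomicrmw add ptr %p, i8 %v monotonic
// with %old zero-extended to i64 becomes roughly
//   (zext (i32 (atomic_load_add_8 %p, (i32 (trunc (i64 %v)))))),
// which zext_bin_rmw_8_64<atomic_load_add_8> matches, selecting a single
// i64.atomic.rmw8.add_u.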

// Truncating & sign-extending binary RMW patterns.
// These are combined patterns of truncating store patterns and sign-extending
// load patterns above. We match subword RMWs (for 32-bit) and anyext RMWs (for
// 64-bit) and select a zext RMW; the next instruction will be sext_inreg which
// is selected by itself.
class sext_bin_rmw_8_32<PatFrag kind> :
  PatFrag<(ops node:$addr, node:$val), (kind node:$addr, node:$val)>;
class sext_bin_rmw_16_32<PatFrag kind> : sext_bin_rmw_8_32<kind>;
class sext_bin_rmw_8_64<PatFrag kind> :
  PatFrag<(ops node:$addr, node:$val),
          (anyext (i32 (kind node:$addr, (i32 (trunc (i64 node:$val))))))>;
class sext_bin_rmw_16_64<PatFrag kind> : sext_bin_rmw_8_64<kind>;
// 32->64 sext RMW gets selected as i32.atomic.rmw.***, i64.extend_i32_s
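
// Sketch of the sext path: a sign-extended subword atomicrmw result becomes
// sext_inreg of the fragments above; the RMW itself selects to the zext
// instruction (e.g. i64.atomic.rmw8.add_u) and the trailing sext_inreg is
// selected on its own, e.g. to i64.extend8_s.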

// Patterns for various addressing modes for truncating-extending binary RMWs.
multiclass BinRMWTruncExtPattern<
  PatFrag rmw_8, PatFrag rmw_16, PatFrag rmw_32,
  string inst8_32, string inst16_32, string inst8_64, string inst16_64,
  string inst32_64> {
  // Truncating-extending binary RMWs
  defm : BinRMWPat<i32, zext_bin_rmw_8_32<rmw_8>, inst8_32>;
  defm : BinRMWPat<i32, zext_bin_rmw_16_32<rmw_16>, inst16_32>;
  defm : BinRMWPat<i64, zext_bin_rmw_8_64<rmw_8>, inst8_64>;
  defm : BinRMWPat<i64, zext_bin_rmw_16_64<rmw_16>, inst16_64>;
  defm : BinRMWPat<i64, zext_bin_rmw_32_64<rmw_32>, inst32_64>;

  defm : BinRMWPat<i32, sext_bin_rmw_8_32<rmw_8>, inst8_32>;
  defm : BinRMWPat<i32, sext_bin_rmw_16_32<rmw_16>, inst16_32>;
  defm : BinRMWPat<i64, sext_bin_rmw_8_64<rmw_8>, inst8_64>;
  defm : BinRMWPat<i64, sext_bin_rmw_16_64<rmw_16>, inst16_64>;
}

defm : BinRMWTruncExtPattern<
  atomic_load_add_8, atomic_load_add_16, atomic_load_add_32,
  "ATOMIC_RMW8_U_ADD_I32", "ATOMIC_RMW16_U_ADD_I32",
  "ATOMIC_RMW8_U_ADD_I64", "ATOMIC_RMW16_U_ADD_I64", "ATOMIC_RMW32_U_ADD_I64">;
defm : BinRMWTruncExtPattern<
  atomic_load_sub_8, atomic_load_sub_16, atomic_load_sub_32,
  "ATOMIC_RMW8_U_SUB_I32", "ATOMIC_RMW16_U_SUB_I32",
  "ATOMIC_RMW8_U_SUB_I64", "ATOMIC_RMW16_U_SUB_I64", "ATOMIC_RMW32_U_SUB_I64">;
defm : BinRMWTruncExtPattern<
  atomic_load_and_8, atomic_load_and_16, atomic_load_and_32,
  "ATOMIC_RMW8_U_AND_I32", "ATOMIC_RMW16_U_AND_I32",
  "ATOMIC_RMW8_U_AND_I64", "ATOMIC_RMW16_U_AND_I64", "ATOMIC_RMW32_U_AND_I64">;
defm : BinRMWTruncExtPattern<
  atomic_load_or_8, atomic_load_or_16, atomic_load_or_32,
  "ATOMIC_RMW8_U_OR_I32", "ATOMIC_RMW16_U_OR_I32",
  "ATOMIC_RMW8_U_OR_I64", "ATOMIC_RMW16_U_OR_I64", "ATOMIC_RMW32_U_OR_I64">;
defm : BinRMWTruncExtPattern<
  atomic_load_xor_8, atomic_load_xor_16, atomic_load_xor_32,
  "ATOMIC_RMW8_U_XOR_I32", "ATOMIC_RMW16_U_XOR_I32",
  "ATOMIC_RMW8_U_XOR_I64", "ATOMIC_RMW16_U_XOR_I64", "ATOMIC_RMW32_U_XOR_I64">;
defm : BinRMWTruncExtPattern<
  atomic_swap_8, atomic_swap_16, atomic_swap_32,
  "ATOMIC_RMW8_U_XCHG_I32", "ATOMIC_RMW16_U_XCHG_I32",
  "ATOMIC_RMW8_U_XCHG_I64", "ATOMIC_RMW16_U_XCHG_I64",
  "ATOMIC_RMW32_U_XCHG_I64">;

//===----------------------------------------------------------------------===//
// Atomic ternary read-modify-writes
//===----------------------------------------------------------------------===//

// TODO: LLVM IR's cmpxchg instruction returns a pair of {loaded value, success
// flag}. When we use the success flag or both values, we can't make use of the
// i64 truncate/extend versions of the instructions for now, which is
// suboptimal. Consider adding a pass after instruction selection that
// optimizes this case if it is frequent.
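
// Illustrative IR (an assumed example, not from this file):
//   %pair = cmpxchg ptr %p, i8 %e, i8 %n monotonic monotonic
//   %ok   = extractvalue { i8, i1 } %pair, 1
// Using %ok requires comparing the loaded value against %e, so such a cmpxchg
// cannot currently be selected to the truncating/extending i64 forms below.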

multiclass WebAssemblyTerRMW<WebAssemblyRegClass rc, string name,
                             int atomic_op> {
  defm "_A32" :
    ATOMIC_I<(outs rc:$dst),
             (ins P2Align:$p2align, offset32_op:$off, I32:$addr, rc:$exp,
                  rc:$new_),
             (outs), (ins P2Align:$p2align, offset32_op:$off), [],
             !strconcat(name, "\t$dst, ${off}(${addr})${p2align}, $exp, $new_"),
             !strconcat(name, "\t${off}${p2align}"), atomic_op, false>;
  defm "_A64" :
    ATOMIC_I<(outs rc:$dst),
             (ins P2Align:$p2align, offset64_op:$off, I64:$addr, rc:$exp,
                  rc:$new_),
             (outs), (ins P2Align:$p2align, offset64_op:$off), [],
             !strconcat(name, "\t$dst, ${off}(${addr})${p2align}, $exp, $new_"),
             !strconcat(name, "\t${off}${p2align}"), atomic_op, true>;
}

defm ATOMIC_RMW_CMPXCHG_I32 :
  WebAssemblyTerRMW<I32, "i32.atomic.rmw.cmpxchg", 0x48>;
defm ATOMIC_RMW_CMPXCHG_I64 :
  WebAssemblyTerRMW<I64, "i64.atomic.rmw.cmpxchg", 0x49>;
defm ATOMIC_RMW8_U_CMPXCHG_I32 :
  WebAssemblyTerRMW<I32, "i32.atomic.rmw8.cmpxchg_u", 0x4a>;
defm ATOMIC_RMW16_U_CMPXCHG_I32 :
  WebAssemblyTerRMW<I32, "i32.atomic.rmw16.cmpxchg_u", 0x4b>;
defm ATOMIC_RMW8_U_CMPXCHG_I64 :
  WebAssemblyTerRMW<I64, "i64.atomic.rmw8.cmpxchg_u", 0x4c>;
defm ATOMIC_RMW16_U_CMPXCHG_I64 :
  WebAssemblyTerRMW<I64, "i64.atomic.rmw16.cmpxchg_u", 0x4d>;
defm ATOMIC_RMW32_U_CMPXCHG_I64 :
  WebAssemblyTerRMW<I64, "i64.atomic.rmw32.cmpxchg_u", 0x4e>;

multiclass TerRMWPat<ValueType ty, PatFrag kind, string inst> {
  def : Pat<(ty (kind (AddrOps32 offset32_op:$offset, I32:$addr), ty:$exp, ty:$new)),
            (!cast<NI>(inst#"_A32") 0, $offset, $addr, $exp, $new)>,
        Requires<[HasAddr32, HasAtomics]>;
  def : Pat<(ty (kind (AddrOps64 offset64_op:$offset, I64:$addr), ty:$exp, ty:$new)),
            (!cast<NI>(inst#"_A64") 0, $offset, $addr, $exp, $new)>,
        Requires<[HasAddr64, HasAtomics]>;
}

defm : TerRMWPat<i32, atomic_cmp_swap_32, "ATOMIC_RMW_CMPXCHG_I32">;
defm : TerRMWPat<i64, atomic_cmp_swap_64, "ATOMIC_RMW_CMPXCHG_I64">;

// Truncating & zero-extending ternary RMW patterns.
// DAG legalization & optimization before instruction selection may introduce
// additional nodes such as anyext or assertzext depending on operand types.
class zext_ter_rmw_8_32<PatFrag kind> :
  PatFrag<(ops node:$addr, node:$exp, node:$new),
          (i32 (kind node:$addr, node:$exp, node:$new))>;
class zext_ter_rmw_16_32<PatFrag kind> : zext_ter_rmw_8_32<kind>;
class zext_ter_rmw_8_64<PatFrag kind> :
  PatFrag<(ops node:$addr, node:$exp, node:$new),
          (zext (i32 (assertzext (i32 (kind node:$addr,
                                            (i32 (trunc (i64 node:$exp))),
                                            (i32 (trunc (i64 node:$new))))))))>;
class zext_ter_rmw_16_64<PatFrag kind> : zext_ter_rmw_8_64<kind>;
class zext_ter_rmw_32_64<PatFrag kind> :
  PatFrag<(ops node:$addr, node:$exp, node:$new),
          (zext (i32 (kind node:$addr,
                           (i32 (trunc (i64 node:$exp))),
                           (i32 (trunc (i64 node:$new))))))>;
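
// Hedged sketch of why assertzext shows up: for an i8 cmpxchg whose loaded
// result is zero-extended to i64, legalization produces roughly
//   (zext (assertzext (atomic_cmp_swap_8 %p, (trunc %exp), (trunc %new))))
// since the subword result is known to be zero-extended; zext_ter_rmw_8_64
// matches that whole tree and selects i64.atomic.rmw8.cmpxchg_u.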

// Truncating & sign-extending ternary RMW patterns.
// We match subword RMWs (for 32-bit) and anyext RMWs (for 64-bit) and select a
// zext RMW; the next instruction will be sext_inreg which is selected by
// itself.
class sext_ter_rmw_8_32<PatFrag kind> :
  PatFrag<(ops node:$addr, node:$exp, node:$new),
          (kind node:$addr, node:$exp, node:$new)>;
class sext_ter_rmw_16_32<PatFrag kind> : sext_ter_rmw_8_32<kind>;
class sext_ter_rmw_8_64<PatFrag kind> :
  PatFrag<(ops node:$addr, node:$exp, node:$new),
          (anyext (i32 (assertzext (i32
            (kind node:$addr,
                  (i32 (trunc (i64 node:$exp))),
                  (i32 (trunc (i64 node:$new))))))))>;
class sext_ter_rmw_16_64<PatFrag kind> : sext_ter_rmw_8_64<kind>;
// 32->64 sext RMW gets selected as i32.atomic.rmw.***, i64.extend_i32_s

defm : TerRMWPat<i32, zext_ter_rmw_8_32<atomic_cmp_swap_8>, "ATOMIC_RMW8_U_CMPXCHG_I32">;
defm : TerRMWPat<i32, zext_ter_rmw_16_32<atomic_cmp_swap_16>, "ATOMIC_RMW16_U_CMPXCHG_I32">;
defm : TerRMWPat<i64, zext_ter_rmw_8_64<atomic_cmp_swap_8>, "ATOMIC_RMW8_U_CMPXCHG_I64">;
defm : TerRMWPat<i64, zext_ter_rmw_16_64<atomic_cmp_swap_16>, "ATOMIC_RMW16_U_CMPXCHG_I64">;
defm : TerRMWPat<i64, zext_ter_rmw_32_64<atomic_cmp_swap_32>, "ATOMIC_RMW32_U_CMPXCHG_I64">;

defm : TerRMWPat<i32, sext_ter_rmw_8_32<atomic_cmp_swap_8>, "ATOMIC_RMW8_U_CMPXCHG_I32">;
defm : TerRMWPat<i32, sext_ter_rmw_16_32<atomic_cmp_swap_16>, "ATOMIC_RMW16_U_CMPXCHG_I32">;
defm : TerRMWPat<i64, sext_ter_rmw_8_64<atomic_cmp_swap_8>, "ATOMIC_RMW8_U_CMPXCHG_I64">;
defm : TerRMWPat<i64, sext_ter_rmw_16_64<atomic_cmp_swap_16>, "ATOMIC_RMW16_U_CMPXCHG_I64">;