//=- AArch64InstrAtomics.td - AArch64 Atomic codegen support -*- tablegen -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// AArch64 Atomic operand code-gen constructs.
//
//===----------------------------------------------------------------------===//

//===----------------------------------
// Atomic fences
//===----------------------------------
def : Pat<(atomic_fence (i64 4), (imm)), (DMB (i32 0x9))>;
def : Pat<(atomic_fence (imm), (imm)), (DMB (i32 0xb))>;
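
// A sketch of what these select: an acquire fence (ordering value 4, matched
// by the first pattern) becomes "dmb ishld" (CRm = 0x9); every other atomic
// fence falls through to the second pattern and emits "dmb ish" (CRm = 0xb).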

//===----------------------------------
// Atomic loads
//===----------------------------------

// When they're actually atomic, only one addressing mode (the GPR64sp base
// register) is supported, but when they're relaxed, any of the standard
// addressing modes is valid and may give efficiency gains.

// An atomic load operation that actually needs acquire semantics.
class acquiring_load<PatFrag base>
  : PatFrag<(ops node:$ptr), (base node:$ptr), [{
  AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
  return isAtLeastAcquire(Ordering);
}]>;

// An atomic load operation that does not need either acquire or release
// semantics.
class relaxed_load<PatFrag base>
  : PatFrag<(ops node:$ptr), (base node:$ptr), [{
  AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
  return !isAtLeastAcquire(Ordering);
}]>;

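// Selection sketch: an acquire or seq_cst atomic load matches acquiring_load
// and is selected to an LDAR; a monotonic or unordered one matches
// relaxed_load and can use any of the ordinary LDR forms below.
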
// 8-bit loads
def : Pat<(acquiring_load<atomic_load_8>  GPR64sp:$ptr), (LDARB GPR64sp:$ptr)>;
def : Pat<(relaxed_load<atomic_load_8> (ro_Windexed8 GPR64sp:$Rn, GPR32:$Rm,
                                                     ro_Wextend8:$offset)),
          (LDRBBroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend8:$offset)>;
def : Pat<(relaxed_load<atomic_load_8> (ro_Xindexed8 GPR64sp:$Rn, GPR64:$Rm,
                                                     ro_Xextend8:$offset)),
          (LDRBBroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend8:$offset)>;
def : Pat<(relaxed_load<atomic_load_8> (am_indexed8 GPR64sp:$Rn,
                                                    uimm12s1:$offset)),
          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(relaxed_load<atomic_load_8>
               (am_unscaled8 GPR64sp:$Rn, simm9:$offset)),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;

// 16-bit loads
def : Pat<(acquiring_load<atomic_load_16> GPR64sp:$ptr), (LDARH GPR64sp:$ptr)>;
def : Pat<(relaxed_load<atomic_load_16> (ro_Windexed16 GPR64sp:$Rn, GPR32:$Rm,
                                                       ro_Wextend16:$extend)),
          (LDRHHroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend16:$extend)>;
def : Pat<(relaxed_load<atomic_load_16> (ro_Xindexed16 GPR64sp:$Rn, GPR64:$Rm,
                                                       ro_Xextend16:$extend)),
          (LDRHHroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend16:$extend)>;
def : Pat<(relaxed_load<atomic_load_16> (am_indexed16 GPR64sp:$Rn,
                                                      uimm12s2:$offset)),
          (LDRHHui GPR64sp:$Rn, uimm12s2:$offset)>;
def : Pat<(relaxed_load<atomic_load_16>
               (am_unscaled16 GPR64sp:$Rn, simm9:$offset)),
          (LDURHHi GPR64sp:$Rn, simm9:$offset)>;

// 32-bit loads
def : Pat<(acquiring_load<atomic_load_32> GPR64sp:$ptr), (LDARW GPR64sp:$ptr)>;
def : Pat<(relaxed_load<atomic_load_32> (ro_Windexed32 GPR64sp:$Rn, GPR32:$Rm,
                                                       ro_Wextend32:$extend)),
          (LDRWroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend32:$extend)>;
def : Pat<(relaxed_load<atomic_load_32> (ro_Xindexed32 GPR64sp:$Rn, GPR64:$Rm,
                                                       ro_Xextend32:$extend)),
          (LDRWroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend32:$extend)>;
def : Pat<(relaxed_load<atomic_load_32> (am_indexed32 GPR64sp:$Rn,
                                                      uimm12s4:$offset)),
          (LDRWui GPR64sp:$Rn, uimm12s4:$offset)>;
def : Pat<(relaxed_load<atomic_load_32>
               (am_unscaled32 GPR64sp:$Rn, simm9:$offset)),
          (LDURWi GPR64sp:$Rn, simm9:$offset)>;

// 64-bit loads
def : Pat<(acquiring_load<atomic_load_64> GPR64sp:$ptr), (LDARX GPR64sp:$ptr)>;
def : Pat<(relaxed_load<atomic_load_64> (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
                                                       ro_Wextend64:$extend)),
          (LDRXroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;
def : Pat<(relaxed_load<atomic_load_64> (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
                                                       ro_Xextend64:$extend)),
          (LDRXroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
def : Pat<(relaxed_load<atomic_load_64> (am_indexed64 GPR64sp:$Rn,
                                                      uimm12s8:$offset)),
          (LDRXui GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat<(relaxed_load<atomic_load_64>
               (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
          (LDURXi GPR64sp:$Rn, simm9:$offset)>;

//===----------------------------------
// Atomic stores
//===----------------------------------

// When they're actually atomic, only one addressing mode (the GPR64sp base
// register) is supported, but when they're relaxed, any of the standard
// addressing modes is valid and may give efficiency gains.

// A store operation that actually needs release semantics.
class releasing_store<PatFrag base>
  : PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{
  AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
  assert(Ordering != AcquireRelease && "unexpected store ordering");
  return isAtLeastRelease(Ordering);
}]>;

// An atomic store operation that doesn't actually need to be atomic on AArch64.
class relaxed_store<PatFrag base>
  : PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{
  AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
  return !isAtLeastRelease(Ordering);
}]>;

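// As with the loads: release or seq_cst atomic stores match releasing_store
// and are selected to STLR; monotonic/unordered ones match relaxed_store and
// can use the ordinary STR forms.  (acq_rel is not a valid ordering for a
// store, hence the assert in releasing_store.)
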
// 8-bit stores
def : Pat<(releasing_store<atomic_store_8> GPR64sp:$ptr, GPR32:$val),
          (STLRB GPR32:$val, GPR64sp:$ptr)>;
def : Pat<(relaxed_store<atomic_store_8>
               (ro_Windexed8 GPR64sp:$Rn, GPR32:$Rm, ro_Wextend8:$extend),
               GPR32:$val),
          (STRBBroW GPR32:$val, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend8:$extend)>;
def : Pat<(relaxed_store<atomic_store_8>
               (ro_Xindexed8 GPR64sp:$Rn, GPR64:$Rm, ro_Xextend8:$extend),
               GPR32:$val),
          (STRBBroX GPR32:$val, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend8:$extend)>;
def : Pat<(relaxed_store<atomic_store_8>
               (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset), GPR32:$val),
          (STRBBui GPR32:$val, GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(relaxed_store<atomic_store_8>
               (am_unscaled8 GPR64sp:$Rn, simm9:$offset), GPR32:$val),
          (STURBBi GPR32:$val, GPR64sp:$Rn, simm9:$offset)>;

// 16-bit stores
def : Pat<(releasing_store<atomic_store_16> GPR64sp:$ptr, GPR32:$val),
          (STLRH GPR32:$val, GPR64sp:$ptr)>;
def : Pat<(relaxed_store<atomic_store_16> (ro_Windexed16 GPR64sp:$Rn, GPR32:$Rm,
                                                         ro_Wextend16:$extend),
                                          GPR32:$val),
          (STRHHroW GPR32:$val, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend16:$extend)>;
def : Pat<(relaxed_store<atomic_store_16> (ro_Xindexed16 GPR64sp:$Rn, GPR64:$Rm,
                                                         ro_Xextend16:$extend),
                                          GPR32:$val),
          (STRHHroX GPR32:$val, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend16:$extend)>;
def : Pat<(relaxed_store<atomic_store_16>
              (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset), GPR32:$val),
          (STRHHui GPR32:$val, GPR64sp:$Rn, uimm12s2:$offset)>;
def : Pat<(relaxed_store<atomic_store_16>
               (am_unscaled16 GPR64sp:$Rn, simm9:$offset), GPR32:$val),
          (STURHHi GPR32:$val, GPR64sp:$Rn, simm9:$offset)>;

// 32-bit stores
def : Pat<(releasing_store<atomic_store_32> GPR64sp:$ptr, GPR32:$val),
          (STLRW GPR32:$val, GPR64sp:$ptr)>;
def : Pat<(relaxed_store<atomic_store_32> (ro_Windexed32 GPR64sp:$Rn, GPR32:$Rm,
                                                         ro_Wextend32:$extend),
                                          GPR32:$val),
          (STRWroW GPR32:$val, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend32:$extend)>;
def : Pat<(relaxed_store<atomic_store_32> (ro_Xindexed32 GPR64sp:$Rn, GPR64:$Rm,
                                                         ro_Xextend32:$extend),
                                          GPR32:$val),
          (STRWroX GPR32:$val, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend32:$extend)>;
def : Pat<(relaxed_store<atomic_store_32>
              (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset), GPR32:$val),
          (STRWui GPR32:$val, GPR64sp:$Rn, uimm12s4:$offset)>;
def : Pat<(relaxed_store<atomic_store_32>
               (am_unscaled32 GPR64sp:$Rn, simm9:$offset), GPR32:$val),
          (STURWi GPR32:$val, GPR64sp:$Rn, simm9:$offset)>;

// 64-bit stores
def : Pat<(releasing_store<atomic_store_64> GPR64sp:$ptr, GPR64:$val),
          (STLRX GPR64:$val, GPR64sp:$ptr)>;
def : Pat<(relaxed_store<atomic_store_64> (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
                                                         ro_Wextend64:$extend),
                                          GPR64:$val),
          (STRXroW GPR64:$val, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;
def : Pat<(relaxed_store<atomic_store_64> (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
                                                         ro_Xextend64:$extend),
                                          GPR64:$val),
          (STRXroX GPR64:$val, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
def : Pat<(relaxed_store<atomic_store_64>
              (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset), GPR64:$val),
          (STRXui GPR64:$val, GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat<(relaxed_store<atomic_store_64>
               (am_unscaled64 GPR64sp:$Rn, simm9:$offset), GPR64:$val),
          (STURXi GPR64:$val, GPR64sp:$Rn, simm9:$offset)>;

//===----------------------------------
// Low-level exclusive operations
//===----------------------------------

// Load-exclusives.

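// int_aarch64_ldxr is overloaded on the access size, so the PatFrags below
// discriminate on the memory type recorded on the MemIntrinsicSDNode.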
def ldxr_1 : PatFrag<(ops node:$ptr), (int_aarch64_ldxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;

def ldxr_2 : PatFrag<(ops node:$ptr), (int_aarch64_ldxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;

def ldxr_4 : PatFrag<(ops node:$ptr), (int_aarch64_ldxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

def ldxr_8 : PatFrag<(ops node:$ptr), (int_aarch64_ldxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
}]>;

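// The intrinsic always produces an i64, but LDXRB/LDXRH/LDXRW define a W
// register, so the narrower results are placed into an X register with
// SUBREG_TO_REG (the instructions already zero the high bits).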
def : Pat<(ldxr_1 GPR64sp:$addr),
          (SUBREG_TO_REG (i64 0), (LDXRB GPR64sp:$addr), sub_32)>;
def : Pat<(ldxr_2 GPR64sp:$addr),
          (SUBREG_TO_REG (i64 0), (LDXRH GPR64sp:$addr), sub_32)>;
def : Pat<(ldxr_4 GPR64sp:$addr),
          (SUBREG_TO_REG (i64 0), (LDXRW GPR64sp:$addr), sub_32)>;
def : Pat<(ldxr_8 GPR64sp:$addr), (LDXRX GPR64sp:$addr)>;

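// A masked sub-word result is already zero-extended by the load itself, so
// the explicit 'and' can be folded away.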
def : Pat<(and (ldxr_1 GPR64sp:$addr), 0xff),
          (SUBREG_TO_REG (i64 0), (LDXRB GPR64sp:$addr), sub_32)>;
def : Pat<(and (ldxr_2 GPR64sp:$addr), 0xffff),
          (SUBREG_TO_REG (i64 0), (LDXRH GPR64sp:$addr), sub_32)>;
def : Pat<(and (ldxr_4 GPR64sp:$addr), 0xffffffff),
          (SUBREG_TO_REG (i64 0), (LDXRW GPR64sp:$addr), sub_32)>;

// Load-acquire-exclusives.

def ldaxr_1 : PatFrag<(ops node:$ptr), (int_aarch64_ldaxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;

def ldaxr_2 : PatFrag<(ops node:$ptr), (int_aarch64_ldaxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;

def ldaxr_4 : PatFrag<(ops node:$ptr), (int_aarch64_ldaxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

def ldaxr_8 : PatFrag<(ops node:$ptr), (int_aarch64_ldaxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
}]>;

def : Pat<(ldaxr_1 GPR64sp:$addr),
          (SUBREG_TO_REG (i64 0), (LDAXRB GPR64sp:$addr), sub_32)>;
def : Pat<(ldaxr_2 GPR64sp:$addr),
          (SUBREG_TO_REG (i64 0), (LDAXRH GPR64sp:$addr), sub_32)>;
def : Pat<(ldaxr_4 GPR64sp:$addr),
          (SUBREG_TO_REG (i64 0), (LDAXRW GPR64sp:$addr), sub_32)>;
def : Pat<(ldaxr_8 GPR64sp:$addr), (LDAXRX GPR64sp:$addr)>;

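// As for LDXR above, explicit masking of a sub-word result is redundant and
// is folded into the plain LDAXR forms.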
def : Pat<(and (ldaxr_1 GPR64sp:$addr), 0xff),
          (SUBREG_TO_REG (i64 0), (LDAXRB GPR64sp:$addr), sub_32)>;
def : Pat<(and (ldaxr_2 GPR64sp:$addr), 0xffff),
          (SUBREG_TO_REG (i64 0), (LDAXRH GPR64sp:$addr), sub_32)>;
def : Pat<(and (ldaxr_4 GPR64sp:$addr), 0xffffffff),
          (SUBREG_TO_REG (i64 0), (LDAXRW GPR64sp:$addr), sub_32)>;

// Store-exclusives.

def stxr_1 : PatFrag<(ops node:$val, node:$ptr),
                     (int_aarch64_stxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;

def stxr_2 : PatFrag<(ops node:$val, node:$ptr),
                     (int_aarch64_stxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;

def stxr_4 : PatFrag<(ops node:$val, node:$ptr),
                     (int_aarch64_stxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

def stxr_8 : PatFrag<(ops node:$val, node:$ptr),
                     (int_aarch64_stxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
}]>;


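// The value operand of int_aarch64_stxr is i64; for the narrower stores the
// low 32 bits are extracted with EXTRACT_SUBREG so a W register can be
// passed to STXRB/STXRH/STXRW.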
def : Pat<(stxr_1 GPR64:$val, GPR64sp:$addr),
          (STXRB (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stxr_2 GPR64:$val, GPR64sp:$addr),
          (STXRH (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stxr_4 GPR64:$val, GPR64sp:$addr),
          (STXRW (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stxr_8 GPR64:$val, GPR64sp:$addr),
          (STXRX GPR64:$val, GPR64sp:$addr)>;

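// Zero-extensions and masks on the stored value are irrelevant for a
// sub-word store, so they can be dropped and the W register used directly.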
def : Pat<(stxr_1 (zext (and GPR32:$val, 0xff)), GPR64sp:$addr),
          (STXRB GPR32:$val, GPR64sp:$addr)>;
def : Pat<(stxr_2 (zext (and GPR32:$val, 0xffff)), GPR64sp:$addr),
          (STXRH GPR32:$val, GPR64sp:$addr)>;
def : Pat<(stxr_4 (zext GPR32:$val), GPR64sp:$addr),
          (STXRW GPR32:$val, GPR64sp:$addr)>;

def : Pat<(stxr_1 (and GPR64:$val, 0xff), GPR64sp:$addr),
          (STXRB (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stxr_2 (and GPR64:$val, 0xffff), GPR64sp:$addr),
          (STXRH (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stxr_4 (and GPR64:$val, 0xffffffff), GPR64sp:$addr),
          (STXRW (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;

// Store-release-exclusives.

def stlxr_1 : PatFrag<(ops node:$val, node:$ptr),
                     (int_aarch64_stlxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;

def stlxr_2 : PatFrag<(ops node:$val, node:$ptr),
                     (int_aarch64_stlxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;

def stlxr_4 : PatFrag<(ops node:$val, node:$ptr),
                     (int_aarch64_stlxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

def stlxr_8 : PatFrag<(ops node:$val, node:$ptr),
                     (int_aarch64_stlxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
}]>;


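// The STLXR patterns mirror the STXR ones above: the i64 value operand is
// narrowed with EXTRACT_SUBREG, and redundant zero-extensions or masks on
// sub-word values are folded away.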
def : Pat<(stlxr_1 GPR64:$val, GPR64sp:$addr),
          (STLXRB (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stlxr_2 GPR64:$val, GPR64sp:$addr),
          (STLXRH (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stlxr_4 GPR64:$val, GPR64sp:$addr),
          (STLXRW (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stlxr_8 GPR64:$val, GPR64sp:$addr),
          (STLXRX GPR64:$val, GPR64sp:$addr)>;

def : Pat<(stlxr_1 (zext (and GPR32:$val, 0xff)), GPR64sp:$addr),
          (STLXRB GPR32:$val, GPR64sp:$addr)>;
def : Pat<(stlxr_2 (zext (and GPR32:$val, 0xffff)), GPR64sp:$addr),
          (STLXRH GPR32:$val, GPR64sp:$addr)>;
def : Pat<(stlxr_4 (zext GPR32:$val), GPR64sp:$addr),
          (STLXRW GPR32:$val, GPR64sp:$addr)>;

def : Pat<(stlxr_1 (and GPR64:$val, 0xff), GPR64sp:$addr),
          (STLXRB (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stlxr_2 (and GPR64:$val, 0xffff), GPR64sp:$addr),
          (STLXRH (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stlxr_4 (and GPR64:$val, 0xffffffff), GPR64sp:$addr),
          (STLXRW (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;


// And clear exclusive.

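// The immediate is the CRm field of CLREX; 0xf corresponds to the plain
// "clrex" assembly form.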
def : Pat<(int_aarch64_clrex), (CLREX 0xf)>;