;; Machine description for ARM processor synchronization primitives.
;; Copyright (C) 2010-2018 Free Software Foundation, Inc.
;; Written by Marcus Shawcroft (marcus.shawcroft@arm.com)
;; 64bit Atomics by Dave Gilbert (david.gilbert@linaro.org)
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify it
;; under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful, but
;; WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;; General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.

(define_mode_attr sync_predtab
  [(QI "TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER")
   (HI "TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER")
   (SI "TARGET_HAVE_LDREX && TARGET_HAVE_MEMORY_BARRIER")
   (DI "TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN
	&& TARGET_HAVE_MEMORY_BARRIER")])

(define_code_iterator syncop [plus minus ior xor and])

(define_code_attr sync_optab
  [(ior "or") (xor "xor") (and "and") (plus "add") (minus "sub")])

(define_mode_attr sync_sfx
  [(QI "b") (HI "h") (SI "") (DI "d")])
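
;; As an illustration of how these iterators expand: the
;; "atomic_<sync_optab><mode>" pattern below yields, for (plus, SI), an insn
;; named "atomic_addsi", and <sync_sfx> supplies the byte/halfword/doubleword
;; suffixes of the exclusive accesses (ldrexb, ldrexh, ldrex, ldrexd).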

(define_expand "memory_barrier"
  [(set (match_dup 0)
	(unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BARRIER))]
  "TARGET_HAVE_MEMORY_BARRIER"
{
  operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
  MEM_VOLATILE_P (operands[0]) = 1;
})

(define_insn "*memory_barrier"
  [(set (match_operand:BLK 0 "" "")
	(unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BARRIER))]
  "TARGET_HAVE_MEMORY_BARRIER"
  {
    if (TARGET_HAVE_DMB)
      {
	return "dmb\\tish";
      }

    if (TARGET_HAVE_DMB_MCR)
      return "mcr\\tp15, 0, r0, c7, c10, 5";

    gcc_unreachable ();
  }
  [(set_attr "length" "4")
   (set_attr "conds" "unconditional")
   (set_attr "predicable" "no")])
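
;; For example, __atomic_thread_fence (__ATOMIC_SEQ_CST) is expected to expand
;; through "memory_barrier" above and emit "dmb ish" when the DMB instruction
;; is available, or the CP15 "mcr p15, 0, r0, c7, c10, 5" barrier on ARMv6
;; targets that lack it.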

(define_insn "atomic_load<mode>"
  [(set (match_operand:QHSI 0 "register_operand" "=r,r,l")
    (unspec_volatile:QHSI
      [(match_operand:QHSI 1 "arm_sync_memory_operand" "Q,Q,Q")
       (match_operand:SI 2 "const_int_operand" "n,Pf,n")]	;; model
      VUNSPEC_LDA))]
  "TARGET_HAVE_LDACQ"
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
    if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
      {
	if (TARGET_THUMB1)
	  return \"ldr<sync_sfx>\\t%0, %1\";
	else
	  return \"ldr<sync_sfx>%?\\t%0, %1\";
      }
    else
      {
	if (TARGET_THUMB1)
	  return \"lda<sync_sfx>\\t%0, %1\";
	else
	  return \"lda<sync_sfx>%?\\t%0, %1\";
      }
  }
  [(set_attr "arch" "32,v8mb,any")
   (set_attr "predicable" "yes")])
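
;; For instance, __atomic_load_n (p, __ATOMIC_ACQUIRE) on an int is expected
;; to match this pattern and emit "lda rD, [rN]", while __ATOMIC_RELAXED only
;; needs the single-copy atomicity of the access and falls back to a plain
;; "ldr".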

(define_insn "atomic_store<mode>"
  [(set (match_operand:QHSI 0 "memory_operand" "=Q,Q,Q")
    (unspec_volatile:QHSI
      [(match_operand:QHSI 1 "general_operand" "r,r,l")
       (match_operand:SI 2 "const_int_operand" "n,Pf,n")]	;; model
      VUNSPEC_STL))]
  "TARGET_HAVE_LDACQ"
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
    if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
      {
	if (TARGET_THUMB1)
	  return \"str<sync_sfx>\t%1, %0\";
	else
	  return \"str<sync_sfx>%?\t%1, %0\";
      }
    else
      {
	if (TARGET_THUMB1)
	  return \"stl<sync_sfx>\t%1, %0\";
	else
	  return \"stl<sync_sfx>%?\t%1, %0\";
      }
  }
  [(set_attr "arch" "32,v8mb,any")
   (set_attr "predicable" "yes")])
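
;; Likewise, __atomic_store_n (p, x, __ATOMIC_RELEASE) on an int is expected
;; to emit "stl rT, [rN]" here, whereas a relaxed store uses a plain "str".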

;; An LDRD instruction usable by the atomic_loaddi expander on LPAE targets.

(define_insn "arm_atomic_loaddi2_ldrd"
  [(set (match_operand:DI 0 "register_operand" "=r")
	(unspec_volatile:DI
	  [(match_operand:DI 1 "arm_sync_memory_operand" "Q")]
	    VUNSPEC_LDRD_ATOMIC))]
  "ARM_DOUBLEWORD_ALIGN && TARGET_HAVE_LPAE"
  "ldrd%?\t%0, %H0, %C1"
  [(set_attr "predicable" "yes")])

;; There are three ways to expand this, depending on the architecture
;; features available.  As for barriers: a load needs a barrier after it
;; on all non-relaxed memory models, except when the load itself has
;; acquire semantics (as on ARMv8-A).

(define_expand "atomic_loaddi"
  [(match_operand:DI 0 "s_register_operand")		;; val out
   (match_operand:DI 1 "mem_noofs_operand")		;; memory
   (match_operand:SI 2 "const_int_operand")]		;; model
  "(TARGET_HAVE_LDREXD || TARGET_HAVE_LPAE || TARGET_HAVE_LDACQEXD)
   && ARM_DOUBLEWORD_ALIGN"
{
  memmodel model = memmodel_from_int (INTVAL (operands[2]));

  /* For ARMv8-A we can use an LDAEXD to atomically load two 32-bit registers
     when acquire or stronger semantics are needed.  When the relaxed model is
     used this can be relaxed to a normal LDRD.  */
  if (TARGET_HAVE_LDACQEXD)
    {
      if (is_mm_relaxed (model))
	emit_insn (gen_arm_atomic_loaddi2_ldrd (operands[0], operands[1]));
      else
	emit_insn (gen_arm_load_acquire_exclusivedi (operands[0], operands[1]));

      DONE;
    }

  /* On LPAE targets LDRD and STRD accesses to 64-bit aligned
     locations are 64-bit single-copy atomic.  We still need barriers in the
     appropriate places to implement the ordering constraints.  */
  if (TARGET_HAVE_LPAE)
    emit_insn (gen_arm_atomic_loaddi2_ldrd (operands[0], operands[1]));
  else
    emit_insn (gen_arm_load_exclusivedi (operands[0], operands[1]));

  /* All non-relaxed models need a barrier after the load when load-acquire
     instructions are not available.  */
  if (!is_mm_relaxed (model))
    expand_mem_thread_fence (model);

  DONE;
})
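
;; Illustrative expansions of a 64-bit __atomic_load_n with __ATOMIC_SEQ_CST:
;; an ARMv8-A target can use a single "ldaexd r0, r1, [rN]"; an LPAE target
;; uses "ldrd r0, r1, [rN]" followed by "dmb ish"; otherwise "ldrexd" followed
;; by "dmb ish" is used.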

(define_expand "atomic_compare_and_swap<mode>"
  [(match_operand:SI 0 "s_register_operand" "")		;; bool out
   (match_operand:QHSD 1 "s_register_operand" "")	;; val out
   (match_operand:QHSD 2 "mem_noofs_operand" "")	;; memory
   (match_operand:QHSD 3 "general_operand" "")		;; expected
   (match_operand:QHSD 4 "s_register_operand" "")	;; desired
   (match_operand:SI 5 "const_int_operand")		;; is_weak
   (match_operand:SI 6 "const_int_operand")		;; mod_s
   (match_operand:SI 7 "const_int_operand")]		;; mod_f
  "<sync_predtab>"
{
  arm_expand_compare_and_swap (operands);
  DONE;
})
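
;; This named pattern is what the middle end uses for
;; __atomic_compare_exchange_n and friends; arm_expand_compare_and_swap picks
;; one of the "_1" patterns below, and the actual exclusive-access loop is
;; only emitted by arm_split_compare_and_swap after reload.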

;; Constraints of this pattern must be at least as strict as those of the
;; cbranchsi operations in thumb1.md and aim to be as permissive as possible.
(define_insn_and_split "atomic_compare_and_swap<CCSI:arch><NARROW:mode>_1"
  [(set (match_operand:CCSI 0 "cc_register_operand" "=&c,&l,&l,&l")	;; bool out
	(unspec_volatile:CCSI [(const_int 0)] VUNSPEC_ATOMIC_CAS))
   (set (match_operand:SI 1 "s_register_operand" "=&r,&l,&0,&l*h")	;; val out
	(zero_extend:SI
	  (match_operand:NARROW 2 "mem_noofs_operand" "+Ua,Ua,Ua,Ua")))	;; memory
   (set (match_dup 2)
	(unspec_volatile:NARROW
	  [(match_operand:SI 3 "arm_add_operand" "rIL,lIL*h,J,*r")	;; expected
	   (match_operand:NARROW 4 "s_register_operand" "r,r,r,r")	;; desired
	   (match_operand:SI 5 "const_int_operand")		;; is_weak
	   (match_operand:SI 6 "const_int_operand")		;; mod_s
	   (match_operand:SI 7 "const_int_operand")]		;; mod_f
	  VUNSPEC_ATOMIC_CAS))
   (clobber (match_scratch:SI 8 "=&r,X,X,X"))]
  "<sync_predtab>"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    arm_split_compare_and_swap (operands);
    DONE;
  }
  [(set_attr "arch" "32,v8mb,v8mb,v8mb")])

(define_mode_attr cas_cmp_operand
  [(SI "arm_add_operand") (DI "cmpdi_operand")])
(define_mode_attr cas_cmp_str
  [(SI "rIL") (DI "rDi")])

;; Constraints of this pattern must be at least as strict as those of the
;; cbranchsi operations in thumb1.md and aim to be as permissive as possible.
(define_insn_and_split "atomic_compare_and_swap<CCSI:arch><SIDI:mode>_1"
  [(set (match_operand:CCSI 0 "cc_register_operand" "=&c,&l,&l,&l")	;; bool out
	(unspec_volatile:CCSI [(const_int 0)] VUNSPEC_ATOMIC_CAS))
   (set (match_operand:SIDI 1 "s_register_operand" "=&r,&l,&0,&l*h")	;; val out
	(match_operand:SIDI 2 "mem_noofs_operand" "+Ua,Ua,Ua,Ua"))	;; memory
   (set (match_dup 2)
	(unspec_volatile:SIDI
	  [(match_operand:SIDI 3 "<cas_cmp_operand>" "<cas_cmp_str>,lIL*h,J,*r") ;; expect
	   (match_operand:SIDI 4 "s_register_operand" "r,r,r,r")	;; desired
	   (match_operand:SI 5 "const_int_operand")		;; is_weak
	   (match_operand:SI 6 "const_int_operand")		;; mod_s
	   (match_operand:SI 7 "const_int_operand")]		;; mod_f
	  VUNSPEC_ATOMIC_CAS))
   (clobber (match_scratch:SI 8 "=&r,X,X,X"))]
  "<sync_predtab>"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    arm_split_compare_and_swap (operands);
    DONE;
  }
  [(set_attr "arch" "32,v8mb,v8mb,v8mb")])
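
;; After reload, arm_split_compare_and_swap turns the two patterns above into
;; a retry loop of roughly this shape (strong CAS, ARM state):
;;	.Lretry:
;;		ldrex	rR, [rM]	@ load current value
;;		cmp	rR, rE		@ compare with expected
;;		bne	.Ldone
;;		strex	rT, rD, [rM]	@ try to store desired
;;		cmp	rT, #0		@ 0 means the store succeeded
;;		bne	.Lretry
;;	.Ldone:
;; with ldaex/stlex or explicit barriers substituted as the memory model
;; requires.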

(define_insn_and_split "atomic_exchange<mode>"
  [(set (match_operand:QHSD 0 "s_register_operand" "=&r,&r")	;; output
	(match_operand:QHSD 1 "mem_noofs_operand" "+Ua,Ua"))	;; memory
   (set (match_dup 1)
	(unspec_volatile:QHSD
	  [(match_operand:QHSD 2 "s_register_operand" "r,r")	;; input
	   (match_operand:SI 3 "const_int_operand" "")]		;; model
	  VUNSPEC_ATOMIC_XCHG))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:SI 4 "=&r,&l"))]
  "<sync_predtab>"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    arm_split_atomic_op (SET, operands[0], NULL, operands[1],
			 operands[2], operands[3], operands[4]);
    DONE;
  }
  [(set_attr "arch" "32,v8mb")])
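
;; The post-reload split is again an exclusive-access retry loop, roughly:
;;		ldrex	rOld, [rM]
;;		strex	rT, rNew, [rM]
;;		cmp	rT, #0
;;		bne	<retry>
;; leaving the previous contents of the memory location in operand 0.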

;; The following mode and code attributes are defined here because they are
;; specific to atomics and are not needed anywhere else.

(define_mode_attr atomic_op_operand
  [(QI "reg_or_int_operand")
   (HI "reg_or_int_operand")
   (SI "reg_or_int_operand")
   (DI "s_register_operand")])

(define_mode_attr atomic_op_str
  [(QI "rn") (HI "rn") (SI "rn") (DI "r")])

(define_code_attr thumb1_atomic_op_str
  [(ior "l,l") (xor "l,l") (and "l,l") (plus "lIJL,r") (minus "lPd,lPd")])

(define_code_attr thumb1_atomic_newop_str
  [(ior "&l,&l") (xor "&l,&l") (and "&l,&l") (plus "&l,&r") (minus "&l,&l")])

;; Constraints of this pattern must be at least as strict as those of the
;; non-atomic operations in thumb1.md and aim to be as permissive as possible.
(define_insn_and_split "atomic_<sync_optab><mode>"
  [(set (match_operand:QHSD 0 "mem_noofs_operand" "+Ua,Ua,Ua")
	(unspec_volatile:QHSD
	  [(syncop:QHSD (match_dup 0)
	     (match_operand:QHSD 1 "<atomic_op_operand>" "<atomic_op_str>,<thumb1_atomic_op_str>"))
	   (match_operand:SI 2 "const_int_operand")]		;; model
	  VUNSPEC_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:QHSD 3 "=&r,<thumb1_atomic_newop_str>"))
   (clobber (match_scratch:SI 4 "=&r,&l,&l"))]
  "<sync_predtab>"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    arm_split_atomic_op (<CODE>, NULL, operands[3], operands[0],
			 operands[1], operands[2], operands[4]);
    DONE;
  }
  [(set_attr "arch" "32,v8mb,v8mb")])
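
;; For example, __atomic_fetch_add (p, 1, __ATOMIC_RELAXED) with an unused
;; result can map onto "atomic_addsi"; the post-reload split then produces
;; roughly:
;;		ldrex	rT, [rM]
;;		add	rT, rT, #1
;;		strex	rS, rT, [rM]
;;		cmp	rS, #0
;;		bne	<retry>
;; with no barriers, since the model is relaxed.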

;; Constraints of this pattern must be at least as strict as those of the
;; non-atomic NANDs in thumb1.md and aim to be as permissive as possible.
(define_insn_and_split "atomic_nand<mode>"
  [(set (match_operand:QHSD 0 "mem_noofs_operand" "+Ua,Ua")
	(unspec_volatile:QHSD
	  [(not:QHSD
	     (and:QHSD (match_dup 0)
	       (match_operand:QHSD 1 "<atomic_op_operand>" "<atomic_op_str>,l")))
	   (match_operand:SI 2 "const_int_operand")]		;; model
	  VUNSPEC_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:QHSD 3 "=&r,&l"))
   (clobber (match_scratch:SI 4 "=&r,&l"))]
  "<sync_predtab>"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    arm_split_atomic_op (NOT, NULL, operands[3], operands[0],
			 operands[1], operands[2], operands[4]);
    DONE;
  }
  [(set_attr "arch" "32,v8mb")])

;; Three alternatives are needed to represent constraints after the split from
;; thumb1_addsi3: (i) the case where operand 1 and the destination can be in
;; different registers, (ii) the case where they are in the same low register
;; and (iii) the case where they are in the same register without restriction
;; on the register.  We slightly disparage the alternatives that require
;; copying the old value into the register for the new value (see bind_old_new
;; in arm_split_atomic_op).
(define_code_attr thumb1_atomic_fetch_op_str
  [(ior "l,l,l") (xor "l,l,l") (and "l,l,l") (plus "lL,?IJ,?r") (minus "lPd,lPd,lPd")])

(define_code_attr thumb1_atomic_fetch_newop_str
  [(ior "&l,&l,&l") (xor "&l,&l,&l") (and "&l,&l,&l") (plus "&l,&l,&r") (minus "&l,&l,&l")])

(define_code_attr thumb1_atomic_fetch_oldop_str
  [(ior "&r,&r,&r") (xor "&r,&r,&r") (and "&r,&r,&r") (plus "&l,&r,&r") (minus "&l,&l,&l")])

;; Constraints of this pattern must be at least as strict as those of the
;; non-atomic operations in thumb1.md and aim to be as permissive as possible.
(define_insn_and_split "atomic_fetch_<sync_optab><mode>"
  [(set (match_operand:QHSD 0 "s_register_operand" "=&r,<thumb1_atomic_fetch_oldop_str>")
	(match_operand:QHSD 1 "mem_noofs_operand" "+Ua,Ua,Ua,Ua"))
   (set (match_dup 1)
	(unspec_volatile:QHSD
	  [(syncop:QHSD (match_dup 1)
	     (match_operand:QHSD 2 "<atomic_op_operand>" "<atomic_op_str>,<thumb1_atomic_fetch_op_str>"))
	   (match_operand:SI 3 "const_int_operand")]		;; model
	  VUNSPEC_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:QHSD 4 "=&r,<thumb1_atomic_fetch_newop_str>"))
   (clobber (match_scratch:SI 5 "=&r,&l,&l,&l"))]
  "<sync_predtab>"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    arm_split_atomic_op (<CODE>, operands[0], operands[4], operands[1],
			 operands[2], operands[3], operands[5]);
    DONE;
  }
  [(set_attr "arch" "32,v8mb,v8mb,v8mb")])
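
;; Here operand 0 receives the value observed before the operation (what
;; __atomic_fetch_<op> returns), while scratch 4 holds the newly computed
;; value that is stored back with the exclusive store.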

;; Constraints of this pattern must be at least as strict as those of the
;; non-atomic NANDs in thumb1.md and aim to be as permissive as possible.
(define_insn_and_split "atomic_fetch_nand<mode>"
  [(set (match_operand:QHSD 0 "s_register_operand" "=&r,&r")
	(match_operand:QHSD 1 "mem_noofs_operand" "+Ua,Ua"))
   (set (match_dup 1)
	(unspec_volatile:QHSD
	  [(not:QHSD
	     (and:QHSD (match_dup 1)
	       (match_operand:QHSD 2 "<atomic_op_operand>" "<atomic_op_str>,l")))
	   (match_operand:SI 3 "const_int_operand")]		;; model
	  VUNSPEC_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:QHSD 4 "=&r,&l"))
   (clobber (match_scratch:SI 5 "=&r,&l"))]
  "<sync_predtab>"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    arm_split_atomic_op (NOT, operands[0], operands[4], operands[1],
			 operands[2], operands[3], operands[5]);
    DONE;
  }
  [(set_attr "arch" "32,v8mb")])

;; Constraints of this pattern must be at least as strict as those of the
;; non-atomic operations in thumb1.md and aim to be as permissive as possible.
(define_insn_and_split "atomic_<sync_optab>_fetch<mode>"
  [(set (match_operand:QHSD 0 "s_register_operand" "=&r,<thumb1_atomic_newop_str>")
	(syncop:QHSD
	  (match_operand:QHSD 1 "mem_noofs_operand" "+Ua,Ua,Ua")
	  (match_operand:QHSD 2 "<atomic_op_operand>" "<atomic_op_str>,<thumb1_atomic_op_str>")))
   (set (match_dup 1)
	(unspec_volatile:QHSD
	  [(match_dup 1) (match_dup 2)
	   (match_operand:SI 3 "const_int_operand")]		;; model
	  VUNSPEC_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:SI 4 "=&r,&l,&l"))]
  "<sync_predtab>"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    arm_split_atomic_op (<CODE>, NULL, operands[0], operands[1],
			 operands[2], operands[3], operands[4]);
    DONE;
  }
  [(set_attr "arch" "32,v8mb,v8mb")])
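
;; In contrast to the fetch_<op> patterns above, operand 0 here is the value
;; after the operation, i.e. what __atomic_<op>_fetch returns.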

;; Constraints of this pattern must be at least as strict as those of the
;; non-atomic NANDs in thumb1.md and aim to be as permissive as possible.
(define_insn_and_split "atomic_nand_fetch<mode>"
  [(set (match_operand:QHSD 0 "s_register_operand" "=&r,&l")
	(not:QHSD
	  (and:QHSD
	    (match_operand:QHSD 1 "mem_noofs_operand" "+Ua,Ua")
	    (match_operand:QHSD 2 "<atomic_op_operand>" "<atomic_op_str>,l"))))
   (set (match_dup 1)
	(unspec_volatile:QHSD
	  [(match_dup 1) (match_dup 2)
	   (match_operand:SI 3 "const_int_operand")]		;; model
	  VUNSPEC_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:SI 4 "=&r,&l"))]
  "<sync_predtab>"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    arm_split_atomic_op (NOT, NULL, operands[0], operands[1],
			 operands[2], operands[3], operands[4]);
    DONE;
  }
  [(set_attr "arch" "32,v8mb")])

(define_insn "arm_load_exclusive<mode>"
  [(set (match_operand:SI 0 "s_register_operand" "=r,r")
        (zero_extend:SI
	  (unspec_volatile:NARROW
	    [(match_operand:NARROW 1 "mem_noofs_operand" "Ua,Ua")]
	    VUNSPEC_LL)))]
  "TARGET_HAVE_LDREXBH"
  "@
   ldrex<sync_sfx>%?\t%0, %C1
   ldrex<sync_sfx>\t%0, %C1"
  [(set_attr "arch" "32,v8mb")
   (set_attr "predicable" "yes")])

(define_insn "arm_load_acquire_exclusive<mode>"
  [(set (match_operand:SI 0 "s_register_operand" "=r,r")
        (zero_extend:SI
	  (unspec_volatile:NARROW
	    [(match_operand:NARROW 1 "mem_noofs_operand" "Ua,Ua")]
	    VUNSPEC_LAX)))]
  "TARGET_HAVE_LDACQ"
  "@
   ldaex<sync_sfx>%?\\t%0, %C1
   ldaex<sync_sfx>\\t%0, %C1"
  [(set_attr "arch" "32,v8mb")
   (set_attr "predicable" "yes")])

(define_insn "arm_load_exclusivesi"
  [(set (match_operand:SI 0 "s_register_operand" "=r,r")
	(unspec_volatile:SI
	  [(match_operand:SI 1 "mem_noofs_operand" "Ua,Ua")]
	  VUNSPEC_LL))]
  "TARGET_HAVE_LDREX"
  "@
   ldrex%?\t%0, %C1
   ldrex\t%0, %C1"
  [(set_attr "arch" "32,v8mb")
   (set_attr "predicable" "yes")])

(define_insn "arm_load_acquire_exclusivesi"
  [(set (match_operand:SI 0 "s_register_operand" "=r,r")
	(unspec_volatile:SI
	  [(match_operand:SI 1 "mem_noofs_operand" "Ua,Ua")]
	  VUNSPEC_LAX))]
  "TARGET_HAVE_LDACQ"
  "@
   ldaex%?\t%0, %C1
   ldaex\t%0, %C1"
  [(set_attr "arch" "32,v8mb")
   (set_attr "predicable" "yes")])

(define_insn "arm_load_exclusivedi"
  [(set (match_operand:DI 0 "s_register_operand" "=r")
	(unspec_volatile:DI
	  [(match_operand:DI 1 "mem_noofs_operand" "Ua")]
	  VUNSPEC_LL))]
  "TARGET_HAVE_LDREXD"
  "ldrexd%?\t%0, %H0, %C1"
  [(set_attr "predicable" "yes")])

(define_insn "arm_load_acquire_exclusivedi"
  [(set (match_operand:DI 0 "s_register_operand" "=r")
	(unspec_volatile:DI
	  [(match_operand:DI 1 "mem_noofs_operand" "Ua")]
	  VUNSPEC_LAX))]
  "TARGET_HAVE_LDACQEXD && ARM_DOUBLEWORD_ALIGN"
  "ldaexd%?\t%0, %H0, %C1"
  [(set_attr "predicable" "yes")])

(define_insn "arm_store_exclusive<mode>"
  [(set (match_operand:SI 0 "s_register_operand" "=&r")
	(unspec_volatile:SI [(const_int 0)] VUNSPEC_SC))
   (set (match_operand:QHSD 1 "mem_noofs_operand" "=Ua")
	(unspec_volatile:QHSD
	  [(match_operand:QHSD 2 "s_register_operand" "r")]
	  VUNSPEC_SC))]
  "<sync_predtab>"
  {
    if (<MODE>mode == DImode)
      {
	/* The restrictions on target registers in ARM mode are that the two
	   registers are consecutive and the first one is even; Thumb is
	   actually more flexible, but DI should give us this anyway.
	   Note that the first register always gets the lowest word in
	   memory.  */
	gcc_assert ((REGNO (operands[2]) & 1) == 0 || TARGET_THUMB2);
	return "strexd%?\t%0, %2, %H2, %C1";
      }
    if (TARGET_THUMB1)
      return "strex<sync_sfx>\t%0, %2, %C1";
    else
      return "strex<sync_sfx>%?\t%0, %2, %C1";
  }
  [(set_attr "predicable" "yes")])
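
;; Operand 0 receives the STREX status: 0 if the store succeeded while the
;; exclusive monitor was still held, 1 if it failed; the split loops above
;; retry on a non-zero result.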

(define_insn "arm_store_release_exclusivedi"
  [(set (match_operand:SI 0 "s_register_operand" "=&r")
	(unspec_volatile:SI [(const_int 0)] VUNSPEC_SLX))
   (set (match_operand:DI 1 "mem_noofs_operand" "=Ua")
	(unspec_volatile:DI
	  [(match_operand:DI 2 "s_register_operand" "r")]
	  VUNSPEC_SLX))]
  "TARGET_HAVE_LDACQEXD && ARM_DOUBLEWORD_ALIGN"
  {
    /* See comment in arm_store_exclusive<mode> above.  */
    gcc_assert ((REGNO (operands[2]) & 1) == 0 || TARGET_THUMB2);
    return "stlexd%?\t%0, %2, %H2, %C1";
  }
  [(set_attr "predicable" "yes")])

(define_insn "arm_store_release_exclusive<mode>"
  [(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
	(unspec_volatile:SI [(const_int 0)] VUNSPEC_SLX))
   (set (match_operand:QHSI 1 "mem_noofs_operand" "=Ua,Ua")
	(unspec_volatile:QHSI
	  [(match_operand:QHSI 2 "s_register_operand" "r,r")]
	  VUNSPEC_SLX))]
  "TARGET_HAVE_LDACQ"
  "@
   stlex<sync_sfx>%?\t%0, %2, %C1
   stlex<sync_sfx>\t%0, %2, %C1"
  [(set_attr "arch" "32,v8mb")
   (set_attr "predicable" "yes")])