;; Machine description for AArch64 processor synchronization primitives.
;; Copyright (C) 2009-2021 Free Software Foundation, Inc.
;; Contributed by ARM Ltd.
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify it
;; under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful, but
;; WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;; General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.

;; Instruction patterns.

(define_expand "@atomic_compare_and_swap<mode>"
  [(match_operand:SI 0 "register_operand" "")			;; bool out
   (match_operand:ALLI_TI 1 "register_operand" "")		;; val out
   (match_operand:ALLI_TI 2 "aarch64_sync_memory_operand" "")	;; memory
   (match_operand:ALLI_TI 3 "nonmemory_operand" "")		;; expected
   (match_operand:ALLI_TI 4 "aarch64_reg_or_zero" "")		;; desired
   (match_operand:SI 5 "const_int_operand")			;; is_weak
   (match_operand:SI 6 "const_int_operand")			;; mod_s
   (match_operand:SI 7 "const_int_operand")]			;; mod_f
  ""
  {
    aarch64_expand_compare_and_swap (operands);
    DONE;
  }
)
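
;; The expander above defers all of the work to
;; aarch64_expand_compare_and_swap, which chooses between the LSE CAS
;; patterns, the out-of-line atomic helpers, and the LL/SC
;; insn_and_split patterns below.  As a rough illustration (register
;; numbers are arbitrary), a strong sequentially-consistent 32-bit
;; compare-and-swap without LSE is eventually split into a loop of the
;; form:
;;
;;	.L1:	ldaxr	w1, [x0]
;;		cmp	w1, w2
;;		bne	.L2
;;		stlxr	w3, w4, [x0]
;;		cbnz	w3, .L1
;;	.L2: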

(define_mode_attr cas_short_expected_pred
  [(QI "aarch64_reg_or_imm") (HI "aarch64_plushi_operand")])
(define_mode_attr cas_short_expected_imm
  [(QI "n") (HI "Uph")])

(define_insn_and_split "@aarch64_compare_and_swap<mode>"
  [(set (reg:CC CC_REGNUM)					;; bool out
    (unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW))
   (set (match_operand:SI 0 "register_operand" "=&r")		;; val out
    (zero_extend:SI
      (match_operand:SHORT 1 "aarch64_sync_memory_operand" "+Q"))) ;; memory
   (set (match_dup 1)
    (unspec_volatile:SHORT
      [(match_operand:SHORT 2 "<cas_short_expected_pred>"
			      "r<cas_short_expected_imm>")	;; expected
       (match_operand:SHORT 3 "aarch64_reg_or_zero" "rZ")	;; desired
       (match_operand:SI 4 "const_int_operand")			;; is_weak
       (match_operand:SI 5 "const_int_operand")			;; mod_s
       (match_operand:SI 6 "const_int_operand")]		;; mod_f
      UNSPECV_ATOMIC_CMPSW))
   (clobber (match_scratch:SI 7 "=&r"))]
  ""
  "#"
  "&& epilogue_completed"
  [(const_int 0)]
  {
    aarch64_split_compare_and_swap (operands);
    DONE;
  }
)

(define_insn_and_split "@aarch64_compare_and_swap<mode>"
  [(set (reg:CC CC_REGNUM)					;; bool out
    (unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW))
   (set (match_operand:GPI 0 "register_operand" "=&r")		;; val out
    (match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q"))   ;; memory
   (set (match_dup 1)
    (unspec_volatile:GPI
      [(match_operand:GPI 2 "aarch64_plus_operand" "rIJ")	;; expect
       (match_operand:GPI 3 "aarch64_reg_or_zero" "rZ")		;; desired
       (match_operand:SI 4 "const_int_operand")			;; is_weak
       (match_operand:SI 5 "const_int_operand")			;; mod_s
       (match_operand:SI 6 "const_int_operand")]		;; mod_f
      UNSPECV_ATOMIC_CMPSW))
   (clobber (match_scratch:SI 7 "=&r"))]
  ""
  "#"
  "&& epilogue_completed"
  [(const_int 0)]
  {
    aarch64_split_compare_and_swap (operands);
    DONE;
  }
)

(define_insn_and_split "@aarch64_compare_and_swap<mode>"
  [(set (reg:CC CC_REGNUM)					;; bool out
    (unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW))
   (set (match_operand:JUST_TI 0 "register_operand" "=&r")	;; val out
    (match_operand:JUST_TI 1 "aarch64_sync_memory_operand" "+Q")) ;; memory
   (set (match_dup 1)
    (unspec_volatile:JUST_TI
      [(match_operand:JUST_TI 2 "aarch64_reg_or_zero" "rZ")	;; expect
       (match_operand:JUST_TI 3 "aarch64_reg_or_zero" "rZ")	;; desired
       (match_operand:SI 4 "const_int_operand")			;; is_weak
       (match_operand:SI 5 "const_int_operand")			;; mod_s
       (match_operand:SI 6 "const_int_operand")]		;; mod_f
      UNSPECV_ATOMIC_CMPSW))
   (clobber (match_scratch:SI 7 "=&r"))]
  ""
  "#"
  "&& epilogue_completed"
  [(const_int 0)]
  {
    aarch64_split_compare_and_swap (operands);
    DONE;
  }
)

(define_insn "@aarch64_compare_and_swap<mode>_lse"
  [(set (match_operand:SI 0 "register_operand" "+r")		;; val out
    (zero_extend:SI
     (match_operand:SHORT 1 "aarch64_sync_memory_operand" "+Q"))) ;; memory
   (set (match_dup 1)
    (unspec_volatile:SHORT
      [(match_dup 0)						;; expected
       (match_operand:SHORT 2 "aarch64_reg_or_zero" "rZ")	;; desired
       (match_operand:SI 3 "const_int_operand")]		;; mod_s
      UNSPECV_ATOMIC_CMPSW))]
  "TARGET_LSE"
{
  enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
  if (is_mm_relaxed (model))
    return "cas<atomic_sfx>\t%<w>0, %<w>2, %1";
  else if (is_mm_acquire (model) || is_mm_consume (model))
    return "casa<atomic_sfx>\t%<w>0, %<w>2, %1";
  else if (is_mm_release (model))
    return "casl<atomic_sfx>\t%<w>0, %<w>2, %1";
  else
    return "casal<atomic_sfx>\t%<w>0, %<w>2, %1";
})
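
;; The LSE mnemonics follow a single suffix convention: no suffix is
;; relaxed, "a" adds acquire semantics, "l" adds release semantics and
;; "al" gives both (CAS/CASA/CASL/CASAL here).  The SWP and LD<OP>
;; patterns further down use the same scheme.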

(define_insn "@aarch64_compare_and_swap<mode>_lse"
  [(set (match_operand:GPI 0 "register_operand" "+r")		;; val out
    (match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q"))   ;; memory
   (set (match_dup 1)
    (unspec_volatile:GPI
      [(match_dup 0)						;; expected
       (match_operand:GPI 2 "aarch64_reg_or_zero" "rZ")		;; desired
       (match_operand:SI 3 "const_int_operand")]		;; mod_s
      UNSPECV_ATOMIC_CMPSW))]
  "TARGET_LSE"
{
  enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
  if (is_mm_relaxed (model))
    return "cas<atomic_sfx>\t%<w>0, %<w>2, %1";
  else if (is_mm_acquire (model) || is_mm_consume (model))
    return "casa<atomic_sfx>\t%<w>0, %<w>2, %1";
  else if (is_mm_release (model))
    return "casl<atomic_sfx>\t%<w>0, %<w>2, %1";
  else
    return "casal<atomic_sfx>\t%<w>0, %<w>2, %1";
})

(define_insn "@aarch64_compare_and_swap<mode>_lse"
  [(set (match_operand:JUST_TI 0 "register_operand" "+r")	;; val out
    (match_operand:JUST_TI 1 "aarch64_sync_memory_operand" "+Q")) ;; memory
   (set (match_dup 1)
    (unspec_volatile:JUST_TI
      [(match_dup 0)						;; expect
       (match_operand:JUST_TI 2 "register_operand" "r")		;; desired
       (match_operand:SI 3 "const_int_operand")]		;; mod_s
      UNSPECV_ATOMIC_CMPSW))]
  "TARGET_LSE"
{
  enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
  if (is_mm_relaxed (model))
    return "casp\t%0, %R0, %2, %R2, %1";
  else if (is_mm_acquire (model) || is_mm_consume (model))
    return "caspa\t%0, %R0, %2, %R2, %1";
  else if (is_mm_release (model))
    return "caspl\t%0, %R0, %2, %R2, %1";
  else
    return "caspal\t%0, %R0, %2, %R2, %1";
})
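
;; CASP operates on register pairs: the architecture requires both the
;; compare-and-swap pair and the new-data pair to start at an
;; even-numbered register, with the high 64 bits in the following odd
;; register.  TImode values in general registers are only allocated to
;; such even/odd pairs, and %R0/%R2 print the high halves.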

(define_expand "atomic_exchange<mode>"
 [(match_operand:ALLI 0 "register_operand")
  (match_operand:ALLI 1 "aarch64_sync_memory_operand")
  (match_operand:ALLI 2 "aarch64_reg_or_zero")
  (match_operand:SI 3 "const_int_operand")]
  ""
  {
    /* Use an atomic SWP when available.  */
    if (TARGET_LSE)
      {
	emit_insn (gen_aarch64_atomic_exchange<mode>_lse
		   (operands[0], operands[1], operands[2], operands[3]));
      }
    else if (TARGET_OUTLINE_ATOMICS)
      {
	machine_mode mode = <MODE>mode;
	rtx func = aarch64_atomic_ool_func (mode, operands[3],
					    &aarch64_ool_swp_names);
	rtx rval = emit_library_call_value (func, operands[0], LCT_NORMAL,
					    mode, operands[2], mode,
					    XEXP (operands[1], 0), Pmode);
        emit_move_insn (operands[0], rval);
      }
    else
      {
	emit_insn (gen_aarch64_atomic_exchange<mode>
		   (operands[0], operands[1], operands[2], operands[3]));
      }
    DONE;
  }
)
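
;; As a rough illustration of the three strategies above,
;; __atomic_exchange_n (p, v, __ATOMIC_ACQ_REL) on a 32-bit object
;; becomes a single "swpal w1, w0, [x0]" with LSE (new value in w1, old
;; value returned in w0; registers illustrative); with -moutline-atomics
;; it becomes a call to a libgcc helper (named along the lines of
;; __aarch64_swp4_acq_rel); otherwise it is split after
;; epilogue_completed into an LDAXR/STLXR retry loop.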

(define_insn_and_split "aarch64_atomic_exchange<mode>"
  [(set (match_operand:ALLI 0 "register_operand" "=&r")		;; output
    (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))	;; memory
   (set (match_dup 1)
    (unspec_volatile:ALLI
      [(match_operand:ALLI 2 "aarch64_reg_or_zero" "rZ")	;; input
       (match_operand:SI 3 "const_int_operand" "")]		;; model
      UNSPECV_ATOMIC_EXCHG))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:SI 4 "=&r"))]
  ""
  "#"
  "&& epilogue_completed"
  [(const_int 0)]
  {
    aarch64_split_atomic_op (SET, operands[0], NULL, operands[1],
			     operands[2], operands[3], operands[4]);
    DONE;
  }
)

(define_insn "aarch64_atomic_exchange<mode>_lse"
  [(set (match_operand:ALLI 0 "register_operand" "=r")
    (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
   (set (match_dup 1)
    (unspec_volatile:ALLI
      [(match_operand:ALLI 2 "aarch64_reg_or_zero" "rZ")
       (match_operand:SI 3 "const_int_operand" "")]
      UNSPECV_ATOMIC_EXCHG))]
  "TARGET_LSE"
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
    if (is_mm_relaxed (model))
      return "swp<atomic_sfx>\t%<w>2, %<w>0, %1";
    else if (is_mm_acquire (model) || is_mm_consume (model))
      return "swpa<atomic_sfx>\t%<w>2, %<w>0, %1";
    else if (is_mm_release (model))
      return "swpl<atomic_sfx>\t%<w>2, %<w>0, %1";
    else
      return "swpal<atomic_sfx>\t%<w>2, %<w>0, %1";
  }
)

(define_expand "atomic_<atomic_optab><mode>"
 [(match_operand:ALLI 0 "aarch64_sync_memory_operand")
  (atomic_op:ALLI
   (match_operand:ALLI 1 "<atomic_op_operand>")
   (match_operand:SI 2 "const_int_operand"))]
  ""
  {
    rtx (*gen) (rtx, rtx, rtx);

    /* Use an atomic load-operate instruction when possible.  */
    if (TARGET_LSE)
      {
	switch (<CODE>)
	  {
	  case MINUS:
	    operands[1] = expand_simple_unop (<MODE>mode, NEG, operands[1],
					      NULL, 1);
	    /* fallthru */
	  case PLUS:
	    gen = gen_aarch64_atomic_add<mode>_lse;
	    break;
	  case IOR:
	    gen = gen_aarch64_atomic_ior<mode>_lse;
	    break;
	  case XOR:
	    gen = gen_aarch64_atomic_xor<mode>_lse;
	    break;
	  case AND:
	    operands[1] = expand_simple_unop (<MODE>mode, NOT, operands[1],
					      NULL, 1);
	    gen = gen_aarch64_atomic_bic<mode>_lse;
	    break;
	  default:
	    gcc_unreachable ();
	  }
	operands[1] = force_reg (<MODE>mode, operands[1]);
      }
    else if (TARGET_OUTLINE_ATOMICS)
      {
        const atomic_ool_names *names;
	switch (<CODE>)
	  {
	  case MINUS:
	    operands[1] = expand_simple_unop (<MODE>mode, NEG, operands[1],
					      NULL, 1);
	    /* fallthru */
	  case PLUS:
	    names = &aarch64_ool_ldadd_names;
	    break;
	  case IOR:
	    names = &aarch64_ool_ldset_names;
	    break;
	  case XOR:
	    names = &aarch64_ool_ldeor_names;
	    break;
	  case AND:
	    operands[1] = expand_simple_unop (<MODE>mode, NOT, operands[1],
					      NULL, 1);
	    names = &aarch64_ool_ldclr_names;
	    break;
	  default:
	    gcc_unreachable ();
	  }
        machine_mode mode = <MODE>mode;
	rtx func = aarch64_atomic_ool_func (mode, operands[2], names);
	emit_library_call_value (func, NULL_RTX, LCT_NORMAL, mode,
				 operands[1], mode,
				 XEXP (operands[0], 0), Pmode);
        DONE;
      }
    else
      gen = gen_aarch64_atomic_<atomic_optab><mode>;

    emit_insn (gen (operands[0], operands[1], operands[2]));
    DONE;
  }
)
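
;; The LSE ISA only provides load-operate instructions for ADD, SET
;; (inclusive OR), EOR and CLR (AND NOT), so the expander above (and the
;; fetch variants below) canonicalise the remaining codes: SUB becomes
;; ADD of the negated operand and AND becomes CLR (BIC) of the
;; complemented operand.  For instance, __atomic_fetch_and (p, mask,
;; __ATOMIC_SEQ_CST) with LSE enabled becomes roughly (registers
;; illustrative):
;;
;;	mvn	w1, w1			// complement the mask
;;	ldclral	w1, w0, [x0]		// old value returned in w0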

(define_insn_and_split "aarch64_atomic_<atomic_optab><mode>"
 [(set (match_operand:ALLI 0 "aarch64_sync_memory_operand" "+Q")
   (unspec_volatile:ALLI
    [(atomic_op:ALLI (match_dup 0)
      (match_operand:ALLI 1 "<atomic_op_operand>" "r<const_atomic>"))
     (match_operand:SI 2 "const_int_operand")]
    UNSPECV_ATOMIC_OP))
  (clobber (reg:CC CC_REGNUM))
  (clobber (match_scratch:ALLI 3 "=&r"))
  (clobber (match_scratch:SI 4 "=&r"))]
  ""
  "#"
  "&& epilogue_completed"
  [(const_int 0)]
  {
    aarch64_split_atomic_op (<CODE>, NULL, operands[3], operands[0],
			     operands[1], operands[2], operands[4]);
    DONE;
  }
)

;; It is tempting to want to use ST<OP> for relaxed and release
;; memory models here.  However, that is incompatible with the
;; C++ memory model for the following case:
;;
;;	atomic_fetch_add(ptr, 1, memory_order_relaxed);
;;	atomic_thread_fence(memory_order_acquire);
;;
;; The problem is that the architecture says that ST<OP> (and LD<OP>
;; insns where the destination is XZR) are not regarded as a read.
;; However we also implement the acquire memory barrier with DMB LD,
;; and so the ST<OP> is not blocked by the barrier.
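;;
;; Concretely, if the relaxed fetch_add were emitted as STADD and the
;; acquire fence as "dmb ishld", the barrier would not order the STADD
;; against accesses after the fence, because DMB LD only orders reads
;; and the architecture does not classify ST<OP> as one.  Emitting
;; LD<OP> with a live destination register keeps the read classification
;; and hence the ordering the fence is required to provide.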

(define_insn "aarch64_atomic_<atomic_ldoptab><mode>_lse"
  [(set (match_operand:ALLI 0 "aarch64_sync_memory_operand" "+Q")
	(unspec_volatile:ALLI
	  [(match_dup 0)
	   (match_operand:ALLI 1 "register_operand" "r")
	   (match_operand:SI 2 "const_int_operand")]
      ATOMIC_LDOP))
   (clobber (match_scratch:ALLI 3 "=r"))]
  "TARGET_LSE"
  {
   enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
   if (is_mm_relaxed (model))
     return "ld<atomic_ldop><atomic_sfx>\t%<w>1, %<w>3, %0";
   else if (is_mm_release (model))
     return "ld<atomic_ldop>l<atomic_sfx>\t%<w>1, %<w>3, %0";
   else if (is_mm_acquire (model) || is_mm_consume (model))
     return "ld<atomic_ldop>a<atomic_sfx>\t%<w>1, %<w>3, %0";
   else
     return "ld<atomic_ldop>al<atomic_sfx>\t%<w>1, %<w>3, %0";
  }
)

(define_insn_and_split "atomic_nand<mode>"
  [(set (match_operand:ALLI 0 "aarch64_sync_memory_operand" "+Q")
    (unspec_volatile:ALLI
      [(not:ALLI
	(and:ALLI (match_dup 0)
	  (match_operand:ALLI 1 "aarch64_logical_operand" "r<lconst_atomic>")))
       (match_operand:SI 2 "const_int_operand")]		;; model
      UNSPECV_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:ALLI 3 "=&r"))
   (clobber (match_scratch:SI 4 "=&r"))]
  ""
  "#"
  "&& epilogue_completed"
  [(const_int 0)]
  {
     aarch64_split_atomic_op (NOT, NULL, operands[3], operands[0],
			     operands[1], operands[2], operands[4]);
     DONE;
  }
)
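
;; There is no LSE or out-of-line form of NAND: no single LD<OP>
;; instruction computes ~(mem & value), so atomic_nand<mode> above (and
;; atomic_fetch_nand / atomic_nand_fetch below) always go through the
;; LL/SC loop produced by aarch64_split_atomic_op.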

;; Load-operate-store, returning the original memory data.

(define_expand "atomic_fetch_<atomic_optab><mode>"
 [(match_operand:ALLI 0 "register_operand")
  (match_operand:ALLI 1 "aarch64_sync_memory_operand")
  (atomic_op:ALLI
   (match_operand:ALLI 2 "<atomic_op_operand>")
   (match_operand:SI 3 "const_int_operand"))]
 ""
{
  rtx (*gen) (rtx, rtx, rtx, rtx);

  /* Use an atomic load-operate instruction when possible.  */
  if (TARGET_LSE)
    {
      switch (<CODE>)
        {
	case MINUS:
	  operands[2] = expand_simple_unop (<MODE>mode, NEG, operands[2],
					    NULL, 1);
	  /* fallthru */
	case PLUS:
	  gen = gen_aarch64_atomic_fetch_add<mode>_lse;
	  break;
	case IOR:
	  gen = gen_aarch64_atomic_fetch_ior<mode>_lse;
	  break;
	case XOR:
	  gen = gen_aarch64_atomic_fetch_xor<mode>_lse;
	  break;
	case AND:
	  operands[2] = expand_simple_unop (<MODE>mode, NOT, operands[2],
					    NULL, 1);
	  gen = gen_aarch64_atomic_fetch_bic<mode>_lse;
	  break;
	default:
	  gcc_unreachable ();
	}
      operands[2] = force_reg (<MODE>mode, operands[2]);
    }
  else if (TARGET_OUTLINE_ATOMICS)
    {
      const atomic_ool_names *names;
      switch (<CODE>)
	{
	case MINUS:
	  operands[2] = expand_simple_unop (<MODE>mode, NEG, operands[2],
					    NULL, 1);
	  /* fallthru */
	case PLUS:
	  names = &aarch64_ool_ldadd_names;
	  break;
	case IOR:
	  names = &aarch64_ool_ldset_names;
	  break;
	case XOR:
	  names = &aarch64_ool_ldeor_names;
	  break;
	case AND:
	  operands[2] = expand_simple_unop (<MODE>mode, NOT, operands[2],
					    NULL, 1);
	  names = &aarch64_ool_ldclr_names;
	  break;
	default:
	  gcc_unreachable ();
	}
      machine_mode mode = <MODE>mode;
      rtx func = aarch64_atomic_ool_func (mode, operands[3], names);
      rtx rval = emit_library_call_value (func, operands[0], LCT_NORMAL, mode,
					  operands[2], mode,
					  XEXP (operands[1], 0), Pmode);
      emit_move_insn (operands[0], rval);
      DONE;
    }
  else
    gen = gen_aarch64_atomic_fetch_<atomic_optab><mode>;

  emit_insn (gen (operands[0], operands[1], operands[2], operands[3]));
  DONE;
})

(define_insn_and_split "aarch64_atomic_fetch_<atomic_optab><mode>"
  [(set (match_operand:ALLI 0 "register_operand" "=&r")
    (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
   (set (match_dup 1)
    (unspec_volatile:ALLI
      [(atomic_op:ALLI (match_dup 1)
	(match_operand:ALLI 2 "<atomic_op_operand>" "r<const_atomic>"))
       (match_operand:SI 3 "const_int_operand")]		;; model
      UNSPECV_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:ALLI 4 "=&r"))
   (clobber (match_scratch:SI 5 "=&r"))]
  ""
  "#"
  "&& epilogue_completed"
  [(const_int 0)]
  {
    aarch64_split_atomic_op (<CODE>, operands[0], operands[4], operands[1],
			     operands[2], operands[3], operands[5]);
    DONE;
  }
)

(define_insn "aarch64_atomic_fetch_<atomic_ldoptab><mode>_lse"
  [(set (match_operand:ALLI 0 "register_operand" "=r")
	(match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
   (set (match_dup 1)
	(unspec_volatile:ALLI
	  [(match_dup 1)
	   (match_operand:ALLI 2 "register_operand" "r")
	   (match_operand:SI 3 "const_int_operand")]
	  ATOMIC_LDOP))]
  "TARGET_LSE"
  {
   enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
   if (is_mm_relaxed (model))
     return "ld<atomic_ldop><atomic_sfx>\t%<w>2, %<w>0, %1";
   else if (is_mm_acquire (model) || is_mm_consume (model))
     return "ld<atomic_ldop>a<atomic_sfx>\t%<w>2, %<w>0, %1";
   else if (is_mm_release (model))
     return "ld<atomic_ldop>l<atomic_sfx>\t%<w>2, %<w>0, %1";
   else
     return "ld<atomic_ldop>al<atomic_sfx>\t%<w>2, %<w>0, %1";
  }
)

(define_insn_and_split "atomic_fetch_nand<mode>"
  [(set (match_operand:ALLI 0 "register_operand" "=&r")
    (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
   (set (match_dup 1)
    (unspec_volatile:ALLI
      [(not:ALLI
	 (and:ALLI (match_dup 1)
	   (match_operand:ALLI 2 "aarch64_logical_operand" "r<lconst_atomic>")))
       (match_operand:SI 3 "const_int_operand")]		;; model
      UNSPECV_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:ALLI 4 "=&r"))
   (clobber (match_scratch:SI 5 "=&r"))]
  ""
  "#"
  "&& epilogue_completed"
  [(const_int 0)]
  {
    aarch64_split_atomic_op (NOT, operands[0], operands[4], operands[1],
			    operands[2], operands[3], operands[5]);
    DONE;
  }
)

;; Load-operate-store, returning the updated memory data.

(define_expand "atomic_<atomic_optab>_fetch<mode>"
 [(match_operand:ALLI 0 "register_operand")
  (atomic_op:ALLI
   (match_operand:ALLI 1 "aarch64_sync_memory_operand")
   (match_operand:ALLI 2 "<atomic_op_operand>"))
  (match_operand:SI 3 "const_int_operand")]
 ""
{
  /* Use an atomic load-operate instruction when possible.  In this case
     we will re-compute the result from the original mem value. */
  if (TARGET_LSE || TARGET_OUTLINE_ATOMICS)
    {
      rtx tmp = gen_reg_rtx (<MODE>mode);
      operands[2] = force_reg (<MODE>mode, operands[2]);
      emit_insn (gen_atomic_fetch_<atomic_optab><mode>
                 (tmp, operands[1], operands[2], operands[3]));
      tmp = expand_simple_binop (<MODE>mode, <CODE>, tmp, operands[2],
				 operands[0], 1, OPTAB_WIDEN);
      emit_move_insn (operands[0], tmp);
    }
  else
    {
      emit_insn (gen_aarch64_atomic_<atomic_optab>_fetch<mode>
                 (operands[0], operands[1], operands[2], operands[3]));
    }
  DONE;
})
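
;; A rough sketch of the recomputation above for
;; __atomic_add_fetch (p, n, __ATOMIC_SEQ_CST) with LSE (registers
;; chosen purely for illustration):
;;
;;	ldaddal	w1, w2, [x0]		// w2 = old value of *p
;;	add	w0, w2, w1		// result = old value + n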

(define_insn_and_split "aarch64_atomic_<atomic_optab>_fetch<mode>"
  [(set (match_operand:ALLI 0 "register_operand" "=&r")
    (atomic_op:ALLI
      (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q")
      (match_operand:ALLI 2 "<atomic_op_operand>" "r<const_atomic>")))
   (set (match_dup 1)
    (unspec_volatile:ALLI
      [(match_dup 1) (match_dup 2)
       (match_operand:SI 3 "const_int_operand")]		;; model
      UNSPECV_ATOMIC_OP))
    (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:SI 4 "=&r"))]
  ""
  "#"
  "&& epilogue_completed"
  [(const_int 0)]
  {
    aarch64_split_atomic_op (<CODE>, NULL, operands[0], operands[1],
			     operands[2], operands[3], operands[4]);
    DONE;
  }
)

(define_insn_and_split "atomic_nand_fetch<mode>"
  [(set (match_operand:ALLI 0 "register_operand" "=&r")
    (not:ALLI
      (and:ALLI
	(match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q")
	(match_operand:ALLI 2 "aarch64_logical_operand" "r<lconst_atomic>"))))
   (set (match_dup 1)
    (unspec_volatile:ALLI
      [(match_dup 1) (match_dup 2)
       (match_operand:SI 3 "const_int_operand")]		;; model
      UNSPECV_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:SI 4 "=&r"))]
  ""
  "#"
  "&& epilogue_completed"
  [(const_int 0)]
  {
    aarch64_split_atomic_op (NOT, NULL, operands[0], operands[1],
			    operands[2], operands[3], operands[4]);
    DONE;
  }
)

(define_insn "atomic_load<mode>"
  [(set (match_operand:ALLI 0 "register_operand" "=r")
    (unspec_volatile:ALLI
      [(match_operand:ALLI 1 "aarch64_sync_memory_operand" "Q")
       (match_operand:SI 2 "const_int_operand")]			;; model
      UNSPECV_LDA))]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
    if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
      return "ldr<atomic_sfx>\t%<w>0, %1";
    else
      return "ldar<atomic_sfx>\t%<w>0, %1";
  }
)

(define_insn "atomic_store<mode>"
  [(set (match_operand:ALLI 0 "aarch64_rcpc_memory_operand" "=Q,Ust")
    (unspec_volatile:ALLI
      [(match_operand:ALLI 1 "general_operand" "rZ,rZ")
       (match_operand:SI 2 "const_int_operand")]			;; model
      UNSPECV_STL))]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
    if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
      return "str<atomic_sfx>\t%<w>1, %0";
    else if (which_alternative == 0)
      return "stlr<atomic_sfx>\t%<w>1, %0";
    else
      return "stlur<atomic_sfx>\t%<w>1, %0";
  }
  [(set_attr "arch" "*,rcpc8_4")]
)
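
;; The second alternative above uses STLUR (Armv8.4-A / FEAT_LRCPC2,
;; hence the "rcpc8_4" arch attribute), which accepts the offset
;; addresses matched by aarch64_rcpc_memory_operand; plain STLR only
;; takes a register base.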

(define_insn "@aarch64_load_exclusive<mode>"
  [(set (match_operand:SI 0 "register_operand" "=r")
    (zero_extend:SI
      (unspec_volatile:SHORT
	[(match_operand:SHORT 1 "aarch64_sync_memory_operand" "Q")
	 (match_operand:SI 2 "const_int_operand")]
	UNSPECV_LX)))]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
    if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
      return "ldxr<atomic_sfx>\t%w0, %1";
    else
      return "ldaxr<atomic_sfx>\t%w0, %1";
  }
)

(define_insn "@aarch64_load_exclusive<mode>"
  [(set (match_operand:GPI 0 "register_operand" "=r")
    (unspec_volatile:GPI
      [(match_operand:GPI 1 "aarch64_sync_memory_operand" "Q")
       (match_operand:SI 2 "const_int_operand")]
      UNSPECV_LX))]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
    if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
      return "ldxr\t%<w>0, %1";
    else
      return "ldaxr\t%<w>0, %1";
  }
)

(define_insn "aarch64_load_exclusive_pair"
  [(set (match_operand:DI 0 "register_operand" "=r")
	(unspec_volatile:DI
	  [(match_operand:TI 2 "aarch64_sync_memory_operand" "Q")
	   (match_operand:SI 3 "const_int_operand")]
	  UNSPECV_LX))
   (set (match_operand:DI 1 "register_operand" "=r")
	(unspec_volatile:DI [(match_dup 2) (match_dup 3)] UNSPECV_LX))]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
    if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
      return "ldxp\t%0, %1, %2";
    else
      return "ldaxp\t%0, %1, %2";
  }
)

(define_insn "@aarch64_store_exclusive<mode>"
  [(set (match_operand:SI 0 "register_operand" "=&r")
    (unspec_volatile:SI [(const_int 0)] UNSPECV_SX))
   (set (match_operand:ALLI 1 "aarch64_sync_memory_operand" "=Q")
    (unspec_volatile:ALLI
      [(match_operand:ALLI 2 "aarch64_reg_or_zero" "rZ")
       (match_operand:SI 3 "const_int_operand")]
      UNSPECV_SX))]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
    if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
      return "stxr<atomic_sfx>\t%w0, %<w>2, %1";
    else
      return "stlxr<atomic_sfx>\t%w0, %<w>2, %1";
  }
)

(define_insn "aarch64_store_exclusive_pair"
  [(set (match_operand:SI 0 "register_operand" "=&r")
	(unspec_volatile:SI [(const_int 0)] UNSPECV_SX))
   (set (match_operand:TI 1 "aarch64_sync_memory_operand" "=Q")
	(unspec_volatile:TI
	  [(match_operand:DI 2 "aarch64_reg_or_zero" "rZ")
	   (match_operand:DI 3 "aarch64_reg_or_zero" "rZ")
	   (match_operand:SI 4 "const_int_operand")]
	  UNSPECV_SX))]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[4]));
    if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
      return "stxp\t%w0, %x2, %x3, %1";
    else
      return "stlxp\t%w0, %x2, %x3, %1";
  }
)

(define_expand "mem_thread_fence"
  [(match_operand:SI 0 "const_int_operand")]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[0]));
    if (!(is_mm_relaxed (model) || is_mm_consume (model)))
      emit_insn (gen_dmb (operands[0]));
    DONE;
  }
)
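
;; For example, __atomic_thread_fence (__ATOMIC_SEQ_CST) goes through
;; the dmb expander below and prints "dmb ish", an acquire fence prints
;; "dmb ishld", and relaxed or consume fences emit nothing at all.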

(define_expand "dmb"
  [(set (match_dup 1)
    (unspec:BLK [(match_dup 1) (match_operand:SI 0 "const_int_operand")]
     UNSPEC_MB))]
   ""
   {
    operands[1] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
    MEM_VOLATILE_P (operands[1]) = 1;
  }
)

(define_insn "*dmb"
  [(set (match_operand:BLK 0 "" "")
    (unspec:BLK [(match_dup 0) (match_operand:SI 1 "const_int_operand")]
     UNSPEC_MB))]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[1]));
    if (is_mm_acquire (model))
      return "dmb\\tishld";
    else
      return "dmb\\tish";
  }
)