xref: /linux/arch/arm64/include/asm/atomic_lse.h (revision 44f57d78)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif

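/*
 * Each helper below is generated in two forms via ARM64_LSE_ATOMIC_INSN():
 * an out-of-line LL/SC fallback (the __LL_SC_CALL() branch) and an inline
 * LSE atomic instruction. The alternatives framework picks one of the two
 * at runtime, depending on whether the CPU implements the ARMv8.1 LSE
 * atomics.
 */
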
#define __LL_SC_ATOMIC(op)	__LL_SC_CALL(arch_atomic_##op)
#define ATOMIC_OP(op, asm_op)						\
static inline void arch_atomic_##op(int i, atomic_t *v)			\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op),		\
"	" #asm_op "	%w[i], %[v]\n")					\
	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS);						\
}

ATOMIC_OP(andnot, stclr)
ATOMIC_OP(or, stset)
ATOMIC_OP(xor, steor)
ATOMIC_OP(add, stadd)

#undef ATOMIC_OP

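/*
 * Illustrative (approximate) expansion of ATOMIC_OP(add, stadd) above:
 *
 *	static inline void arch_atomic_add(int i, atomic_t *v)
 *	{
 *		register int w0 asm ("w0") = i;
 *		register atomic_t *x1 asm ("x1") = v;
 *
 *		asm volatile(ARM64_LSE_ATOMIC_INSN(
 *			__LL_SC_CALL(arch_atomic_add),
 *		"	stadd	%w[i], %[v]\n")
 *		: [i] "+r" (w0), [v] "+Q" (v->counter)
 *		: "r" (x1)
 *		: __LL_SC_CLOBBERS);
 *	}
 */
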
#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...)			\
static inline int arch_atomic_fetch_##op##name(int i, atomic_t *v)	\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC(fetch_##op##name),				\
	/* LSE atomics */						\
"	" #asm_op #mb "	%w[i], %w[i], %[v]")				\
	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return w0;							\
}

#define ATOMIC_FETCH_OPS(op, asm_op)					\
	ATOMIC_FETCH_OP(_relaxed,   , op, asm_op)			\
	ATOMIC_FETCH_OP(_acquire,  a, op, asm_op, "memory")		\
	ATOMIC_FETCH_OP(_release,  l, op, asm_op, "memory")		\
	ATOMIC_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC_FETCH_OPS(andnot, ldclr)
ATOMIC_FETCH_OPS(or, ldset)
ATOMIC_FETCH_OPS(xor, ldeor)
ATOMIC_FETCH_OPS(add, ldadd)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_FETCH_OPS

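/*
 * The "mb" argument selects the memory-ordering variant of the instruction:
 * no suffix for _relaxed, "a" for _acquire, "l" for _release and "al" for
 * the fully ordered form. For example, ATOMIC_FETCH_OPS(add, ldadd) defines
 * arch_atomic_fetch_add_acquire() around "ldadda" and arch_atomic_fetch_add()
 * around "ldaddal". The ordered variants additionally clobber "memory" so
 * the compiler treats them as barriers.
 */
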
#define ATOMIC_OP_ADD_RETURN(name, mb, cl...)				\
static inline int arch_atomic_add_return##name(int i, atomic_t *v)	\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC(add_return##name)				\
	__nops(1),							\
	/* LSE atomics */						\
	"	ldadd" #mb "	%w[i], w30, %[v]\n"			\
	"	add	%w[i], %w[i], w30")				\
	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return w0;							\
}

ATOMIC_OP_ADD_RETURN(_relaxed,   )
ATOMIC_OP_ADD_RETURN(_acquire,  a, "memory")
ATOMIC_OP_ADD_RETURN(_release,  l, "memory")
ATOMIC_OP_ADD_RETURN(        , al, "memory")

#undef ATOMIC_OP_ADD_RETURN

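/*
 * ldadd writes the old value of *v to a destination register, so add_return
 * needs a scratch register to compute the new value. w30/x30 (the link
 * register) is used for this: it is already listed in __LL_SC_CLOBBERS for
 * the out-of-line LL/SC call, so the LSE path is free to reuse it. The extra
 * "add" is also why the LL/SC side carries __nops(1), keeping the two
 * alternative sequences the same length.
 */
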
static inline void arch_atomic_and(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	__LL_SC_ATOMIC(and)
	__nops(1),
	/* LSE atomics */
	"	mvn	%w[i], %w[i]\n"
	"	stclr	%w[i], %[v]")
	: [i] "+&r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

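/*
 * LSE has no "store-and" instruction, so AND is implemented as a bit-clear
 * of the complemented operand: mvn inverts %w[i] and stclr then clears those
 * bits in *v. Since the operand is overwritten before the atomic access, it
 * is marked earlyclobber ("+&r").
 */
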
#define ATOMIC_FETCH_OP_AND(name, mb, cl...)				\
static inline int arch_atomic_fetch_and##name(int i, atomic_t *v)	\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC(fetch_and##name)					\
	__nops(1),							\
	/* LSE atomics */						\
	"	mvn	%w[i], %w[i]\n"					\
	"	ldclr" #mb "	%w[i], %w[i], %[v]")			\
	: [i] "+&r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return w0;							\
}

ATOMIC_FETCH_OP_AND(_relaxed,   )
ATOMIC_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC_FETCH_OP_AND(_release,  l, "memory")
ATOMIC_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC_FETCH_OP_AND

static inline void arch_atomic_sub(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	__LL_SC_ATOMIC(sub)
	__nops(1),
	/* LSE atomics */
	"	neg	%w[i], %w[i]\n"
	"	stadd	%w[i], %[v]")
	: [i] "+&r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

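/*
 * Likewise there is no "store-subtract" instruction: subtraction is done by
 * negating the operand and issuing stadd/ldadd, as in the sub, sub_return
 * and fetch_sub variants that follow.
 */
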
#define ATOMIC_OP_SUB_RETURN(name, mb, cl...)				\
static inline int arch_atomic_sub_return##name(int i, atomic_t *v)	\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC(sub_return##name)				\
	__nops(2),							\
	/* LSE atomics */						\
	"	neg	%w[i], %w[i]\n"					\
	"	ldadd" #mb "	%w[i], w30, %[v]\n"			\
	"	add	%w[i], %w[i], w30")				\
	: [i] "+&r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return w0;							\
}

ATOMIC_OP_SUB_RETURN(_relaxed,   )
ATOMIC_OP_SUB_RETURN(_acquire,  a, "memory")
ATOMIC_OP_SUB_RETURN(_release,  l, "memory")
ATOMIC_OP_SUB_RETURN(        , al, "memory")

#undef ATOMIC_OP_SUB_RETURN

#define ATOMIC_FETCH_OP_SUB(name, mb, cl...)				\
static inline int arch_atomic_fetch_sub##name(int i, atomic_t *v)	\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC(fetch_sub##name)					\
	__nops(1),							\
	/* LSE atomics */						\
	"	neg	%w[i], %w[i]\n"					\
	"	ldadd" #mb "	%w[i], %w[i], %[v]")			\
	: [i] "+&r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return w0;							\
}

ATOMIC_FETCH_OP_SUB(_relaxed,   )
ATOMIC_FETCH_OP_SUB(_acquire,  a, "memory")
ATOMIC_FETCH_OP_SUB(_release,  l, "memory")
ATOMIC_FETCH_OP_SUB(        , al, "memory")

#undef ATOMIC_FETCH_OP_SUB
#undef __LL_SC_ATOMIC

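/*
 * The atomic64_t implementations below follow the same pattern as the
 * 32-bit ones above, but operate on 64-bit quantities: the operand lives in
 * x0 rather than w0 and the instructions use their X-register forms.
 */
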
#define __LL_SC_ATOMIC64(op)	__LL_SC_CALL(arch_atomic64_##op)
#define ATOMIC64_OP(op, asm_op)						\
static inline void arch_atomic64_##op(long i, atomic64_t *v)		\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op),	\
"	" #asm_op "	%[i], %[v]\n")					\
	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS);						\
}

ATOMIC64_OP(andnot, stclr)
ATOMIC64_OP(or, stset)
ATOMIC64_OP(xor, steor)
ATOMIC64_OP(add, stadd)

#undef ATOMIC64_OP

#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...)			\
static inline long arch_atomic64_fetch_##op##name(long i, atomic64_t *v)\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC64(fetch_##op##name),				\
	/* LSE atomics */						\
"	" #asm_op #mb "	%[i], %[i], %[v]")				\
	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

#define ATOMIC64_FETCH_OPS(op, asm_op)					\
	ATOMIC64_FETCH_OP(_relaxed,   , op, asm_op)			\
	ATOMIC64_FETCH_OP(_acquire,  a, op, asm_op, "memory")		\
	ATOMIC64_FETCH_OP(_release,  l, op, asm_op, "memory")		\
	ATOMIC64_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC64_FETCH_OPS(andnot, ldclr)
ATOMIC64_FETCH_OPS(or, ldset)
ATOMIC64_FETCH_OPS(xor, ldeor)
ATOMIC64_FETCH_OPS(add, ldadd)

#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS

#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...)				\
static inline long arch_atomic64_add_return##name(long i, atomic64_t *v)\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC64(add_return##name)				\
	__nops(1),							\
	/* LSE atomics */						\
	"	ldadd" #mb "	%[i], x30, %[v]\n"			\
	"	add	%[i], %[i], x30")				\
	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

ATOMIC64_OP_ADD_RETURN(_relaxed,   )
ATOMIC64_OP_ADD_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_ADD_RETURN(_release,  l, "memory")
ATOMIC64_OP_ADD_RETURN(        , al, "memory")

#undef ATOMIC64_OP_ADD_RETURN

static inline void arch_atomic64_and(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	__LL_SC_ATOMIC64(and)
	__nops(1),
	/* LSE atomics */
	"	mvn	%[i], %[i]\n"
	"	stclr	%[i], %[v]")
	: [i] "+&r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

#define ATOMIC64_FETCH_OP_AND(name, mb, cl...)				\
static inline long arch_atomic64_fetch_and##name(long i, atomic64_t *v)	\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC64(fetch_and##name)				\
	__nops(1),							\
	/* LSE atomics */						\
	"	mvn	%[i], %[i]\n"					\
	"	ldclr" #mb "	%[i], %[i], %[v]")			\
	: [i] "+&r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

ATOMIC64_FETCH_OP_AND(_relaxed,   )
ATOMIC64_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_AND(_release,  l, "memory")
ATOMIC64_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC64_FETCH_OP_AND

static inline void arch_atomic64_sub(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	__LL_SC_ATOMIC64(sub)
	__nops(1),
	/* LSE atomics */
	"	neg	%[i], %[i]\n"
	"	stadd	%[i], %[v]")
	: [i] "+&r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...)				\
static inline long arch_atomic64_sub_return##name(long i, atomic64_t *v)\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC64(sub_return##name)				\
	__nops(2),							\
	/* LSE atomics */						\
	"	neg	%[i], %[i]\n"					\
	"	ldadd" #mb "	%[i], x30, %[v]\n"			\
	"	add	%[i], %[i], x30")				\
	: [i] "+&r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

ATOMIC64_OP_SUB_RETURN(_relaxed,   )
ATOMIC64_OP_SUB_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_SUB_RETURN(_release,  l, "memory")
ATOMIC64_OP_SUB_RETURN(        , al, "memory")

#undef ATOMIC64_OP_SUB_RETURN

#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...)				\
static inline long arch_atomic64_fetch_sub##name(long i, atomic64_t *v)	\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC64(fetch_sub##name)				\
	__nops(1),							\
	/* LSE atomics */						\
	"	neg	%[i], %[i]\n"					\
	"	ldadd" #mb "	%[i], %[i], %[v]")			\
	: [i] "+&r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

ATOMIC64_FETCH_OP_SUB(_relaxed,   )
ATOMIC64_FETCH_OP_SUB(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_SUB(_release,  l, "memory")
ATOMIC64_FETCH_OP_SUB(        , al, "memory")

#undef ATOMIC64_FETCH_OP_SUB

static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
{
	register long x0 asm ("x0") = (long)v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	__LL_SC_ATOMIC64(dec_if_positive)
	__nops(6),
	/* LSE atomics */
	"1:	ldr	x30, %[v]\n"
	"	subs	%[ret], x30, #1\n"
	"	b.lt	2f\n"
	"	casal	x30, %[ret], %[v]\n"
	"	sub	x30, x30, #1\n"
	"	sub	x30, x30, %[ret]\n"
	"	cbnz	x30, 1b\n"
	"2:")
	: [ret] "+&r" (x0), [v] "+Q" (v->counter)
	:
	: __LL_SC_CLOBBERS, "cc", "memory");

	return x0;
}

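/*
 * The LSE path above open-codes a compare-and-swap loop: load the current
 * value into x30, compute the decremented value in %[ret], bail out to
 * label 2 if it would go negative, then try to install it with casal. On
 * failure, casal leaves the value it observed in x30; the two "sub"
 * instructions compare that against the expected old value and cbnz retries
 * if another CPU changed the counter in the meantime. The result is the
 * decremented value, or a negative number if the counter was not
 * decremented.
 */
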
#undef __LL_SC_ATOMIC64

#define __LL_SC_CMPXCHG(op)	__LL_SC_CALL(__cmpxchg_case_##op)

#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...)			\
static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr,	\
					      u##sz old,		\
					      u##sz new)		\
{									\
	register unsigned long x0 asm ("x0") = (unsigned long)ptr;	\
	register u##sz x1 asm ("x1") = old;				\
	register u##sz x2 asm ("x2") = new;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_CMPXCHG(name##sz)					\
	__nops(2),							\
	/* LSE atomics */						\
	"	mov	" #w "30, %" #w "[old]\n"			\
	"	cas" #mb #sfx "\t" #w "30, %" #w "[new], %[v]\n"	\
	"	mov	%" #w "[ret], " #w "30")			\
	: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr)		\
	: [old] "r" (x1), [new] "r" (x2)				\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

__CMPXCHG_CASE(w, b,     ,  8,   )
__CMPXCHG_CASE(w, h,     , 16,   )
__CMPXCHG_CASE(w,  ,     , 32,   )
__CMPXCHG_CASE(x,  ,     , 64,   )
__CMPXCHG_CASE(w, b, acq_,  8,  a, "memory")
__CMPXCHG_CASE(w, h, acq_, 16,  a, "memory")
__CMPXCHG_CASE(w,  , acq_, 32,  a, "memory")
__CMPXCHG_CASE(x,  , acq_, 64,  a, "memory")
__CMPXCHG_CASE(w, b, rel_,  8,  l, "memory")
__CMPXCHG_CASE(w, h, rel_, 16,  l, "memory")
__CMPXCHG_CASE(w,  , rel_, 32,  l, "memory")
__CMPXCHG_CASE(x,  , rel_, 64,  l, "memory")
__CMPXCHG_CASE(w, b,  mb_,  8, al, "memory")
__CMPXCHG_CASE(w, h,  mb_, 16, al, "memory")
__CMPXCHG_CASE(w,  ,  mb_, 32, al, "memory")
__CMPXCHG_CASE(x,  ,  mb_, 64, al, "memory")

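/*
 * The generated helpers are named __cmpxchg_case_<order><size>, where the
 * order prefix is empty (relaxed), acq_, rel_ or mb_ and the size is 8, 16,
 * 32 or 64 bits: __cmpxchg_case_mb_32() maps to "casal" on W registers,
 * __cmpxchg_case_acq_8() to "casab", and so on. The expected value is staged
 * in w30/x30 because cas overwrites its first register with the value it
 * observed in memory; that observed value is then returned so callers can
 * tell whether the exchange succeeded.
 */
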
#undef __LL_SC_CMPXCHG
#undef __CMPXCHG_CASE

#define __LL_SC_CMPXCHG_DBL(op)	__LL_SC_CALL(__cmpxchg_double##op)

#define __CMPXCHG_DBL(name, mb, cl...)					\
static inline long __cmpxchg_double##name(unsigned long old1,		\
					 unsigned long old2,		\
					 unsigned long new1,		\
					 unsigned long new2,		\
					 volatile void *ptr)		\
{									\
	unsigned long oldval1 = old1;					\
	unsigned long oldval2 = old2;					\
	register unsigned long x0 asm ("x0") = old1;			\
	register unsigned long x1 asm ("x1") = old2;			\
	register unsigned long x2 asm ("x2") = new1;			\
	register unsigned long x3 asm ("x3") = new2;			\
	register unsigned long x4 asm ("x4") = (unsigned long)ptr;	\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_CMPXCHG_DBL(name)					\
	__nops(3),							\
	/* LSE atomics */						\
	"	casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
	"	eor	%[old1], %[old1], %[oldval1]\n"			\
	"	eor	%[old2], %[old2], %[oldval2]\n"			\
	"	orr	%[old1], %[old1], %[old2]")			\
	: [old1] "+&r" (x0), [old2] "+&r" (x1),				\
	  [v] "+Q" (*(unsigned long *)ptr)				\
	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),		\
	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)		\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

__CMPXCHG_DBL(   ,   )
__CMPXCHG_DBL(_mb, al, "memory")

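/*
 * casp compares and swaps a pair of adjacent 64-bit words (an even/odd
 * register pair against 128 bits of memory) and writes the values it
 * observed back into %[old1]/%[old2]. The eor/eor/orr sequence then reduces
 * those observations to a single status: the function returns 0 if both
 * words matched and the new values were stored, and non-zero otherwise,
 * matching the LL/SC __cmpxchg_double() convention.
 */
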
#undef __LL_SC_CMPXCHG_DBL
#undef __CMPXCHG_DBL

#endif	/* __ASM_ATOMIC_LSE_H */