/* Legacy sub-word atomics for RISC-V.

   Copyright (C) 2016-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

#ifdef __riscv_atomic

#include <stdbool.h>

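/* The RISC-V 'A' extension only provides LR/SC and AMO instructions at
   word (and, on RV64, doubleword) granularity, so the 8-bit and 16-bit
   __sync_* routines defined below are synthesized from 32-bit LR/SC:
   each routine loads the naturally aligned word containing the operand,
   operates on the bytes of interest under a mask, and stores the whole
   word back, retrying until the store-conditional succeeds.  */
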
#define INVERT		"not %[tmp1], %[tmp1]\n\t"
#define DONT_INVERT	""

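/* Emit __sync_fetch_and_<opname>_<size> and
   __sync_<opname>_and_fetch_<size> for TYPE.  INSN is the instruction
   that implements the operation on a full register, INVERT optionally
   complements its result (only nand needs it), and COP recomputes the
   new value from the old value O and the operand V for the op-and-fetch
   variant.  V is pre-shifted into its byte lane before the loop; the
   loop applies INSN, masks the result, and merges it into the
   containing aligned word so the neighbouring bytes are preserved.  */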
#define GENERATE_FETCH_AND_OP(type, size, opname, insn, invert, cop)	\
  type __sync_fetch_and_ ## opname ## _ ## size (type *p, type v)	\
  {									\
    unsigned long aligned_addr = ((unsigned long) p) & ~3UL;		\
    int shift = (((unsigned long) p) & 3) * 8;				\
    unsigned mask = ((1U << ((sizeof v) * 8)) - 1) << shift;		\
    unsigned old, tmp1, tmp2;						\
									\
    asm volatile ("1:\n\t"						\
		  "lr.w.aq %[old], %[mem]\n\t"				\
		  #insn " %[tmp1], %[old], %[value]\n\t"		\
		  invert						\
		  "and %[tmp1], %[tmp1], %[mask]\n\t"			\
		  "and %[tmp2], %[old], %[not_mask]\n\t"		\
		  "or %[tmp2], %[tmp2], %[tmp1]\n\t"			\
		  "sc.w.rl %[tmp1], %[tmp2], %[mem]\n\t"		\
		  "bnez %[tmp1], 1b"					\
		  : [old] "=&r" (old),					\
		    [mem] "+A" (*(volatile unsigned*) aligned_addr),	\
		    [tmp1] "=&r" (tmp1),				\
		    [tmp2] "=&r" (tmp2)					\
		  : [value] "r" (((unsigned) v) << shift),		\
		    [mask] "r" (mask),					\
		    [not_mask] "r" (~mask));				\
									\
    return (type) (old >> shift);					\
  }									\
									\
  type __sync_ ## opname ## _and_fetch_ ## size (type *p, type v)	\
  {									\
    type o = __sync_fetch_and_ ## opname ## _ ## size (p, v);		\
    return cop;								\
  }
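
/* For illustration, once GENERATE_ALL below instantiates this macro,
   __sync_fetch_and_add_1 behaves roughly like the following portable
   sketch (the real routine performs the update with a single
   lr.w.aq/sc.w.rl loop rather than a word-sized compare-and-swap):

     unsigned char __sync_fetch_and_add_1 (unsigned char *p, unsigned char v)
     {
       unsigned *wp = (unsigned *) ((unsigned long) p & ~3UL);
       int shift = ((unsigned long) p & 3) * 8;
       unsigned mask = 0xffU << shift, old, new;
       do
	 {
	   old = *wp;
	   new = (old & ~mask) | ((((old >> shift) + v) << shift) & mask);
	 }
       while (!__sync_bool_compare_and_swap (wp, old, new));
       return (unsigned char) (old >> shift);
     }  */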
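/* Emit __sync_val_compare_and_swap_<size> and
   __sync_bool_compare_and_swap_<size> for TYPE.  The expected value O
   and the new value N are pre-shifted and masked into their byte lane;
   the loop branches forward to the trailing "1:" label as soon as the
   bytes of interest differ from O, and otherwise splices N into the
   loaded word, retrying until the store-conditional succeeds.  The bool
   variant uses the __sync_val_compare_and_swap builtin, which for these
   sub-word types expands to a call to the val helper defined here.  */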
#define GENERATE_COMPARE_AND_SWAP(type, size)				\
  type __sync_val_compare_and_swap_ ## size (type *p, type o, type n)	\
  {									\
    unsigned long aligned_addr = ((unsigned long) p) & ~3UL;		\
    int shift = (((unsigned long) p) & 3) * 8;				\
    unsigned mask = ((1U << ((sizeof o) * 8)) - 1) << shift;		\
    unsigned old, tmp1;							\
									\
    asm volatile ("1:\n\t"						\
		  "lr.w.aq %[old], %[mem]\n\t"				\
		  "and %[tmp1], %[old], %[mask]\n\t"			\
		  "bne %[tmp1], %[o], 1f\n\t"				\
		  "and %[tmp1], %[old], %[not_mask]\n\t"		\
		  "or %[tmp1], %[tmp1], %[n]\n\t"			\
		  "sc.w.rl %[tmp1], %[tmp1], %[mem]\n\t"		\
		  "bnez %[tmp1], 1b\n\t"				\
		  "1:"							\
		  : [old] "=&r" (old),					\
		    [mem] "+A" (*(volatile unsigned*) aligned_addr),	\
		    [tmp1] "=&r" (tmp1)					\
		  : [o] "r" ((((unsigned) o) << shift) & mask),		\
		    [n] "r" ((((unsigned) n) << shift) & mask),		\
		    [mask] "r" (mask),					\
		    [not_mask] "r" (~mask));				\
									\
    return (type) (old >> shift);					\
  }									\
									\
  bool __sync_bool_compare_and_swap_ ## size (type *p, type o, type n)	\
  {									\
    return __sync_val_compare_and_swap (p, o, n) == o;			\
  }
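/* Instantiate the fetch-and-op, op-and-fetch and compare-and-swap
   helpers for TYPE.  nand has no dedicated instruction, so it reuses
   "and" with the result inverted, and its op-and-fetch value is
   recomputed as ~(o & v).  */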
#define GENERATE_ALL(type, size)					\
  GENERATE_FETCH_AND_OP(type, size, add, add, DONT_INVERT, o + v)	\
  GENERATE_FETCH_AND_OP(type, size, sub, sub, DONT_INVERT, o - v)	\
  GENERATE_FETCH_AND_OP(type, size, and, and, DONT_INVERT, o & v)	\
  GENERATE_FETCH_AND_OP(type, size, xor, xor, DONT_INVERT, o ^ v)	\
  GENERATE_FETCH_AND_OP(type, size, or, or, DONT_INVERT, o | v)		\
  GENERATE_FETCH_AND_OP(type, size, nand, and, INVERT, ~(o & v))	\
  GENERATE_COMPARE_AND_SWAP(type, size)
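/* Define the 1-byte and 2-byte entry points (__sync_*_1 and
   __sync_*_2) that calls to the sub-word __sync builtins resolve to on
   this target.  */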
GENERATE_ALL(unsigned char, 1)
GENERATE_ALL(unsigned short, 2)

#endif