/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2008 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/

/*
 * Interface to the hardware Fetch and Add Unit.
 */

#ifndef __CVMX_FAU_H__
#define __CVMX_FAU_H__

/*
 * Octeon Fetch and Add Unit (FAU)
 */

#define CVMX_FAU_LOAD_IO_ADDRESS    cvmx_build_io_address(0x1e, 0)
#define CVMX_FAU_BITS_SCRADDR	    63, 56
#define CVMX_FAU_BITS_LEN	    55, 48
#define CVMX_FAU_BITS_INEVAL	    35, 14
#define CVMX_FAU_BITS_TAGWAIT	    13, 13
#define CVMX_FAU_BITS_NOADD	    13, 13
#define CVMX_FAU_BITS_SIZE	    12, 11
#define CVMX_FAU_BITS_REGISTER	    10, 0
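
/*
 * For reference, the bit ranges above describe a single 64-bit FAU
 * command word (layout sketch derived directly from the macro values):
 *
 *   [63:56] scraddr   [55:48] len   [35:14] increment value
 *   [13]    tagwait / noadd         [12:11] size   [10:0] register
 */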

typedef enum {
	CVMX_FAU_OP_SIZE_8 = 0,
	CVMX_FAU_OP_SIZE_16 = 1,
	CVMX_FAU_OP_SIZE_32 = 2,
	CVMX_FAU_OP_SIZE_64 = 3
} cvmx_fau_op_size_t;

/**
 * Tagwait return definition. If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct {
	uint64_t error:1;
	int64_t value:63;
} cvmx_fau_tagwait64_t;

/**
 * Tagwait return definition. If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct {
	uint64_t error:1;
	int32_t value:31;
} cvmx_fau_tagwait32_t;

/**
 * Tagwait return definition. If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct {
	uint64_t error:1;
	int16_t value:15;
} cvmx_fau_tagwait16_t;

/**
 * Tagwait return definition. If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct {
	uint64_t error:1;
	int8_t value:7;
} cvmx_fau_tagwait8_t;

/**
 * Asynchronous tagwait return definition. If a timeout occurs,
 * the error bit will be set. Otherwise the value of the
 * register before the update will be returned.
 */
typedef union {
	uint64_t u64;
	struct {
		uint64_t invalid:1;
		uint64_t data:63;	/* unpredictable if invalid is set */
	} s;
} cvmx_fau_async_tagwait_result_t;

#ifdef __BIG_ENDIAN_BITFIELD
#define SWIZZLE_8  0
#define SWIZZLE_16 0
#define SWIZZLE_32 0
#else
#define SWIZZLE_8  0x7
#define SWIZZLE_16 0x6
#define SWIZZLE_32 0x4
#endif
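
/*
 * Illustrative note (not part of the API): the SWIZZLE_* constants flip the
 * low bits of a sub-64-bit register index when the core is not built with
 * big-endian bitfields, so the narrow access lands on the byte lane the
 * hardware expects within the 64-bit FAU word. For example, an 8-bit
 * register at byte offset 10 is accessed at offset 10 ^ 0x7 = 13 on a
 * little-endian core; 64-bit accesses need no swizzle.
 */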

/**
 * Builds a store I/O address for writing to the FAU
 *
 * @noadd:  0 = Store value is atomically added to the current value
 *		 1 = Store value is atomically written over the current value
 * @reg:    FAU atomic register to access. 0 <= reg < 2048.
 *		 - Step by 2 for 16 bit access.
 *		 - Step by 4 for 32 bit access.
 *		 - Step by 8 for 64 bit access.
 * Returns Address to store for atomic update
 */
static inline uint64_t __cvmx_fau_store_address(uint64_t noadd, uint64_t reg)
{
	return CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |
	       cvmx_build_bits(CVMX_FAU_BITS_NOADD, noadd) |
	       cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
}

/**
 * Builds an I/O address for accessing the FAU
 *
 * @tagwait: Should the atomic add wait for the current tag switch
 *		  operation to complete.
 *		  - 0 = Don't wait
 *		  - 1 = Wait for tag switch to complete
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *		  - Step by 2 for 16 bit access.
 *		  - Step by 4 for 32 bit access.
 *		  - Step by 8 for 64 bit access.
 * @value:   Signed value to add.
 *		  Note: When performing 32 and 64 bit access, only the low
 *		  22 bits are available.
 * Returns Address to read from for atomic update
 */
static inline uint64_t __cvmx_fau_atomic_address(uint64_t tagwait, uint64_t reg,
						 int64_t value)
{
	return CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |
	       cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |
	       cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |
	       cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
}

/**
 * Perform an atomic 64 bit add
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *		  - Step by 8 for 64 bit access.
 * @value:   Signed value to add.
 *		  Note: Only the low 22 bits are available.
 * Returns Value of the register before the update
 */
static inline int64_t cvmx_fau_fetch_and_add64(cvmx_fau_reg_64_t reg,
					       int64_t value)
{
	return cvmx_read64_int64(__cvmx_fau_atomic_address(0, reg, value));
}
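
/*
 * Minimal usage sketch (illustrative only; register index 0 is a placeholder
 * chosen for the example, not a reserved assignment):
 *
 *	cvmx_fau_reg_64_t counter = 0;
 *	int64_t old;
 *
 *	old = cvmx_fau_fetch_and_add64(counter, 1);
 *
 * old now holds the counter value before the increment. The 8/16/32 bit
 * variants below behave the same way, except that the register index is
 * stepped by the access size and swizzled on little-endian cores.
 */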

/**
 * Perform an atomic 32 bit add
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *		  - Step by 4 for 32 bit access.
 * @value:   Signed value to add.
 *		  Note: Only the low 22 bits are available.
 * Returns Value of the register before the update
 */
static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg,
					       int32_t value)
{
	reg ^= SWIZZLE_32;
	return cvmx_read64_int32(__cvmx_fau_atomic_address(0, reg, value));
}

/**
 * Perform an atomic 16 bit add
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *		  - Step by 2 for 16 bit access.
 * @value:   Signed value to add.
 * Returns Value of the register before the update
 */
static inline int16_t cvmx_fau_fetch_and_add16(cvmx_fau_reg_16_t reg,
					       int16_t value)
{
	reg ^= SWIZZLE_16;
	return cvmx_read64_int16(__cvmx_fau_atomic_address(0, reg, value));
}

/**
 * Perform an atomic 8 bit add
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 * @value:   Signed value to add.
 * Returns Value of the register before the update
 */
static inline int8_t cvmx_fau_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
{
	reg ^= SWIZZLE_8;
	return cvmx_read64_int8(__cvmx_fau_atomic_address(0, reg, value));
}

/**
 * Perform an atomic 64 bit add after the current tag switch
 * completes
 *
 * @reg:    FAU atomic register to access. 0 <= reg < 2048.
 *		 - Step by 8 for 64 bit access.
 * @value:  Signed value to add.
 *		 Note: Only the low 22 bits are available.
 * Returns If a timeout occurs, the error bit will be set. Otherwise
 *	   the value of the register before the update will be
 *	   returned
 */
static inline cvmx_fau_tagwait64_t
cvmx_fau_tagwait_fetch_and_add64(cvmx_fau_reg_64_t reg, int64_t value)
{
	union {
		uint64_t i64;
		cvmx_fau_tagwait64_t t;
	} result;
	result.i64 =
	    cvmx_read64_int64(__cvmx_fau_atomic_address(1, reg, value));
	return result.t;
}
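
/*
 * Sketch of consuming a tagwait result (illustrative only; register index 0
 * is a placeholder):
 *
 *	cvmx_fau_tagwait64_t r;
 *
 *	r = cvmx_fau_tagwait_fetch_and_add64(0, 1);
 *	if (r.error)
 *		;	// tag switch timed out; r.value is not meaningful
 *	else
 *		;	// r.value is the register value before the add
 */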

/**
 * Perform an atomic 32 bit add after the current tag switch
 * completes
 *
 * @reg:    FAU atomic register to access. 0 <= reg < 2048.
 *		 - Step by 4 for 32 bit access.
 * @value:  Signed value to add.
 *		 Note: Only the low 22 bits are available.
 * Returns If a timeout occurs, the error bit will be set. Otherwise
 *	   the value of the register before the update will be
 *	   returned
 */
static inline cvmx_fau_tagwait32_t
cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value)
{
	union {
		uint64_t i32;
		cvmx_fau_tagwait32_t t;
	} result;
	reg ^= SWIZZLE_32;
	result.i32 =
	    cvmx_read64_int32(__cvmx_fau_atomic_address(1, reg, value));
	return result.t;
}

/**
 * Perform an atomic 16 bit add after the current tag switch
 * completes
 *
 * @reg:    FAU atomic register to access. 0 <= reg < 2048.
 *		 - Step by 2 for 16 bit access.
 * @value:  Signed value to add.
 * Returns If a timeout occurs, the error bit will be set. Otherwise
 *	   the value of the register before the update will be
 *	   returned
 */
static inline cvmx_fau_tagwait16_t
cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value)
{
	union {
		uint64_t i16;
		cvmx_fau_tagwait16_t t;
	} result;
	reg ^= SWIZZLE_16;
	result.i16 =
	    cvmx_read64_int16(__cvmx_fau_atomic_address(1, reg, value));
	return result.t;
}

/**
 * Perform an atomic 8 bit add after the current tag switch
 * completes
 *
 * @reg:    FAU atomic register to access. 0 <= reg < 2048.
 * @value:  Signed value to add.
 * Returns If a timeout occurs, the error bit will be set. Otherwise
 *	   the value of the register before the update will be
 *	   returned
 */
static inline cvmx_fau_tagwait8_t
cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
{
	union {
		uint64_t i8;
		cvmx_fau_tagwait8_t t;
	} result;
	reg ^= SWIZZLE_8;
	result.i8 = cvmx_read64_int8(__cvmx_fau_atomic_address(1, reg, value));
	return result.t;
}

/**
 * Builds I/O data for async operations
 *
 * @scraddr: Scratch pad byte address to write to.  Must be 8 byte aligned
 * @value:   Signed value to add.
 *		  Note: When performing 32 and 64 bit access, only the low
 *		  22 bits are available.
 * @tagwait: Should the atomic add wait for the current tag switch
 *		  operation to complete.
 *		  - 0 = Don't wait
 *		  - 1 = Wait for tag switch to complete
 * @size:    The size of the operation:
 *		  - CVMX_FAU_OP_SIZE_8	(0) = 8 bits
 *		  - CVMX_FAU_OP_SIZE_16 (1) = 16 bits
 *		  - CVMX_FAU_OP_SIZE_32 (2) = 32 bits
 *		  - CVMX_FAU_OP_SIZE_64 (3) = 64 bits
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *		  - Step by 2 for 16 bit access.
 *		  - Step by 4 for 32 bit access.
 *		  - Step by 8 for 64 bit access.
 * Returns Data to write using cvmx_send_single
 */
static inline uint64_t __cvmx_fau_iobdma_data(uint64_t scraddr, int64_t value,
					      uint64_t tagwait,
					      cvmx_fau_op_size_t size,
					      uint64_t reg)
{
	return CVMX_FAU_LOAD_IO_ADDRESS |
	       cvmx_build_bits(CVMX_FAU_BITS_SCRADDR, scraddr >> 3) |
	       cvmx_build_bits(CVMX_FAU_BITS_LEN, 1) |
	       cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |
	       cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |
	       cvmx_build_bits(CVMX_FAU_BITS_SIZE, size) |
	       cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
}

/**
 * Perform an async atomic 64 bit add. The old value is
 * placed in the scratch memory at byte address scraddr.
 *
 * @scraddr: Scratch memory byte address to put response in.
 *		  Must be 8 byte aligned.
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *		  - Step by 8 for 64 bit access.
 * @value:   Signed value to add.
 *		  Note: Only the low 22 bits are available.
 * Returns Placed in the scratch pad register
 */
static inline void cvmx_fau_async_fetch_and_add64(uint64_t scraddr,
						  cvmx_fau_reg_64_t reg,
						  int64_t value)
{
	cvmx_send_single(__cvmx_fau_iobdma_data
			 (scraddr, value, 0, CVMX_FAU_OP_SIZE_64, reg));
}
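
/*
 * Sketch of the async pattern (illustrative only; assumes the companion
 * OCTEON helpers CVMX_SYNCIOBDMA and cvmx_scratch_read64() are available
 * from the related headers, and uses scratchpad offset 0 and register 0 as
 * placeholders):
 *
 *	cvmx_fau_async_fetch_and_add64(0, 0, 1);
 *	// ... do other work while the FAU operation is in flight ...
 *	CVMX_SYNCIOBDMA;			// wait for the IOBDMA response
 *	old = cvmx_scratch_read64(0);		// register value before the add
 */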

/**
 * Perform an async atomic 32 bit add. The old value is
 * placed in the scratch memory at byte address scraddr.
 *
 * @scraddr: Scratch memory byte address to put response in.
 *		  Must be 8 byte aligned.
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *		  - Step by 4 for 32 bit access.
 * @value:   Signed value to add.
 *		  Note: Only the low 22 bits are available.
 * Returns Placed in the scratch pad register
 */
static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr,
						  cvmx_fau_reg_32_t reg,
						  int32_t value)
{
	cvmx_send_single(__cvmx_fau_iobdma_data
			 (scraddr, value, 0, CVMX_FAU_OP_SIZE_32, reg));
}

/**
 * Perform an async atomic 16 bit add. The old value is
 * placed in the scratch memory at byte address scraddr.
 *
 * @scraddr: Scratch memory byte address to put response in.
 *		  Must be 8 byte aligned.
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *		  - Step by 2 for 16 bit access.
 * @value:   Signed value to add.
 * Returns Placed in the scratch pad register
 */
static inline void cvmx_fau_async_fetch_and_add16(uint64_t scraddr,
						  cvmx_fau_reg_16_t reg,
						  int16_t value)
{
	cvmx_send_single(__cvmx_fau_iobdma_data
			 (scraddr, value, 0, CVMX_FAU_OP_SIZE_16, reg));
}

/**
 * Perform an async atomic 8 bit add. The old value is
 * placed in the scratch memory at byte address scraddr.
 *
 * @scraddr: Scratch memory byte address to put response in.
 *		  Must be 8 byte aligned.
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 * @value:   Signed value to add.
 * Returns Placed in the scratch pad register
 */
static inline void cvmx_fau_async_fetch_and_add8(uint64_t scraddr,
						 cvmx_fau_reg_8_t reg,
						 int8_t value)
{
	cvmx_send_single(__cvmx_fau_iobdma_data
			 (scraddr, value, 0, CVMX_FAU_OP_SIZE_8, reg));
}

/**
 * Perform an async atomic 64 bit add after the current tag
 * switch completes.
 *
 * @scraddr: Scratch memory byte address to put response in.  Must be
 *	     8 byte aligned.  If a timeout occurs, the error bit (63)
 *	     will be set. Otherwise the value of the register before
 *	     the update will be returned
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *		  - Step by 8 for 64 bit access.
 * @value:   Signed value to add.
 *		  Note: Only the low 22 bits are available.
 * Returns Placed in the scratch pad register
 */
static inline void cvmx_fau_async_tagwait_fetch_and_add64(uint64_t scraddr,
							  cvmx_fau_reg_64_t reg,
							  int64_t value)
{
	cvmx_send_single(__cvmx_fau_iobdma_data
			 (scraddr, value, 1, CVMX_FAU_OP_SIZE_64, reg));
}

/**
 * Perform an async atomic 32 bit add after the current tag
 * switch completes.
 *
 * @scraddr: Scratch memory byte address to put response in.  Must be
 *	     8 byte aligned.  If a timeout occurs, the error bit (63)
 *	     will be set. Otherwise the value of the register before
 *	     the update will be returned
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *		  - Step by 4 for 32 bit access.
 * @value:   Signed value to add.
 *		  Note: Only the low 22 bits are available.
 * Returns Placed in the scratch pad register
 */
static inline void cvmx_fau_async_tagwait_fetch_and_add32(uint64_t scraddr,
							  cvmx_fau_reg_32_t reg,
							  int32_t value)
{
	cvmx_send_single(__cvmx_fau_iobdma_data
			 (scraddr, value, 1, CVMX_FAU_OP_SIZE_32, reg));
}

/**
 * Perform an async atomic 16 bit add after the current tag
 * switch completes.
 *
 * @scraddr: Scratch memory byte address to put response in.  Must be
 *	     8 byte aligned.  If a timeout occurs, the error bit (63)
 *	     will be set. Otherwise the value of the register before
 *	     the update will be returned
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *		  - Step by 2 for 16 bit access.
 * @value:   Signed value to add.
 *
 * Returns Placed in the scratch pad register
 */
static inline void cvmx_fau_async_tagwait_fetch_and_add16(uint64_t scraddr,
							  cvmx_fau_reg_16_t reg,
							  int16_t value)
{
	cvmx_send_single(__cvmx_fau_iobdma_data
			 (scraddr, value, 1, CVMX_FAU_OP_SIZE_16, reg));
}

/**
 * Perform an async atomic 8 bit add after the current tag
 * switch completes.
 *
 * @scraddr: Scratch memory byte address to put response in.  Must be
 *	     8 byte aligned.  If a timeout occurs, the error bit (63)
 *	     will be set. Otherwise the value of the register before
 *	     the update will be returned
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 * @value:   Signed value to add.
 *
 * Returns Placed in the scratch pad register
 */
static inline void cvmx_fau_async_tagwait_fetch_and_add8(uint64_t scraddr,
							 cvmx_fau_reg_8_t reg,
							 int8_t value)
{
	cvmx_send_single(__cvmx_fau_iobdma_data
			 (scraddr, value, 1, CVMX_FAU_OP_SIZE_8, reg));
}

/**
 * Perform an atomic 64 bit add
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *		  - Step by 8 for 64 bit access.
 * @value:   Signed value to add.
 */
static inline void cvmx_fau_atomic_add64(cvmx_fau_reg_64_t reg, int64_t value)
{
	cvmx_write64_int64(__cvmx_fau_store_address(0, reg), value);
}

/**
 * Perform an atomic 32 bit add
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *		  - Step by 4 for 32 bit access.
 * @value:   Signed value to add.
 */
static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value)
{
	reg ^= SWIZZLE_32;
	cvmx_write64_int32(__cvmx_fau_store_address(0, reg), value);
}

/**
 * Perform an atomic 16 bit add
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *		  - Step by 2 for 16 bit access.
 * @value:   Signed value to add.
 */
static inline void cvmx_fau_atomic_add16(cvmx_fau_reg_16_t reg, int16_t value)
{
	reg ^= SWIZZLE_16;
	cvmx_write64_int16(__cvmx_fau_store_address(0, reg), value);
}

/**
 * Perform an atomic 8 bit add
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 * @value:   Signed value to add.
 */
static inline void cvmx_fau_atomic_add8(cvmx_fau_reg_8_t reg, int8_t value)
{
	reg ^= SWIZZLE_8;
	cvmx_write64_int8(__cvmx_fau_store_address(0, reg), value);
}

/**
 * Perform an atomic 64 bit write
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *		  - Step by 8 for 64 bit access.
 * @value:   Signed value to write.
 */
static inline void cvmx_fau_atomic_write64(cvmx_fau_reg_64_t reg, int64_t value)
{
	cvmx_write64_int64(__cvmx_fau_store_address(1, reg), value);
}
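
/*
 * Typical use (illustrative only): a store built with noadd=1 replaces the
 * current register contents instead of adding to them, so the write variants
 * serve to initialize or reset a register that is later updated with the add
 * variants, e.g.:
 *
 *	cvmx_fau_atomic_write64(counter, 0);	// reset the placeholder register
 *	cvmx_fau_atomic_add64(counter, 1);	// later increments
 */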

/**
 * Perform an atomic 32 bit write
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *		  - Step by 4 for 32 bit access.
 * @value:   Signed value to write.
 */
static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value)
{
	reg ^= SWIZZLE_32;
	cvmx_write64_int32(__cvmx_fau_store_address(1, reg), value);
}

/**
 * Perform an atomic 16 bit write
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *		  - Step by 2 for 16 bit access.
 * @value:   Signed value to write.
 */
static inline void cvmx_fau_atomic_write16(cvmx_fau_reg_16_t reg, int16_t value)
{
	reg ^= SWIZZLE_16;
	cvmx_write64_int16(__cvmx_fau_store_address(1, reg), value);
}

/**
 * Perform an atomic 8 bit write
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 * @value:   Signed value to write.
 */
static inline void cvmx_fau_atomic_write8(cvmx_fau_reg_8_t reg, int8_t value)
{
	reg ^= SWIZZLE_8;
	cvmx_write64_int8(__cvmx_fau_store_address(1, reg), value);
}

#endif /* __CVMX_FAU_H__ */