/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * Interface to the hardware Free Pool Allocator on Octeon chips.
 * These are the legacy models, i.e. prior to CN78XX/CN76XX.
 */

#ifndef __CVMX_FPA1_HW_H__
#define __CVMX_FPA1_HW_H__

#include "cvmx-scratch.h"
#include "cvmx-fpa-defs.h"
#include "cvmx-fpa3.h"

/* Legacy pool range is 0..7, plus pool 8 on CN68XX */
typedef int cvmx_fpa1_pool_t;

#define CVMX_FPA1_NUM_POOLS    8
#define CVMX_FPA1_INVALID_POOL ((cvmx_fpa1_pool_t)-1)
#define CVMX_FPA1_NAME_SIZE    16

/**
 * Structure describing the data format used for stores to the FPA.
 */
typedef union {
	u64 u64;
	struct {
		u64 scraddr : 8; /* Scratchpad destination, as a 64-bit word index */
		u64 len : 8;	 /* Number of words to transfer (1 for an FPA alloc) */
		u64 did : 8;	 /* Device ID selecting the FPA and pool */
		u64 addr : 40;	 /* I/O address; unused (0) for FPA allocations */
	} s;
} cvmx_fpa1_iobdma_data_t;

/**
 * Allocate or reserve the specified FPA pool.
 *
 * @param pool	  FPA pool to allocate/reserve. If -1, an empty pool
 *                is selected and allocated.
 * @return        Allocated pool number or CVMX_FPA1_INVALID_POOL
 *                if the pool could not be allocated.
 */
cvmx_fpa1_pool_t cvmx_fpa1_reserve_pool(cvmx_fpa1_pool_t pool);

/**
 * Free the specified FPA pool.
 * @param pool	   Pool to free
 * @return         0 on success, -1 on failure
 */
int cvmx_fpa1_release_pool(cvmx_fpa1_pool_t pool);
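
/*
 * Example usage (sketch): dynamically reserve any free pool and
 * release it again when done.
 *
 *	cvmx_fpa1_pool_t pool = cvmx_fpa1_reserve_pool(-1);
 *
 *	if (pool == CVMX_FPA1_INVALID_POOL)
 *		return -1;
 *	...
 *	cvmx_fpa1_release_pool(pool);
 */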

/**
 * Free a block back to the specified FPA pool.  Prior memory writes
 * are flushed before the block is freed.
 *
 * @param ptr              Block to free
 * @param pool             Pool to return the block to
 * @param num_cache_lines  Number of cache lines not to write back
 */
static inline void cvmx_fpa1_free(void *ptr, cvmx_fpa1_pool_t pool, u64 num_cache_lines)
{
	cvmx_addr_t newptr;

	newptr.u64 = cvmx_ptr_to_phys(ptr);
	newptr.sfilldidspace.didspace = CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
	/* Make sure that any previous writes to memory go out before we free
	 * this buffer.  This also serves as a barrier to prevent GCC from
	 * reordering operations to after the free.
	 */
	CVMX_SYNCWS;
	/* value written is number of cache lines not written back */
	cvmx_write_io(newptr.u64, num_cache_lines);
}
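
/*
 * Example usage (sketch): return a buffer to its pool.  Per the comment
 * above, num_cache_lines is the count of cache lines that need not be
 * written back; 0 is the conservative choice.
 *
 *	cvmx_fpa1_free(buf, pool, 0);
 */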

/**
 * Free a block back to the specified FPA pool without flushing prior
 * memory writes first; the caller is responsible for memory ordering.
 *
 * @param ptr              Block to free
 * @param pool             Pool to return the block to
 * @param num_cache_lines  Number of cache lines not to write back
 */
static inline void cvmx_fpa1_free_nosync(void *ptr, cvmx_fpa1_pool_t pool,
					 unsigned int num_cache_lines)
{
	cvmx_addr_t newptr;

	newptr.u64 = cvmx_ptr_to_phys(ptr);
	newptr.sfilldidspace.didspace = CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
	/* Prevent GCC from reordering around free */
	asm volatile("" : : : "memory");
	/* value written is number of cache lines not written back */
	cvmx_write_io(newptr.u64, num_cache_lines);
}

/**
 * Enable the FPA for use. Must be performed after any CSR
 * configuration but before any other FPA functions.
 */
static inline void cvmx_fpa1_enable(void)
{
	cvmx_fpa_ctl_status_t status;

	status.u64 = csr_rd(CVMX_FPA_CTL_STATUS);
	if (status.s.enb) {
		/*
		 * CN68XXP1 should not reset the FPA (doing so may break
		 * the SSO), so we may end up enabling it more than once.
		 * Just return and don't spew messages.
		 */
		return;
	}

	status.u64 = 0;
	status.s.enb = 1;
	csr_wr(CVMX_FPA_CTL_STATUS, status.u64);
}
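
/*
 * Typical bring-up order (sketch), following the requirement stated
 * above: program the pool configuration CSRs first, then enable the
 * unit, then start allocating.
 *
 *	... configure pool CSRs ...
 *	cvmx_fpa1_enable();
 *	buf = cvmx_fpa1_alloc(pool);
 */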

/**
 * Reset FPA to disable. Make sure buffers from all FPA pools are freed
 * before disabling FPA.
 */
static inline void cvmx_fpa1_disable(void)
{
	cvmx_fpa_ctl_status_t status;

	if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1))
		return;

	status.u64 = csr_rd(CVMX_FPA_CTL_STATUS);
	status.s.reset = 1;
	csr_wr(CVMX_FPA_CTL_STATUS, status.u64);
}
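
/*
 * Example shutdown order (sketch): every outstanding buffer goes back
 * to its pool before the FPA is reset.
 *
 *	cvmx_fpa1_free(buf, pool, 0);
 *	cvmx_fpa1_disable();
 */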

/**
 * Allocate a block from the specified FPA pool.  If the pop returns
 * NULL while the pool still reports buffers available, the allocation
 * is retried after a short delay.
 *
 * @param pool  Pool to allocate from
 * @return      Pointer to the block or NULL if the pool is empty
 */
static inline void *cvmx_fpa1_alloc(cvmx_fpa1_pool_t pool)
{
	u64 address;

	for (;;) {
		address = csr_rd(CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool)));
		if (cvmx_likely(address)) {
			return cvmx_phys_to_ptr(address);
		} else {
			if (csr_rd(CVMX_FPA_QUEX_AVAILABLE(pool)) > 0)
				udelay(50);
			else
				return NULL;
		}
	}
}
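
/*
 * Example usage (sketch): synchronous allocate/use/free round trip.
 *
 *	void *buf = cvmx_fpa1_alloc(pool);
 *
 *	if (!buf)
 *		return NULL;
 *	... use buf ...
 *	cvmx_fpa1_free(buf, pool, 0);
 */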

/**
 * Asynchronously get a new block from the FPA
 * @INTERNAL
 *
 * The result of cvmx_fpa1_async_alloc() may be retrieved using
 * cvmx_fpa1_async_alloc_finish().
 *
 * @param scr_addr Local scratch address to put the response in.  This is
 *		   a byte address but must be 8 byte aligned.
 * @param pool     Pool to get the block from
 */
static inline void cvmx_fpa1_async_alloc(u64 scr_addr, cvmx_fpa1_pool_t pool)
{
	cvmx_fpa1_iobdma_data_t data;

	/* Hardware only uses 64 bit aligned locations, so convert from byte
	 * address to 64-bit index
	 */
	data.u64 = 0ull;
	data.s.scraddr = scr_addr >> 3;
	data.s.len = 1;
	data.s.did = CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool);
	data.s.addr = 0;

	cvmx_scratch_write64(scr_addr, 0ull);
	CVMX_SYNCW;
	cvmx_send_single(data.u64);
}

/**
 * Retrieve the result of cvmx_fpa1_async_alloc()
 * @INTERNAL
 *
 * @param scr_addr The local scratch address.  Must be the same value
 * passed to cvmx_fpa1_async_alloc().
 *
 * @param pool Pool the block came from.  Must be the same value
 * passed to cvmx_fpa1_async_alloc().
 *
 * @return Pointer to the block or NULL on failure
 */
static inline void *cvmx_fpa1_async_alloc_finish(u64 scr_addr, cvmx_fpa1_pool_t pool)
{
	u64 address;

	CVMX_SYNCIOBDMA;

	address = cvmx_scratch_read64(scr_addr);
	if (cvmx_likely(address))
		return cvmx_phys_to_ptr(address);
	else
		return cvmx_fpa1_alloc(pool);
}
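
/*
 * Example usage (sketch): overlap allocation latency with other work.
 * "scr" stands for any 8-byte-aligned scratchpad offset owned by the
 * caller.
 *
 *	cvmx_fpa1_async_alloc(scr, pool);
 *	... do other work while the IOBDMA completes ...
 *	void *buf = cvmx_fpa1_async_alloc_finish(scr, pool);
 */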

/**
 * Get the number of blocks currently available in the specified pool.
 */
static inline u64 cvmx_fpa1_get_available(cvmx_fpa1_pool_t pool)
{
	return csr_rd(CVMX_FPA_QUEX_AVAILABLE(pool));
}

#endif /* __CVMX_FPA1_HW_H__ */