/* SPDX-License-Identifier:    GPL-2.0
 *
 * Copyright (C) 2018 Marvell International Ltd.
 */
5 
6 #ifndef __NIX_H__
7 #define	__NIX_H__
8 
9 #include <asm/arch/csrs/csrs-npa.h>
10 #include <asm/arch/csrs/csrs-nix.h>
11 #include "rvu.h"
12 
13 /** Maximum number of LMACs supported */
14 #define MAX_LMAC			12
15 
16 /* NIX RX action operation*/
17 #define NIX_RX_ACTIONOP_DROP		(0x0ull)
18 #define NIX_RX_ACTIONOP_UCAST		(0x1ull)
19 #define NIX_RX_ACTIONOP_UCAST_IPSEC	(0x2ull)
20 #define NIX_RX_ACTIONOP_MCAST		(0x3ull)
21 #define NIX_RX_ACTIONOP_RSS		(0x4ull)
22 
23 /* NIX TX action operation*/
24 #define NIX_TX_ACTIONOP_DROP		(0x0ull)
25 #define NIX_TX_ACTIONOP_UCAST_DEFAULT	(0x1ull)
26 #define NIX_TX_ACTIONOP_UCAST_CHAN	(0x2ull)
27 #define NIX_TX_ACTIONOP_MCAST		(0x3ull)
28 #define NIX_TX_ACTIONOP_DROP_VIOL	(0x5ull)
29 
30 #define NIX_INTF_RX			0
31 #define NIX_INTF_TX			1
32 
33 #define NIX_INTF_TYPE_CGX		0
34 #define NIX_INTF_TYPE_LBK		1
35 #define NIX_MAX_HW_MTU			9212
36 #define NIX_MIN_HW_MTU			40
37 #define MAX_MTU				1536
38 
39 #define NPA_POOL_COUNT			3
40 #define NPA_AURA_COUNT(x)		(1ULL << ((x) + 6))
41 #define NPA_POOL_RX			0ULL
42 #define NPA_POOL_TX			1ULL
43 #define NPA_POOL_SQB			2ULL
44 #define RQ_QLEN				Q_COUNT(Q_SIZE_1K)
45 #define SQ_QLEN				Q_COUNT(Q_SIZE_1K)
46 #define SQB_QLEN			Q_COUNT(Q_SIZE_16)
47 
48 #define NIX_CQ_RX			0ULL
49 #define NIX_CQ_TX			1ULL
50 #define NIX_CQ_COUNT			2ULL
51 #define NIX_CQE_SIZE_W16		(16 * sizeof(u64))
52 #define NIX_CQE_SIZE_W64		(64 * sizeof(u64))
53 
54 /** Size of aura hardware context */
55 #define NPA_AURA_HW_CTX_SIZE		48
56 /** Size of pool hardware context */
57 #define NPA_POOL_HW_CTX_SIZE		64
58 
59 #define NPA_DEFAULT_PF_FUNC		0xffff
60 
61 #define NIX_CHAN_CGX_LMAC_CHX(a, b, c)	(0x800 + 0x100 * (a) + 0x10 * (b) + (c))
62 #define NIX_LINK_CGX_LMAC(a, b)		(0 + 4 * (a) + (b))
63 #define NIX_LINK_LBK(a)			(12 + (a))
64 #define NIX_CHAN_LBK_CHX(a, b)		(0 + 0x100 * (a) + (b))
65 #define MAX_LMAC_PKIND			12
66 
67 /** Number of Admin queue entries */
68 #define AQ_RING_SIZE	Q_COUNT(Q_SIZE_16)
69 
70 /** Each completion queue contains 256 entries, see NIC_CQ_CTX_S[qsize] */
71 #define CQS_QSIZE			Q_SIZE_256
72 #define CQ_ENTRIES			Q_COUNT(CQS_QSIZE)
73 /**
74  * Each completion queue entry contains 128 bytes, see
75  * NIXX_AF_LFX_CFG[xqe_size]
76  */
77 #define CQ_ENTRY_SIZE			NIX_CQE_SIZE_W16
78 
/**
 * Encoded aura size. The entry count for an encoding x is produced by
 * NPA_AURA_COUNT(x) = 1ULL << ((x) + 6), so NPA_AURA_SZ_128 (= 1)
 * corresponds to 128 entries.
 * NOTE(review): by that formula NPA_AURA_SZ_0 would yield 64 despite its
 * name — presumably 0 means "disabled"; confirm against the hardware spec.
 */
enum npa_aura_size {
	NPA_AURA_SZ_0,
	NPA_AURA_SZ_128,
	NPA_AURA_SZ_256,
	NPA_AURA_SZ_512,
	NPA_AURA_SZ_1K,
	NPA_AURA_SZ_2K,
	NPA_AURA_SZ_4K,
	NPA_AURA_SZ_8K,
	NPA_AURA_SZ_16K,
	NPA_AURA_SZ_32K,
	NPA_AURA_SZ_64K,
	NPA_AURA_SZ_128K,
	NPA_AURA_SZ_256K,
	NPA_AURA_SZ_512K,
	NPA_AURA_SZ_1M,
	NPA_AURA_SZ_MAX,
};
97 
98 #define NPA_AURA_SIZE_DEFAULT		NPA_AURA_SZ_128
99 
/* NIX Transmit schedulers */
/**
 * Transmit scheduler hierarchy levels. SMQ and MDQ deliberately share
 * value 0 — presumably two names for the same bottom hardware level;
 * TL1 (0x4) is the highest-numbered level. NIX_TXSCH_LVL_CNT is the
 * number of distinct levels, not a level itself.
 */
enum nix_scheduler {
	NIX_TXSCH_LVL_SMQ = 0x0,
	NIX_TXSCH_LVL_MDQ = 0x0,
	NIX_TXSCH_LVL_TL4 = 0x1,
	NIX_TXSCH_LVL_TL3 = 0x2,
	NIX_TXSCH_LVL_TL2 = 0x3,
	NIX_TXSCH_LVL_TL1 = 0x4,
	NIX_TXSCH_LVL_CNT = 0x5,
};
110 
111 struct cgx;
112 
/** Packet and byte counters for one traffic direction (see struct nix
 * tx_stats/rx_stats).
 */
struct nix_stats {
	u64	num_packets;
	u64	num_bytes;
};
117 
118 struct nix;
119 struct lmac;
120 
/** NPA admin function (AF) state */
struct npa_af {
	void __iomem		*npa_af_base;	/** NPA AF register base */
	struct admin_queue	aq;		/** NPA admin queue */
	u32			aura;	/** aura id allocator cursor — TODO confirm */
};
126 
/** Per-LF NPA (buffer pool allocator) state */
struct npa {
	struct npa_af		*npa_af;	/** back-pointer to the NPA AF */
	void __iomem		*npa_base;	/** NPA LF register base */
	void __iomem		*npc_base;	/** NPC register base */
	void __iomem		*lmt_base;	/** LMT region base — TODO confirm use */
	/** Hardware aura context */
	void			*aura_ctx;
	/** Hardware pool context */
	void			*pool_ctx[NPA_POOL_COUNT];
	/** Per-pool stack memory (hardware free-pointer stack) */
	void			*pool_stack[NPA_POOL_COUNT];
	/** Per-pool arrays of buffer virtual addresses */
	void                    **buffers[NPA_POOL_COUNT];
	/** Pages backing each pool's stack */
	u32                     pool_stack_pages[NPA_POOL_COUNT];
	u32			pool_stack_pointers;
	/** Queue length (buffer count) per pool */
	u32			q_len[NPA_POOL_COUNT];
	/** Buffer size per pool */
	u32			buf_size[NPA_POOL_COUNT];
	u32			stack_pages[NPA_POOL_COUNT];
};
144 
/** NIX admin function (AF) state */
struct nix_af {
	struct udevice			*dev;
	/** Per-LMAC NIX LF state, up to MAX_LMAC entries */
	struct nix			*lmacs[MAX_LMAC];
	struct npa_af			*npa_af;
	void __iomem			*nix_af_base;	/** NIX AF register base */
	void __iomem			*npc_af_base;	/** NPC AF register base */
	struct admin_queue		aq;	/** NIX admin queue */
	u8				num_lmacs;
	s8				index;
	u8				xqe_size;	/** encoded CQE size — TODO confirm encoding */
	u32				sqb_size;	/** send queue buffer size in bytes */
	u32				qints;	/** number of queue interrupts */
	u32				cints;	/** number of completion interrupts */
	/* Hardware context sizes (bytes) for each context type */
	u32				sq_ctx_sz;
	u32				rq_ctx_sz;
	u32				cq_ctx_sz;
	u32				rsse_ctx_sz;
	u32				cint_ctx_sz;
	u32				qint_ctx_sz;
};
165 
/** Software transmit descriptor: send header plus scatter/gather entry
 * and up to three segment DMA addresses.
 */
struct nix_tx_dr {
	union nix_send_hdr_s	hdr;	/** NIX send header subdescriptor */
	union nix_send_sg_s	tx_sg;	/** scatter/gather subdescriptor */
	dma_addr_t			sg1_addr;	/** DMA address of segment 1 */
	dma_addr_t			sg2_addr;	/** DMA address of segment 2 */
	dma_addr_t			sg3_addr;	/** DMA address of segment 3 */
	u64				in_use;	/** nonzero while queued — TODO confirm */
};
174 
/** Receive descriptor layout as seen in a CQE: header, parse result and
 * scatter/gather entry.
 */
struct nix_rx_dr {
	union nix_cqe_hdr_s hdr;	/** completion queue entry header */
	union nix_rx_parse_s rx_parse;	/** packet parse result */
	union nix_rx_sg_s rx_sg;	/** scatter/gather entry */
};
180 
/** Per-LF NIX state — one instance per network interface/LMAC */
struct nix {
	struct udevice			*dev;
	struct eth_device		*netdev;
	struct nix_af			*nix_af;	/** owning admin function */
	struct npa			*npa;	/** buffer pools for this LF */
	struct lmac			*lmac;	/** underlying CGX LMAC */
	union nix_cint_hw_s	*cint_base;	/** completion interrupt contexts */
	union nix_cq_ctx_s		*cq_ctx_base;	/** CQ hardware contexts */
	union nix_qint_hw_s	*qint_base;	/** queue interrupt contexts */
	union nix_rq_ctx_s		*rq_ctx_base;	/** RQ hardware contexts */
	union nix_rsse_s		*rss_base;	/** RSS table entries */
	union nix_sq_ctx_s		*sq_ctx_base;	/** SQ hardware contexts */
	void				*cqe_base;	/** CQ entry ring memory */
	struct qmem			sq;
	struct qmem			cq[NIX_CQ_COUNT];	/** RX and TX CQs */
	struct qmem			rq;
	struct qmem			rss;
	struct qmem			cq_ints;
	struct qmem			qints;
	char				name[16];
	void __iomem			*nix_base;	/** PF reg base */
	void __iomem			*npc_base;
	void __iomem			*lmt_base;
	struct nix_stats		tx_stats;
	struct nix_stats		rx_stats;
	u32				aura;
	int				pknd;	/** port kind — presumably NPC pkind; verify */
	int				lf;	/** local function index */
	int				pf;	/** physical function index */
	u16				pf_func;	/** PF/func id — TODO confirm encoding */
	u32				rq_cnt;	/** receive queues count */
	u32				sq_cnt;	/** send queues count */
	u32				cq_cnt;	/** completion queues count */
	u16				rss_sz;
	u16				sqb_size;	/** send queue buffer size */
	u8				rss_grps;	/** number of RSS groups */
	u8				xqe_sz;	/** CQE size, see NIX_CQE_SIZE_* */
};
219 
/** Admin-queue operands for disabling a CQ: result area, new context and
 * a second context — presumably a field-select mask for the AQ write op;
 * verify against the AQ instruction format.
 */
struct nix_aq_cq_dis {
	union nix_aq_res_s	resp ALIGNED;
	union nix_cq_ctx_s	cq ALIGNED;
	union nix_cq_ctx_s	mcq ALIGNED;
};
225 
/** Admin-queue operands for disabling an RQ: result area, new context and
 * a second context — presumably a field-select mask; verify.
 */
struct nix_aq_rq_dis {
	union nix_aq_res_s	resp ALIGNED;
	union nix_rq_ctx_s	rq ALIGNED;
	union nix_rq_ctx_s	mrq ALIGNED;
};
231 
/** Admin-queue operands for disabling an SQ: result area, new context and
 * a second context — presumably a field-select mask; verify.
 */
struct nix_aq_sq_dis {
	union nix_aq_res_s	resp ALIGNED;
	union nix_sq_ctx_s	sq ALIGNED;
	union nix_sq_ctx_s	msq ALIGNED;
};
237 
/** Admin-queue operands for a CQ context request: result area + context */
struct nix_aq_cq_request {
	union nix_aq_res_s	resp ALIGNED;
	union nix_cq_ctx_s	cq ALIGNED;
};
242 
/** Admin-queue operands for an RQ context request: result area + context */
struct nix_aq_rq_request {
	union nix_aq_res_s	resp ALIGNED;
	union nix_rq_ctx_s	rq ALIGNED;
};
247 
/** Admin-queue operands for an SQ context request: result area + context */
struct nix_aq_sq_request {
	union nix_aq_res_s	resp ALIGNED;
	union nix_sq_ctx_s	sq ALIGNED;
};
252 
/**
 * Read a NIX AF CSR and trace the access.
 *
 * @param nix_af	NIX AF state carrying the register base
 * @param offset	byte offset from the AF register base
 * @return the 64-bit register value
 */
static inline u64 nix_af_reg_read(struct nix_af *nix_af, u64 offset)
{
	void __iomem *addr = nix_af->nix_af_base + offset;
	u64 val = readq(addr);

	debug("%s reg %p val %llx\n", __func__, addr, val);
	return val;
}
261 
/**
 * Write a NIX AF CSR, tracing the access first.
 *
 * @param nix_af	NIX AF state carrying the register base
 * @param offset	byte offset from the AF register base
 * @param val		64-bit value to write
 */
static inline void nix_af_reg_write(struct nix_af *nix_af, u64 offset,
				    u64 val)
{
	void __iomem *addr = nix_af->nix_af_base + offset;

	debug("%s reg %p val %llx\n", __func__, addr, val);
	writeq(val, addr);
}
269 
/**
 * Read a NIX PF CSR and trace the access.
 *
 * @param nix		NIX LF state carrying the PF register base
 * @param offset	byte offset from the PF register base
 * @return the 64-bit register value
 */
static inline u64 nix_pf_reg_read(struct nix *nix, u64 offset)
{
	void __iomem *addr = nix->nix_base + offset;
	u64 val = readq(addr);

	debug("%s reg %p val %llx\n", __func__, addr,
	      val);
	return val;
}
278 
/**
 * Write a NIX PF CSR, tracing the access first.
 *
 * @param nix		NIX LF state carrying the PF register base
 * @param offset	byte offset from the PF register base
 * @param val		64-bit value to write
 */
static inline void nix_pf_reg_write(struct nix *nix, u64 offset,
				    u64 val)
{
	void __iomem *addr = nix->nix_base + offset;

	debug("%s reg %p val %llx\n", __func__, addr,
	      val);
	writeq(val, addr);
}
286 
/**
 * Read an NPA AF CSR and trace the access.
 *
 * @param npa_af	NPA AF state carrying the register base
 * @param offset	byte offset from the AF register base
 * @return the 64-bit register value
 */
static inline u64 npa_af_reg_read(struct npa_af *npa_af, u64 offset)
{
	void __iomem *addr = npa_af->npa_af_base + offset;
	u64 val = readq(addr);

	debug("%s reg %p val %llx\n", __func__, addr,
	      val);
	return val;
}
295 
/**
 * Write an NPA AF CSR, tracing the access first.
 *
 * @param npa_af	NPA AF state carrying the register base
 * @param offset	byte offset from the AF register base
 * @param val		64-bit value to write
 */
static inline void npa_af_reg_write(struct npa_af *npa_af, u64 offset,
				    u64 val)
{
	void __iomem *addr = npa_af->npa_af_base + offset;

	debug("%s reg %p val %llx\n", __func__, addr,
	      val);
	writeq(val, addr);
}
303 
/**
 * Read an NPC AF CSR and trace the access.
 *
 * @param nix_af	NIX AF state carrying the NPC register base
 * @param offset	byte offset from the NPC AF register base
 * @return the 64-bit register value
 */
static inline u64 npc_af_reg_read(struct nix_af *nix_af, u64 offset)
{
	void __iomem *addr = nix_af->npc_af_base + offset;
	u64 val = readq(addr);

	debug("%s reg %p val %llx\n", __func__, addr,
	      val);
	return val;
}
312 
/**
 * Write an NPC AF CSR, tracing the access first.
 *
 * @param nix_af	NIX AF state carrying the NPC register base
 * @param offset	byte offset from the NPC AF register base
 * @param val		64-bit value to write
 */
static inline void npc_af_reg_write(struct nix_af *nix_af, u64 offset,
				    u64 val)
{
	void __iomem *addr = nix_af->npc_af_base + offset;

	debug("%s reg %p val %llx\n", __func__, addr,
	      val);
	writeq(val, addr);
}
320 
321 int npa_attach_aura(struct nix_af *nix_af, int lf,
322 		    const union npa_aura_s *desc, u32 aura_id);
323 int npa_attach_pool(struct nix_af *nix_af, int lf,
324 		    const union npa_pool_s *desc, u32 pool_id);
325 int npa_af_setup(struct npa_af *npa_af);
326 int npa_af_shutdown(struct npa_af *npa_af);
327 int npa_lf_setup(struct nix *nix);
328 int npa_lf_shutdown(struct nix *nix);
329 int npa_lf_admin_setup(struct npa *npa, int lf, dma_addr_t aura_base);
330 int npa_lf_admin_shutdown(struct nix_af *nix_af, int lf, u32 pool_count);
331 
332 int npc_lf_admin_setup(struct nix *nix);
333 int npc_af_shutdown(struct nix_af *nix_af);
334 
335 int nix_af_setup(struct nix_af *nix_af);
336 int nix_af_shutdown(struct nix_af *nix_af);
337 int nix_lf_setup(struct nix *nix);
338 int nix_lf_shutdown(struct nix *nix);
339 struct nix *nix_lf_alloc(struct udevice *dev);
340 int nix_lf_admin_setup(struct nix *nix);
341 int nix_lf_admin_shutdown(struct nix_af *nix_af, int lf,
342 			  u32 cq_count, u32 rq_count, u32 sq_count);
343 struct rvu_af *get_af(void);
344 
345 int nix_lf_setup_mac(struct udevice *dev);
346 int nix_lf_read_rom_mac(struct udevice *dev);
347 void nix_lf_halt(struct udevice *dev);
348 int nix_lf_free_pkt(struct udevice *dev, uchar *pkt, int pkt_len);
349 int nix_lf_recv(struct udevice *dev, int flags, uchar **packetp);
350 int nix_lf_init(struct udevice *dev);
351 int nix_lf_xmit(struct udevice *dev, void *pkt, int pkt_len);
352 
353 #endif /* __NIX_H__ */
354