1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Marvell RVU Admin Function driver
3  *
4  * Copyright (C) 2018 Marvell.
5  */
6 
7 #ifndef COMMON_H
8 #define COMMON_H
9 
10 #include "rvu_struct.h"
11 
#define OTX2_ALIGN			128  /* Align to cacheline */

/* Queue size encodings: code N selects 16 * 4^N entries
 * (see Q_COUNT() / Q_SIZE() below).
 */
#define Q_SIZE_16		0ULL /* 16 entries */
#define Q_SIZE_64		1ULL /* 64 entries */
#define Q_SIZE_256		2ULL
#define Q_SIZE_1K		3ULL
#define Q_SIZE_4K		4ULL
#define Q_SIZE_16K		5ULL
#define Q_SIZE_64K		6ULL
#define Q_SIZE_256K		7ULL
#define Q_SIZE_1M		8ULL /* Million entries */
#define Q_SIZE_MIN		Q_SIZE_16
#define Q_SIZE_MAX		Q_SIZE_1M
25 
/* Number of entries for queue size code 'x' (Q_SIZE_*): 16 * 4^x.
 * 'x' is parenthesized so expressions like Q_COUNT(a + b) expand
 * correctly (unparenthesized, '2 * a + b' would be computed instead).
 */
#define Q_COUNT(x)		(16ULL << (2 * (x)))
/* Inverse of Q_COUNT(): size code for a queue of 'x' entries, where
 * 'n' is log2 of the minimum entry count (4 for the 16-entry minimum).
 */
#define Q_SIZE(x, n)		((ilog2(x) - (n)) / 2)
28 
/* Admin queue info */

/* Since we intend to add only one instruction at a time,
 * keep queue size to its minimum.
 */
#define AQ_SIZE			Q_SIZE_16
/* HW head & tail pointer mask (20-bit pointers) */
#define AQ_PTR_MASK		0xFFFFF
37 
/* DMA-coherent queue memory region, aligned up to OTX2_ALIGN bytes.
 * Allocated by qmem_alloc() and released by qmem_free().
 */
struct qmem {
	void            *base;	/* CPU virtual address (after alignment) */
	dma_addr_t	iova;	/* device DMA address (after alignment) */
	int		alloc_sz;	/* total bytes allocated, incl. alignment pad */
	u16		entry_sz;	/* size of one queue entry in bytes */
	u8		align;	/* offset added to base/iova for alignment */
	u32		qsize;	/* number of entries */
};
46 
qmem_alloc(struct device * dev,struct qmem ** q,int qsize,int entry_sz)47 static inline int qmem_alloc(struct device *dev, struct qmem **q,
48 			     int qsize, int entry_sz)
49 {
50 	struct qmem *qmem;
51 	int aligned_addr;
52 
53 	if (!qsize)
54 		return -EINVAL;
55 
56 	*q = devm_kzalloc(dev, sizeof(*qmem), GFP_KERNEL);
57 	if (!*q)
58 		return -ENOMEM;
59 	qmem = *q;
60 
61 	qmem->entry_sz = entry_sz;
62 	qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN;
63 	qmem->base = dma_alloc_attrs(dev, qmem->alloc_sz, &qmem->iova,
64 				     GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS);
65 	if (!qmem->base)
66 		return -ENOMEM;
67 
68 	qmem->qsize = qsize;
69 
70 	aligned_addr = ALIGN((u64)qmem->iova, OTX2_ALIGN);
71 	qmem->align = (aligned_addr - qmem->iova);
72 	qmem->base += qmem->align;
73 	qmem->iova += qmem->align;
74 	return 0;
75 }
76 
qmem_free(struct device * dev,struct qmem * qmem)77 static inline void qmem_free(struct device *dev, struct qmem *qmem)
78 {
79 	if (!qmem)
80 		return;
81 
82 	if (qmem->base)
83 		dma_free_attrs(dev, qmem->alloc_sz,
84 			       qmem->base - qmem->align,
85 			       qmem->iova - qmem->align,
86 			       DMA_ATTR_FORCE_CONTIGUOUS);
87 	devm_kfree(dev, qmem);
88 }
89 
/* HW admin queue: instruction ring plus result memory */
struct admin_queue {
	struct qmem	*inst;	/* instruction queue memory */
	struct qmem	*res;	/* result/response memory */
	spinlock_t	lock; /* Serialize inst enqueue from PFs */
};
95 
/* NPA aura count size codes; NPA_AURA_COUNT() below maps a code 'x'
 * to 1 << (x + 6) auras (128 for SZ_128, 256 for SZ_256, ...).
 * NOTE(review): SZ_0 presumably means "no auras"/disabled even though
 * the macro would yield 64 for it — confirm against NPA HW spec.
 */
enum npa_aura_sz {
	NPA_AURA_SZ_0,
	NPA_AURA_SZ_128,
	NPA_AURA_SZ_256,
	NPA_AURA_SZ_512,
	NPA_AURA_SZ_1K,
	NPA_AURA_SZ_2K,
	NPA_AURA_SZ_4K,
	NPA_AURA_SZ_8K,
	NPA_AURA_SZ_16K,
	NPA_AURA_SZ_32K,
	NPA_AURA_SZ_64K,
	NPA_AURA_SZ_128K,
	NPA_AURA_SZ_256K,
	NPA_AURA_SZ_512K,
	NPA_AURA_SZ_1M,
	NPA_AURA_SZ_MAX,	/* number of size codes, not a valid code */
};
115 
/* Aura count for aura size code 'x' (enum npa_aura_sz): 1 << (x + 6) */
#define NPA_AURA_COUNT(x)	(1ULL << ((x) + 6))

/* NPA AQ result structure for init/read/write of aura HW contexts */
struct npa_aq_aura_res {
	struct	npa_aq_res_s	res;		/* AQ operation result/status */
	struct	npa_aura_s	aura_ctx;	/* aura context read/written */
	struct	npa_aura_s	ctx_mask;	/* presumably selects which context
						 * fields apply — confirm vs HW spec
						 */
};

/* NPA AQ result structure for init/read/write of pool HW contexts */
struct npa_aq_pool_res {
	struct	npa_aq_res_s	res;		/* AQ operation result/status */
	struct	npa_pool_s	pool_ctx;	/* pool context read/written */
	struct	npa_pool_s	ctx_mask;	/* presumably selects which context
						 * fields apply — confirm vs HW spec
						 */
};
131 
/* NIX Transmit scheduler hierarchy levels (SMQ/MDQ lowest, TL1 top) */
enum nix_scheduler {
	NIX_TXSCH_LVL_SMQ = 0x0,
	NIX_TXSCH_LVL_MDQ = 0x0,	/* alias: SMQ and MDQ share level 0 */
	NIX_TXSCH_LVL_TL4 = 0x1,
	NIX_TXSCH_LVL_TL3 = 0x2,
	NIX_TXSCH_LVL_TL2 = 0x3,
	NIX_TXSCH_LVL_TL1 = 0x4,
	NIX_TXSCH_LVL_CNT = 0x5,	/* number of levels, not a level */
};
142 
/* Max round-robin quantum (24-bit field) */
#define TXSCH_RR_QTM_MAX		((1 << 24) - 1)
#define TXSCH_TL1_DFLT_RR_QTM		TXSCH_RR_QTM_MAX
#define TXSCH_TL1_DFLT_RR_PRIO		(0x7ull)
#define CN10K_MAX_DWRR_WEIGHT          16384 /* Weight is 14bit on CN10K */

/* Don't change the order as on CN10K (except CN10KB)
 * SMQX_CFG[SDP] value should be 1 for SDP flows.
 */
#define SMQ_LINK_TYPE_RPM		0
#define SMQ_LINK_TYPE_SDP		1
#define SMQ_LINK_TYPE_LBK		2

/* Min/Max packet sizes, excluding FCS */
#define	NIC_HW_MIN_FRS			40
#define	NIC_HW_MAX_FRS			9212
#define	SDP_HW_MAX_FRS			65535
#define CN10K_LMAC_LINK_MAX_FRS		16380 /* 16k - FCS */
#define CN10K_LBK_LINK_MAX_FRS		65535 /* 64k */
161 
/* NIX RX action operation */
#define NIX_RX_ACTIONOP_DROP		(0x0ull)
#define NIX_RX_ACTIONOP_UCAST		(0x1ull)
#define NIX_RX_ACTIONOP_UCAST_IPSEC	(0x2ull)
#define NIX_RX_ACTIONOP_MCAST		(0x3ull)
#define NIX_RX_ACTIONOP_RSS		(0x4ull)
/* Use the RX action set in the default unicast entry */
#define NIX_RX_ACTION_DEFAULT		(0xfull)

/* NIX TX action operation */
#define NIX_TX_ACTIONOP_DROP		(0x0ull)
#define NIX_TX_ACTIONOP_UCAST_DEFAULT	(0x1ull)
#define NIX_TX_ACTIONOP_UCAST_CHAN	(0x2ull)
#define NIX_TX_ACTIONOP_MCAST		(0x3ull)
#define NIX_TX_ACTIONOP_DROP_VIOL	(0x5ull)

/* NPC MCAM key width selectors */
#define NPC_MCAM_KEY_X1			0
#define NPC_MCAM_KEY_X2			1
#define NPC_MCAM_KEY_X4			2

/* NIX interface id: bit 0 encodes direction (0 = RX, 1 = TX), the
 * upper bits hold the NIX block index 'a'.
 */
#define NIX_INTFX_RX(a)			(0x0ull | (a) << 1)
#define NIX_INTFX_TX(a)			(0x1ull | (a) << 1)

/* Default interfaces are NIX0_RX and NIX0_TX */
#define NIX_INTF_RX			NIX_INTFX_RX(0)
#define NIX_INTF_TX			NIX_INTFX_TX(0)

#define NIX_INTF_TYPE_CGX		0
#define NIX_INTF_TYPE_LBK		1
#define NIX_INTF_TYPE_SDP		2

#define MAX_LMAC_PKIND			12
/* Link id for LMAC 'b' of CGX 'a' (4 LMACs per CGX) */
#define NIX_LINK_CGX_LMAC(a, b)		(0 + 4 * (a) + (b))
/* LBK links are numbered after the 12 CGX LMAC links */
#define NIX_LINK_LBK(a)			(12 + (a))
/* Channel 'c' of LMAC 'b' on CGX 'a' */
#define NIX_CHAN_CGX_LMAC_CHX(a, b, c)	(0x800 + 0x100 * (a) + 0x10 * (b) + (c))
#define NIX_CHAN_LBK_CHX(a, b)		(0 + 0x100 * (a) + (b))
#define NIX_CHAN_SDP_CH_START          (0x700ull)
#define NIX_CHAN_SDP_CHX(a)            (NIX_CHAN_SDP_CH_START + (a))
#define NIX_CHAN_SDP_NUM_CHANS		256
#define NIX_CHAN_CPT_CH_START          (0x800ull)

/* The mask is to extract lower 10-bits of channel number
 * which CPT will pass to X2P.
 */
#define NIX_CHAN_CPT_X2P_MASK          (0x3ffull)
207 
/* NIX LSO format indices.
 * As of now TSO is the only user, so indices are assigned statically.
 */
#define NIX_LSO_FORMAT_IDX_TSOV4	0
#define NIX_LSO_FORMAT_IDX_TSOV6	1

/* RSS info */
#define MAX_RSS_GROUPS			8
/* Group 0 has to be used in default pkt forwarding MCAM entries
 * reserved for NIXLFs. Groups 1-7 can be used for RSS for ntuple
 * filters.
 */
#define DEFAULT_RSS_CONTEXT_GROUP	0
#define MAX_RSS_INDIR_TBL_SIZE		256 /* 1 << Max adder bits */
222 
/* NDC info */
/* NDC instance ids. NOTE(review): 0x3 is skipped in this numbering —
 * presumably reserved in the HW mapping; confirm against NDC spec.
 */
enum ndc_idx_e {
	NIX0_RX = 0x0,
	NIX0_TX = 0x1,
	NPA0_U  = 0x2,
	NIX1_RX = 0x4,
	NIX1_TX = 0x5,
};
231 
/* NDC caching mode for a context (names suggest cache vs bypass —
 * confirm semantics against NDC spec).
 */
enum ndc_ctype_e {
	CACHING = 0x0,
	BYPASS = 0x1,
};
236 
#define NDC_MAX_PORT 6		/* number of NDC ports */
#define NDC_READ_TRANS 0	/* transaction type: read */
#define NDC_WRITE_TRANS 1	/* transaction type: write */
240 
241 #endif /* COMMON_H */
242