/*
 * Copyright (C) 2015 Cavium Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#ifndef NICVF_QUEUES_H
#define	NICVF_QUEUES_H

#include "q_struct.h"

#define	MAX_QUEUE_SET			128
#define	MAX_RCV_QUEUES_PER_QS		8
#define	MAX_RCV_BUF_DESC_RINGS_PER_QS	2
#define	MAX_SND_QUEUES_PER_QS		8
#define	MAX_CMP_QUEUES_PER_QS		8

/* VF's queue interrupt ranges */
#define	NICVF_INTR_ID_CQ		0
#define	NICVF_INTR_ID_SQ		8
#define	NICVF_INTR_ID_RBDR		16
#define	NICVF_INTR_ID_MISC		18
#define	NICVF_INTR_ID_QS_ERR		19

#define	for_each_cq_irq(irq)	\
	for ((irq) = NICVF_INTR_ID_CQ; (irq) < NICVF_INTR_ID_SQ; (irq)++)
#define	for_each_sq_irq(irq)	\
	for ((irq) = NICVF_INTR_ID_SQ; (irq) < NICVF_INTR_ID_RBDR; (irq)++)
#define	for_each_rbdr_irq(irq)	\
	for ((irq) = NICVF_INTR_ID_RBDR; (irq) < NICVF_INTR_ID_MISC; (irq)++)
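
/*
 * Usage sketch (hypothetical caller, not part of this header): the
 * iterators walk a VF's interrupt vector ranges, e.g. to unmask every
 * CQ interrupt after queue bring-up (assuming the NICVF_INTR_CQ
 * interrupt type from nic.h):
 *
 *	int irq;
 *
 *	for_each_cq_irq(irq)
 *		nicvf_enable_intr(nic, NICVF_INTR_CQ, irq - NICVF_INTR_ID_CQ);
 */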

#define	RBDR_SIZE0		0UL /* 8K entries */
#define	RBDR_SIZE1		1UL /* 16K entries */
#define	RBDR_SIZE2		2UL /* 32K entries */
#define	RBDR_SIZE3		3UL /* 64K entries */
#define	RBDR_SIZE4		4UL /* 128K entries */
#define	RBDR_SIZE5		5UL /* 256K entries */
#define	RBDR_SIZE6		6UL /* 512K entries */

#define	SND_QUEUE_SIZE0		0UL /* 1K entries */
#define	SND_QUEUE_SIZE1		1UL /* 2K entries */
#define	SND_QUEUE_SIZE2		2UL /* 4K entries */
#define	SND_QUEUE_SIZE3		3UL /* 8K entries */
#define	SND_QUEUE_SIZE4		4UL /* 16K entries */
#define	SND_QUEUE_SIZE5		5UL /* 32K entries */
#define	SND_QUEUE_SIZE6		6UL /* 64K entries */

#define	CMP_QUEUE_SIZE0		0UL /* 1K entries */
#define	CMP_QUEUE_SIZE1		1UL /* 2K entries */
#define	CMP_QUEUE_SIZE2		2UL /* 4K entries */
#define	CMP_QUEUE_SIZE3		3UL /* 8K entries */
#define	CMP_QUEUE_SIZE4		4UL /* 16K entries */
#define	CMP_QUEUE_SIZE5		5UL /* 32K entries */
#define	CMP_QUEUE_SIZE6		6UL /* 64K entries */

/* Default queue counts per QS, their lengths and threshold values */
#define	RBDR_CNT		1
#define	RCV_QUEUE_CNT		8
#define	SND_QUEUE_CNT		8
#define	CMP_QUEUE_CNT		8 /* Max of RCV and SND queue counts */

#define	SND_QSIZE		SND_QUEUE_SIZE2
#define	SND_QUEUE_LEN		(1UL << (SND_QSIZE + 10))
#define	MAX_SND_QUEUE_LEN	(1UL << (SND_QUEUE_SIZE6 + 10))
#define	SND_QUEUE_THRESH	2UL
#define	MIN_SQ_DESC_PER_PKT_XMIT	2
/* One CQE per packet since timestamping is not enabled; otherwise 2 */
#define	MAX_CQE_PER_PKT_XMIT		1
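
/*
 * Worked example of the sizing above (a sketch, assuming the defaults):
 * with SND_QSIZE == SND_QUEUE_SIZE2 (2), SND_QUEUE_LEN is
 * 1UL << (2 + 10) == 4096 descriptors per send queue, and
 * MAX_SND_QUEUE_LEN is 1UL << (6 + 10) == 65536 descriptors.
 */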

/*
 * Keep CQ and SQ sizes the same; if timestamping
 * is enabled this equation will change.
 */
#define	CMP_QSIZE		CMP_QUEUE_SIZE2
#define	CMP_QUEUE_LEN		(1UL << (CMP_QSIZE + 10))
#define	CMP_QUEUE_CQE_THRESH	32
#define	CMP_QUEUE_TIMER_THRESH	220 /* 10 usec */
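
/*
 * A sketch of the resulting size, assuming the defaults above: with
 * CMP_QSIZE == CMP_QUEUE_SIZE2 (2), CMP_QUEUE_LEN is
 * 1UL << (2 + 10) == 4096 CQEs, matching SND_QUEUE_LEN as the comment
 * above requires.
 */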

#define	RBDR_SIZE		RBDR_SIZE0
#define	RCV_BUF_COUNT		(1UL << (RBDR_SIZE + 13))
#define	MAX_RCV_BUF_COUNT	(1UL << (RBDR_SIZE6 + 13))
#define	RBDR_THRESH		(RCV_BUF_COUNT / 2)
#define	DMA_BUFFER_LEN		2048 /* In multiples of 128 bytes */
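
/*
 * Worked example (assuming the defaults above): RBDR_SIZE == RBDR_SIZE0,
 * so RCV_BUF_COUNT is 1UL << 13 == 8192 receive buffers per RBDR, and
 * RBDR_THRESH, the refill interrupt threshold, is 4096 (half the ring).
 */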

#define	MAX_CQES_FOR_TX		\
    ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * MAX_CQE_PER_PKT_XMIT)
/*
 * Calculate the number of CQEs to reserve for all SQEs.
 * It's expressed in 1/256th levels of the CQ size.
 * '+ 1' to account for pipelining.
 */
#define	RQ_CQ_DROP		\
    ((256 / (CMP_QUEUE_LEN / (CMP_QUEUE_LEN - MAX_CQES_FOR_TX))) + 1)
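
/*
 * A worked instance of the formula, assuming the defaults above:
 * MAX_CQES_FOR_TX = (4096 / 2) * 1 = 2048, so
 * RQ_CQ_DROP = (256 / (4096 / (4096 - 2048))) + 1 = 128 + 1 = 129,
 * i.e. the RQ drop level sits at 129/256ths of the CQ, leaving roughly
 * half of the ring reserved for TX completions.
 */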

/* Descriptor size in bytes */
#define	SND_QUEUE_DESC_SIZE	16
#define	CMP_QUEUE_DESC_SIZE	512

/* Buffer / descriptor alignments */
#define	NICVF_RCV_BUF_ALIGN		7
#define	NICVF_RCV_BUF_ALIGN_BYTES	(1UL << NICVF_RCV_BUF_ALIGN)
#define	NICVF_CQ_BASE_ALIGN_BYTES	512  /* 9 bits */
#define	NICVF_SQ_BASE_ALIGN_BYTES	128  /* 7 bits */

#define	NICVF_ALIGNED_ADDR(addr, align_bytes)	\
    roundup2((addr), (align_bytes))
#define	NICVF_ADDR_ALIGN_LEN(addr, bytes)	\
    (NICVF_ALIGNED_ADDR((addr), (bytes)) - (bytes))
#define	NICVF_RCV_BUF_ALIGN_LEN(addr)		\
    (NICVF_ALIGNED_ADDR((addr), NICVF_RCV_BUF_ALIGN_BYTES) - (addr))
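
/*
 * Example of the alignment math (hypothetical address, for illustration
 * only): with addr == 0x1001 and NICVF_RCV_BUF_ALIGN_BYTES == 128,
 * NICVF_ALIGNED_ADDR() rounds up to 0x1080, so
 * NICVF_RCV_BUF_ALIGN_LEN() yields 0x1080 - 0x1001 == 127 bytes of
 * padding needed to reach the next 128-byte boundary.
 */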

#define	NICVF_TXBUF_MAXSIZE	NIC_HW_MAX_FRS	/* Total max payload without TSO */
#define	NICVF_TXBUF_NSEGS	256	/* Single command is at most 256 buffers
					   (hdr + 255 subcmds) */
/* TSO-related definitions */
#define	NICVF_TSO_MAXSIZE	IP_MAXPACKET
#define	NICVF_TSO_NSEGS		NICVF_TXBUF_NSEGS
#define	NICVF_TSO_HEADER_SIZE	128

/* Queue enable/disable */
#define	NICVF_SQ_EN		(1UL << 19)

/* Queue reset */
#define	NICVF_CQ_RESET		(1UL << 41)
#define	NICVF_SQ_RESET		(1UL << 17)
#define	NICVF_RBDR_RESET	(1UL << 43)

enum CQ_RX_ERRLVL_E {
	CQ_ERRLVL_MAC,
	CQ_ERRLVL_L2,
	CQ_ERRLVL_L3,
	CQ_ERRLVL_L4,
};

enum CQ_RX_ERROP_E {
	CQ_RX_ERROP_RE_NONE = 0x0,
	CQ_RX_ERROP_RE_PARTIAL = 0x1,
	CQ_RX_ERROP_RE_JABBER = 0x2,
	CQ_RX_ERROP_RE_FCS = 0x7,
	CQ_RX_ERROP_RE_TERMINATE = 0x9,
	CQ_RX_ERROP_RE_RX_CTL = 0xb,
	CQ_RX_ERROP_PREL2_ERR = 0x1f,
	CQ_RX_ERROP_L2_FRAGMENT = 0x20,
	CQ_RX_ERROP_L2_OVERRUN = 0x21,
	CQ_RX_ERROP_L2_PFCS = 0x22,
	CQ_RX_ERROP_L2_PUNY = 0x23,
	CQ_RX_ERROP_L2_MAL = 0x24,
	CQ_RX_ERROP_L2_OVERSIZE = 0x25,
	CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
	CQ_RX_ERROP_L2_LENMISM = 0x27,
	CQ_RX_ERROP_L2_PCLP = 0x28,
	CQ_RX_ERROP_IP_NOT = 0x41,
	CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
	CQ_RX_ERROP_IP_MAL = 0x43,
	CQ_RX_ERROP_IP_MALD = 0x44,
	CQ_RX_ERROP_IP_HOP = 0x45,
	CQ_RX_ERROP_L3_ICRC = 0x46,
	CQ_RX_ERROP_L3_PCLP = 0x47,
	CQ_RX_ERROP_L4_MAL = 0x61,
	CQ_RX_ERROP_L4_CHK = 0x62,
	CQ_RX_ERROP_UDP_LEN = 0x63,
	CQ_RX_ERROP_L4_PORT = 0x64,
	CQ_RX_ERROP_TCP_FLAG = 0x65,
	CQ_RX_ERROP_TCP_OFFSET = 0x66,
	CQ_RX_ERROP_L4_PCLP = 0x67,
	CQ_RX_ERROP_RBDR_TRUNC = 0x70,
};

enum CQ_TX_ERROP_E {
	CQ_TX_ERROP_GOOD = 0x0,
	CQ_TX_ERROP_DESC_FAULT = 0x10,
	CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
	CQ_TX_ERROP_SUBDC_ERR = 0x12,
	CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
	CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
	CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
	CQ_TX_ERROP_LOCK_VIOL = 0x83,
	CQ_TX_ERROP_DATA_FAULT = 0x84,
	CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
	CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
	CQ_TX_ERROP_MEM_FAULT = 0x87,
	CQ_TX_ERROP_CK_OVERLAP = 0x88,
	CQ_TX_ERROP_CK_OFLOW = 0x89,
	CQ_TX_ERROP_ENUM_LAST = 0x8a,
};

struct cmp_queue_stats {
	struct tx_stats {
		uint64_t good;
		uint64_t desc_fault;
		uint64_t hdr_cons_err;
		uint64_t subdesc_err;
		uint64_t imm_size_oflow;
		uint64_t data_seq_err;
		uint64_t mem_seq_err;
		uint64_t lock_viol;
		uint64_t data_fault;
		uint64_t tstmp_conflict;
		uint64_t tstmp_timeout;
		uint64_t mem_fault;
		uint64_t csum_overlap;
		uint64_t csum_overflow;
	} tx;
} __aligned(CACHE_LINE_SIZE);

enum RQ_SQ_STATS {
	RQ_SQ_STATS_OCTS,
	RQ_SQ_STATS_PKTS,
};

struct rx_tx_queue_stats {
	uint64_t	bytes;
	uint64_t	pkts;
} __aligned(CACHE_LINE_SIZE);

struct q_desc_mem {
	bus_dma_tag_t	dmat;
	bus_dmamap_t	dmap;
	void		*base;
	bus_addr_t	phys_base;
	uint64_t	size;
	uint16_t	q_len;
};

struct rbdr {
	boolean_t		enable;
	uint32_t		dma_size;
	uint32_t		frag_len;
	uint32_t		thresh;		/* Threshold level for interrupt */
	void			*desc;
	uint32_t		head;
	uint32_t		tail;
	struct q_desc_mem	dmem;

	struct nicvf		*nic;
	int			idx;

	struct task		rbdr_task;
	struct task		rbdr_task_nowait;
	struct taskqueue	*rbdr_taskq;

	bus_dma_tag_t		rbdr_buff_dmat;
	bus_dmamap_t		*rbdr_buff_dmaps;
} __aligned(CACHE_LINE_SIZE);

struct rcv_queue {
	boolean_t	enable;
	struct	rbdr	*rbdr_start;
	struct	rbdr	*rbdr_cont;
	boolean_t	en_tcp_reassembly;
	uint8_t		cq_qs;  /* CQ's QS to which this RQ is assigned */
	uint8_t		cq_idx; /* CQ index (0 to 7) in the QS */
	uint8_t		cont_rbdr_qs;      /* Continue buffer ptrs - QS num */
	uint8_t		cont_qs_rbdr_idx;  /* RBDR idx in the cont QS */
	uint8_t		start_rbdr_qs;     /* First buffer ptrs - QS num */
	uint8_t		start_qs_rbdr_idx; /* RBDR idx in the above QS */
	uint8_t		caching;
	struct		rx_tx_queue_stats stats;

	boolean_t	lro_enabled;
	struct lro_ctrl	lro;
} __aligned(CACHE_LINE_SIZE);

struct cmp_queue {
	boolean_t		enable;
	uint16_t		thresh;

	struct nicvf		*nic;
	int			idx;	/* This queue index */

	struct buf_ring		*rx_br;	/* Reception buf ring */
	struct mtx		mtx;	/* Lock to serialize processing of CQEs */
	char			mtx_name[32];

	struct task		cmp_task;
	struct taskqueue	*cmp_taskq;
	u_int			cmp_cpuid; /* CPU to which the CQ task is bound */

	void			*desc;
	struct q_desc_mem	dmem;
	struct cmp_queue_stats	stats;
	int			irq;
} __aligned(CACHE_LINE_SIZE);

struct snd_buff {
	bus_dmamap_t	dmap;
	struct mbuf	*mbuf;
};

struct snd_queue {
	boolean_t		enable;
	uint8_t			cq_qs;  /* CQ's QS to which this SQ is pointing */
	uint8_t			cq_idx; /* CQ index (0 to 7) in the above QS */
	uint16_t		thresh;
	volatile int		free_cnt;
	uint32_t		head;
	uint32_t		tail;
	uint64_t		*skbuff;
	void			*desc;

	struct nicvf		*nic;
	int			idx;	/* This queue index */

	bus_dma_tag_t		snd_buff_dmat;
	struct snd_buff		*snd_buff;

	struct buf_ring		*br;	/* Transmission buf ring */
	struct mtx		mtx;
	char			mtx_name[32];

	struct task		snd_task;
	struct taskqueue	*snd_taskq;

	struct q_desc_mem	dmem;
	struct rx_tx_queue_stats stats;
} __aligned(CACHE_LINE_SIZE);

struct queue_set {
	boolean_t	enable;
	boolean_t	be_en;
	uint8_t		vnic_id;
	uint8_t		rq_cnt;
	uint8_t		cq_cnt;
	uint64_t	cq_len;
	uint8_t		sq_cnt;
	uint64_t	sq_len;
	uint8_t		rbdr_cnt;
	uint64_t	rbdr_len;
	struct	rcv_queue	rq[MAX_RCV_QUEUES_PER_QS];
	struct	cmp_queue	cq[MAX_CMP_QUEUES_PER_QS];
	struct	snd_queue	sq[MAX_SND_QUEUES_PER_QS];
	struct	rbdr		rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS];

	struct task		qs_err_task;
	struct taskqueue	*qs_err_taskq;
} __aligned(CACHE_LINE_SIZE);

#define	GET_RBDR_DESC(RING, idx)				\
    (&(((struct rbdr_entry_t *)((RING)->desc))[(idx)]))
#define	GET_SQ_DESC(RING, idx)					\
    (&(((struct sq_hdr_subdesc *)((RING)->desc))[(idx)]))
#define	GET_CQ_DESC(RING, idx)					\
    (&(((union cq_desc_t *)((RING)->desc))[(idx)]))
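
/*
 * Usage sketch (illustrative only): these accessors index the raw
 * descriptor memory of a ring, e.g. fetching the CQE at the current
 * head of a completion queue (cq_head is a hypothetical index):
 *
 *	union cq_desc_t *cqe = GET_CQ_DESC(cq, cq_head);
 */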

/* CQ status bits */
#define	CQ_WR_FUL	(1UL << 26)
#define	CQ_WR_DISABLE	(1UL << 25)
#define	CQ_WR_FAULT	(1UL << 24)
#define	CQ_CQE_COUNT	(0xFFFF << 0)

#define	CQ_ERR_MASK	(CQ_WR_FUL | CQ_WR_DISABLE | CQ_WR_FAULT)
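
/*
 * A sketch of how the status bits are typically consumed (assuming the
 * NIC_QSET_CQ_0_7_STATUS register offset from nic_reg.h): read the CQ
 * status, bail out on any error bit, otherwise extract the CQE count:
 *
 *	uint64_t status;
 *	int cqe_count;
 *
 *	status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
 *	if ((status & CQ_ERR_MASK) == 0)
 *		cqe_count = status & CQ_CQE_COUNT;
 */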

#define	NICVF_TX_LOCK(sq)		mtx_lock(&(sq)->mtx)
#define	NICVF_TX_TRYLOCK(sq)		mtx_trylock(&(sq)->mtx)
#define	NICVF_TX_UNLOCK(sq)		mtx_unlock(&(sq)->mtx)
#define	NICVF_TX_LOCK_ASSERT(sq)	mtx_assert(&(sq)->mtx, MA_OWNED)

#define	NICVF_CMP_LOCK(cq)		mtx_lock(&(cq)->mtx)
#define	NICVF_CMP_UNLOCK(cq)		mtx_unlock(&(cq)->mtx)
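
/*
 * Typical transmit-path pattern (a sketch; the authoritative logic
 * lives in the driver's transmit routines): try the SQ lock, drain
 * what we can, and defer to the send task if the lock is contended:
 *
 *	if (NICVF_TX_TRYLOCK(sq)) {
 *		err = nicvf_xmit_locked(sq);
 *		NICVF_TX_UNLOCK(sq);
 *	} else
 *		taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
 */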

int nicvf_set_qset_resources(struct nicvf *);
int nicvf_config_data_transfer(struct nicvf *, boolean_t);
void nicvf_qset_config(struct nicvf *, boolean_t);

void nicvf_enable_intr(struct nicvf *, int, int);
void nicvf_disable_intr(struct nicvf *, int, int);
void nicvf_clear_intr(struct nicvf *, int, int);
int nicvf_is_intr_enabled(struct nicvf *, int, int);

int nicvf_xmit_locked(struct snd_queue *sq);

/* Register access APIs */
void nicvf_reg_write(struct nicvf *, uint64_t, uint64_t);
uint64_t nicvf_reg_read(struct nicvf *, uint64_t);
void nicvf_qset_reg_write(struct nicvf *, uint64_t, uint64_t);
uint64_t nicvf_qset_reg_read(struct nicvf *, uint64_t);
void nicvf_queue_reg_write(struct nicvf *, uint64_t, uint64_t, uint64_t);
uint64_t nicvf_queue_reg_read(struct nicvf *, uint64_t, uint64_t);

/* Stats */
void nicvf_update_rq_stats(struct nicvf *, int);
void nicvf_update_sq_stats(struct nicvf *, int);
int nicvf_check_cqe_rx_errs(struct nicvf *, struct cmp_queue *,
    struct cqe_rx_t *);
int nicvf_check_cqe_tx_errs(struct nicvf *, struct cmp_queue *,
    struct cqe_send_t *);
#endif /* NICVF_QUEUES_H */