xref: /openbsd/sys/dev/pci/if_iavf.c (revision 0f9891f1)
1 /*	$OpenBSD: if_iavf.c,v 1.13 2024/05/24 06:02:53 jsg Exp $	*/
2 
3 /*
4  * Copyright (c) 2013-2015, Intel Corporation
5  * All rights reserved.
6 
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions are met:
9  *
10  *  1. Redistributions of source code must retain the above copyright notice,
11  *     this list of conditions and the following disclaimer.
12  *
13  *  2. Redistributions in binary form must reproduce the above copyright
14  *     notice, this list of conditions and the following disclaimer in the
15  *     documentation and/or other materials provided with the distribution.
16  *
17  *  3. Neither the name of the Intel Corporation nor the names of its
18  *     contributors may be used to endorse or promote products derived from
19  *     this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * Copyright (c) 2016,2017 David Gwynne <dlg@openbsd.org>
36  * Copyright (c) 2019 Jonathan Matthew <jmatthew@openbsd.org>
37  *
38  * Permission to use, copy, modify, and distribute this software for any
39  * purpose with or without fee is hereby granted, provided that the above
40  * copyright notice and this permission notice appear in all copies.
41  *
42  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
43  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
44  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
45  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
46  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
47  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
48  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
49  */
50 
51 #include "bpfilter.h"
52 
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/proc.h>
56 #include <sys/sockio.h>
57 #include <sys/mbuf.h>
58 #include <sys/socket.h>
59 #include <sys/device.h>
60 #include <sys/pool.h>
61 #include <sys/queue.h>
62 #include <sys/timeout.h>
63 #include <sys/task.h>
64 #include <sys/syslog.h>
65 
66 #include <machine/bus.h>
67 #include <machine/intr.h>
68 
69 #include <net/if.h>
70 #include <net/if_media.h>
71 
72 #if NBPFILTER > 0
73 #include <net/bpf.h>
74 #endif
75 
76 #include <netinet/in.h>
77 #include <netinet/if_ether.h>
78 
79 #include <dev/pci/pcireg.h>
80 #include <dev/pci/pcivar.h>
81 #include <dev/pci/pcidevs.h>
82 
83 #define I40E_MASK(mask, shift)		((mask) << (shift))
84 #define I40E_AQ_LARGE_BUF		512
85 
86 #define IAVF_REG_VFR			0xdeadbeef
87 
88 #define IAVF_VFR_INPROGRESS		0
89 #define IAVF_VFR_COMPLETED		1
90 #define IAVF_VFR_VFACTIVE		2
91 
92 #include <dev/pci/if_ixlreg.h>
93 
94 struct iavf_aq_desc {
95 	uint16_t	iaq_flags;
96 #define	IAVF_AQ_DD		(1U << 0)
97 #define	IAVF_AQ_CMP		(1U << 1)
98 #define IAVF_AQ_ERR		(1U << 2)
99 #define IAVF_AQ_VFE		(1U << 3)
100 #define IAVF_AQ_LB		(1U << 9)
101 #define IAVF_AQ_RD		(1U << 10)
102 #define IAVF_AQ_VFC		(1U << 11)
103 #define IAVF_AQ_BUF		(1U << 12)
104 #define IAVF_AQ_SI		(1U << 13)
105 #define IAVF_AQ_EI		(1U << 14)
106 #define IAVF_AQ_FE		(1U << 15)
107 
108 #define IAVF_AQ_FLAGS_FMT	"\020" "\020FE" "\017EI" "\016SI" "\015BUF" \
109 				    "\014VFC" "\013DB" "\012LB" "\004VFE" \
110 				    "\003ERR" "\002CMP" "\001DD"
111 
112 	uint16_t	iaq_opcode;
113 
114 	uint16_t	iaq_datalen;
115 	uint16_t	iaq_retval;
116 
117 	uint32_t	iaq_vc_opcode;
118 	uint32_t	iaq_vc_retval;
119 
120 	uint32_t	iaq_param[4];
121 /*	iaq_vfid	iaq_param[0] */
122 /*	iaq_data_hi	iaq_param[2] */
123 /*	iaq_data_lo	iaq_param[3] */
124 } __packed __aligned(8);
125 
126 /* aq commands */
127 #define IAVF_AQ_OP_SEND_TO_PF		0x0801
128 #define IAVF_AQ_OP_MSG_FROM_PF		0x0802
129 #define IAVF_AQ_OP_SHUTDOWN		0x0803
130 
131 /* virt channel messages */
132 #define IAVF_VC_OP_VERSION		1
133 #define IAVF_VC_OP_RESET_VF		2
134 #define IAVF_VC_OP_GET_VF_RESOURCES	3
135 #define IAVF_VC_OP_CONFIG_TX_QUEUE	4
136 #define IAVF_VC_OP_CONFIG_RX_QUEUE	5
137 #define IAVF_VC_OP_CONFIG_VSI_QUEUES	6
138 #define IAVF_VC_OP_CONFIG_IRQ_MAP	7
139 #define IAVF_VC_OP_ENABLE_QUEUES	8
140 #define IAVF_VC_OP_DISABLE_QUEUES	9
141 #define IAVF_VC_OP_ADD_ETH_ADDR		10
142 #define IAVF_VC_OP_DEL_ETH_ADDR		11
143 #define IAVF_VC_OP_ADD_VLAN		12
144 #define IAVF_VC_OP_DEL_VLAN		13
145 #define IAVF_VC_OP_CONFIG_PROMISC	14
146 #define IAVF_VC_OP_GET_STATS		15
147 #define IAVF_VC_OP_EVENT		17
148 #define IAVF_VC_OP_GET_RSS_HENA_CAPS	25
149 #define IAVF_VC_OP_SET_RSS_HENA		26
150 
151 /* virt channel response codes */
152 #define IAVF_VC_RC_SUCCESS		0
153 #define IAVF_VC_RC_ERR_PARAM		-5
154 #define IAVF_VC_RC_ERR_OPCODE		-38
155 #define IAVF_VC_RC_ERR_CQP_COMPL	-39
156 #define IAVF_VC_RC_ERR_VF_ID		-40
157 #define IAVF_VC_RC_ERR_NOT_SUP		-64
158 
159 /* virt channel events */
160 #define IAVF_VC_EVENT_LINK_CHANGE	1
161 #define IAVF_VC_EVENT_RESET_IMPENDING	2
162 #define IAVF_VC_EVENT_PF_DRIVER_CLOSE	3
163 
164 /* virt channel offloads */
165 #define IAVF_VC_OFFLOAD_L2		0x00000001
166 #define IAVF_VC_OFFLOAD_IWARP		0x00000002
167 #define IAVF_VC_OFFLOAD_RSVD		0x00000004
168 #define IAVF_VC_OFFLOAD_RSS_AQ		0x00000008
169 #define IAVF_VC_OFFLOAD_RSS_REG		0x00000010
170 #define IAVF_VC_OFFLOAD_WB_ON_ITR	0x00000020
171 #define IAVF_VC_OFFLOAD_VLAN		0x00010000
172 #define IAVF_VC_OFFLOAD_RX_POLLING	0x00020000
173 #define IAVF_VC_OFFLOAD_RSS_PCTYPE_V2	0x00040000
174 #define IAVF_VC_OFFLOAD_RSS_PF		0x00080000
175 #define IAVF_VC_OFFLOAD_ENCAP		0x00100000
176 #define IAVF_VC_OFFLOAD_ENCAP_CSUM	0x00200000
177 #define IAVF_VC_OFFLOAD_RX_ENCAP_CSUM	0x00400000
178 
179 /* link speeds */
180 #define IAVF_VC_LINK_SPEED_100MB	0x1
181 #define IAVC_VC_LINK_SPEED_1000MB	0x2
182 #define IAVC_VC_LINK_SPEED_10GB		0x3
183 #define IAVC_VC_LINK_SPEED_40GB		0x4
184 #define IAVC_VC_LINK_SPEED_20GB		0x5
185 #define IAVC_VC_LINK_SPEED_25GB		0x6
186 
187 struct iavf_link_speed {
188 	uint64_t	baudrate;
189 	uint64_t	media;
190 };
191 
192 static const struct iavf_link_speed iavf_link_speeds[] = {
193 	{ 0, 0 },
194 	{ IF_Mbps(100), IFM_100_TX },
195 	{ IF_Mbps(1000), IFM_1000_T },
196 	{ IF_Gbps(10), IFM_10G_T },
197 	{ IF_Gbps(40), IFM_40G_CR4 },
198 	{ IF_Gbps(20), IFM_20G_KR2 },
199 	{ IF_Gbps(25), IFM_25G_CR }
200 };
201 
202 
203 struct iavf_vc_version_info {
204 	uint32_t	major;
205 	uint32_t	minor;
206 } __packed;
207 
208 struct iavf_vc_txq_info {
209 	uint16_t	vsi_id;
210 	uint16_t	queue_id;
211 	uint16_t	ring_len;
212 	uint16_t	headwb_ena;		/* deprecated */
213 	uint64_t	dma_ring_addr;
214 	uint64_t	dma_headwb_addr;	/* deprecated */
215 } __packed;
216 
217 struct iavf_vc_rxq_info {
218 	uint16_t	vsi_id;
219 	uint16_t	queue_id;
220 	uint32_t	ring_len;
221 	uint16_t	hdr_size;
222 	uint16_t	splithdr_ena;
223 	uint32_t	databuf_size;
224 	uint32_t	max_pkt_size;
225 	uint32_t	pad1;
226 	uint64_t	dma_ring_addr;
227 	uint32_t	rx_split_pos;
228 	uint32_t	pad2;
229 } __packed;
230 
231 struct iavf_vc_queue_pair_info {
232 	struct iavf_vc_txq_info	txq;
233 	struct iavf_vc_rxq_info	rxq;
234 } __packed;
235 
236 struct iavf_vc_queue_config_info {
237 	uint16_t	vsi_id;
238 	uint16_t	num_queue_pairs;
239 	uint32_t	pad;
240 	struct iavf_vc_queue_pair_info qpair[1];
241 } __packed;
242 
243 struct iavf_vc_vector_map {
244 	uint16_t	vsi_id;
245 	uint16_t	vector_id;
246 	uint16_t	rxq_map;
247 	uint16_t	txq_map;
248 	uint16_t	rxitr_idx;
249 	uint16_t	txitr_idx;
250 } __packed;
251 
252 struct iavf_vc_irq_map_info {
253 	uint16_t	num_vectors;
254 	struct iavf_vc_vector_map vecmap[1];
255 } __packed;
256 
257 struct iavf_vc_queue_select {
258 	uint16_t	vsi_id;
259 	uint16_t	pad;
260 	uint32_t	rx_queues;
261 	uint32_t	tx_queues;
262 } __packed;
263 
264 struct iavf_vc_vsi_resource {
265 	uint16_t	vsi_id;
266 	uint16_t	num_queue_pairs;
267 	uint32_t	vsi_type;
268 	uint16_t	qset_handle;
269 	uint8_t		default_mac[ETHER_ADDR_LEN];
270 } __packed;
271 
272 struct iavf_vc_vf_resource {
273 	uint16_t	num_vsis;
274 	uint16_t	num_qp;
275 	uint16_t	max_vectors;
276 	uint16_t	max_mtu;
277 	uint32_t	offload_flags;
278 	uint32_t	rss_key_size;
279 	uint32_t	rss_lut_size;
280 	struct iavf_vc_vsi_resource vsi_res[1];
281 } __packed;
282 
283 struct iavf_vc_eth_addr {
284 	uint8_t		addr[ETHER_ADDR_LEN];
285 	uint8_t		pad[2];
286 } __packed;
287 
288 struct iavf_vc_eth_addr_list {
289 	uint16_t	vsi_id;
290 	uint16_t	num_elements;
291 	struct iavf_vc_eth_addr list[1];
292 } __packed;
293 
294 struct iavf_vc_vlan_list {
295 	uint16_t	vsi_id;
296 	uint16_t	num_elements;
297 	uint16_t	vlan_id[1];
298 } __packed;
299 
300 struct iavf_vc_promisc_info {
301 	uint16_t	vsi_id;
302 	uint16_t	flags;
303 #define IAVF_FLAG_VF_UNICAST_PROMISC	0x0001
304 #define IAVF_FLAG_VF_MULTICAST_PROMISC	0x0002
305 } __packed;
306 
307 struct iavf_vc_pf_event {
308 	uint32_t	event;
309 	uint32_t	link_speed;
310 	uint8_t		link_status;
311 	uint8_t		pad[3];
312 	uint32_t	severity;
313 } __packed;
314 
315 /* aq response codes */
316 #define IAVF_AQ_RC_OK			0  /* success */
317 #define IAVF_AQ_RC_EPERM		1  /* Operation not permitted */
318 #define IAVF_AQ_RC_ENOENT		2  /* No such element */
319 #define IAVF_AQ_RC_ESRCH		3  /* Bad opcode */
320 #define IAVF_AQ_RC_EINTR		4  /* operation interrupted */
321 #define IAVF_AQ_RC_EIO			5  /* I/O error */
322 #define IAVF_AQ_RC_ENXIO		6  /* No such resource */
323 #define IAVF_AQ_RC_E2BIG		7  /* Arg too long */
324 #define IAVF_AQ_RC_EAGAIN		8  /* Try again */
325 #define IAVF_AQ_RC_ENOMEM		9  /* Out of memory */
326 #define IAVF_AQ_RC_EACCES		10 /* Permission denied */
327 #define IAVF_AQ_RC_EFAULT		11 /* Bad address */
328 #define IAVF_AQ_RC_EBUSY		12 /* Device or resource busy */
329 #define IAVF_AQ_RC_EEXIST		13 /* object already exists */
330 #define IAVF_AQ_RC_EINVAL		14 /* invalid argument */
331 #define IAVF_AQ_RC_ENOTTY		15 /* not a typewriter */
332 #define IAVF_AQ_RC_ENOSPC		16 /* No space or alloc failure */
333 #define IAVF_AQ_RC_ENOSYS		17 /* function not implemented */
334 #define IAVF_AQ_RC_ERANGE		18 /* parameter out of range */
335 #define IAVF_AQ_RC_EFLUSHED		19 /* cmd flushed due to prev error */
336 #define IAVF_AQ_RC_BAD_ADDR		20 /* contains a bad pointer */
337 #define IAVF_AQ_RC_EMODE		21 /* not allowed in current mode */
338 #define IAVF_AQ_RC_EFBIG		22 /* file too large */
339 
340 struct iavf_tx_desc {
341 	uint64_t		addr;
342 	uint64_t		cmd;
343 #define IAVF_TX_DESC_DTYPE_SHIFT		0
344 #define IAVF_TX_DESC_DTYPE_MASK		(0xfULL << IAVF_TX_DESC_DTYPE_SHIFT)
345 #define IAVF_TX_DESC_DTYPE_DATA		(0x0ULL << IAVF_TX_DESC_DTYPE_SHIFT)
346 #define IAVF_TX_DESC_DTYPE_NOP		(0x1ULL << IAVF_TX_DESC_DTYPE_SHIFT)
347 #define IAVF_TX_DESC_DTYPE_CONTEXT	(0x1ULL << IAVF_TX_DESC_DTYPE_SHIFT)
348 #define IAVF_TX_DESC_DTYPE_FCOE_CTX	(0x2ULL << IAVF_TX_DESC_DTYPE_SHIFT)
349 #define IAVF_TX_DESC_DTYPE_FD		(0x8ULL << IAVF_TX_DESC_DTYPE_SHIFT)
350 #define IAVF_TX_DESC_DTYPE_DDP_CTX	(0x9ULL << IAVF_TX_DESC_DTYPE_SHIFT)
351 #define IAVF_TX_DESC_DTYPE_FLEX_DATA	(0xbULL << IAVF_TX_DESC_DTYPE_SHIFT)
352 #define IAVF_TX_DESC_DTYPE_FLEX_CTX_1	(0xcULL << IAVF_TX_DESC_DTYPE_SHIFT)
353 #define IAVF_TX_DESC_DTYPE_FLEX_CTX_2	(0xdULL << IAVF_TX_DESC_DTYPE_SHIFT)
354 #define IAVF_TX_DESC_DTYPE_DONE		(0xfULL << IAVF_TX_DESC_DTYPE_SHIFT)
355 
356 #define IAVF_TX_DESC_CMD_SHIFT		4
357 #define IAVF_TX_DESC_CMD_MASK		(0x3ffULL << IAVF_TX_DESC_CMD_SHIFT)
358 #define IAVF_TX_DESC_CMD_EOP		(0x001 << IAVF_TX_DESC_CMD_SHIFT)
359 #define IAVF_TX_DESC_CMD_RS		(0x002 << IAVF_TX_DESC_CMD_SHIFT)
360 #define IAVF_TX_DESC_CMD_ICRC		(0x004 << IAVF_TX_DESC_CMD_SHIFT)
361 #define IAVF_TX_DESC_CMD_IL2TAG1	(0x008 << IAVF_TX_DESC_CMD_SHIFT)
362 #define IAVF_TX_DESC_CMD_DUMMY		(0x010 << IAVF_TX_DESC_CMD_SHIFT)
363 #define IAVF_TX_DESC_CMD_IIPT_MASK	(0x060 << IAVF_TX_DESC_CMD_SHIFT)
364 #define IAVF_TX_DESC_CMD_IIPT_NONIP	(0x000 << IAVF_TX_DESC_CMD_SHIFT)
365 #define IAVF_TX_DESC_CMD_IIPT_IPV6	(0x020 << IAVF_TX_DESC_CMD_SHIFT)
366 #define IAVF_TX_DESC_CMD_IIPT_IPV4	(0x040 << IAVF_TX_DESC_CMD_SHIFT)
367 #define IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM	(0x060 << IAVF_TX_DESC_CMD_SHIFT)
368 #define IAVF_TX_DESC_CMD_FCOET		(0x080 << IAVF_TX_DESC_CMD_SHIFT)
369 #define IAVF_TX_DESC_CMD_L4T_EOFT_MASK	(0x300 << IAVF_TX_DESC_CMD_SHIFT)
370 #define IAVF_TX_DESC_CMD_L4T_EOFT_UNK	(0x000 << IAVF_TX_DESC_CMD_SHIFT)
371 #define IAVF_TX_DESC_CMD_L4T_EOFT_TCP	(0x100 << IAVF_TX_DESC_CMD_SHIFT)
372 #define IAVF_TX_DESC_CMD_L4T_EOFT_SCTP	(0x200 << IAVF_TX_DESC_CMD_SHIFT)
373 #define IAVF_TX_DESC_CMD_L4T_EOFT_UDP	(0x300 << IAVF_TX_DESC_CMD_SHIFT)
374 
375 #define IAVF_TX_DESC_MACLEN_SHIFT	16
376 #define IAVF_TX_DESC_MACLEN_MASK	(0x7fULL << IAVF_TX_DESC_MACLEN_SHIFT)
377 #define IAVF_TX_DESC_IPLEN_SHIFT	23
378 #define IAVF_TX_DESC_IPLEN_MASK		(0x7fULL << IAVF_TX_DESC_IPLEN_SHIFT)
379 #define IAVF_TX_DESC_L4LEN_SHIFT	30
380 #define IAVF_TX_DESC_L4LEN_MASK		(0xfULL << IAVF_TX_DESC_L4LEN_SHIFT)
381 #define IAVF_TX_DESC_FCLEN_SHIFT	30
382 #define IAVF_TX_DESC_FCLEN_MASK		(0xfULL << IAVF_TX_DESC_FCLEN_SHIFT)
383 
384 #define IAVF_TX_DESC_BSIZE_SHIFT	34
385 #define IAVF_TX_DESC_BSIZE_MAX		0x3fffULL
386 #define IAVF_TX_DESC_BSIZE_MASK		\
387 	(IAVF_TX_DESC_BSIZE_MAX << IAVF_TX_DESC_BSIZE_SHIFT)
388 
389 #define IAVF_TX_DESC_L2TAG1_SHIFT	48
390 #define IAVF_TX_DESC_L2TAG1_MASK	(0xffff << IAVF_TX_DESC_L2TAG1_SHIFT)
391 } __packed __aligned(16);
392 
393 struct iavf_rx_rd_desc_16 {
394 	uint64_t		paddr; /* packet addr */
395 	uint64_t		haddr; /* header addr */
396 } __packed __aligned(16);
397 
398 struct iavf_rx_rd_desc_32 {
399 	uint64_t		paddr; /* packet addr */
400 	uint64_t		haddr; /* header addr */
401 	uint64_t		_reserved1;
402 	uint64_t		_reserved2;
403 } __packed __aligned(16);
404 
405 struct iavf_rx_wb_desc_16 {
406 	uint64_t		qword0;
407 #define IAVF_RX_DESC_L2TAG1_SHIFT	16
408 #define IAVF_RX_DESC_L2TAG1_MASK	(0xffff << IAVF_RX_DESC_L2TAG1_SHIFT)
409 	uint64_t		qword1;
410 #define IAVF_RX_DESC_DD			(1 << 0)
411 #define IAVF_RX_DESC_EOP		(1 << 1)
412 #define IAVF_RX_DESC_L2TAG1P		(1 << 2)
413 #define IAVF_RX_DESC_L3L4P		(1 << 3)
414 #define IAVF_RX_DESC_CRCP		(1 << 4)
415 #define IAVF_RX_DESC_TSYNINDX_SHIFT	5	/* TSYNINDX */
416 #define IAVF_RX_DESC_TSYNINDX_MASK	(7 << IAVF_RX_DESC_TSYNINDX_SHIFT)
417 #define IAVF_RX_DESC_UMB_SHIFT		9
418 #define IAVF_RX_DESC_UMB_MASK		(0x3 << IAVF_RX_DESC_UMB_SHIFT)
419 #define IAVF_RX_DESC_UMB_UCAST		(0x0 << IAVF_RX_DESC_UMB_SHIFT)
420 #define IAVF_RX_DESC_UMB_MCAST		(0x1 << IAVF_RX_DESC_UMB_SHIFT)
421 #define IAVF_RX_DESC_UMB_BCAST		(0x2 << IAVF_RX_DESC_UMB_SHIFT)
422 #define IAVF_RX_DESC_UMB_MIRROR		(0x3 << IAVF_RX_DESC_UMB_SHIFT)
423 #define IAVF_RX_DESC_FLM		(1 << 11)
424 #define IAVF_RX_DESC_FLTSTAT_SHIFT 	12
425 #define IAVF_RX_DESC_FLTSTAT_MASK 	(0x3 << IAVF_RX_DESC_FLTSTAT_SHIFT)
426 #define IAVF_RX_DESC_FLTSTAT_NODATA 	(0x0 << IAVF_RX_DESC_FLTSTAT_SHIFT)
427 #define IAVF_RX_DESC_FLTSTAT_FDFILTID 	(0x1 << IAVF_RX_DESC_FLTSTAT_SHIFT)
428 #define IAVF_RX_DESC_FLTSTAT_RSS 	(0x3 << IAVF_RX_DESC_FLTSTAT_SHIFT)
429 #define IAVF_RX_DESC_LPBK		(1 << 14)
430 #define IAVF_RX_DESC_IPV6EXTADD		(1 << 15)
431 #define IAVF_RX_DESC_INT_UDP_0		(1 << 18)
432 
433 #define IAVF_RX_DESC_RXE		(1 << 19)
434 #define IAVF_RX_DESC_HBO		(1 << 21)
435 #define IAVF_RX_DESC_IPE		(1 << 22)
436 #define IAVF_RX_DESC_L4E		(1 << 23)
437 #define IAVF_RX_DESC_EIPE		(1 << 24)
438 #define IAVF_RX_DESC_OVERSIZE		(1 << 25)
439 
440 #define IAVF_RX_DESC_PTYPE_SHIFT	30
441 #define IAVF_RX_DESC_PTYPE_MASK		(0xffULL << IAVF_RX_DESC_PTYPE_SHIFT)
442 
443 #define IAVF_RX_DESC_PLEN_SHIFT		38
444 #define IAVF_RX_DESC_PLEN_MASK		(0x3fffULL << IAVF_RX_DESC_PLEN_SHIFT)
445 #define IAVF_RX_DESC_HLEN_SHIFT		42
446 #define IAVF_RX_DESC_HLEN_MASK		(0x7ffULL << IAVF_RX_DESC_HLEN_SHIFT)
447 } __packed __aligned(16);
448 
449 struct iavf_rx_wb_desc_32 {
450 	uint64_t		qword0;
451 	uint64_t		qword1;
452 	uint64_t		qword2;
453 	uint64_t		qword3;
454 } __packed __aligned(16);
455 
456 
457 #define IAVF_VF_MAJOR			1
458 #define IAVF_VF_MINOR			1
459 
460 #define IAVF_TX_PKT_DESCS		8
461 #define IAVF_TX_QUEUE_ALIGN		128
462 #define IAVF_RX_QUEUE_ALIGN		128
463 
464 #define IAVF_HARDMTU			9712 /* 9726 - ETHER_HDR_LEN */
465 
466 #define IAVF_PCIREG			PCI_MAPREG_START
467 
468 #define IAVF_ITR0			0x0
469 #define IAVF_ITR1			0x1
470 #define IAVF_ITR2			0x2
471 #define IAVF_NOITR			0x3
472 
473 #define IAVF_AQ_NUM			256
474 #define IAVF_AQ_MASK			(IAVF_AQ_NUM - 1)
475 #define IAVF_AQ_ALIGN			64 /* lol */
476 #define IAVF_AQ_BUFLEN			4096
477 
478 struct iavf_aq_regs {
479 	bus_size_t		atq_tail;
480 	bus_size_t		atq_head;
481 	bus_size_t		atq_len;
482 	bus_size_t		atq_bal;
483 	bus_size_t		atq_bah;
484 
485 	bus_size_t		arq_tail;
486 	bus_size_t		arq_head;
487 	bus_size_t		arq_len;
488 	bus_size_t		arq_bal;
489 	bus_size_t		arq_bah;
490 
491 	uint32_t		atq_len_enable;
492 	uint32_t		atq_tail_mask;
493 	uint32_t		atq_head_mask;
494 
495 	uint32_t		arq_len_enable;
496 	uint32_t		arq_tail_mask;
497 	uint32_t		arq_head_mask;
498 };
499 
500 struct iavf_aq_buf {
501 	SIMPLEQ_ENTRY(iavf_aq_buf)
502 				 aqb_entry;
503 	void			*aqb_data;
504 	bus_dmamap_t		 aqb_map;
505 };
506 SIMPLEQ_HEAD(iavf_aq_bufs, iavf_aq_buf);
507 
508 struct iavf_dmamem {
509 	bus_dmamap_t		ixm_map;
510 	bus_dma_segment_t	ixm_seg;
511 	int			ixm_nsegs;
512 	size_t			ixm_size;
513 	caddr_t			ixm_kva;
514 };
515 #define IAVF_DMA_MAP(_ixm)	((_ixm)->ixm_map)
516 #define IAVF_DMA_DVA(_ixm)	((_ixm)->ixm_map->dm_segs[0].ds_addr)
517 #define IAVF_DMA_KVA(_ixm)	((void *)(_ixm)->ixm_kva)
518 #define IAVF_DMA_LEN(_ixm)	((_ixm)->ixm_size)
519 
520 struct iavf_tx_map {
521 	struct mbuf		*txm_m;
522 	bus_dmamap_t		 txm_map;
523 	unsigned int		 txm_eop;
524 };
525 
526 struct iavf_tx_ring {
527 	unsigned int		 txr_prod;
528 	unsigned int		 txr_cons;
529 
530 	struct iavf_tx_map	*txr_maps;
531 	struct iavf_dmamem	 txr_mem;
532 
533 	bus_size_t		 txr_tail;
534 	unsigned int		 txr_qid;
535 };
536 
537 struct iavf_rx_map {
538 	struct mbuf		*rxm_m;
539 	bus_dmamap_t		 rxm_map;
540 };
541 
542 struct iavf_rx_ring {
543 	struct iavf_softc	*rxr_sc;
544 
545 	struct if_rxring	 rxr_acct;
546 	struct timeout		 rxr_refill;
547 
548 	unsigned int		 rxr_prod;
549 	unsigned int		 rxr_cons;
550 
551 	struct iavf_rx_map	*rxr_maps;
552 	struct iavf_dmamem	 rxr_mem;
553 
554 	struct mbuf		*rxr_m_head;
555 	struct mbuf		**rxr_m_tail;
556 
557 	bus_size_t		 rxr_tail;
558 	unsigned int		 rxr_qid;
559 };
560 
561 struct iavf_softc {
562 	struct device		 sc_dev;
563 	struct arpcom		 sc_ac;
564 	struct ifmedia		 sc_media;
565 	uint64_t		 sc_media_status;
566 	uint64_t		 sc_media_active;
567 
568 	pci_chipset_tag_t	 sc_pc;
569 	pci_intr_handle_t	 sc_ih;
570 	void			*sc_ihc;
571 	pcitag_t		 sc_tag;
572 
573 	bus_dma_tag_t		 sc_dmat;
574 	bus_space_tag_t		 sc_memt;
575 	bus_space_handle_t	 sc_memh;
576 	bus_size_t		 sc_mems;
577 
578 	uint32_t		 sc_major_ver;
579 	uint32_t		 sc_minor_ver;
580 
581 	int			 sc_got_vf_resources;
582 	int			 sc_got_irq_map;
583 	uint32_t		 sc_vf_id;
584 	uint16_t		 sc_vsi_id;
585 	uint16_t		 sc_qset_handle;
586 	unsigned int		 sc_base_queue;
587 
588 	struct cond		 sc_admin_cond;
589 	int			 sc_admin_result;
590 	struct timeout		 sc_admin_timeout;
591 
592 	struct iavf_dmamem	 sc_scratch;
593 
594 	const struct iavf_aq_regs *
595 				 sc_aq_regs;
596 
597 	struct mutex		 sc_atq_mtx;
598 	struct iavf_dmamem	 sc_atq;
599 	unsigned int		 sc_atq_prod;
600 	unsigned int		 sc_atq_cons;
601 
602 	struct iavf_dmamem	 sc_arq;
603 	struct iavf_aq_bufs	 sc_arq_idle;
604 	struct iavf_aq_bufs	 sc_arq_live;
605 	struct if_rxring	 sc_arq_ring;
606 	unsigned int		 sc_arq_prod;
607 	unsigned int		 sc_arq_cons;
608 
609 	struct task		 sc_reset_task;
610 	int			 sc_resetting;
611 
612 	unsigned int		 sc_tx_ring_ndescs;
613 	unsigned int		 sc_rx_ring_ndescs;
614 	unsigned int		 sc_nqueues;	/* 1 << sc_nqueues */
615 
616 	struct rwlock		 sc_cfg_lock;
617 	unsigned int		 sc_dead;
618 
619 	uint8_t			 sc_enaddr[ETHER_ADDR_LEN];
620 };
621 #define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname)
622 
623 #define delaymsec(_ms)	delay(1000 * (_ms))
624 
625 static int	iavf_dmamem_alloc(struct iavf_softc *, struct iavf_dmamem *,
626 		    bus_size_t, u_int);
627 static void	iavf_dmamem_free(struct iavf_softc *, struct iavf_dmamem *);
628 
629 static int	iavf_arq_fill(struct iavf_softc *, int);
630 static void	iavf_arq_unfill(struct iavf_softc *);
631 static void	iavf_arq_timeout(void *);
632 static int	iavf_arq_wait(struct iavf_softc *, int);
633 
634 static int	iavf_atq_post(struct iavf_softc *, struct iavf_aq_desc *);
635 static void	iavf_atq_done(struct iavf_softc *);
636 
637 static void	iavf_init_admin_queue(struct iavf_softc *);
638 
639 static int	iavf_get_version(struct iavf_softc *);
640 static int	iavf_get_vf_resources(struct iavf_softc *);
641 static int	iavf_config_irq_map(struct iavf_softc *);
642 
643 static int	iavf_add_del_addr(struct iavf_softc *, uint8_t *, int);
644 static int	iavf_process_arq(struct iavf_softc *, int);
645 
646 static int	iavf_match(struct device *, void *, void *);
647 static void	iavf_attach(struct device *, struct device *, void *);
648 
649 static int	iavf_media_change(struct ifnet *);
650 static void	iavf_media_status(struct ifnet *, struct ifmediareq *);
651 static void	iavf_watchdog(struct ifnet *);
652 static int	iavf_ioctl(struct ifnet *, u_long, caddr_t);
653 static void	iavf_start(struct ifqueue *);
654 static int	iavf_intr(void *);
655 static int	iavf_up(struct iavf_softc *);
656 static int	iavf_down(struct iavf_softc *);
657 static int	iavf_iff(struct iavf_softc *);
658 static void	iavf_reset(void *);
659 
660 static struct iavf_tx_ring *
661 		iavf_txr_alloc(struct iavf_softc *, unsigned int);
662 static void	iavf_txr_clean(struct iavf_softc *, struct iavf_tx_ring *);
663 static void	iavf_txr_free(struct iavf_softc *, struct iavf_tx_ring *);
664 static int	iavf_txeof(struct iavf_softc *, struct ifqueue *);
665 
666 static struct iavf_rx_ring *
667 		iavf_rxr_alloc(struct iavf_softc *, unsigned int);
668 static void	iavf_rxr_clean(struct iavf_softc *, struct iavf_rx_ring *);
669 static void	iavf_rxr_free(struct iavf_softc *, struct iavf_rx_ring *);
670 static int	iavf_rxeof(struct iavf_softc *, struct ifiqueue *);
671 static void	iavf_rxfill(struct iavf_softc *, struct iavf_rx_ring *);
672 static void	iavf_rxrefill(void *);
673 static int	iavf_rxrinfo(struct iavf_softc *, struct if_rxrinfo *);
674 
675 struct cfdriver iavf_cd = {
676 	NULL,
677 	"iavf",
678 	DV_IFNET,
679 };
680 
681 const struct cfattach iavf_ca = {
682 	sizeof(struct iavf_softc),
683 	iavf_match,
684 	iavf_attach,
685 };
686 
687 static const struct iavf_aq_regs iavf_aq_regs = {
688 	.atq_tail	= I40E_VF_ATQT1,
689 	.atq_tail_mask	= I40E_VF_ATQT1_ATQT_MASK,
690 	.atq_head	= I40E_VF_ATQH1,
691 	.atq_head_mask	= I40E_VF_ARQH1_ARQH_MASK,
692 	.atq_len	= I40E_VF_ATQLEN1,
693 	.atq_bal	= I40E_VF_ATQBAL1,
694 	.atq_bah	= I40E_VF_ATQBAH1,
695 	.atq_len_enable	= I40E_VF_ATQLEN1_ATQENABLE_MASK,
696 
697 	.arq_tail	= I40E_VF_ARQT1,
698 	.arq_tail_mask	= I40E_VF_ARQT1_ARQT_MASK,
699 	.arq_head	= I40E_VF_ARQH1,
700 	.arq_head_mask	= I40E_VF_ARQH1_ARQH_MASK,
701 	.arq_len	= I40E_VF_ARQLEN1,
702 	.arq_bal	= I40E_VF_ARQBAL1,
703 	.arq_bah	= I40E_VF_ARQBAH1,
704 	.arq_len_enable	= I40E_VF_ARQLEN1_ARQENABLE_MASK,
705 };
706 
707 #define iavf_rd(_s, _r) \
708 	bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
709 #define iavf_wr(_s, _r, _v) \
710 	bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
711 #define iavf_barrier(_s, _r, _l, _o) \
712 	bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
713 #define iavf_intr_enable(_s) \
714 	iavf_wr((_s), I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL0_INTENA_MASK | \
715 	    I40E_VFINT_DYN_CTL0_CLEARPBA_MASK | \
716 	    (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)); \
717 	iavf_wr((_s), I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK)
718 
719 #define iavf_nqueues(_sc)	(1 << (_sc)->sc_nqueues)
720 #define iavf_allqueues(_sc)	((1 << ((_sc)->sc_nqueues+1)) - 1)
721 
722 #ifdef __LP64__
723 #define iavf_dmamem_hi(_ixm)	(uint32_t)(IAVF_DMA_DVA(_ixm) >> 32)
724 #else
725 #define iavf_dmamem_hi(_ixm)	0
726 #endif
727 
728 #define iavf_dmamem_lo(_ixm) 	(uint32_t)IAVF_DMA_DVA(_ixm)
729 
730 static inline void
iavf_aq_dva(struct iavf_aq_desc * iaq,bus_addr_t addr)731 iavf_aq_dva(struct iavf_aq_desc *iaq, bus_addr_t addr)
732 {
733 #ifdef __LP64__
734 	htolem32(&iaq->iaq_param[2], addr >> 32);
735 #else
736 	iaq->iaq_param[2] = htole32(0);
737 #endif
738 	htolem32(&iaq->iaq_param[3], addr);
739 }
740 
741 #if _BYTE_ORDER == _BIG_ENDIAN
742 #define HTOLE16(_x)	(uint16_t)(((_x) & 0xff) << 8 | ((_x) & 0xff00) >> 8)
743 #else
744 #define HTOLE16(_x)	(_x)
745 #endif
746 
747 static const struct pci_matchid iavf_devices[] = {
748 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_VF },
749 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_VF_HV },
750 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_VF },
751 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_ADAPTIVE_VF },
752 };
753 
754 static int
iavf_match(struct device * parent,void * match,void * aux)755 iavf_match(struct device *parent, void *match, void *aux)
756 {
757 	return (pci_matchbyid(aux, iavf_devices, nitems(iavf_devices)));
758 }
759 
760 void
iavf_attach(struct device * parent,struct device * self,void * aux)761 iavf_attach(struct device *parent, struct device *self, void *aux)
762 {
763 	struct iavf_softc *sc = (struct iavf_softc *)self;
764 	struct ifnet *ifp = &sc->sc_ac.ac_if;
765 	struct pci_attach_args *pa = aux;
766 	pcireg_t memtype;
767 	int tries;
768 
769 	rw_init(&sc->sc_cfg_lock, "iavfcfg");
770 
771 	sc->sc_pc = pa->pa_pc;
772 	sc->sc_tag = pa->pa_tag;
773 	sc->sc_dmat = pa->pa_dmat;
774 	sc->sc_aq_regs = &iavf_aq_regs;
775 
776 	sc->sc_nqueues = 0; /* 1 << 0 is 1 queue */
777 	sc->sc_tx_ring_ndescs = 1024;
778 	sc->sc_rx_ring_ndescs = 1024;
779 
780 	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, IAVF_PCIREG);
781 	if (pci_mapreg_map(pa, IAVF_PCIREG, memtype, 0,
782 	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
783 		printf(": unable to map registers\n");
784 		return;
785 	}
786 
787 	for (tries = 0; tries < 100; tries++) {
788 		uint32_t reg;
789 		reg = iavf_rd(sc, I40E_VFGEN_RSTAT) &
790 		    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
791 		if (reg == IAVF_VFR_VFACTIVE ||
792 		    reg == IAVF_VFR_COMPLETED)
793 			break;
794 
795 		delay(10000);
796 	}
797 	if (tries == 100) {
798 		printf(": VF reset timed out\n");
799 		return;
800 	}
801 	task_set(&sc->sc_reset_task, iavf_reset, sc);
802 
803 	mtx_init(&sc->sc_atq_mtx, IPL_NET);
804 
805 	if (iavf_dmamem_alloc(sc, &sc->sc_atq,
806 	    sizeof(struct iavf_aq_desc) * IAVF_AQ_NUM, IAVF_AQ_ALIGN) != 0) {
807 		printf("\n" "%s: unable to allocate atq\n", DEVNAME(sc));
808 		goto unmap;
809 	}
810 
811 	SIMPLEQ_INIT(&sc->sc_arq_idle);
812 	SIMPLEQ_INIT(&sc->sc_arq_live);
813 	if_rxr_init(&sc->sc_arq_ring, 2, IAVF_AQ_NUM - 1);
814 	sc->sc_arq_cons = 0;
815 	sc->sc_arq_prod = 0;
816 
817 	if (iavf_dmamem_alloc(sc, &sc->sc_arq,
818 	    sizeof(struct iavf_aq_desc) * IAVF_AQ_NUM, IAVF_AQ_ALIGN) != 0) {
819 		printf("\n" "%s: unable to allocate arq\n", DEVNAME(sc));
820 		goto free_atq;
821 	}
822 
823 	if (!iavf_arq_fill(sc, 0)) {
824 		printf("\n" "%s: unable to fill arq descriptors\n",
825 		    DEVNAME(sc));
826 		goto free_arq;
827 	}
828 	timeout_set(&sc->sc_admin_timeout, iavf_arq_timeout, sc);
829 
830 	if (iavf_dmamem_alloc(sc, &sc->sc_scratch, PAGE_SIZE, IAVF_AQ_ALIGN) != 0) {
831 		printf("\n" "%s: unable to allocate scratch\n", DEVNAME(sc));
832 		goto shutdown;
833 	}
834 
835 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_atq),
836 	    0, IAVF_DMA_LEN(&sc->sc_atq),
837 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
838 
839 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_arq),
840 	    0, IAVF_DMA_LEN(&sc->sc_arq),
841 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
842 
843 	iavf_init_admin_queue(sc);
844 
845 	if (iavf_get_version(sc) != 0) {
846 		printf(", unable to get VF interface version\n");
847 		goto free_scratch;
848 	}
849 
850 	if (iavf_get_vf_resources(sc) != 0) {
851 		printf(", timed out waiting for VF resources\n");
852 		goto free_scratch;
853 	}
854 
855 	if (iavf_config_irq_map(sc) != 0) {
856 		printf(", timeout waiting for IRQ map response");
857 		goto free_scratch;
858 	}
859 
860 	/* msix only? */
861 	if (pci_intr_map_msix(pa, 0, &sc->sc_ih) != 0) {
862 		printf(", unable to map interrupt\n");
863 		goto free_scratch;
864 	}
865 
866 	/* generate an address if the pf didn't give us one */
867 	memcpy(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
868 	if (memcmp(sc->sc_ac.ac_enaddr, etheranyaddr, ETHER_ADDR_LEN) == 0)
869 		ether_fakeaddr(ifp);
870 
871 	printf(", %s, address %s\n", pci_intr_string(sc->sc_pc, sc->sc_ih),
872 	    ether_sprintf(sc->sc_ac.ac_enaddr));
873 
874 	sc->sc_ihc = pci_intr_establish(sc->sc_pc, sc->sc_ih,
875 	    IPL_NET | IPL_MPSAFE, iavf_intr, sc, DEVNAME(sc));
876 	if (sc->sc_ihc == NULL) {
877 		printf("%s: unable to establish interrupt handler\n",
878 		    DEVNAME(sc));
879 		goto free_scratch;
880 	}
881 
882 	ifp->if_softc = sc;
883 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
884 	ifp->if_xflags = IFXF_MPSAFE;
885 	ifp->if_ioctl = iavf_ioctl;
886 	ifp->if_qstart = iavf_start;
887 	ifp->if_watchdog = iavf_watchdog;
888 	if (ifp->if_hardmtu == 0)
889 		ifp->if_hardmtu = IAVF_HARDMTU;
890 	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
891 	ifq_init_maxlen(&ifp->if_snd, sc->sc_tx_ring_ndescs);
892 
893 	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
894 #if 0
895 	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
896 	    IFCAP_CSUM_UDPv4;
897 #endif
898 
899 	ifmedia_init(&sc->sc_media, 0, iavf_media_change, iavf_media_status);
900 
901 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
902 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
903 
904 	if_attach(ifp);
905 	ether_ifattach(ifp);
906 
907 	if_attach_queues(ifp, iavf_nqueues(sc));
908 	if_attach_iqueues(ifp, iavf_nqueues(sc));
909 
910 	iavf_intr_enable(sc);
911 
912 	return;
913 free_scratch:
914 	iavf_dmamem_free(sc, &sc->sc_scratch);
915 shutdown:
916 	iavf_wr(sc, sc->sc_aq_regs->atq_head, 0);
917 	iavf_wr(sc, sc->sc_aq_regs->arq_head, 0);
918 	iavf_wr(sc, sc->sc_aq_regs->atq_tail, 0);
919 	iavf_wr(sc, sc->sc_aq_regs->arq_tail, 0);
920 
921 	iavf_wr(sc, sc->sc_aq_regs->atq_bal, 0);
922 	iavf_wr(sc, sc->sc_aq_regs->atq_bah, 0);
923 	iavf_wr(sc, sc->sc_aq_regs->atq_len, 0);
924 
925 	iavf_wr(sc, sc->sc_aq_regs->arq_bal, 0);
926 	iavf_wr(sc, sc->sc_aq_regs->arq_bah, 0);
927 	iavf_wr(sc, sc->sc_aq_regs->arq_len, 0);
928 
929 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_arq),
930 	    0, IAVF_DMA_LEN(&sc->sc_arq),
931 	    BUS_DMASYNC_POSTREAD);
932 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_atq),
933 	    0, IAVF_DMA_LEN(&sc->sc_atq),
934 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
935 
936 	iavf_arq_unfill(sc);
937 free_arq:
938 	iavf_dmamem_free(sc, &sc->sc_arq);
939 free_atq:
940 	iavf_dmamem_free(sc, &sc->sc_atq);
941 unmap:
942 	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
943 	sc->sc_mems = 0;
944 }
945 
946 static int
iavf_media_change(struct ifnet * ifp)947 iavf_media_change(struct ifnet *ifp)
948 {
949 	return (EOPNOTSUPP);
950 }
951 
952 static void
iavf_media_status(struct ifnet * ifp,struct ifmediareq * ifm)953 iavf_media_status(struct ifnet *ifp, struct ifmediareq *ifm)
954 {
955 	struct iavf_softc *sc = ifp->if_softc;
956 
957 	NET_ASSERT_LOCKED();
958 
959 	ifm->ifm_status = sc->sc_media_status;
960 	ifm->ifm_active = sc->sc_media_active;
961 }
962 
963 static void
iavf_watchdog(struct ifnet * ifp)964 iavf_watchdog(struct ifnet *ifp)
965 {
966 
967 }
968 
969 int
iavf_ioctl(struct ifnet * ifp,u_long cmd,caddr_t data)970 iavf_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
971 {
972 	struct iavf_softc *sc = (struct iavf_softc *)ifp->if_softc;
973 	struct ifreq *ifr = (struct ifreq *)data;
974 	uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
975 	int /*aqerror,*/ error = 0;
976 
977 	switch (cmd) {
978 	case SIOCSIFADDR:
979 		ifp->if_flags |= IFF_UP;
980 		/* FALLTHROUGH */
981 
982 	case SIOCSIFFLAGS:
983 		if (ISSET(ifp->if_flags, IFF_UP)) {
984 			if (ISSET(ifp->if_flags, IFF_RUNNING))
985 				error = ENETRESET;
986 			else
987 				error = iavf_up(sc);
988 		} else {
989 			if (ISSET(ifp->if_flags, IFF_RUNNING))
990 				error = iavf_down(sc);
991 		}
992 		break;
993 
994 	case SIOCGIFMEDIA:
995 	case SIOCSIFMEDIA:
996 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
997 		break;
998 
999 	case SIOCGIFRXR:
1000 		error = iavf_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
1001 		break;
1002 
1003 	case SIOCADDMULTI:
1004 		if (ether_addmulti(ifr, &sc->sc_ac) == ENETRESET) {
1005 			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
1006 			if (error != 0)
1007 				return (error);
1008 
1009 			iavf_add_del_addr(sc, addrlo, 1);
1010 			/* check result i guess? */
1011 
1012 			if (sc->sc_ac.ac_multirangecnt > 0) {
1013 				SET(ifp->if_flags, IFF_ALLMULTI);
1014 				error = ENETRESET;
1015 			}
1016 		}
1017 		break;
1018 
1019 	case SIOCDELMULTI:
1020 		if (ether_delmulti(ifr, &sc->sc_ac) == ENETRESET) {
1021 			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
1022 			if (error != 0)
1023 				return (error);
1024 
1025 			iavf_add_del_addr(sc, addrlo, 0);
1026 
1027 			if (ISSET(ifp->if_flags, IFF_ALLMULTI) &&
1028 			    sc->sc_ac.ac_multirangecnt == 0) {
1029 				CLR(ifp->if_flags, IFF_ALLMULTI);
1030 				error = ENETRESET;
1031 			}
1032 		}
1033 		break;
1034 
1035 	default:
1036 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
1037 		break;
1038 	}
1039 
1040 	if (error == ENETRESET)
1041 		error = iavf_iff(sc);
1042 
1043 	return (error);
1044 }
1045 
1046 static int
iavf_config_vsi_queues(struct iavf_softc * sc)1047 iavf_config_vsi_queues(struct iavf_softc *sc)
1048 {
1049 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1050 	struct iavf_aq_desc iaq;
1051 	struct iavf_vc_queue_config_info *config;
1052 	struct iavf_vc_txq_info *txq;
1053 	struct iavf_vc_rxq_info *rxq;
1054 	struct iavf_rx_ring *rxr;
1055 	struct iavf_tx_ring *txr;
1056 	int rv, i;
1057 
1058 	memset(&iaq, 0, sizeof(iaq));
1059 	iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
1060 	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
1061 	iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_CONFIG_VSI_QUEUES);
1062 	iaq.iaq_datalen = htole16(sizeof(*config) +
1063 	    iavf_nqueues(sc) * sizeof(struct iavf_vc_queue_pair_info));
1064 	iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));
1065 
1066 	config = IAVF_DMA_KVA(&sc->sc_scratch);
1067 	config->vsi_id = htole16(sc->sc_vsi_id);
1068 	config->num_queue_pairs = htole16(iavf_nqueues(sc));
1069 
1070 	for (i = 0; i < iavf_nqueues(sc); i++) {
1071 		rxr = ifp->if_iqs[i]->ifiq_softc;
1072 		txr = ifp->if_ifqs[i]->ifq_softc;
1073 
1074 		txq = &config->qpair[i].txq;
1075 		txq->vsi_id = htole16(sc->sc_vsi_id);
1076 		txq->queue_id = htole16(i);
1077 		txq->ring_len = sc->sc_tx_ring_ndescs;
1078 		txq->headwb_ena = 0;
1079 		htolem64(&txq->dma_ring_addr, IAVF_DMA_DVA(&txr->txr_mem));
1080 		txq->dma_headwb_addr = 0;
1081 
1082 		rxq = &config->qpair[i].rxq;
1083 		rxq->vsi_id = htole16(sc->sc_vsi_id);
1084 		rxq->queue_id = htole16(i);
1085 		rxq->ring_len = sc->sc_rx_ring_ndescs;
1086 		rxq->splithdr_ena = 0;
1087 		rxq->databuf_size = htole32(MCLBYTES);
1088 		rxq->max_pkt_size = htole32(IAVF_HARDMTU);
1089 		htolem64(&rxq->dma_ring_addr, IAVF_DMA_DVA(&rxr->rxr_mem));
1090 		rxq->rx_split_pos = 0;
1091 	}
1092 
1093 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_scratch), 0,
1094 	    IAVF_DMA_LEN(&sc->sc_scratch),
1095 	    BUS_DMASYNC_PREREAD);
1096 
1097 	iavf_atq_post(sc, &iaq);
1098 	rv = iavf_arq_wait(sc, 250);
1099 	if (rv != IAVF_VC_RC_SUCCESS) {
1100 		printf("%s: CONFIG_VSI_QUEUES failed: %d\n", DEVNAME(sc), rv);
1101 		return (1);
1102 	}
1103 
1104 	return (0);
1105 }
1106 
1107 static int
iavf_config_hena(struct iavf_softc * sc)1108 iavf_config_hena(struct iavf_softc *sc)
1109 {
1110 	struct iavf_aq_desc iaq;
1111 	uint64_t *caps;
1112 	int rv;
1113 
1114 	memset(&iaq, 0, sizeof(iaq));
1115 	iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
1116 	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
1117 	iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_SET_RSS_HENA);
1118 	iaq.iaq_datalen = htole32(sizeof(*caps));
1119 	iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));
1120 
1121 	caps = IAVF_DMA_KVA(&sc->sc_scratch);
1122 	*caps = 0;
1123 
1124 	iavf_atq_post(sc, &iaq);
1125 	rv = iavf_arq_wait(sc, 250);
1126 	if (rv != IAVF_VC_RC_SUCCESS) {
1127 		printf("%s: SET_RSS_HENA failed: %d\n", DEVNAME(sc), rv);
1128 		return (1);
1129 	}
1130 
1131 	caps = IAVF_DMA_KVA(&sc->sc_scratch);
1132 
1133 	return (0);
1134 }
1135 
1136 static int
iavf_queue_select(struct iavf_softc * sc,int opcode)1137 iavf_queue_select(struct iavf_softc *sc, int opcode)
1138 {
1139 	struct iavf_aq_desc iaq;
1140 	struct iavf_vc_queue_select *qsel;
1141 	int rv;
1142 
1143 	memset(&iaq, 0, sizeof(iaq));
1144 	iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
1145 	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
1146 	iaq.iaq_vc_opcode = htole32(opcode);
1147 	iaq.iaq_datalen = htole16(sizeof(*qsel));
1148 	iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));
1149 
1150 	qsel = IAVF_DMA_KVA(&sc->sc_scratch);
1151 	qsel->vsi_id = htole16(sc->sc_vsi_id);
1152 	qsel->rx_queues = htole32(iavf_allqueues(sc));
1153 	qsel->tx_queues = htole32(iavf_allqueues(sc));
1154 
1155 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_scratch), 0,
1156 	    IAVF_DMA_LEN(&sc->sc_scratch),
1157 	    BUS_DMASYNC_PREREAD);
1158 
1159 	iavf_atq_post(sc, &iaq);
1160 	rv = iavf_arq_wait(sc, 250);
1161 	if (rv != IAVF_VC_RC_SUCCESS) {
1162 		printf("%s: queue op %d failed: %d\n", DEVNAME(sc), opcode, rv);
1163 		return (1);
1164 	}
1165 
1166 	return (0);
1167 }
1168 
1169 static int
iavf_up(struct iavf_softc * sc)1170 iavf_up(struct iavf_softc *sc)
1171 {
1172 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1173 	struct iavf_rx_ring *rxr;
1174 	struct iavf_tx_ring *txr;
1175 	unsigned int nqueues, i;
1176 	int rv = ENOMEM;
1177 
1178 	nqueues = iavf_nqueues(sc);
1179 	KASSERT(nqueues == 1); /* XXX */
1180 
1181 	rw_enter_write(&sc->sc_cfg_lock);
1182 	if (sc->sc_dead) {
1183 		rw_exit_write(&sc->sc_cfg_lock);
1184 		return (ENXIO);
1185 	}
1186 
1187 	for (i = 0; i < nqueues; i++) {
1188 		rxr = iavf_rxr_alloc(sc, i);
1189 		if (rxr == NULL)
1190 			goto free;
1191 
1192 		txr = iavf_txr_alloc(sc, i);
1193 		if (txr == NULL) {
1194 			iavf_rxr_free(sc, rxr);
1195 			goto free;
1196 		}
1197 
1198 		ifp->if_iqs[i]->ifiq_softc = rxr;
1199 		ifp->if_ifqs[i]->ifq_softc = txr;
1200 
1201 		iavf_rxfill(sc, rxr);
1202 	}
1203 
1204 	if (iavf_config_vsi_queues(sc) != 0)
1205 		goto down;
1206 
1207 	if (iavf_config_hena(sc) != 0)
1208 		goto down;
1209 
1210 	if (iavf_queue_select(sc, IAVF_VC_OP_ENABLE_QUEUES) != 0)
1211 		goto down;
1212 
1213 	SET(ifp->if_flags, IFF_RUNNING);
1214 
1215 	iavf_wr(sc, I40E_VFINT_ITR01(0), 0x7a);
1216 	iavf_wr(sc, I40E_VFINT_ITR01(1), 0x7a);
1217 	iavf_wr(sc, I40E_VFINT_ITR01(2), 0);
1218 
1219 	rw_exit_write(&sc->sc_cfg_lock);
1220 
1221 	return (ENETRESET);
1222 
1223 free:
1224 	for (i = 0; i < nqueues; i++) {
1225 		rxr = ifp->if_iqs[i]->ifiq_softc;
1226 		txr = ifp->if_ifqs[i]->ifq_softc;
1227 
1228 		if (rxr == NULL) {
1229 			/*
1230 			 * tx and rx get set at the same time, so if one
1231 			 * is NULL, the other is too.
1232 			 */
1233 			continue;
1234 		}
1235 
1236 		iavf_txr_free(sc, txr);
1237 		iavf_rxr_free(sc, rxr);
1238 	}
1239 	rw_exit_write(&sc->sc_cfg_lock);
1240 	return (rv);
1241 down:
1242 	rw_exit_write(&sc->sc_cfg_lock);
1243 	iavf_down(sc);
1244 	return (ETIMEDOUT);
1245 }
1246 
1247 static int
iavf_config_promisc_mode(struct iavf_softc * sc,int unicast,int multicast)1248 iavf_config_promisc_mode(struct iavf_softc *sc, int unicast, int multicast)
1249 {
1250 	struct iavf_aq_desc iaq;
1251 	struct iavf_vc_promisc_info *promisc;
1252 	int rv, flags;
1253 
1254 	memset(&iaq, 0, sizeof(iaq));
1255 	iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
1256 	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
1257 	iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_CONFIG_PROMISC);
1258 	iaq.iaq_datalen = htole16(sizeof(*promisc));
1259 	iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));
1260 
1261 	flags = 0;
1262 	if (unicast)
1263 		flags |= IAVF_FLAG_VF_UNICAST_PROMISC;
1264 	if (multicast)
1265 		flags |= IAVF_FLAG_VF_MULTICAST_PROMISC;
1266 
1267 	promisc = IAVF_DMA_KVA(&sc->sc_scratch);
1268 	promisc->vsi_id = htole16(sc->sc_vsi_id);
1269 	promisc->flags = htole16(flags);
1270 
1271 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_scratch), 0,
1272 	    IAVF_DMA_LEN(&sc->sc_scratch),
1273 	    BUS_DMASYNC_PREREAD);
1274 
1275 	iavf_atq_post(sc, &iaq);
1276 	rv = iavf_arq_wait(sc, 250);
1277 	if (rv != IAVF_VC_RC_SUCCESS) {
1278 		printf("%s: CONFIG_PROMISC_MODE failed: %d\n", DEVNAME(sc), rv);
1279 		return (1);
1280 	}
1281 
1282 	return (0);
1283 }
1284 
1285 static int
iavf_add_del_addr(struct iavf_softc * sc,uint8_t * addr,int add)1286 iavf_add_del_addr(struct iavf_softc *sc, uint8_t *addr, int add)
1287 {
1288 	struct iavf_aq_desc iaq;
1289 	struct iavf_vc_eth_addr_list *addrs;
1290 	struct iavf_vc_eth_addr *vcaddr;
1291 	int rv;
1292 
1293 	memset(&iaq, 0, sizeof(iaq));
1294 	iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
1295 	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
1296 	if (add)
1297 		iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_ADD_ETH_ADDR);
1298 	else
1299 		iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_DEL_ETH_ADDR);
1300 	iaq.iaq_datalen = htole16(sizeof(*addrs) + sizeof(*vcaddr));
1301 	iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));
1302 
1303 	addrs = IAVF_DMA_KVA(&sc->sc_scratch);
1304 	addrs->vsi_id = htole16(sc->sc_vsi_id);
1305 	addrs->num_elements = htole16(1);
1306 
1307 	vcaddr = addrs->list;
1308 	memcpy(vcaddr->addr, addr, ETHER_ADDR_LEN);
1309 
1310 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_scratch), 0,
1311 	    IAVF_DMA_LEN(&sc->sc_scratch),
1312 	    BUS_DMASYNC_PREREAD);
1313 
1314 	iavf_atq_post(sc, &iaq);
1315 	rv = iavf_arq_wait(sc, 250);
1316 	if (rv != IAVF_VC_RC_SUCCESS) {
1317 		printf("%s: ADD/DEL_ETH_ADDR failed: %d\n", DEVNAME(sc), rv);
1318 		return (1);
1319 	}
1320 
1321 	return (0);
1322 }
1323 
1324 static int
iavf_iff(struct iavf_softc * sc)1325 iavf_iff(struct iavf_softc *sc)
1326 {
1327 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1328 	int unicast, multicast;
1329 
1330 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
1331 		return (0);
1332 
1333 	rw_enter_write(&sc->sc_cfg_lock);
1334 
1335 	unicast = 0;
1336 	multicast = 0;
1337 	if (ISSET(ifp->if_flags, IFF_PROMISC)) {
1338 		unicast = 1;
1339 		multicast = 1;
1340 	} else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1341 		multicast = 1;
1342 	}
1343 	iavf_config_promisc_mode(sc, unicast, multicast);
1344 
1345 	if (memcmp(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN) != 0) {
1346 		if (memcmp(sc->sc_enaddr, etheranyaddr, ETHER_ADDR_LEN) != 0)
1347 			iavf_add_del_addr(sc, sc->sc_enaddr, 0);
1348 		memcpy(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
1349 		iavf_add_del_addr(sc, sc->sc_enaddr, 1);
1350 	}
1351 
1352 	rw_exit_write(&sc->sc_cfg_lock);
1353 	return (0);
1354 }
1355 
1356 static int
iavf_down(struct iavf_softc * sc)1357 iavf_down(struct iavf_softc *sc)
1358 {
1359 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1360 	struct iavf_rx_ring *rxr;
1361 	struct iavf_tx_ring *txr;
1362 	unsigned int nqueues, i;
1363 	uint32_t reg;
1364 	int error = 0;
1365 
1366 	nqueues = iavf_nqueues(sc);
1367 
1368 	rw_enter_write(&sc->sc_cfg_lock);
1369 
1370 	CLR(ifp->if_flags, IFF_RUNNING);
1371 
1372 	NET_UNLOCK();
1373 
1374 	if (sc->sc_resetting == 0) {
1375 		/* disable queues */
1376 		if (iavf_queue_select(sc, IAVF_VC_OP_DISABLE_QUEUES) != 0)
1377 			goto die;
1378 	}
1379 
1380 	/* mask interrupts */
1381 	reg = iavf_rd(sc, I40E_VFINT_DYN_CTL01);
1382 	reg |= I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK |
1383 	    (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT);
1384 	iavf_wr(sc, I40E_VFINT_DYN_CTL01, reg);
1385 
1386 	/* make sure no hw generated work is still in flight */
1387 	intr_barrier(sc->sc_ihc);
1388 	for (i = 0; i < nqueues; i++) {
1389 		rxr = ifp->if_iqs[i]->ifiq_softc;
1390 		txr = ifp->if_ifqs[i]->ifq_softc;
1391 
1392 		ifq_barrier(ifp->if_ifqs[i]);
1393 
1394 		timeout_del_barrier(&rxr->rxr_refill);
1395 	}
1396 
1397 	for (i = 0; i < nqueues; i++) {
1398 		rxr = ifp->if_iqs[i]->ifiq_softc;
1399 		txr = ifp->if_ifqs[i]->ifq_softc;
1400 
1401 		iavf_txr_clean(sc, txr);
1402 		iavf_rxr_clean(sc, rxr);
1403 
1404 		iavf_txr_free(sc, txr);
1405 		iavf_rxr_free(sc, rxr);
1406 
1407 		ifp->if_iqs[i]->ifiq_softc = NULL;
1408 		ifp->if_ifqs[i]->ifq_softc =  NULL;
1409 	}
1410 
1411 	/* unmask */
1412 	reg = iavf_rd(sc, I40E_VFINT_DYN_CTL01);
1413 	reg |= (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT);
1414 	iavf_wr(sc, I40E_VFINT_DYN_CTL01, reg);
1415 
1416 out:
1417 	rw_exit_write(&sc->sc_cfg_lock);
1418 	NET_LOCK();
1419 	return (error);
1420 die:
1421 	sc->sc_dead = 1;
1422 	log(LOG_CRIT, "%s: failed to shut down rings", DEVNAME(sc));
1423 	error = ETIMEDOUT;
1424 	goto out;
1425 }
1426 
1427 static void
iavf_reset(void * xsc)1428 iavf_reset(void *xsc)
1429 {
1430 	struct iavf_softc *sc = xsc;
1431 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1432 	int tries, up, link_state;
1433 
1434 	NET_LOCK();
1435 
1436 	/* treat the reset as a loss of link */
1437 	link_state = ifp->if_link_state;
1438 	if (ifp->if_link_state != LINK_STATE_DOWN) {
1439 		ifp->if_link_state = LINK_STATE_DOWN;
1440 		if_link_state_change(ifp);
1441 	}
1442 
1443 	up = 0;
1444 	if (ISSET(ifp->if_flags, IFF_RUNNING)) {
1445 		iavf_down(sc);
1446 		up = 1;
1447 	}
1448 
1449 	rw_enter_write(&sc->sc_cfg_lock);
1450 
1451 	sc->sc_major_ver = UINT_MAX;
1452 	sc->sc_minor_ver = UINT_MAX;
1453 	sc->sc_got_vf_resources = 0;
1454 	sc->sc_got_irq_map = 0;
1455 
1456 	for (tries = 0; tries < 100; tries++) {
1457 		uint32_t reg;
1458 		reg = iavf_rd(sc, I40E_VFGEN_RSTAT) &
1459 		    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1460 		if (reg == IAVF_VFR_VFACTIVE ||
1461 		    reg == IAVF_VFR_COMPLETED)
1462 			break;
1463 
1464 		delay(10000);
1465 	}
1466 	if (tries == 100) {
1467 		printf("%s: VF reset timed out\n", DEVNAME(sc));
1468 		goto failed;
1469 	}
1470 
1471 	iavf_arq_unfill(sc);
1472 	sc->sc_arq_cons = 0;
1473 	sc->sc_arq_prod = 0;
1474 	if (!iavf_arq_fill(sc, 0)) {
1475 		printf("\n" "%s: unable to fill arq descriptors\n",
1476 		    DEVNAME(sc));
1477 		goto failed;
1478 	}
1479 
1480 	iavf_init_admin_queue(sc);
1481 
1482 	if (iavf_get_version(sc) != 0) {
1483 		printf("%s: unable to get VF interface version\n",
1484 		    DEVNAME(sc));
1485 		goto failed;
1486 	}
1487 
1488 	if (iavf_get_vf_resources(sc) != 0) {
1489 		printf("%s: timed out waiting for VF resources\n",
1490 		    DEVNAME(sc));
1491 		goto failed;
1492 	}
1493 
1494 	if (iavf_config_irq_map(sc) != 0) {
1495 		printf("%s: timed out configuring IRQ map\n", DEVNAME(sc));
1496 		goto failed;
1497 	}
1498 
1499 	/* do we need to re-add mac addresses here? */
1500 
1501 	sc->sc_resetting = 0;
1502 	iavf_intr_enable(sc);
1503 	rw_exit_write(&sc->sc_cfg_lock);
1504 
1505 	/* the PF-assigned MAC address might have changed */
1506 	if ((memcmp(sc->sc_ac.ac_enaddr, etheranyaddr, ETHER_ADDR_LEN) != 0) &&
1507 	    (memcmp(sc->sc_ac.ac_enaddr, sc->sc_enaddr, ETHER_ADDR_LEN) != 0)) {
1508 		memcpy(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
1509 		if_setlladdr(ifp, sc->sc_ac.ac_enaddr);
1510 		ifnewlladdr(ifp);
1511 	}
1512 
1513 	/* restore link state */
1514 	if (link_state != LINK_STATE_DOWN) {
1515 		ifp->if_link_state = link_state;
1516 		if_link_state_change(ifp);
1517 	}
1518 
1519 	if (up) {
1520 		int i;
1521 
1522 		iavf_up(sc);
1523 
1524 		for (i = 0; i < iavf_nqueues(sc); i++) {
1525 			if (ifq_is_oactive(ifp->if_ifqs[i]))
1526 				ifq_restart(ifp->if_ifqs[i]);
1527 		}
1528 	}
1529 
1530 	NET_UNLOCK();
1531 	return;
1532 failed:
1533 	sc->sc_dead = 1;
1534 	sc->sc_resetting = 0;
1535 	rw_exit_write(&sc->sc_cfg_lock);
1536 	NET_UNLOCK();
1537 }
1538 
1539 static struct iavf_tx_ring *
iavf_txr_alloc(struct iavf_softc * sc,unsigned int qid)1540 iavf_txr_alloc(struct iavf_softc *sc, unsigned int qid)
1541 {
1542 	struct iavf_tx_ring *txr;
1543 	struct iavf_tx_map *maps, *txm;
1544 	unsigned int i;
1545 
1546 	txr = malloc(sizeof(*txr), M_DEVBUF, M_WAITOK|M_CANFAIL);
1547 	if (txr == NULL)
1548 		return (NULL);
1549 
1550 	maps = mallocarray(sizeof(*maps),
1551 	    sc->sc_tx_ring_ndescs, M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
1552 	if (maps == NULL)
1553 		goto free;
1554 
1555 	if (iavf_dmamem_alloc(sc, &txr->txr_mem,
1556 	    sizeof(struct iavf_tx_desc) * sc->sc_tx_ring_ndescs,
1557 	    IAVF_TX_QUEUE_ALIGN) != 0)
1558 		goto freemap;
1559 
1560 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
1561 		txm = &maps[i];
1562 
1563 		if (bus_dmamap_create(sc->sc_dmat,
1564 		    IAVF_HARDMTU, IAVF_TX_PKT_DESCS, IAVF_HARDMTU, 0,
1565 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
1566 		    &txm->txm_map) != 0)
1567 			goto uncreate;
1568 
1569 		txm->txm_eop = -1;
1570 		txm->txm_m = NULL;
1571 	}
1572 
1573 	txr->txr_cons = txr->txr_prod = 0;
1574 	txr->txr_maps = maps;
1575 
1576 	txr->txr_tail = I40E_QTX_TAIL1(qid);
1577 	txr->txr_qid = qid;
1578 
1579 	return (txr);
1580 
1581 uncreate:
1582 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
1583 		txm = &maps[i];
1584 
1585 		if (txm->txm_map == NULL)
1586 			continue;
1587 
1588 		bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
1589 	}
1590 
1591 	iavf_dmamem_free(sc, &txr->txr_mem);
1592 freemap:
1593 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_tx_ring_ndescs);
1594 free:
1595 	free(txr, M_DEVBUF, sizeof(*txr));
1596 	return (NULL);
1597 }
1598 
1599 static void
iavf_txr_clean(struct iavf_softc * sc,struct iavf_tx_ring * txr)1600 iavf_txr_clean(struct iavf_softc *sc, struct iavf_tx_ring *txr)
1601 {
1602 	struct iavf_tx_map *maps, *txm;
1603 	bus_dmamap_t map;
1604 	unsigned int i;
1605 
1606 	maps = txr->txr_maps;
1607 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
1608 		txm = &maps[i];
1609 
1610 		if (txm->txm_m == NULL)
1611 			continue;
1612 
1613 		map = txm->txm_map;
1614 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1615 		    BUS_DMASYNC_POSTWRITE);
1616 		bus_dmamap_unload(sc->sc_dmat, map);
1617 
1618 		m_freem(txm->txm_m);
1619 		txm->txm_m = NULL;
1620 	}
1621 }
1622 
1623 static void
iavf_txr_free(struct iavf_softc * sc,struct iavf_tx_ring * txr)1624 iavf_txr_free(struct iavf_softc *sc, struct iavf_tx_ring *txr)
1625 {
1626 	struct iavf_tx_map *maps, *txm;
1627 	unsigned int i;
1628 
1629 	maps = txr->txr_maps;
1630 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
1631 		txm = &maps[i];
1632 
1633 		bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
1634 	}
1635 
1636 	iavf_dmamem_free(sc, &txr->txr_mem);
1637 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_tx_ring_ndescs);
1638 	free(txr, M_DEVBUF, sizeof(*txr));
1639 }
1640 
1641 static inline int
iavf_load_mbuf(bus_dma_tag_t dmat,bus_dmamap_t map,struct mbuf * m)1642 iavf_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m)
1643 {
1644 	int error;
1645 
1646 	error = bus_dmamap_load_mbuf(dmat, map, m,
1647 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT);
1648 	if (error != EFBIG)
1649 		return (error);
1650 
1651 	error = m_defrag(m, M_DONTWAIT);
1652 	if (error != 0)
1653 		return (error);
1654 
1655 	return (bus_dmamap_load_mbuf(dmat, map, m,
1656 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT));
1657 }
1658 
1659 static void
iavf_start(struct ifqueue * ifq)1660 iavf_start(struct ifqueue *ifq)
1661 {
1662 	struct ifnet *ifp = ifq->ifq_if;
1663 	struct iavf_softc *sc = ifp->if_softc;
1664 	struct iavf_tx_ring *txr = ifq->ifq_softc;
1665 	struct iavf_tx_desc *ring, *txd;
1666 	struct iavf_tx_map *txm;
1667 	bus_dmamap_t map;
1668 	struct mbuf *m;
1669 	uint64_t cmd;
1670 	uint64_t vlan_cmd;
1671 	unsigned int prod, free, last, i;
1672 	unsigned int mask;
1673 	int post = 0;
1674 #if NBPFILTER > 0
1675 	caddr_t if_bpf;
1676 #endif
1677 
1678 	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
1679 		ifq_purge(ifq);
1680 		return;
1681 	}
1682 
1683 	prod = txr->txr_prod;
1684 	free = txr->txr_cons;
1685 	if (free <= prod)
1686 		free += sc->sc_tx_ring_ndescs;
1687 	free -= prod;
1688 
1689 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&txr->txr_mem),
1690 	    0, IAVF_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
1691 
1692 	ring = IAVF_DMA_KVA(&txr->txr_mem);
1693 	mask = sc->sc_tx_ring_ndescs - 1;
1694 
1695 	for (;;) {
1696 		if (free <= IAVF_TX_PKT_DESCS) {
1697 			ifq_set_oactive(ifq);
1698 			break;
1699 		}
1700 
1701 		m = ifq_dequeue(ifq);
1702 		if (m == NULL)
1703 			break;
1704 
1705 		txm = &txr->txr_maps[prod];
1706 		map = txm->txm_map;
1707 
1708 		if (iavf_load_mbuf(sc->sc_dmat, map, m) != 0) {
1709 			ifq->ifq_errors++;
1710 			m_freem(m);
1711 			continue;
1712 		}
1713 
1714 		bus_dmamap_sync(sc->sc_dmat, map, 0,
1715 		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1716 
1717 		vlan_cmd = 0;
1718 		if (m->m_flags & M_VLANTAG) {
1719 			vlan_cmd = IAVF_TX_DESC_CMD_IL2TAG1 |
1720 			    (((uint64_t)m->m_pkthdr.ether_vtag) <<
1721 			    IAVF_TX_DESC_L2TAG1_SHIFT);
1722 		}
1723 
1724 		for (i = 0; i < map->dm_nsegs; i++) {
1725 			txd = &ring[prod];
1726 
1727 			cmd = (uint64_t)map->dm_segs[i].ds_len <<
1728 			    IAVF_TX_DESC_BSIZE_SHIFT;
1729 			cmd |= IAVF_TX_DESC_DTYPE_DATA | IAVF_TX_DESC_CMD_ICRC |
1730 			    vlan_cmd;
1731 
1732 			htolem64(&txd->addr, map->dm_segs[i].ds_addr);
1733 			htolem64(&txd->cmd, cmd);
1734 
1735 			last = prod;
1736 
1737 			prod++;
1738 			prod &= mask;
1739 		}
1740 		cmd |= IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS;
1741 		htolem64(&txd->cmd, cmd);
1742 
1743 		txm->txm_m = m;
1744 		txm->txm_eop = last;
1745 
1746 #if NBPFILTER > 0
1747 		if_bpf = ifp->if_bpf;
1748 		if (if_bpf)
1749 			bpf_mtap_ether(if_bpf, m, BPF_DIRECTION_OUT);
1750 #endif
1751 
1752 		free -= i;
1753 		post = 1;
1754 	}
1755 
1756 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&txr->txr_mem),
1757 	    0, IAVF_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
1758 
1759 	if (post) {
1760 		txr->txr_prod = prod;
1761 		iavf_wr(sc, txr->txr_tail, prod);
1762 	}
1763 }
1764 
1765 static int
iavf_txeof(struct iavf_softc * sc,struct ifqueue * ifq)1766 iavf_txeof(struct iavf_softc *sc, struct ifqueue *ifq)
1767 {
1768 	struct iavf_tx_ring *txr = ifq->ifq_softc;
1769 	struct iavf_tx_desc *ring, *txd;
1770 	struct iavf_tx_map *txm;
1771 	bus_dmamap_t map;
1772 	unsigned int cons, prod, last;
1773 	unsigned int mask;
1774 	uint64_t dtype;
1775 	int done = 0;
1776 
1777 	prod = txr->txr_prod;
1778 	cons = txr->txr_cons;
1779 
1780 	if (cons == prod)
1781 		return (0);
1782 
1783 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&txr->txr_mem),
1784 	    0, IAVF_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
1785 
1786 	ring = IAVF_DMA_KVA(&txr->txr_mem);
1787 	mask = sc->sc_tx_ring_ndescs - 1;
1788 
1789 	do {
1790 		txm = &txr->txr_maps[cons];
1791 		last = txm->txm_eop;
1792 		txd = &ring[last];
1793 
1794 		dtype = txd->cmd & htole64(IAVF_TX_DESC_DTYPE_MASK);
1795 		if (dtype != htole64(IAVF_TX_DESC_DTYPE_DONE))
1796 			break;
1797 
1798 		map = txm->txm_map;
1799 
1800 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1801 		    BUS_DMASYNC_POSTWRITE);
1802 		bus_dmamap_unload(sc->sc_dmat, map);
1803 		m_freem(txm->txm_m);
1804 
1805 		txm->txm_m = NULL;
1806 		txm->txm_eop = -1;
1807 
1808 		cons = last + 1;
1809 		cons &= mask;
1810 
1811 		done = 1;
1812 	} while (cons != prod);
1813 
1814 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&txr->txr_mem),
1815 	    0, IAVF_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
1816 
1817 	txr->txr_cons = cons;
1818 
1819 	//ixl_enable(sc, txr->txr_msix);
1820 
1821 	if (ifq_is_oactive(ifq))
1822 		ifq_restart(ifq);
1823 
1824 	return (done);
1825 }
1826 
1827 static struct iavf_rx_ring *
iavf_rxr_alloc(struct iavf_softc * sc,unsigned int qid)1828 iavf_rxr_alloc(struct iavf_softc *sc, unsigned int qid)
1829 {
1830 	struct iavf_rx_ring *rxr;
1831 	struct iavf_rx_map *maps, *rxm;
1832 	unsigned int i;
1833 
1834 	rxr = malloc(sizeof(*rxr), M_DEVBUF, M_WAITOK|M_CANFAIL);
1835 	if (rxr == NULL)
1836 		return (NULL);
1837 
1838 	maps = mallocarray(sizeof(*maps),
1839 	    sc->sc_rx_ring_ndescs, M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
1840 	if (maps == NULL)
1841 		goto free;
1842 
1843 	if (iavf_dmamem_alloc(sc, &rxr->rxr_mem,
1844 	    sizeof(struct iavf_rx_rd_desc_32) * sc->sc_rx_ring_ndescs,
1845 	    IAVF_RX_QUEUE_ALIGN) != 0)
1846 		goto freemap;
1847 
1848 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
1849 		rxm = &maps[i];
1850 
1851 		if (bus_dmamap_create(sc->sc_dmat,
1852 		    IAVF_HARDMTU, 1, IAVF_HARDMTU, 0,
1853 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
1854 		    &rxm->rxm_map) != 0)
1855 			goto uncreate;
1856 
1857 		rxm->rxm_m = NULL;
1858 	}
1859 
1860 	rxr->rxr_sc = sc;
1861 	if_rxr_init(&rxr->rxr_acct, 17, sc->sc_rx_ring_ndescs - 1);
1862 	timeout_set(&rxr->rxr_refill, iavf_rxrefill, rxr);
1863 	rxr->rxr_cons = rxr->rxr_prod = 0;
1864 	rxr->rxr_m_head = NULL;
1865 	rxr->rxr_m_tail = &rxr->rxr_m_head;
1866 	rxr->rxr_maps = maps;
1867 
1868 	rxr->rxr_tail = I40E_QRX_TAIL1(qid);
1869 	rxr->rxr_qid = qid;
1870 
1871 	return (rxr);
1872 
1873 uncreate:
1874 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
1875 		rxm = &maps[i];
1876 
1877 		if (rxm->rxm_map == NULL)
1878 			continue;
1879 
1880 		bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
1881 	}
1882 
1883 	iavf_dmamem_free(sc, &rxr->rxr_mem);
1884 freemap:
1885 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_rx_ring_ndescs);
1886 free:
1887 	free(rxr, M_DEVBUF, sizeof(*rxr));
1888 	return (NULL);
1889 }
1890 
1891 static void
iavf_rxr_clean(struct iavf_softc * sc,struct iavf_rx_ring * rxr)1892 iavf_rxr_clean(struct iavf_softc *sc, struct iavf_rx_ring *rxr)
1893 {
1894 	struct iavf_rx_map *maps, *rxm;
1895 	bus_dmamap_t map;
1896 	unsigned int i;
1897 
1898 	timeout_del_barrier(&rxr->rxr_refill);
1899 
1900 	maps = rxr->rxr_maps;
1901 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
1902 		rxm = &maps[i];
1903 
1904 		if (rxm->rxm_m == NULL)
1905 			continue;
1906 
1907 		map = rxm->rxm_map;
1908 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1909 		    BUS_DMASYNC_POSTWRITE);
1910 		bus_dmamap_unload(sc->sc_dmat, map);
1911 
1912 		m_freem(rxm->rxm_m);
1913 		rxm->rxm_m = NULL;
1914 	}
1915 
1916 	m_freem(rxr->rxr_m_head);
1917 	rxr->rxr_m_head = NULL;
1918 	rxr->rxr_m_tail = &rxr->rxr_m_head;
1919 
1920 	rxr->rxr_prod = rxr->rxr_cons = 0;
1921 }
1922 
1923 static void
iavf_rxr_free(struct iavf_softc * sc,struct iavf_rx_ring * rxr)1924 iavf_rxr_free(struct iavf_softc *sc, struct iavf_rx_ring *rxr)
1925 {
1926 	struct iavf_rx_map *maps, *rxm;
1927 	unsigned int i;
1928 
1929 	maps = rxr->rxr_maps;
1930 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
1931 		rxm = &maps[i];
1932 
1933 		bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
1934 	}
1935 
1936 	iavf_dmamem_free(sc, &rxr->rxr_mem);
1937 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_rx_ring_ndescs);
1938 	free(rxr, M_DEVBUF, sizeof(*rxr));
1939 }
1940 
1941 static int
iavf_rxeof(struct iavf_softc * sc,struct ifiqueue * ifiq)1942 iavf_rxeof(struct iavf_softc *sc, struct ifiqueue *ifiq)
1943 {
1944 	struct iavf_rx_ring *rxr = ifiq->ifiq_softc;
1945 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1946 	struct iavf_rx_wb_desc_32 *ring, *rxd;
1947 	struct iavf_rx_map *rxm;
1948 	bus_dmamap_t map;
1949 	unsigned int cons, prod;
1950 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1951 	struct mbuf *m;
1952 	uint64_t word;
1953 	uint16_t vlan;
1954 	unsigned int len;
1955 	unsigned int mask;
1956 	int done = 0;
1957 
1958 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
1959 		return (0);
1960 
1961 	prod = rxr->rxr_prod;
1962 	cons = rxr->rxr_cons;
1963 
1964 	if (cons == prod)
1965 		return (0);
1966 
1967 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&rxr->rxr_mem),
1968 	    0, IAVF_DMA_LEN(&rxr->rxr_mem),
1969 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1970 
1971 	ring = IAVF_DMA_KVA(&rxr->rxr_mem);
1972 	mask = sc->sc_rx_ring_ndescs - 1;
1973 
1974 	do {
1975 		rxd = &ring[cons];
1976 
1977 		word = lemtoh64(&rxd->qword1);
1978 		if (!ISSET(word, IAVF_RX_DESC_DD))
1979 			break;
1980 
1981 		if_rxr_put(&rxr->rxr_acct, 1);
1982 
1983 		rxm = &rxr->rxr_maps[cons];
1984 
1985 		map = rxm->rxm_map;
1986 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1987 		    BUS_DMASYNC_POSTREAD);
1988 		bus_dmamap_unload(sc->sc_dmat, map);
1989 
1990 		m = rxm->rxm_m;
1991 		rxm->rxm_m = NULL;
1992 
1993 		len = (word & IAVF_RX_DESC_PLEN_MASK) >> IAVF_RX_DESC_PLEN_SHIFT;
1994 		m->m_len = len;
1995 		m->m_pkthdr.len = 0;
1996 
1997 		m->m_next = NULL;
1998 		*rxr->rxr_m_tail = m;
1999 		rxr->rxr_m_tail = &m->m_next;
2000 
2001 		m = rxr->rxr_m_head;
2002 		m->m_pkthdr.len += len;
2003 
2004 		if (ISSET(word, IAVF_RX_DESC_EOP)) {
2005 			if (ISSET(word, IAVF_RX_DESC_L2TAG1P)) {
2006 				vlan = (lemtoh64(&rxd->qword0) &
2007 				    IAVF_RX_DESC_L2TAG1_MASK)
2008 				    >> IAVF_RX_DESC_L2TAG1_SHIFT;
2009 				m->m_pkthdr.ether_vtag = vlan;
2010 				m->m_flags |= M_VLANTAG;
2011 			}
2012 			if (!ISSET(word,
2013 			    IAVF_RX_DESC_RXE | IAVF_RX_DESC_OVERSIZE)) {
2014 				ml_enqueue(&ml, m);
2015 			} else {
2016 				ifp->if_ierrors++; /* XXX */
2017 				m_freem(m);
2018 			}
2019 
2020 			rxr->rxr_m_head = NULL;
2021 			rxr->rxr_m_tail = &rxr->rxr_m_head;
2022 		}
2023 
2024 		cons++;
2025 		cons &= mask;
2026 
2027 		done = 1;
2028 	} while (cons != prod);
2029 
2030 	if (done) {
2031 		rxr->rxr_cons = cons;
2032 		if (ifiq_input(ifiq, &ml))
2033 			if_rxr_livelocked(&rxr->rxr_acct);
2034 		iavf_rxfill(sc, rxr);
2035 	}
2036 
2037 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&rxr->rxr_mem),
2038 	    0, IAVF_DMA_LEN(&rxr->rxr_mem),
2039 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2040 
2041 	return (done);
2042 }
2043 
2044 static void
iavf_rxfill(struct iavf_softc * sc,struct iavf_rx_ring * rxr)2045 iavf_rxfill(struct iavf_softc *sc, struct iavf_rx_ring *rxr)
2046 {
2047 	struct iavf_rx_rd_desc_32 *ring, *rxd;
2048 	struct iavf_rx_map *rxm;
2049 	bus_dmamap_t map;
2050 	struct mbuf *m;
2051 	unsigned int prod;
2052 	unsigned int slots;
2053 	unsigned int mask;
2054 	int post = 0;
2055 
2056 	slots = if_rxr_get(&rxr->rxr_acct, sc->sc_rx_ring_ndescs);
2057 	if (slots == 0)
2058 		return;
2059 
2060 	prod = rxr->rxr_prod;
2061 
2062 	ring = IAVF_DMA_KVA(&rxr->rxr_mem);
2063 	mask = sc->sc_rx_ring_ndescs - 1;
2064 
2065 	do {
2066 		rxm = &rxr->rxr_maps[prod];
2067 
2068 		m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES + ETHER_ALIGN);
2069 		if (m == NULL)
2070 			break;
2071 		m->m_data += (m->m_ext.ext_size - (MCLBYTES + ETHER_ALIGN));
2072 		m->m_len = m->m_pkthdr.len = MCLBYTES + ETHER_ALIGN;
2073 
2074 		map = rxm->rxm_map;
2075 
2076 		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
2077 		    BUS_DMA_NOWAIT) != 0) {
2078 			m_freem(m);
2079 			break;
2080 		}
2081 
2082 		rxm->rxm_m = m;
2083 
2084 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2085 		    BUS_DMASYNC_PREREAD);
2086 
2087 		rxd = &ring[prod];
2088 
2089 		htolem64(&rxd->paddr, map->dm_segs[0].ds_addr);
2090 		rxd->haddr = htole64(0);
2091 
2092 		prod++;
2093 		prod &= mask;
2094 
2095 		post = 1;
2096 	} while (--slots);
2097 
2098 	if_rxr_put(&rxr->rxr_acct, slots);
2099 
2100 	if (if_rxr_inuse(&rxr->rxr_acct) == 0)
2101 		timeout_add(&rxr->rxr_refill, 1);
2102 	else if (post) {
2103 		rxr->rxr_prod = prod;
2104 		iavf_wr(sc, rxr->rxr_tail, prod);
2105 	}
2106 }
2107 
2108 void
iavf_rxrefill(void * arg)2109 iavf_rxrefill(void *arg)
2110 {
2111 	struct iavf_rx_ring *rxr = arg;
2112 	struct iavf_softc *sc = rxr->rxr_sc;
2113 
2114 	iavf_rxfill(sc, rxr);
2115 }
2116 
2117 static int
iavf_rxrinfo(struct iavf_softc * sc,struct if_rxrinfo * ifri)2118 iavf_rxrinfo(struct iavf_softc *sc, struct if_rxrinfo *ifri)
2119 {
2120 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2121 	struct if_rxring_info *ifr;
2122 	struct iavf_rx_ring *ring;
2123 	int i, rv;
2124 
2125 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2126 		return (ENOTTY);
2127 
2128 	ifr = mallocarray(sizeof(*ifr), iavf_nqueues(sc), M_TEMP,
2129 	    M_WAITOK|M_CANFAIL|M_ZERO);
2130 	if (ifr == NULL)
2131 		return (ENOMEM);
2132 
2133 	for (i = 0; i < iavf_nqueues(sc); i++) {
2134 		ring = ifp->if_iqs[i]->ifiq_softc;
2135 		ifr[i].ifr_size = MCLBYTES;
2136 		ifr[i].ifr_info = ring->rxr_acct;
2137 	}
2138 
2139 	rv = if_rxr_info_ioctl(ifri, iavf_nqueues(sc), ifr);
2140 	free(ifr, M_TEMP, iavf_nqueues(sc) * sizeof(*ifr));
2141 
2142 	return (rv);
2143 }
2144 
2145 static int
iavf_intr(void * xsc)2146 iavf_intr(void *xsc)
2147 {
2148 	struct iavf_softc *sc = xsc;
2149 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2150 	uint32_t icr, ena;
2151 	int i, rv = 0;
2152 
2153 	ena = iavf_rd(sc, I40E_VFINT_ICR0_ENA1);
2154 	iavf_intr_enable(sc);
2155 	icr = iavf_rd(sc, I40E_VFINT_ICR01);
2156 
2157 	if (icr == IAVF_REG_VFR) {
2158 		printf("%s: VF reset in progress\n", DEVNAME(sc));
2159 		sc->sc_resetting = 1;
2160 		task_add(systq, &sc->sc_reset_task);
2161 		return (1);
2162 	}
2163 
2164 	if (ISSET(icr, I40E_VFINT_ICR01_ADMINQ_MASK)) {
2165 		iavf_atq_done(sc);
2166 		iavf_process_arq(sc, 0);
2167 		rv = 1;
2168 	}
2169 
2170 	if (ISSET(icr, I40E_VFINT_ICR01_QUEUE_0_MASK)) {
2171 		for (i = 0; i < iavf_nqueues(sc); i++) {
2172 			rv |= iavf_rxeof(sc, ifp->if_iqs[i]);
2173 			rv |= iavf_txeof(sc, ifp->if_ifqs[i]);
2174 		}
2175 	}
2176 
2177 	return (rv);
2178 }
2179 
2180 static void
iavf_process_vf_resources(struct iavf_softc * sc,struct iavf_aq_desc * desc,struct iavf_aq_buf * buf)2181 iavf_process_vf_resources(struct iavf_softc *sc, struct iavf_aq_desc *desc,
2182     struct iavf_aq_buf *buf)
2183 {
2184 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2185 	struct iavf_vc_vf_resource *vf_res;
2186 	struct iavf_vc_vsi_resource *vsi_res;
2187 	int mtu;
2188 
2189 	sc->sc_got_vf_resources = 1;
2190 
2191 	vf_res = buf->aqb_data;
2192 	if (letoh16(vf_res->num_vsis) == 0) {
2193 		printf(", no VSI available\n");
2194 		/* set vsi number to something */
2195 		return;
2196 	}
2197 
2198 	mtu = letoh16(vf_res->max_mtu);
2199 	if (mtu != 0)
2200 		ifp->if_hardmtu = MIN(IAVF_HARDMTU, mtu);
2201 
2202 	/* limit vectors to what we got here? */
2203 
2204 	/* just take the first vsi */
2205 	vsi_res = &vf_res->vsi_res[0];
2206 	sc->sc_vsi_id = letoh16(vsi_res->vsi_id);
2207 	sc->sc_qset_handle = letoh16(vsi_res->qset_handle);
2208 	/* limit number of queues to what we got here */
2209 	/* is vsi type interesting? */
2210 
2211 	sc->sc_vf_id = letoh32(desc->iaq_param[0]);
2212 
2213 	memcpy(sc->sc_ac.ac_enaddr, vsi_res->default_mac, ETHER_ADDR_LEN);
2214 
2215 	if (sc->sc_resetting == 0)
2216 		printf(", VF %d VSI %d", sc->sc_vf_id, sc->sc_vsi_id);
2217 }
2218 
2219 static const struct iavf_link_speed *
iavf_find_link_speed(struct iavf_softc * sc,uint32_t link_speed)2220 iavf_find_link_speed(struct iavf_softc *sc, uint32_t link_speed)
2221 {
2222 	int i;
2223 	for (i = 0; i < nitems(iavf_link_speeds); i++) {
2224 		if (link_speed & (1 << i))
2225 			return (&iavf_link_speeds[i]);
2226 	}
2227 
2228 	return (NULL);
2229 }
2230 
2231 static void
iavf_process_vc_event(struct iavf_softc * sc,struct iavf_aq_desc * desc,struct iavf_aq_buf * buf)2232 iavf_process_vc_event(struct iavf_softc *sc, struct iavf_aq_desc *desc,
2233     struct iavf_aq_buf *buf)
2234 {
2235 	struct iavf_vc_pf_event *event;
2236 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2237 	const struct iavf_link_speed *speed;
2238 	int link;
2239 
2240 	event = buf->aqb_data;
2241 	switch (event->event) {
2242 	case IAVF_VC_EVENT_LINK_CHANGE:
2243 		sc->sc_media_status = IFM_AVALID;
2244 		sc->sc_media_active = IFM_ETHER;
2245 		link = LINK_STATE_DOWN;
2246 		if (event->link_status) {
2247 			link = LINK_STATE_UP;
2248 			sc->sc_media_status |= IFM_ACTIVE;
2249 
2250 			ifp->if_baudrate = 0;
2251 			speed = iavf_find_link_speed(sc, event->link_speed);
2252 			if (speed != NULL) {
2253 				sc->sc_media_active |= speed->media;
2254 				ifp->if_baudrate = speed->baudrate;
2255 			}
2256 		}
2257 
2258 		if (ifp->if_link_state != link) {
2259 			ifp->if_link_state = link;
2260 			if_link_state_change(ifp);
2261 		}
2262 		break;
2263 
2264 	default:
2265 		break;
2266 	}
2267 }
2268 
2269 static void
iavf_process_irq_map(struct iavf_softc * sc,struct iavf_aq_desc * desc)2270 iavf_process_irq_map(struct iavf_softc *sc, struct iavf_aq_desc *desc)
2271 {
2272 	if (letoh32(desc->iaq_vc_retval) != IAVF_VC_RC_SUCCESS) {
2273 		printf("config irq map failed: %d\n", letoh32(desc->iaq_vc_retval));
2274 	}
2275 	sc->sc_got_irq_map = 1;
2276 }
2277 
2278 static void
iavf_init_admin_queue(struct iavf_softc * sc)2279 iavf_init_admin_queue(struct iavf_softc *sc)
2280 {
2281 	iavf_wr(sc, sc->sc_aq_regs->atq_head, 0);
2282 	iavf_wr(sc, sc->sc_aq_regs->arq_head, 0);
2283 	iavf_wr(sc, sc->sc_aq_regs->atq_tail, 0);
2284 
2285 	iavf_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
2286 
2287 	iavf_wr(sc, sc->sc_aq_regs->atq_bal,
2288 	    iavf_dmamem_lo(&sc->sc_atq));
2289 	iavf_wr(sc, sc->sc_aq_regs->atq_bah,
2290 	    iavf_dmamem_hi(&sc->sc_atq));
2291 	iavf_wr(sc, sc->sc_aq_regs->atq_len,
2292 	    sc->sc_aq_regs->atq_len_enable | IAVF_AQ_NUM);
2293 
2294 	iavf_wr(sc, sc->sc_aq_regs->arq_bal,
2295 	    iavf_dmamem_lo(&sc->sc_arq));
2296 	iavf_wr(sc, sc->sc_aq_regs->arq_bah,
2297 	    iavf_dmamem_hi(&sc->sc_arq));
2298 	iavf_wr(sc, sc->sc_aq_regs->arq_len,
2299 	    sc->sc_aq_regs->arq_len_enable | IAVF_AQ_NUM);
2300 
2301 	iavf_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
2302 }
2303 
2304 static int
iavf_process_arq(struct iavf_softc * sc,int fill)2305 iavf_process_arq(struct iavf_softc *sc, int fill)
2306 {
2307 	struct iavf_aq_desc *arq, *iaq;
2308 	struct iavf_aq_buf *aqb;
2309 	struct iavf_vc_version_info *ver;
2310 	unsigned int cons = sc->sc_arq_cons;
2311 	unsigned int prod;
2312 	int done = 0;
2313 
2314 	prod = iavf_rd(sc, sc->sc_aq_regs->arq_head) &
2315 	    sc->sc_aq_regs->arq_head_mask;
2316 
2317 	if (cons == prod)
2318 		return (0);
2319 
2320 	arq = IAVF_DMA_KVA(&sc->sc_arq);
2321 
2322 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_arq),
2323 	    0, IAVF_DMA_LEN(&sc->sc_arq),
2324 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2325 
2326 	do {
2327 		iaq = &arq[cons];
2328 
2329 		aqb = SIMPLEQ_FIRST(&sc->sc_arq_live);
2330 		SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_live, aqb_entry);
2331 		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IAVF_AQ_BUFLEN,
2332 		    BUS_DMASYNC_POSTREAD);
2333 
2334 		switch (letoh32(iaq->iaq_vc_opcode)) {
2335 		case IAVF_VC_OP_VERSION:
2336 			ver = aqb->aqb_data;
2337 			sc->sc_major_ver = letoh32(ver->major);
2338 			sc->sc_minor_ver = letoh32(ver->minor);
2339 			break;
2340 
2341 		case IAVF_VC_OP_GET_VF_RESOURCES:
2342 			iavf_process_vf_resources(sc, iaq, aqb);
2343 			break;
2344 
2345 		case IAVF_VC_OP_EVENT:
2346 			iavf_process_vc_event(sc, iaq, aqb);
2347 			break;
2348 
2349 		case IAVF_VC_OP_CONFIG_IRQ_MAP:
2350 			iavf_process_irq_map(sc, iaq);
2351 			break;
2352 
2353 		case IAVF_VC_OP_CONFIG_TX_QUEUE:
2354 		case IAVF_VC_OP_CONFIG_RX_QUEUE:
2355 		case IAVF_VC_OP_CONFIG_VSI_QUEUES:
2356 		case IAVF_VC_OP_ENABLE_QUEUES:
2357 		case IAVF_VC_OP_DISABLE_QUEUES:
2358 		case IAVF_VC_OP_GET_RSS_HENA_CAPS:
2359 		case IAVF_VC_OP_SET_RSS_HENA:
2360 		case IAVF_VC_OP_ADD_ETH_ADDR:
2361 		case IAVF_VC_OP_DEL_ETH_ADDR:
2362 		case IAVF_VC_OP_CONFIG_PROMISC:
2363 			sc->sc_admin_result = letoh32(iaq->iaq_vc_retval);
2364 			cond_signal(&sc->sc_admin_cond);
2365 			break;
2366 		}
2367 
2368 		memset(iaq, 0, sizeof(*iaq));
2369 		SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry);
2370 		if_rxr_put(&sc->sc_arq_ring, 1);
2371 
2372 		cons++;
2373 		cons &= IAVF_AQ_MASK;
2374 
2375 		done = 1;
2376 	} while (cons != prod);
2377 
2378 	if (fill)
2379 		iavf_arq_fill(sc, 1);
2380 
2381 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_arq),
2382 	    0, IAVF_DMA_LEN(&sc->sc_arq),
2383 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2384 
2385 	sc->sc_arq_cons = cons;
2386 	return (done);
2387 }
2388 
2389 static void
iavf_atq_done(struct iavf_softc * sc)2390 iavf_atq_done(struct iavf_softc *sc)
2391 {
2392 	struct iavf_aq_desc *atq, *slot;
2393 	unsigned int cons;
2394 	unsigned int prod;
2395 
2396 	prod = sc->sc_atq_prod;
2397 	cons = sc->sc_atq_cons;
2398 
2399 	if (prod == cons)
2400 		return;
2401 
2402 	atq = IAVF_DMA_KVA(&sc->sc_atq);
2403 
2404 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_atq),
2405 	    0, IAVF_DMA_LEN(&sc->sc_atq),
2406 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2407 
2408 	do {
2409 		slot = &atq[cons];
2410 		if (!ISSET(slot->iaq_flags, htole16(IAVF_AQ_DD)))
2411 			break;
2412 
2413 		memset(slot, 0, sizeof(*slot));
2414 
2415 		cons++;
2416 		cons &= IAVF_AQ_MASK;
2417 	} while (cons != prod);
2418 
2419 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_atq),
2420 	    0, IAVF_DMA_LEN(&sc->sc_atq),
2421 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2422 
2423 	sc->sc_atq_cons = cons;
2424 }
2425 
2426 static int
iavf_atq_post(struct iavf_softc * sc,struct iavf_aq_desc * iaq)2427 iavf_atq_post(struct iavf_softc *sc, struct iavf_aq_desc *iaq)
2428 {
2429 	struct iavf_aq_desc *atq, *slot;
2430 	unsigned int prod;
2431 
2432 	atq = IAVF_DMA_KVA(&sc->sc_atq);
2433 	prod = sc->sc_atq_prod;
2434 	slot = atq + prod;
2435 
2436 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_atq),
2437 	    0, IAVF_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
2438 
2439 	*slot = *iaq;
2440 	slot->iaq_flags |= htole16(IAVF_AQ_SI);
2441 
2442 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_atq),
2443 	    0, IAVF_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
2444 
2445 	prod++;
2446 	prod &= IAVF_AQ_MASK;
2447 	sc->sc_atq_prod = prod;
2448 	iavf_wr(sc, sc->sc_aq_regs->atq_tail, prod);
2449 	return (prod);
2450 }
2451 
2452 static int
iavf_get_version(struct iavf_softc * sc)2453 iavf_get_version(struct iavf_softc *sc)
2454 {
2455 	struct iavf_aq_desc iaq;
2456 	struct iavf_vc_version_info *ver;
2457 	int tries;
2458 
2459 	memset(&iaq, 0, sizeof(iaq));
2460 	iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
2461 	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
2462 	iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_VERSION);
2463 	iaq.iaq_datalen = htole16(sizeof(struct iavf_vc_version_info));
2464 	iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));
2465 
2466 	ver = IAVF_DMA_KVA(&sc->sc_scratch);
2467 	ver->major = htole32(IAVF_VF_MAJOR);
2468 	ver->minor = htole32(IAVF_VF_MINOR);
2469 	sc->sc_major_ver = UINT_MAX;
2470 	sc->sc_minor_ver = UINT_MAX;
2471 
2472 	membar_sync();
2473 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_scratch), 0, IAVF_DMA_LEN(&sc->sc_scratch),
2474 	    BUS_DMASYNC_PREREAD);
2475 
2476 	iavf_atq_post(sc, &iaq);
2477 
2478 	for (tries = 0; tries < 100; tries++) {
2479 		iavf_process_arq(sc, 1);
2480 		if (sc->sc_major_ver != -1)
2481 			break;
2482 
2483 		delaymsec(1);
2484 	}
2485 	if (tries == 100) {
2486 		printf(", timeout waiting for VF version");
2487 		return (1);
2488 	}
2489 
2490 	if (sc->sc_major_ver != IAVF_VF_MAJOR) {
2491 		printf(", unsupported VF version %d", sc->sc_major_ver);
2492 		return (1);
2493 	}
2494 
2495 	if (sc->sc_resetting == 0) {
2496 		printf(", VF version %d.%d%s", sc->sc_major_ver,
2497 		    sc->sc_minor_ver,
2498 		    (sc->sc_minor_ver > IAVF_VF_MINOR) ? " (minor mismatch)" : "");
2499 	}
2500 
2501 	return (0);
2502 }
2503 
2504 static int
iavf_get_vf_resources(struct iavf_softc * sc)2505 iavf_get_vf_resources(struct iavf_softc *sc)
2506 {
2507 	struct iavf_aq_desc iaq;
2508 	uint32_t *cap;
2509 	int tries;
2510 
2511 	memset(&iaq, 0, sizeof(iaq));
2512 	iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
2513 	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
2514 	iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_GET_VF_RESOURCES);
2515 	iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));
2516 
2517 	if (sc->sc_minor_ver > 0) {
2518 		iaq.iaq_datalen = htole16(sizeof(uint32_t));
2519 		cap = IAVF_DMA_KVA(&sc->sc_scratch);
2520 		*cap = htole32(IAVF_VC_OFFLOAD_L2 | IAVF_VC_OFFLOAD_VLAN |
2521 		    IAVF_VC_OFFLOAD_RSS_PF);
2522 	}
2523 
2524 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_scratch), 0, IAVF_DMA_LEN(&sc->sc_scratch),
2525 	    BUS_DMASYNC_PREREAD);
2526 
2527 	sc->sc_got_vf_resources = 0;
2528 	iavf_atq_post(sc, &iaq);
2529 
2530 	for (tries = 0; tries < 100; tries++) {
2531 		iavf_process_arq(sc, 1);
2532 		if (sc->sc_got_vf_resources != 0)
2533 			return (0);
2534 
2535 		delaymsec(1);
2536 	}
2537 
2538 	return (1);
2539 }
2540 
2541 static int
iavf_config_irq_map(struct iavf_softc * sc)2542 iavf_config_irq_map(struct iavf_softc *sc)
2543 {
2544 	struct iavf_aq_desc iaq;
2545 	struct iavf_vc_vector_map *vec;
2546 	struct iavf_vc_irq_map_info *map;
2547 	int tries;
2548 
2549 	memset(&iaq, 0, sizeof(iaq));
2550 	iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
2551 	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
2552 	iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_CONFIG_IRQ_MAP);
2553 	iaq.iaq_datalen = htole16(sizeof(*map) + sizeof(*vec));
2554 	iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));
2555 
2556 	map = IAVF_DMA_KVA(&sc->sc_scratch);
2557 	map->num_vectors = letoh16(1);
2558 
2559 	vec = map->vecmap;
2560 	vec[0].vsi_id = letoh16(sc->sc_vsi_id);
2561 	vec[0].vector_id = 0;
2562 	vec[0].rxq_map = letoh16(iavf_allqueues(sc));
2563 	vec[0].txq_map = letoh16(iavf_allqueues(sc));
2564 	vec[0].rxitr_idx = IAVF_NOITR;
2565 	vec[0].txitr_idx = IAVF_NOITR;
2566 
2567 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_scratch), 0, IAVF_DMA_LEN(&sc->sc_scratch),
2568 	    BUS_DMASYNC_PREREAD);
2569 
2570 	sc->sc_got_irq_map = 0;
2571 	iavf_atq_post(sc, &iaq);
2572 
2573 	for (tries = 0; tries < 100; tries++) {
2574 		iavf_process_arq(sc, 1);
2575 		if (sc->sc_got_irq_map != 0)
2576 			return (0);
2577 
2578 		delaymsec(1);
2579 	}
2580 
2581 	return (1);
2582 }
2583 
2584 static struct iavf_aq_buf *
iavf_aqb_alloc(struct iavf_softc * sc)2585 iavf_aqb_alloc(struct iavf_softc *sc)
2586 {
2587 	struct iavf_aq_buf *aqb;
2588 
2589 	aqb = malloc(sizeof(*aqb), M_DEVBUF, M_WAITOK);
2590 	if (aqb == NULL)
2591 		return (NULL);
2592 
2593 	aqb->aqb_data = dma_alloc(IAVF_AQ_BUFLEN, PR_WAITOK);
2594 	if (aqb->aqb_data == NULL)
2595 		goto free;
2596 
2597 	if (bus_dmamap_create(sc->sc_dmat, IAVF_AQ_BUFLEN, 1,
2598 	    IAVF_AQ_BUFLEN, 0,
2599 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2600 	    &aqb->aqb_map) != 0)
2601 		goto dma_free;
2602 
2603 	if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,
2604 	    IAVF_AQ_BUFLEN, NULL, BUS_DMA_WAITOK) != 0)
2605 		goto destroy;
2606 
2607 	return (aqb);
2608 
2609 destroy:
2610 	bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
2611 dma_free:
2612 	dma_free(aqb->aqb_data, IAVF_AQ_BUFLEN);
2613 free:
2614 	free(aqb, M_DEVBUF, sizeof(*aqb));
2615 
2616 	return (NULL);
2617 }
2618 
2619 static void
iavf_aqb_free(struct iavf_softc * sc,struct iavf_aq_buf * aqb)2620 iavf_aqb_free(struct iavf_softc *sc, struct iavf_aq_buf *aqb)
2621 {
2622 	bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map);
2623 	bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
2624 	dma_free(aqb->aqb_data, IAVF_AQ_BUFLEN);
2625 	free(aqb, M_DEVBUF, sizeof(*aqb));
2626 }
2627 
2628 static int
iavf_arq_fill(struct iavf_softc * sc,int post)2629 iavf_arq_fill(struct iavf_softc *sc, int post)
2630 {
2631 	struct iavf_aq_buf *aqb;
2632 	struct iavf_aq_desc *arq, *iaq;
2633 	unsigned int prod = sc->sc_arq_prod;
2634 	unsigned int n;
2635 	int filled = 0;
2636 
2637 	n = if_rxr_get(&sc->sc_arq_ring, IAVF_AQ_NUM);
2638 	arq = IAVF_DMA_KVA(&sc->sc_arq);
2639 
2640 	while (n > 0) {
2641 		aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle);
2642 		if (aqb != NULL)
2643 			SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_idle, aqb_entry);
2644 		else if ((aqb = iavf_aqb_alloc(sc)) == NULL)
2645 			break;
2646 
2647 		memset(aqb->aqb_data, 0, IAVF_AQ_BUFLEN);
2648 
2649 		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IAVF_AQ_BUFLEN,
2650 		    BUS_DMASYNC_PREREAD);
2651 
2652 		iaq = &arq[prod];
2653 		iaq->iaq_flags = htole16(IAVF_AQ_BUF |
2654 		    (IAVF_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IAVF_AQ_LB : 0));
2655 		iaq->iaq_opcode = 0;
2656 		iaq->iaq_datalen = htole16(IAVF_AQ_BUFLEN);
2657 		iaq->iaq_retval = 0;
2658 		iaq->iaq_vc_opcode = 0;
2659 		iaq->iaq_vc_retval = 0;
2660 		iaq->iaq_param[0] = 0;
2661 		iaq->iaq_param[1] = 0;
2662 		iavf_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);
2663 
2664 		SIMPLEQ_INSERT_TAIL(&sc->sc_arq_live, aqb, aqb_entry);
2665 
2666 		prod++;
2667 		prod &= IAVF_AQ_MASK;
2668 
2669 		filled = 1;
2670 
2671 		n--;
2672 	}
2673 
2674 	if_rxr_put(&sc->sc_arq_ring, n);
2675 	sc->sc_arq_prod = prod;
2676 
2677 	if (filled && post)
2678 		iavf_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
2679 
2680 	return (filled);
2681 }
2682 
2683 static void
iavf_arq_unfill(struct iavf_softc * sc)2684 iavf_arq_unfill(struct iavf_softc *sc)
2685 {
2686 	struct iavf_aq_buf *aqb;
2687 
2688 	while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_live)) != NULL) {
2689 		SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_live, aqb_entry);
2690 
2691 		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IAVF_AQ_BUFLEN,
2692 		    BUS_DMASYNC_POSTREAD);
2693 		iavf_aqb_free(sc, aqb);
2694 		if_rxr_put(&sc->sc_arq_ring, 1);
2695 	}
2696 }
2697 
2698 static void
iavf_arq_timeout(void * xsc)2699 iavf_arq_timeout(void *xsc)
2700 {
2701 	struct iavf_softc *sc = xsc;
2702 
2703 	sc->sc_admin_result = -1;
2704 	cond_signal(&sc->sc_admin_cond);
2705 }
2706 
2707 static int
iavf_arq_wait(struct iavf_softc * sc,int msec)2708 iavf_arq_wait(struct iavf_softc *sc, int msec)
2709 {
2710 	cond_init(&sc->sc_admin_cond);
2711 
2712 	timeout_add_msec(&sc->sc_admin_timeout, msec);
2713 
2714 	cond_wait(&sc->sc_admin_cond, "iavfarq");
2715 	timeout_del(&sc->sc_admin_timeout);
2716 
2717 	iavf_arq_fill(sc, 1);
2718 	return sc->sc_admin_result;
2719 }
2720 
2721 static int
iavf_dmamem_alloc(struct iavf_softc * sc,struct iavf_dmamem * ixm,bus_size_t size,u_int align)2722 iavf_dmamem_alloc(struct iavf_softc *sc, struct iavf_dmamem *ixm,
2723     bus_size_t size, u_int align)
2724 {
2725 	ixm->ixm_size = size;
2726 
2727 	if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
2728 	    ixm->ixm_size, 0,
2729 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2730 	    &ixm->ixm_map) != 0)
2731 		return (1);
2732 	if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
2733 	    align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
2734 	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
2735 		goto destroy;
2736 	if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
2737 	    ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
2738 		goto free;
2739 	if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
2740 	    ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
2741 		goto unmap;
2742 
2743 	return (0);
2744 unmap:
2745 	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
2746 free:
2747 	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
2748 destroy:
2749 	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
2750 	return (1);
2751 }
2752 
2753 static void
iavf_dmamem_free(struct iavf_softc * sc,struct iavf_dmamem * ixm)2754 iavf_dmamem_free(struct iavf_softc *sc, struct iavf_dmamem *ixm)
2755 {
2756 	bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
2757 	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
2758 	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
2759 	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
2760 }
2761