/*	$OpenBSD: if_iavf.c,v 1.9 2020/07/10 13:26:38 patrick Exp $	*/

/*
 * Copyright (c) 2013-2015, Intel Corporation
 * All rights reserved.

 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  3. Neither the name of the Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2016,2017 David Gwynne <dlg@openbsd.org>
 * Copyright (c) 2019 Jonathan Matthew <jmatthew@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/timeout.h>
#include <sys/task.h>
#include <sys/syslog.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#define I40E_MASK(mask, shift)		((mask) << (shift))
#define I40E_AQ_LARGE_BUF		512

#define IAVF_REG_VFR			0xdeadbeef

#define IAVF_VFR_INPROGRESS		0
#define IAVF_VFR_COMPLETED		1
#define IAVF_VFR_VFACTIVE		2

#include <dev/pci/if_ixlreg.h>
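
/*
 * The admin queue is a pair of descriptor rings shared with the PF:
 * commands are posted on the ATQ and responses and asynchronous events
 * arrive on the ARQ.  Each slot is one of these 32 byte descriptors.
 * virtchnl messages are tunnelled through it: the virtchnl opcode and
 * return value travel in iaq_vc_opcode/iaq_vc_retval, and the DMA
 * address of any payload buffer goes in iaq_param[2] and [3] (see
 * iavf_aq_dva() below).
 */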
struct iavf_aq_desc {
	uint16_t	iaq_flags;
#define	IAVF_AQ_DD		(1U << 0)
#define	IAVF_AQ_CMP		(1U << 1)
#define IAVF_AQ_ERR		(1U << 2)
#define IAVF_AQ_VFE		(1U << 3)
#define IAVF_AQ_LB		(1U << 9)
#define IAVF_AQ_RD		(1U << 10)
#define IAVF_AQ_VFC		(1U << 11)
#define IAVF_AQ_BUF		(1U << 12)
#define IAVF_AQ_SI		(1U << 13)
#define IAVF_AQ_EI		(1U << 14)
#define IAVF_AQ_FE		(1U << 15)

#define IAVF_AQ_FLAGS_FMT	"\020" "\020FE" "\017EI" "\016SI" "\015BUF" \
				    "\014VFC" "\013RD" "\012LB" "\004VFE" \
				    "\003ERR" "\002CMP" "\001DD"

	uint16_t	iaq_opcode;

	uint16_t	iaq_datalen;
	uint16_t	iaq_retval;

	uint32_t	iaq_vc_opcode;
	uint32_t	iaq_vc_retval;

	uint32_t	iaq_param[4];
/*	iaq_vfid	iaq_param[0] */
/*	iaq_data_hi	iaq_param[2] */
/*	iaq_data_lo	iaq_param[3] */
} __packed __aligned(8);

/* aq commands */
#define IAVF_AQ_OP_SEND_TO_PF		0x0801
#define IAVF_AQ_OP_MSG_FROM_PF		0x0802
#define IAVF_AQ_OP_SHUTDOWN		0x0803

/* virt channel messages */
#define IAVF_VC_OP_VERSION		1
#define IAVF_VC_OP_RESET_VF		2
#define IAVF_VC_OP_GET_VF_RESOURCES	3
#define IAVF_VC_OP_CONFIG_TX_QUEUE	4
#define IAVF_VC_OP_CONFIG_RX_QUEUE	5
#define IAVF_VC_OP_CONFIG_VSI_QUEUES	6
#define IAVF_VC_OP_CONFIG_IRQ_MAP	7
#define IAVF_VC_OP_ENABLE_QUEUES	8
#define IAVF_VC_OP_DISABLE_QUEUES	9
#define IAVF_VC_OP_ADD_ETH_ADDR		10
#define IAVF_VC_OP_DEL_ETH_ADDR		11
#define IAVF_VC_OP_ADD_VLAN		12
#define IAVF_VC_OP_DEL_VLAN		13
#define IAVF_VC_OP_CONFIG_PROMISC	14
#define IAVF_VC_OP_GET_STATS		15
#define IAVF_VC_OP_EVENT		17
#define IAVF_VC_OP_GET_RSS_HENA_CAPS	25
#define IAVF_VC_OP_SET_RSS_HENA		26

/* virt channel response codes */
#define IAVF_VC_RC_SUCCESS		0
#define IAVF_VC_RC_ERR_PARAM		-5
#define IAVF_VC_RC_ERR_OPCODE		-38
#define IAVF_VC_RC_ERR_CQP_COMPL	-39
#define IAVF_VC_RC_ERR_VF_ID		-40
#define IAVF_VC_RC_ERR_NOT_SUP		-64

/* virt channel events */
#define IAVF_VC_EVENT_LINK_CHANGE	1
#define IAVF_VC_EVENT_RESET_IMPENDING	2
#define IAVF_VC_EVENT_PF_DRIVER_CLOSE	3

/* virt channel offloads */
#define IAVF_VC_OFFLOAD_L2		0x00000001
#define IAVF_VC_OFFLOAD_IWARP		0x00000002
#define IAVF_VC_OFFLOAD_RSVD		0x00000004
#define IAVF_VC_OFFLOAD_RSS_AQ		0x00000008
#define IAVF_VC_OFFLOAD_RSS_REG		0x00000010
#define IAVF_VC_OFFLOAD_WB_ON_ITR	0x00000020
#define IAVF_VC_OFFLOAD_VLAN		0x00010000
#define IAVF_VC_OFFLOAD_RX_POLLING	0x00020000
#define IAVF_VC_OFFLOAD_RSS_PCTYPE_V2	0x00040000
#define IAVF_VC_OFFLOAD_RSS_PF		0x00080000
#define IAVF_VC_OFFLOAD_ENCAP		0x00100000
#define IAVF_VC_OFFLOAD_ENCAP_CSUM	0x00200000
#define IAVF_VC_OFFLOAD_RX_ENCAP_CSUM	0x00400000

/* link speeds */
#define IAVF_VC_LINK_SPEED_100MB	0x1
#define IAVF_VC_LINK_SPEED_1000MB	0x2
#define IAVF_VC_LINK_SPEED_10GB		0x3
#define IAVF_VC_LINK_SPEED_40GB		0x4
#define IAVF_VC_LINK_SPEED_20GB		0x5
#define IAVF_VC_LINK_SPEED_25GB		0x6
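
/*
 * The PF reports link speed as one of the codes above in
 * IAVF_VC_EVENT_LINK_CHANGE events; the code is used as an index into
 * iavf_link_speeds[] below, with entry 0 meaning unknown.
 */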
struct iavf_link_speed {
	uint64_t	baudrate;
	uint64_t	media;
};

static const struct iavf_link_speed iavf_link_speeds[] = {
	{ 0, 0 },
	{ IF_Mbps(100), IFM_100_TX },
	{ IF_Mbps(1000), IFM_1000_T },
	{ IF_Gbps(10), IFM_10G_T },
	{ IF_Gbps(40), IFM_40G_CR4 },
	{ IF_Gbps(20), IFM_20G_KR2 },
	{ IF_Gbps(25), IFM_25G_CR }
};

struct iavf_vc_version_info {
	uint32_t	major;
	uint32_t	minor;
} __packed;

struct iavf_vc_txq_info {
	uint16_t	vsi_id;
	uint16_t	queue_id;
	uint16_t	ring_len;
	uint16_t	headwb_ena;		/* deprecated */
	uint64_t	dma_ring_addr;
	uint64_t	dma_headwb_addr;	/* deprecated */
} __packed;

struct iavf_vc_rxq_info {
	uint16_t	vsi_id;
	uint16_t	queue_id;
	uint32_t	ring_len;
	uint16_t	hdr_size;
	uint16_t	splithdr_ena;
	uint32_t	databuf_size;
	uint32_t	max_pkt_size;
	uint32_t	pad1;
	uint64_t	dma_ring_addr;
	uint32_t	rx_split_pos;
	uint32_t	pad2;
} __packed;

struct iavf_vc_queue_pair_info {
	struct iavf_vc_txq_info	txq;
	struct iavf_vc_rxq_info	rxq;
} __packed;
struct iavf_vc_queue_config_info {
	uint16_t	vsi_id;
	uint16_t	num_queue_pairs;
	uint32_t	pad;
	struct iavf_vc_queue_pair_info qpair[1];
} __packed;

struct iavf_vc_vector_map {
	uint16_t	vsi_id;
	uint16_t	vector_id;
	uint16_t	rxq_map;
	uint16_t	txq_map;
	uint16_t	rxitr_idx;
	uint16_t	txitr_idx;
} __packed;

struct iavf_vc_irq_map_info {
	uint16_t	num_vectors;
	struct iavf_vc_vector_map vecmap[1];
} __packed;

struct iavf_vc_queue_select {
	uint16_t	vsi_id;
	uint16_t	pad;
	uint32_t	rx_queues;
	uint32_t	tx_queues;
} __packed;

struct iavf_vc_vsi_resource {
	uint16_t	vsi_id;
	uint16_t	num_queue_pairs;
	uint32_t	vsi_type;
	uint16_t	qset_handle;
	uint8_t		default_mac[ETHER_ADDR_LEN];
} __packed;

struct iavf_vc_vf_resource {
	uint16_t	num_vsis;
	uint16_t	num_qp;
	uint16_t	max_vectors;
	uint16_t	max_mtu;
	uint32_t	offload_flags;
	uint32_t	rss_key_size;
	uint32_t	rss_lut_size;
	struct iavf_vc_vsi_resource vsi_res[1];
} __packed;

struct iavf_vc_eth_addr {
	uint8_t		addr[ETHER_ADDR_LEN];
	uint8_t		pad[2];
} __packed;

struct iavf_vc_eth_addr_list {
	uint16_t	vsi_id;
	uint16_t	num_elements;
	struct iavf_vc_eth_addr list[1];
} __packed;

struct iavf_vc_vlan_list {
	uint16_t	vsi_id;
	uint16_t	num_elements;
	uint16_t	vlan_id[1];
} __packed;

struct iavf_vc_promisc_info {
	uint16_t	vsi_id;
	uint16_t	flags;
#define IAVF_FLAG_VF_UNICAST_PROMISC	0x0001
#define IAVF_FLAG_VF_MULTICAST_PROMISC	0x0002
} __packed;

struct iavf_vc_pf_event {
	uint32_t	event;
	uint32_t	link_speed;
	uint8_t		link_status;
	uint8_t		pad[3];
	uint32_t	severity;
} __packed;

/* aq response codes */
#define IAVF_AQ_RC_OK			0  /* success */
#define IAVF_AQ_RC_EPERM		1  /* Operation not permitted */
#define IAVF_AQ_RC_ENOENT		2  /* No such element */
#define IAVF_AQ_RC_ESRCH		3  /* Bad opcode */
#define IAVF_AQ_RC_EINTR		4  /* operation interrupted */
#define IAVF_AQ_RC_EIO			5  /* I/O error */
#define IAVF_AQ_RC_ENXIO		6  /* No such resource */
#define IAVF_AQ_RC_E2BIG		7  /* Arg too long */
#define IAVF_AQ_RC_EAGAIN		8  /* Try again */
#define IAVF_AQ_RC_ENOMEM		9  /* Out of memory */
#define IAVF_AQ_RC_EACCES		10 /* Permission denied */
#define IAVF_AQ_RC_EFAULT		11 /* Bad address */
#define IAVF_AQ_RC_EBUSY		12 /* Device or resource busy */
#define IAVF_AQ_RC_EEXIST		13 /* object already exists */
#define IAVF_AQ_RC_EINVAL		14 /* invalid argument */
#define IAVF_AQ_RC_ENOTTY		15 /* not a typewriter */
#define IAVF_AQ_RC_ENOSPC		16 /* No space or alloc failure */
#define IAVF_AQ_RC_ENOSYS		17 /* function not implemented */
#define IAVF_AQ_RC_ERANGE		18 /* parameter out of range */
#define IAVF_AQ_RC_EFLUSHED		19 /* cmd flushed due to prev error */
#define IAVF_AQ_RC_BAD_ADDR		20 /* contains a bad pointer */
#define IAVF_AQ_RC_EMODE		21 /* not allowed in current mode */
#define IAVF_AQ_RC_EFBIG		22 /* file too large */

struct iavf_tx_desc {
	uint64_t		addr;
	uint64_t		cmd;
#define IAVF_TX_DESC_DTYPE_SHIFT		0
#define IAVF_TX_DESC_DTYPE_MASK		(0xfULL << IAVF_TX_DESC_DTYPE_SHIFT)
#define IAVF_TX_DESC_DTYPE_DATA		(0x0ULL << IAVF_TX_DESC_DTYPE_SHIFT)
#define IAVF_TX_DESC_DTYPE_NOP		(0x1ULL << IAVF_TX_DESC_DTYPE_SHIFT)
#define IAVF_TX_DESC_DTYPE_CONTEXT	(0x1ULL << IAVF_TX_DESC_DTYPE_SHIFT)
#define IAVF_TX_DESC_DTYPE_FCOE_CTX	(0x2ULL << IAVF_TX_DESC_DTYPE_SHIFT)
#define IAVF_TX_DESC_DTYPE_FD		(0x8ULL << IAVF_TX_DESC_DTYPE_SHIFT)
#define IAVF_TX_DESC_DTYPE_DDP_CTX	(0x9ULL << IAVF_TX_DESC_DTYPE_SHIFT)
#define IAVF_TX_DESC_DTYPE_FLEX_DATA	(0xbULL << IAVF_TX_DESC_DTYPE_SHIFT)
#define IAVF_TX_DESC_DTYPE_FLEX_CTX_1	(0xcULL << IAVF_TX_DESC_DTYPE_SHIFT)
#define IAVF_TX_DESC_DTYPE_FLEX_CTX_2	(0xdULL << IAVF_TX_DESC_DTYPE_SHIFT)
#define IAVF_TX_DESC_DTYPE_DONE		(0xfULL << IAVF_TX_DESC_DTYPE_SHIFT)

#define IAVF_TX_DESC_CMD_SHIFT		4
#define IAVF_TX_DESC_CMD_MASK		(0x3ffULL << IAVF_TX_DESC_CMD_SHIFT)
#define IAVF_TX_DESC_CMD_EOP		(0x001 << IAVF_TX_DESC_CMD_SHIFT)
#define IAVF_TX_DESC_CMD_RS		(0x002 << IAVF_TX_DESC_CMD_SHIFT)
#define IAVF_TX_DESC_CMD_ICRC		(0x004 << IAVF_TX_DESC_CMD_SHIFT)
#define IAVF_TX_DESC_CMD_IL2TAG1	(0x008 << IAVF_TX_DESC_CMD_SHIFT)
#define IAVF_TX_DESC_CMD_DUMMY		(0x010 << IAVF_TX_DESC_CMD_SHIFT)
#define IAVF_TX_DESC_CMD_IIPT_MASK	(0x060 << IAVF_TX_DESC_CMD_SHIFT)
#define IAVF_TX_DESC_CMD_IIPT_NONIP	(0x000 << IAVF_TX_DESC_CMD_SHIFT)
#define IAVF_TX_DESC_CMD_IIPT_IPV6	(0x020 << IAVF_TX_DESC_CMD_SHIFT)
#define IAVF_TX_DESC_CMD_IIPT_IPV4	(0x040 << IAVF_TX_DESC_CMD_SHIFT)
#define IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM	(0x060 << IAVF_TX_DESC_CMD_SHIFT)
#define IAVF_TX_DESC_CMD_FCOET		(0x080 << IAVF_TX_DESC_CMD_SHIFT)
#define IAVF_TX_DESC_CMD_L4T_EOFT_MASK	(0x300 << IAVF_TX_DESC_CMD_SHIFT)
#define IAVF_TX_DESC_CMD_L4T_EOFT_UNK	(0x000 << IAVF_TX_DESC_CMD_SHIFT)
#define IAVF_TX_DESC_CMD_L4T_EOFT_TCP	(0x100 << IAVF_TX_DESC_CMD_SHIFT)
#define IAVF_TX_DESC_CMD_L4T_EOFT_SCTP	(0x200 << IAVF_TX_DESC_CMD_SHIFT)
#define IAVF_TX_DESC_CMD_L4T_EOFT_UDP	(0x300 << IAVF_TX_DESC_CMD_SHIFT)

#define IAVF_TX_DESC_MACLEN_SHIFT	16
#define IAVF_TX_DESC_MACLEN_MASK	(0x7fULL << IAVF_TX_DESC_MACLEN_SHIFT)
#define IAVF_TX_DESC_IPLEN_SHIFT	23
#define IAVF_TX_DESC_IPLEN_MASK		(0x7fULL << IAVF_TX_DESC_IPLEN_SHIFT)
#define IAVF_TX_DESC_L4LEN_SHIFT	30
#define IAVF_TX_DESC_L4LEN_MASK		(0xfULL << IAVF_TX_DESC_L4LEN_SHIFT)
#define IAVF_TX_DESC_FCLEN_SHIFT	30
#define IAVF_TX_DESC_FCLEN_MASK		(0xfULL << IAVF_TX_DESC_FCLEN_SHIFT)

#define IAVF_TX_DESC_BSIZE_SHIFT	34
#define IAVF_TX_DESC_BSIZE_MAX		0x3fffULL
#define IAVF_TX_DESC_BSIZE_MASK		\
	(IAVF_TX_DESC_BSIZE_MAX << IAVF_TX_DESC_BSIZE_SHIFT)

#define IAVF_TX_DESC_L2TAG1_SHIFT	48
#define IAVF_TX_DESC_L2TAG1_MASK	(0xffffULL << IAVF_TX_DESC_L2TAG1_SHIFT)
} __packed __aligned(16);
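
/*
 * Everything except the buffer address is packed into the 64 bit cmd
 * word: descriptor type, command flags, offload lengths, buffer size
 * and VLAN tag.  For example, a plain data segment with CRC insertion
 * is encoded as
 *
 *	IAVF_TX_DESC_DTYPE_DATA | IAVF_TX_DESC_CMD_ICRC |
 *	    (len << IAVF_TX_DESC_BSIZE_SHIFT)
 *
 * which is how iavf_start() fills the ring below.
 */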

struct iavf_rx_rd_desc_16 {
	uint64_t		paddr; /* packet addr */
	uint64_t		haddr; /* header addr */
} __packed __aligned(16);

struct iavf_rx_rd_desc_32 {
	uint64_t		paddr; /* packet addr */
	uint64_t		haddr; /* header addr */
	uint64_t		_reserved1;
	uint64_t		_reserved2;
} __packed __aligned(16);
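
/*
 * On completion the hardware overwrites the read descriptor above
 * with this write-back format; iavf_rxeof() checks the DD bit to see
 * whether a slot has completed and takes the packet length from the
 * PLEN field.
 */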
struct iavf_rx_wb_desc_16 {
	uint64_t		qword0;
#define IAVF_RX_DESC_L2TAG1_SHIFT	16
#define IAVF_RX_DESC_L2TAG1_MASK	(0xffff << IAVF_RX_DESC_L2TAG1_SHIFT)
	uint64_t		qword1;
#define IAVF_RX_DESC_DD			(1 << 0)
#define IAVF_RX_DESC_EOP		(1 << 1)
#define IAVF_RX_DESC_L2TAG1P		(1 << 2)
#define IAVF_RX_DESC_L3L4P		(1 << 3)
#define IAVF_RX_DESC_CRCP		(1 << 4)
#define IAVF_RX_DESC_TSYNINDX_SHIFT	5	/* TSYNINDX */
#define IAVF_RX_DESC_TSYNINDX_MASK	(7 << IAVF_RX_DESC_TSYNINDX_SHIFT)
#define IAVF_RX_DESC_UMB_SHIFT		9
#define IAVF_RX_DESC_UMB_MASK		(0x3 << IAVF_RX_DESC_UMB_SHIFT)
#define IAVF_RX_DESC_UMB_UCAST		(0x0 << IAVF_RX_DESC_UMB_SHIFT)
#define IAVF_RX_DESC_UMB_MCAST		(0x1 << IAVF_RX_DESC_UMB_SHIFT)
#define IAVF_RX_DESC_UMB_BCAST		(0x2 << IAVF_RX_DESC_UMB_SHIFT)
#define IAVF_RX_DESC_UMB_MIRROR		(0x3 << IAVF_RX_DESC_UMB_SHIFT)
#define IAVF_RX_DESC_FLM		(1 << 11)
#define IAVF_RX_DESC_FLTSTAT_SHIFT	12
#define IAVF_RX_DESC_FLTSTAT_MASK	(0x3 << IAVF_RX_DESC_FLTSTAT_SHIFT)
#define IAVF_RX_DESC_FLTSTAT_NODATA	(0x0 << IAVF_RX_DESC_FLTSTAT_SHIFT)
#define IAVF_RX_DESC_FLTSTAT_FDFILTID	(0x1 << IAVF_RX_DESC_FLTSTAT_SHIFT)
#define IAVF_RX_DESC_FLTSTAT_RSS	(0x3 << IAVF_RX_DESC_FLTSTAT_SHIFT)
#define IAVF_RX_DESC_LPBK		(1 << 14)
#define IAVF_RX_DESC_IPV6EXTADD		(1 << 15)
#define IAVF_RX_DESC_INT_UDP_0		(1 << 18)

#define IAVF_RX_DESC_RXE		(1 << 19)
#define IAVF_RX_DESC_HBO		(1 << 21)
#define IAVF_RX_DESC_IPE		(1 << 22)
#define IAVF_RX_DESC_L4E		(1 << 23)
#define IAVF_RX_DESC_EIPE		(1 << 24)
#define IAVF_RX_DESC_OVERSIZE		(1 << 25)

#define IAVF_RX_DESC_PTYPE_SHIFT	30
#define IAVF_RX_DESC_PTYPE_MASK		(0xffULL << IAVF_RX_DESC_PTYPE_SHIFT)

#define IAVF_RX_DESC_PLEN_SHIFT		38
#define IAVF_RX_DESC_PLEN_MASK		(0x3fffULL << IAVF_RX_DESC_PLEN_SHIFT)
#define IAVF_RX_DESC_HLEN_SHIFT		42
#define IAVF_RX_DESC_HLEN_MASK		(0x7ffULL << IAVF_RX_DESC_HLEN_SHIFT)
} __packed __aligned(16);

struct iavf_rx_wb_desc_32 {
	uint64_t		qword0;
	uint64_t		qword1;
	uint64_t		qword2;
	uint64_t		qword3;
} __packed __aligned(16);

#define IAVF_VF_MAJOR			1
#define IAVF_VF_MINOR			1

#define IAVF_TX_PKT_DESCS		8
#define IAVF_TX_QUEUE_ALIGN		128
#define IAVF_RX_QUEUE_ALIGN		128

#define IAVF_HARDMTU			9712 /* 9726 - ETHER_HDR_LEN */

#define IAVF_PCIREG			PCI_MAPREG_START

#define IAVF_ITR0			0x0
#define IAVF_ITR1			0x1
#define IAVF_ITR2			0x2
#define IAVF_NOITR			0x3

#define IAVF_AQ_NUM			256
#define IAVF_AQ_MASK			(IAVF_AQ_NUM - 1)
#define IAVF_AQ_ALIGN			64 /* lol */
#define IAVF_AQ_BUFLEN			4096

struct iavf_aq_regs {
	bus_size_t		atq_tail;
	bus_size_t		atq_head;
	bus_size_t		atq_len;
	bus_size_t		atq_bal;
	bus_size_t		atq_bah;

	bus_size_t		arq_tail;
	bus_size_t		arq_head;
	bus_size_t		arq_len;
	bus_size_t		arq_bal;
	bus_size_t		arq_bah;

	uint32_t		atq_len_enable;
	uint32_t		atq_tail_mask;
	uint32_t		atq_head_mask;

	uint32_t		arq_len_enable;
	uint32_t		arq_tail_mask;
	uint32_t		arq_head_mask;
};

struct iavf_aq_buf {
	SIMPLEQ_ENTRY(iavf_aq_buf)
				 aqb_entry;
	void			*aqb_data;
	bus_dmamap_t		 aqb_map;
};
SIMPLEQ_HEAD(iavf_aq_bufs, iavf_aq_buf);
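
/*
 * iavf_dmamem wraps a single-segment DMA allocation: IAVF_DMA_KVA
 * gives the CPU view of the memory and IAVF_DMA_DVA the bus address
 * that gets programmed into the device.
 */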
struct iavf_dmamem {
	bus_dmamap_t		ixm_map;
	bus_dma_segment_t	ixm_seg;
	int			ixm_nsegs;
	size_t			ixm_size;
	caddr_t			ixm_kva;
};
#define IAVF_DMA_MAP(_ixm)	((_ixm)->ixm_map)
#define IAVF_DMA_DVA(_ixm)	((_ixm)->ixm_map->dm_segs[0].ds_addr)
#define IAVF_DMA_KVA(_ixm)	((void *)(_ixm)->ixm_kva)
#define IAVF_DMA_LEN(_ixm)	((_ixm)->ixm_size)

struct iavf_tx_map {
	struct mbuf		*txm_m;
	bus_dmamap_t		 txm_map;
	unsigned int		 txm_eop;
};

struct iavf_tx_ring {
	unsigned int		 txr_prod;
	unsigned int		 txr_cons;

	struct iavf_tx_map	*txr_maps;
	struct iavf_dmamem	 txr_mem;

	bus_size_t		 txr_tail;
	unsigned int		 txr_qid;
};

struct iavf_rx_map {
	struct mbuf		*rxm_m;
	bus_dmamap_t		 rxm_map;
};
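
/*
 * A packet that spans several descriptors is collected via
 * rxr_m_head/rxr_m_tail until the descriptor with the EOP bit set
 * completes it.
 */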
struct iavf_rx_ring {
	struct iavf_softc	*rxr_sc;

	struct if_rxring	 rxr_acct;
	struct timeout		 rxr_refill;

	unsigned int		 rxr_prod;
	unsigned int		 rxr_cons;

	struct iavf_rx_map	*rxr_maps;
	struct iavf_dmamem	 rxr_mem;

	struct mbuf		*rxr_m_head;
	struct mbuf		**rxr_m_tail;

	bus_size_t		 rxr_tail;
	unsigned int		 rxr_qid;
};

struct iavf_softc {
	struct device		 sc_dev;
	struct arpcom		 sc_ac;
	struct ifmedia		 sc_media;
	uint64_t		 sc_media_status;
	uint64_t		 sc_media_active;

	pci_chipset_tag_t	 sc_pc;
	pci_intr_handle_t	 sc_ih;
	void			*sc_ihc;
	pcitag_t		 sc_tag;

	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;

	uint32_t		 sc_major_ver;
	uint32_t		 sc_minor_ver;

	int			 sc_got_vf_resources;
	int			 sc_got_irq_map;
	uint32_t		 sc_vf_id;
	uint16_t		 sc_vsi_id;
	uint16_t		 sc_qset_handle;
	unsigned int		 sc_base_queue;

	struct cond		 sc_admin_cond;
	int			 sc_admin_result;
	struct timeout		 sc_admin_timeout;

	struct iavf_dmamem	 sc_scratch;

	const struct iavf_aq_regs *
				 sc_aq_regs;

	struct mutex		 sc_atq_mtx;
	struct iavf_dmamem	 sc_atq;
	unsigned int		 sc_atq_prod;
	unsigned int		 sc_atq_cons;

	struct iavf_dmamem	 sc_arq;
	struct iavf_aq_bufs	 sc_arq_idle;
	struct iavf_aq_bufs	 sc_arq_live;
	struct if_rxring	 sc_arq_ring;
	unsigned int		 sc_arq_prod;
	unsigned int		 sc_arq_cons;

	struct task		 sc_reset_task;
	int			 sc_resetting;

	unsigned int		 sc_tx_ring_ndescs;
	unsigned int		 sc_rx_ring_ndescs;
	unsigned int		 sc_nqueues;	/* 1 << sc_nqueues */

	struct rwlock		 sc_cfg_lock;
	unsigned int		 sc_dead;

	uint8_t			 sc_enaddr[ETHER_ADDR_LEN];
};
#define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname)

#define delaymsec(_ms)	delay(1000 * (_ms))

static int	iavf_dmamem_alloc(struct iavf_softc *, struct iavf_dmamem *,
		    bus_size_t, u_int);
static void	iavf_dmamem_free(struct iavf_softc *, struct iavf_dmamem *);

static int	iavf_arq_fill(struct iavf_softc *, int);
static void	iavf_arq_unfill(struct iavf_softc *);
static void	iavf_arq_timeout(void *);
static int	iavf_arq_wait(struct iavf_softc *, int);

static int	iavf_atq_post(struct iavf_softc *, struct iavf_aq_desc *);
static void	iavf_atq_done(struct iavf_softc *);

static void	iavf_init_admin_queue(struct iavf_softc *);

static int	iavf_get_version(struct iavf_softc *);
static int	iavf_get_vf_resources(struct iavf_softc *);
static int	iavf_config_irq_map(struct iavf_softc *);

static int	iavf_add_del_addr(struct iavf_softc *, uint8_t *, int);
static int	iavf_process_arq(struct iavf_softc *, int);

static int	iavf_match(struct device *, void *, void *);
static void	iavf_attach(struct device *, struct device *, void *);

static int	iavf_media_change(struct ifnet *);
static void	iavf_media_status(struct ifnet *, struct ifmediareq *);
static void	iavf_watchdog(struct ifnet *);
static int	iavf_ioctl(struct ifnet *, u_long, caddr_t);
static void	iavf_start(struct ifqueue *);
static int	iavf_intr(void *);
static int	iavf_up(struct iavf_softc *);
static int	iavf_down(struct iavf_softc *);
static int	iavf_iff(struct iavf_softc *);
static void	iavf_reset(void *);

static struct iavf_tx_ring *
		iavf_txr_alloc(struct iavf_softc *, unsigned int);
static void	iavf_txr_clean(struct iavf_softc *, struct iavf_tx_ring *);
static void	iavf_txr_free(struct iavf_softc *, struct iavf_tx_ring *);
static int	iavf_txeof(struct iavf_softc *, struct ifqueue *);

static struct iavf_rx_ring *
		iavf_rxr_alloc(struct iavf_softc *, unsigned int);
static void	iavf_rxr_clean(struct iavf_softc *, struct iavf_rx_ring *);
static void	iavf_rxr_free(struct iavf_softc *, struct iavf_rx_ring *);
static int	iavf_rxeof(struct iavf_softc *, struct ifiqueue *);
static void	iavf_rxfill(struct iavf_softc *, struct iavf_rx_ring *);
static void	iavf_rxrefill(void *);
static int	iavf_rxrinfo(struct iavf_softc *, struct if_rxrinfo *);

struct cfdriver iavf_cd = {
	NULL,
	"iavf",
	DV_IFNET,
};

struct cfattach iavf_ca = {
	sizeof(struct iavf_softc),
	iavf_match,
	iavf_attach,
};

static const struct iavf_aq_regs iavf_aq_regs = {
	.atq_tail	= I40E_VF_ATQT1,
	.atq_tail_mask	= I40E_VF_ATQT1_ATQT_MASK,
	.atq_head	= I40E_VF_ATQH1,
	.atq_head_mask	= I40E_VF_ATQH1_ATQH_MASK,
	.atq_len	= I40E_VF_ATQLEN1,
	.atq_bal	= I40E_VF_ATQBAL1,
	.atq_bah	= I40E_VF_ATQBAH1,
	.atq_len_enable	= I40E_VF_ATQLEN1_ATQENABLE_MASK,

	.arq_tail	= I40E_VF_ARQT1,
	.arq_tail_mask	= I40E_VF_ARQT1_ARQT_MASK,
	.arq_head	= I40E_VF_ARQH1,
	.arq_head_mask	= I40E_VF_ARQH1_ARQH_MASK,
	.arq_len	= I40E_VF_ARQLEN1,
	.arq_bal	= I40E_VF_ARQBAL1,
	.arq_bah	= I40E_VF_ARQBAH1,
	.arq_len_enable	= I40E_VF_ARQLEN1_ARQENABLE_MASK,
};

#define iavf_rd(_s, _r) \
	bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
#define iavf_wr(_s, _r, _v) \
	bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
#define iavf_barrier(_s, _r, _l, _o) \
	bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
#define iavf_intr_enable(_s) \
	iavf_wr((_s), I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL0_INTENA_MASK | \
	    I40E_VFINT_DYN_CTL0_CLEARPBA_MASK | \
	    (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)); \
	iavf_wr((_s), I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK)

#define iavf_nqueues(_sc)	(1 << (_sc)->sc_nqueues)
#define iavf_allqueues(_sc)	((1 << iavf_nqueues(_sc)) - 1)
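
/*
 * sc_nqueues holds log2 of the queue count, so e.g. sc_nqueues == 2
 * means iavf_nqueues() == 4 and iavf_allqueues() == 0xf.  This driver
 * currently runs with sc_nqueues == 0, i.e. a single queue pair.
 */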

#ifdef __LP64__
#define iavf_dmamem_hi(_ixm)	(uint32_t)(IAVF_DMA_DVA(_ixm) >> 32)
#else
#define iavf_dmamem_hi(_ixm)	0
#endif

#define iavf_dmamem_lo(_ixm)	(uint32_t)IAVF_DMA_DVA(_ixm)

static inline void
iavf_aq_dva(struct iavf_aq_desc *iaq, bus_addr_t addr)
{
#ifdef __LP64__
	htolem32(&iaq->iaq_param[2], addr >> 32);
#else
	iaq->iaq_param[2] = htole32(0);
#endif
	htolem32(&iaq->iaq_param[3], addr);
}

#if _BYTE_ORDER == _BIG_ENDIAN
#define HTOLE16(_x)	(uint16_t)(((_x) & 0xff) << 8 | ((_x) & 0xff00) >> 8)
#else
#define HTOLE16(_x)	(_x)
#endif

static const struct pci_matchid iavf_devices[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_VF },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_VF_HV },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_VF },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_ADAPTIVE_VF },
};

static int
iavf_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid(aux, iavf_devices, nitems(iavf_devices)));
}

void
iavf_attach(struct device *parent, struct device *self, void *aux)
{
	struct iavf_softc *sc = (struct iavf_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct pci_attach_args *pa = aux;
	pcireg_t memtype;
	int tries;

	rw_init(&sc->sc_cfg_lock, "iavfcfg");

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_aq_regs = &iavf_aq_regs;

	sc->sc_nqueues = 0; /* 1 << 0 is 1 queue */
	sc->sc_tx_ring_ndescs = 1024;
	sc->sc_rx_ring_ndescs = 1024;

	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, IAVF_PCIREG);
	if (pci_mapreg_map(pa, IAVF_PCIREG, memtype, 0,
	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
		printf(": unable to map registers\n");
		return;
	}

	for (tries = 0; tries < 100; tries++) {
		uint32_t reg;
		reg = iavf_rd(sc, I40E_VFGEN_RSTAT) &
		    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
		if (reg == IAVF_VFR_VFACTIVE ||
		    reg == IAVF_VFR_COMPLETED)
			break;

		delay(10000);
	}
	if (tries == 100) {
		printf(": VF reset timed out\n");
		return;
	}
	task_set(&sc->sc_reset_task, iavf_reset, sc);

	mtx_init(&sc->sc_atq_mtx, IPL_NET);

	if (iavf_dmamem_alloc(sc, &sc->sc_atq,
	    sizeof(struct iavf_aq_desc) * IAVF_AQ_NUM, IAVF_AQ_ALIGN) != 0) {
		printf("\n" "%s: unable to allocate atq\n", DEVNAME(sc));
		goto unmap;
	}

	SIMPLEQ_INIT(&sc->sc_arq_idle);
	SIMPLEQ_INIT(&sc->sc_arq_live);
	if_rxr_init(&sc->sc_arq_ring, 2, IAVF_AQ_NUM - 1);
	sc->sc_arq_cons = 0;
	sc->sc_arq_prod = 0;

	if (iavf_dmamem_alloc(sc, &sc->sc_arq,
	    sizeof(struct iavf_aq_desc) * IAVF_AQ_NUM, IAVF_AQ_ALIGN) != 0) {
		printf("\n" "%s: unable to allocate arq\n", DEVNAME(sc));
		goto free_atq;
	}

	if (!iavf_arq_fill(sc, 0)) {
		printf("\n" "%s: unable to fill arq descriptors\n",
		    DEVNAME(sc));
		goto free_arq;
	}
	timeout_set(&sc->sc_admin_timeout, iavf_arq_timeout, sc);

	if (iavf_dmamem_alloc(sc, &sc->sc_scratch, PAGE_SIZE, IAVF_AQ_ALIGN) != 0) {
		printf("\n" "%s: unable to allocate scratch\n", DEVNAME(sc));
		goto shutdown;
	}

	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_atq),
	    0, IAVF_DMA_LEN(&sc->sc_atq),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_arq),
	    0, IAVF_DMA_LEN(&sc->sc_arq),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	iavf_init_admin_queue(sc);

	if (iavf_get_version(sc) != 0) {
		printf(", unable to get VF interface version\n");
		goto free_scratch;
	}

	if (iavf_get_vf_resources(sc) != 0) {
		printf(", timed out waiting for VF resources\n");
		goto free_scratch;
	}

	if (iavf_config_irq_map(sc) != 0) {
		printf(", timed out waiting for IRQ map response\n");
		goto free_scratch;
	}

	/* msix only? */
	if (pci_intr_map_msix(pa, 0, &sc->sc_ih) != 0) {
		printf(", unable to map interrupt\n");
		goto free_scratch;
	}

	/* generate an address if the pf didn't give us one */
	memcpy(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
	if (memcmp(sc->sc_ac.ac_enaddr, etheranyaddr, ETHER_ADDR_LEN) == 0)
		ether_fakeaddr(ifp);

	printf(", %s, address %s\n", pci_intr_string(sc->sc_pc, sc->sc_ih),
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	sc->sc_ihc = pci_intr_establish(sc->sc_pc, sc->sc_ih,
	    IPL_NET | IPL_MPSAFE, iavf_intr, sc, DEVNAME(sc));
	if (sc->sc_ihc == NULL) {
		printf("%s: unable to establish interrupt handler\n",
		    DEVNAME(sc));
		goto free_scratch;
	}

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = iavf_ioctl;
	ifp->if_qstart = iavf_start;
	ifp->if_watchdog = iavf_watchdog;
	if (ifp->if_hardmtu == 0)
		ifp->if_hardmtu = IAVF_HARDMTU;
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	ifq_set_maxlen(&ifp->if_snd, sc->sc_tx_ring_ndescs);

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
#if 0
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

	ifmedia_init(&sc->sc_media, 0, iavf_media_change, iavf_media_status);

	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	if_attach_queues(ifp, iavf_nqueues(sc));
	if_attach_iqueues(ifp, iavf_nqueues(sc));

	iavf_intr_enable(sc);

	return;
free_scratch:
	iavf_dmamem_free(sc, &sc->sc_scratch);
shutdown:
	iavf_wr(sc, sc->sc_aq_regs->atq_head, 0);
	iavf_wr(sc, sc->sc_aq_regs->arq_head, 0);
	iavf_wr(sc, sc->sc_aq_regs->atq_tail, 0);
	iavf_wr(sc, sc->sc_aq_regs->arq_tail, 0);

	iavf_wr(sc, sc->sc_aq_regs->atq_bal, 0);
	iavf_wr(sc, sc->sc_aq_regs->atq_bah, 0);
	iavf_wr(sc, sc->sc_aq_regs->atq_len, 0);

	iavf_wr(sc, sc->sc_aq_regs->arq_bal, 0);
	iavf_wr(sc, sc->sc_aq_regs->arq_bah, 0);
	iavf_wr(sc, sc->sc_aq_regs->arq_len, 0);

	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_arq),
	    0, IAVF_DMA_LEN(&sc->sc_arq),
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_atq),
	    0, IAVF_DMA_LEN(&sc->sc_atq),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	iavf_arq_unfill(sc);
free_arq:
	iavf_dmamem_free(sc, &sc->sc_arq);
free_atq:
	iavf_dmamem_free(sc, &sc->sc_atq);
unmap:
	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
	sc->sc_mems = 0;
}

static int
iavf_media_change(struct ifnet *ifp)
{
	return (EOPNOTSUPP);
}

static void
iavf_media_status(struct ifnet *ifp, struct ifmediareq *ifm)
{
	struct iavf_softc *sc = ifp->if_softc;

	NET_ASSERT_LOCKED();

	ifm->ifm_status = sc->sc_media_status;
	ifm->ifm_active = sc->sc_media_active;
}

static void
iavf_watchdog(struct ifnet *ifp)
{

}

int
iavf_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct iavf_softc *sc = (struct iavf_softc *)ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
	int /*aqerror,*/ error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP)) {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				error = ENETRESET;
			else
				error = iavf_up(sc);
		} else {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				error = iavf_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = iavf_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	case SIOCADDMULTI:
		if (ether_addmulti(ifr, &sc->sc_ac) == ENETRESET) {
			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
			if (error != 0)
				return (error);

			iavf_add_del_addr(sc, addrlo, 1);
			/* check result i guess? */

			if (sc->sc_ac.ac_multirangecnt > 0) {
				SET(ifp->if_flags, IFF_ALLMULTI);
				error = ENETRESET;
			}
		}
		break;

	case SIOCDELMULTI:
		if (ether_delmulti(ifr, &sc->sc_ac) == ENETRESET) {
			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
			if (error != 0)
				return (error);

			iavf_add_del_addr(sc, addrlo, 0);

			if (ISSET(ifp->if_flags, IFF_ALLMULTI) &&
			    sc->sc_ac.ac_multirangecnt == 0) {
				CLR(ifp->if_flags, IFF_ALLMULTI);
				error = ENETRESET;
			}
		}
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
		break;
	}

	if (error == ENETRESET)
		error = iavf_iff(sc);

	return (error);
}
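
/*
 * Build a CONFIG_VSI_QUEUES message in the scratch DMA buffer, with
 * one tx/rx queue info pair per queue, handing the PF the ring base
 * addresses and sizes.  Like the other virtchnl commands below, this
 * posts the descriptor and then waits up to 250ms for the PF's
 * response to come back on the ARQ.
 */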
static int
iavf_config_vsi_queues(struct iavf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct iavf_aq_desc iaq;
	struct iavf_vc_queue_config_info *config;
	struct iavf_vc_txq_info *txq;
	struct iavf_vc_rxq_info *rxq;
	struct iavf_rx_ring *rxr;
	struct iavf_tx_ring *txr;
	int rv, i;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
	iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_CONFIG_VSI_QUEUES);
	iaq.iaq_datalen = htole16(sizeof(*config) +
	    iavf_nqueues(sc) * sizeof(struct iavf_vc_queue_pair_info));
	iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));

	config = IAVF_DMA_KVA(&sc->sc_scratch);
	config->vsi_id = htole16(sc->sc_vsi_id);
	config->num_queue_pairs = htole16(iavf_nqueues(sc));

	for (i = 0; i < iavf_nqueues(sc); i++) {
		rxr = ifp->if_iqs[i]->ifiq_softc;
		txr = ifp->if_ifqs[i]->ifq_softc;

		txq = &config->qpair[i].txq;
		txq->vsi_id = htole16(sc->sc_vsi_id);
		txq->queue_id = htole16(i);
		txq->ring_len = htole16(sc->sc_tx_ring_ndescs);
		txq->headwb_ena = 0;
		htolem64(&txq->dma_ring_addr, IAVF_DMA_DVA(&txr->txr_mem));
		txq->dma_headwb_addr = 0;

		rxq = &config->qpair[i].rxq;
		rxq->vsi_id = htole16(sc->sc_vsi_id);
		rxq->queue_id = htole16(i);
		rxq->ring_len = htole32(sc->sc_rx_ring_ndescs);
		rxq->splithdr_ena = 0;
		rxq->databuf_size = htole32(MCLBYTES);
		rxq->max_pkt_size = htole32(IAVF_HARDMTU);
		htolem64(&rxq->dma_ring_addr, IAVF_DMA_DVA(&rxr->rxr_mem));
		rxq->rx_split_pos = 0;
	}

	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_scratch), 0,
	    IAVF_DMA_LEN(&sc->sc_scratch),
	    BUS_DMASYNC_PREWRITE);

	iavf_atq_post(sc, &iaq);
	rv = iavf_arq_wait(sc, 250);
	if (rv != IAVF_VC_RC_SUCCESS) {
		printf("%s: CONFIG_VSI_QUEUES failed: %d\n", DEVNAME(sc), rv);
		return (1);
	}

	return (0);
}
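
/*
 * Clear the RSS hash-enable bits.  With no hash types enabled the PF
 * will not spread received packets across queues, which is what we
 * want while only a single queue pair is configured.
 */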
static int
iavf_config_hena(struct iavf_softc *sc)
{
	struct iavf_aq_desc iaq;
	uint64_t *caps;
	int rv;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
	iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_SET_RSS_HENA);
	iaq.iaq_datalen = htole16(sizeof(*caps));
	iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));

	caps = IAVF_DMA_KVA(&sc->sc_scratch);
	*caps = 0;

	iavf_atq_post(sc, &iaq);
	rv = iavf_arq_wait(sc, 250);
	if (rv != IAVF_VC_RC_SUCCESS) {
		printf("%s: SET_RSS_HENA failed: %d\n", DEVNAME(sc), rv);
		return (1);
	}

	caps = IAVF_DMA_KVA(&sc->sc_scratch);

	return (0);
}
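
/*
 * ENABLE_QUEUES and DISABLE_QUEUES take the same payload, so both go
 * through this helper with a bitmap selecting every rx and tx queue.
 */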
static int
iavf_queue_select(struct iavf_softc *sc, int opcode)
{
	struct iavf_aq_desc iaq;
	struct iavf_vc_queue_select *qsel;
	int rv;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
	iaq.iaq_vc_opcode = htole32(opcode);
	iaq.iaq_datalen = htole16(sizeof(*qsel));
	iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));

	qsel = IAVF_DMA_KVA(&sc->sc_scratch);
	qsel->vsi_id = htole16(sc->sc_vsi_id);
	qsel->rx_queues = htole32(iavf_allqueues(sc));
	qsel->tx_queues = htole32(iavf_allqueues(sc));

	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_scratch), 0,
	    IAVF_DMA_LEN(&sc->sc_scratch),
	    BUS_DMASYNC_PREWRITE);

	iavf_atq_post(sc, &iaq);
	rv = iavf_arq_wait(sc, 250);
	if (rv != IAVF_VC_RC_SUCCESS) {
		printf("%s: queue op %d failed: %d\n", DEVNAME(sc), opcode, rv);
		return (1);
	}

	return (0);
}
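
/*
 * Bring the interface up: allocate and fill the rings, then issue
 * CONFIG_VSI_QUEUES, SET_RSS_HENA and ENABLE_QUEUES, and finally
 * program the interrupt throttling registers.  Returns ENETRESET on
 * success so that the ioctl path follows up with iavf_iff() to set
 * the promiscuous mode and MAC filters.
 */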
static int
iavf_up(struct iavf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct iavf_rx_ring *rxr;
	struct iavf_tx_ring *txr;
	unsigned int nqueues, i;
	int rv = ENOMEM;

	nqueues = iavf_nqueues(sc);
	KASSERT(nqueues == 1); /* XXX */

	rw_enter_write(&sc->sc_cfg_lock);
	if (sc->sc_dead) {
		rw_exit_write(&sc->sc_cfg_lock);
		return (ENXIO);
	}

	for (i = 0; i < nqueues; i++) {
		rxr = iavf_rxr_alloc(sc, i);
		if (rxr == NULL)
			goto free;

		txr = iavf_txr_alloc(sc, i);
		if (txr == NULL) {
			iavf_rxr_free(sc, rxr);
			goto free;
		}

		ifp->if_iqs[i]->ifiq_softc = rxr;
		ifp->if_ifqs[i]->ifq_softc = txr;

		iavf_rxfill(sc, rxr);
	}

	if (iavf_config_vsi_queues(sc) != 0)
		goto down;

	if (iavf_config_hena(sc) != 0)
		goto down;

	if (iavf_queue_select(sc, IAVF_VC_OP_ENABLE_QUEUES) != 0)
		goto down;

	SET(ifp->if_flags, IFF_RUNNING);

	iavf_wr(sc, I40E_VFINT_ITR01(0), 0x7a);
	iavf_wr(sc, I40E_VFINT_ITR01(1), 0x7a);
	iavf_wr(sc, I40E_VFINT_ITR01(2), 0);

	rw_exit_write(&sc->sc_cfg_lock);

	return (ENETRESET);

free:
	for (i = 0; i < nqueues; i++) {
		rxr = ifp->if_iqs[i]->ifiq_softc;
		txr = ifp->if_ifqs[i]->ifq_softc;

		if (rxr == NULL) {
			/*
			 * tx and rx get set at the same time, so if one
			 * is NULL, the other is too.
			 */
			continue;
		}

		iavf_txr_free(sc, txr);
		iavf_rxr_free(sc, rxr);
	}
	rw_exit_write(&sc->sc_cfg_lock);
	return (rv);
down:
	rw_exit_write(&sc->sc_cfg_lock);
	iavf_down(sc);
	return (ETIMEDOUT);
}

static int
iavf_config_promisc_mode(struct iavf_softc *sc, int unicast, int multicast)
{
	struct iavf_aq_desc iaq;
	struct iavf_vc_promisc_info *promisc;
	int rv, flags;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
	iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_CONFIG_PROMISC);
	iaq.iaq_datalen = htole16(sizeof(*promisc));
	iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));

	flags = 0;
	if (unicast)
		flags |= IAVF_FLAG_VF_UNICAST_PROMISC;
	if (multicast)
		flags |= IAVF_FLAG_VF_MULTICAST_PROMISC;

	promisc = IAVF_DMA_KVA(&sc->sc_scratch);
	promisc->vsi_id = htole16(sc->sc_vsi_id);
	promisc->flags = htole16(flags);

	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_scratch), 0,
	    IAVF_DMA_LEN(&sc->sc_scratch),
	    BUS_DMASYNC_PREWRITE);

	iavf_atq_post(sc, &iaq);
	rv = iavf_arq_wait(sc, 250);
	if (rv != IAVF_VC_RC_SUCCESS) {
		printf("%s: CONFIG_PROMISC_MODE failed: %d\n", DEVNAME(sc), rv);
		return (1);
	}

	return (0);
}

static int
iavf_add_del_addr(struct iavf_softc *sc, uint8_t *addr, int add)
{
	struct iavf_aq_desc iaq;
	struct iavf_vc_eth_addr_list *addrs;
	struct iavf_vc_eth_addr *vcaddr;
	int rv;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
	if (add)
		iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_ADD_ETH_ADDR);
	else
		iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_DEL_ETH_ADDR);
	iaq.iaq_datalen = htole16(sizeof(*addrs) + sizeof(*vcaddr));
	iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));

	addrs = IAVF_DMA_KVA(&sc->sc_scratch);
	addrs->vsi_id = htole16(sc->sc_vsi_id);
	addrs->num_elements = htole16(1);

	vcaddr = addrs->list;
	memcpy(vcaddr->addr, addr, ETHER_ADDR_LEN);

	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_scratch), 0,
	    IAVF_DMA_LEN(&sc->sc_scratch),
	    BUS_DMASYNC_PREWRITE);

	iavf_atq_post(sc, &iaq);
	rv = iavf_arq_wait(sc, 250);
	if (rv != IAVF_VC_RC_SUCCESS) {
		printf("%s: ADD/DEL_ETH_ADDR failed: %d\n", DEVNAME(sc), rv);
		return (1);
	}

	return (0);
}

static int
iavf_iff(struct iavf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int unicast, multicast;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return (0);

	rw_enter_write(&sc->sc_cfg_lock);

	unicast = 0;
	multicast = 0;
	if (ISSET(ifp->if_flags, IFF_PROMISC)) {
		unicast = 1;
		multicast = 1;
	} else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
		multicast = 1;
	}
	iavf_config_promisc_mode(sc, unicast, multicast);

	if (memcmp(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN) != 0) {
		if (memcmp(sc->sc_enaddr, etheranyaddr, ETHER_ADDR_LEN) != 0)
			iavf_add_del_addr(sc, sc->sc_enaddr, 0);
		memcpy(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
		iavf_add_del_addr(sc, sc->sc_enaddr, 1);
	}

	rw_exit_write(&sc->sc_cfg_lock);
	return (0);
}

static int
iavf_down(struct iavf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct iavf_rx_ring *rxr;
	struct iavf_tx_ring *txr;
	unsigned int nqueues, i;
	uint32_t reg;
	int error = 0;

	nqueues = iavf_nqueues(sc);

	rw_enter_write(&sc->sc_cfg_lock);

	CLR(ifp->if_flags, IFF_RUNNING);

	NET_UNLOCK();

	if (sc->sc_resetting == 0) {
		/* disable queues */
		if (iavf_queue_select(sc, IAVF_VC_OP_DISABLE_QUEUES) != 0)
			goto die;
	}

	/* mask interrupts */
	reg = iavf_rd(sc, I40E_VFINT_DYN_CTL01);
	reg |= I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK |
	    (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT);
	iavf_wr(sc, I40E_VFINT_DYN_CTL01, reg);

	/* make sure no hw generated work is still in flight */
	intr_barrier(sc->sc_ihc);
	for (i = 0; i < nqueues; i++) {
		rxr = ifp->if_iqs[i]->ifiq_softc;
		txr = ifp->if_ifqs[i]->ifq_softc;

		ifq_barrier(ifp->if_ifqs[i]);

		timeout_del_barrier(&rxr->rxr_refill);
	}

	for (i = 0; i < nqueues; i++) {
		rxr = ifp->if_iqs[i]->ifiq_softc;
		txr = ifp->if_ifqs[i]->ifq_softc;

		iavf_txr_clean(sc, txr);
		iavf_rxr_clean(sc, rxr);

		iavf_txr_free(sc, txr);
		iavf_rxr_free(sc, rxr);

		ifp->if_iqs[i]->ifiq_softc = NULL;
		ifp->if_ifqs[i]->ifq_softc = NULL;
	}

	/* unmask */
	reg = iavf_rd(sc, I40E_VFINT_DYN_CTL01);
	reg |= (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT);
	iavf_wr(sc, I40E_VFINT_DYN_CTL01, reg);

out:
	rw_exit_write(&sc->sc_cfg_lock);
	NET_LOCK();
	return (error);
die:
	sc->sc_dead = 1;
	log(LOG_CRIT, "%s: failed to shut down rings\n", DEVNAME(sc));
	error = ETIMEDOUT;
	goto out;
}
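
/*
 * Handle a VF reset triggered by the PF: treat it as a loss of link,
 * tear the rings down if we were running, wait for the device to
 * report the reset complete, then renegotiate the virtchnl session
 * (version, resources, IRQ map) and restore the lladdr, link state
 * and rings.
 */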
static void
iavf_reset(void *xsc)
{
	struct iavf_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int tries, up, link_state;

	NET_LOCK();

	/* treat the reset as a loss of link */
	link_state = ifp->if_link_state;
	if (ifp->if_link_state != LINK_STATE_DOWN) {
		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);
	}

	up = 0;
	if (ISSET(ifp->if_flags, IFF_RUNNING)) {
		iavf_down(sc);
		up = 1;
	}

	rw_enter_write(&sc->sc_cfg_lock);

	sc->sc_major_ver = UINT_MAX;
	sc->sc_minor_ver = UINT_MAX;
	sc->sc_got_vf_resources = 0;
	sc->sc_got_irq_map = 0;

	for (tries = 0; tries < 100; tries++) {
		uint32_t reg;
		reg = iavf_rd(sc, I40E_VFGEN_RSTAT) &
		    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
		if (reg == IAVF_VFR_VFACTIVE ||
		    reg == IAVF_VFR_COMPLETED)
			break;

		delay(10000);
	}
	if (tries == 100) {
		printf("%s: VF reset timed out\n", DEVNAME(sc));
		goto failed;
	}

	iavf_arq_unfill(sc);
	sc->sc_arq_cons = 0;
	sc->sc_arq_prod = 0;
	if (!iavf_arq_fill(sc, 0)) {
		printf("%s: unable to fill arq descriptors\n",
		    DEVNAME(sc));
		goto failed;
	}

	iavf_init_admin_queue(sc);

	if (iavf_get_version(sc) != 0) {
		printf("%s: unable to get VF interface version\n",
		    DEVNAME(sc));
		goto failed;
	}

	if (iavf_get_vf_resources(sc) != 0) {
		printf("%s: timed out waiting for VF resources\n",
		    DEVNAME(sc));
		goto failed;
	}

	if (iavf_config_irq_map(sc) != 0) {
		printf("%s: timed out configuring IRQ map\n", DEVNAME(sc));
		goto failed;
	}

	/* do we need to re-add mac addresses here? */

	sc->sc_resetting = 0;
	iavf_intr_enable(sc);
	rw_exit_write(&sc->sc_cfg_lock);

	/* the PF-assigned MAC address might have changed */
	if ((memcmp(sc->sc_ac.ac_enaddr, etheranyaddr, ETHER_ADDR_LEN) != 0) &&
	    (memcmp(sc->sc_ac.ac_enaddr, sc->sc_enaddr, ETHER_ADDR_LEN) != 0)) {
		memcpy(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
		if_setlladdr(ifp, sc->sc_ac.ac_enaddr);
		ifnewlladdr(ifp);
	}

	/* restore link state */
	if (link_state != LINK_STATE_DOWN) {
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
	}

	if (up) {
		int i;

		iavf_up(sc);

		for (i = 0; i < iavf_nqueues(sc); i++) {
			if (ifq_is_oactive(ifp->if_ifqs[i]))
				ifq_restart(ifp->if_ifqs[i]);
		}
	}

	NET_UNLOCK();
	return;
failed:
	sc->sc_dead = 1;
	sc->sc_resetting = 0;
	rw_exit_write(&sc->sc_cfg_lock);
	NET_UNLOCK();
}

static struct iavf_tx_ring *
iavf_txr_alloc(struct iavf_softc *sc, unsigned int qid)
{
	struct iavf_tx_ring *txr;
	struct iavf_tx_map *maps, *txm;
	unsigned int i;

	txr = malloc(sizeof(*txr), M_DEVBUF, M_WAITOK|M_CANFAIL);
	if (txr == NULL)
		return (NULL);

	maps = mallocarray(sizeof(*maps),
	    sc->sc_tx_ring_ndescs, M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
	if (maps == NULL)
		goto free;

	if (iavf_dmamem_alloc(sc, &txr->txr_mem,
	    sizeof(struct iavf_tx_desc) * sc->sc_tx_ring_ndescs,
	    IAVF_TX_QUEUE_ALIGN) != 0)
		goto freemap;

	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
		txm = &maps[i];

		if (bus_dmamap_create(sc->sc_dmat,
		    IAVF_HARDMTU, IAVF_TX_PKT_DESCS, IAVF_HARDMTU, 0,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
		    &txm->txm_map) != 0)
			goto uncreate;

		txm->txm_eop = -1;
		txm->txm_m = NULL;
	}

	txr->txr_cons = txr->txr_prod = 0;
	txr->txr_maps = maps;

	txr->txr_tail = I40E_QTX_TAIL1(qid);
	txr->txr_qid = qid;

	return (txr);

uncreate:
	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
		txm = &maps[i];

		if (txm->txm_map == NULL)
			continue;

		bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
	}

	iavf_dmamem_free(sc, &txr->txr_mem);
freemap:
	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_tx_ring_ndescs);
free:
	free(txr, M_DEVBUF, sizeof(*txr));
	return (NULL);
}

static void
iavf_txr_clean(struct iavf_softc *sc, struct iavf_tx_ring *txr)
{
	struct iavf_tx_map *maps, *txm;
	bus_dmamap_t map;
	unsigned int i;

	maps = txr->txr_maps;
	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
		txm = &maps[i];

		if (txm->txm_m == NULL)
			continue;

		map = txm->txm_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);

		m_freem(txm->txm_m);
		txm->txm_m = NULL;
	}
}

static void
iavf_txr_free(struct iavf_softc *sc, struct iavf_tx_ring *txr)
{
	struct iavf_tx_map *maps, *txm;
	unsigned int i;

	maps = txr->txr_maps;
	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
		txm = &maps[i];

		bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
	}

	iavf_dmamem_free(sc, &txr->txr_mem);
	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_tx_ring_ndescs);
	free(txr, M_DEVBUF, sizeof(*txr));
}
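
/*
 * Try to DMA-load the mbuf chain; if it has too many segments,
 * defragment it into a single cluster and try once more.
 */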
static inline int
iavf_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m)
{
	int error;

	error = bus_dmamap_load_mbuf(dmat, map, m,
	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT);
	if (error != EFBIG)
		return (error);

	error = m_defrag(m, M_DONTWAIT);
	if (error != 0)
		return (error);

	return (bus_dmamap_load_mbuf(dmat, map, m,
	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT));
}
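
/*
 * Transmit start: the ring is treated as a circular buffer with
 * txr_prod chasing txr_cons.  Each packet consumes one descriptor
 * per DMA segment; only the final descriptor gets EOP and RS set,
 * so the device reports a single completion per packet.
 */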
static void
iavf_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct iavf_softc *sc = ifp->if_softc;
	struct iavf_tx_ring *txr = ifq->ifq_softc;
	struct iavf_tx_desc *ring, *txd;
	struct iavf_tx_map *txm;
	bus_dmamap_t map;
	struct mbuf *m;
	uint64_t cmd;
	uint64_t vlan_cmd;
	unsigned int prod, free, last, i;
	unsigned int mask;
	int post = 0;
#if NBPFILTER > 0
	caddr_t if_bpf;
#endif

	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
		ifq_purge(ifq);
		return;
	}

	prod = txr->txr_prod;
	free = txr->txr_cons;
	if (free <= prod)
		free += sc->sc_tx_ring_ndescs;
	free -= prod;

	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&txr->txr_mem),
	    0, IAVF_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);

	ring = IAVF_DMA_KVA(&txr->txr_mem);
	mask = sc->sc_tx_ring_ndescs - 1;

	for (;;) {
		if (free <= IAVF_TX_PKT_DESCS) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		txm = &txr->txr_maps[prod];
		map = txm->txm_map;

		if (iavf_load_mbuf(sc->sc_dmat, map, m) != 0) {
			ifq->ifq_errors++;
			m_freem(m);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);

		vlan_cmd = 0;
		if (m->m_flags & M_VLANTAG) {
			vlan_cmd = IAVF_TX_DESC_CMD_IL2TAG1 |
			    (((uint64_t)m->m_pkthdr.ether_vtag) <<
			    IAVF_TX_DESC_L2TAG1_SHIFT);
		}

		for (i = 0; i < map->dm_nsegs; i++) {
			txd = &ring[prod];

			cmd = (uint64_t)map->dm_segs[i].ds_len <<
			    IAVF_TX_DESC_BSIZE_SHIFT;
			cmd |= IAVF_TX_DESC_DTYPE_DATA | IAVF_TX_DESC_CMD_ICRC |
			    vlan_cmd;

			htolem64(&txd->addr, map->dm_segs[i].ds_addr);
			htolem64(&txd->cmd, cmd);

			last = prod;

			prod++;
			prod &= mask;
		}
		cmd |= IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS;
		htolem64(&txd->cmd, cmd);

		txm->txm_m = m;
		txm->txm_eop = last;

#if NBPFILTER > 0
		if_bpf = ifp->if_bpf;
		if (if_bpf)
			bpf_mtap_ether(if_bpf, m, BPF_DIRECTION_OUT);
#endif

		free -= i;
		post = 1;
	}

	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&txr->txr_mem),
	    0, IAVF_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);

	if (post) {
		txr->txr_prod = prod;
		iavf_wr(sc, txr->txr_tail, prod);
	}
}
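
/*
 * Transmit completion: for each in-flight packet, look at its EOP
 * slot and check whether the descriptor type field has been
 * rewritten to DTYPE_DONE; if so the whole packet is finished and
 * its map and mbuf can be released.
 */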
static int
iavf_txeof(struct iavf_softc *sc, struct ifqueue *ifq)
{
	struct iavf_tx_ring *txr = ifq->ifq_softc;
	struct iavf_tx_desc *ring, *txd;
	struct iavf_tx_map *txm;
	bus_dmamap_t map;
	unsigned int cons, prod, last;
	unsigned int mask;
	uint64_t dtype;
	int done = 0;

	prod = txr->txr_prod;
	cons = txr->txr_cons;

	if (cons == prod)
		return (0);

	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&txr->txr_mem),
	    0, IAVF_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);

	ring = IAVF_DMA_KVA(&txr->txr_mem);
	mask = sc->sc_tx_ring_ndescs - 1;

	do {
		txm = &txr->txr_maps[cons];
		last = txm->txm_eop;
		txd = &ring[last];

		dtype = txd->cmd & htole64(IAVF_TX_DESC_DTYPE_MASK);
		if (dtype != htole64(IAVF_TX_DESC_DTYPE_DONE))
			break;

		map = txm->txm_map;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);
		m_freem(txm->txm_m);

		txm->txm_m = NULL;
		txm->txm_eop = -1;

		cons = last + 1;
		cons &= mask;

		done = 1;
	} while (cons != prod);

	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&txr->txr_mem),
	    0, IAVF_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);

	txr->txr_cons = cons;

	//ixl_enable(sc, txr->txr_msix);

	if (ifq_is_oactive(ifq))
		ifq_restart(ifq);

	return (done);
}

static struct iavf_rx_ring *
iavf_rxr_alloc(struct iavf_softc *sc, unsigned int qid)
{
	struct iavf_rx_ring *rxr;
	struct iavf_rx_map *maps, *rxm;
	unsigned int i;

	rxr = malloc(sizeof(*rxr), M_DEVBUF, M_WAITOK|M_CANFAIL);
	if (rxr == NULL)
		return (NULL);

	maps = mallocarray(sizeof(*maps),
	    sc->sc_rx_ring_ndescs, M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
	if (maps == NULL)
		goto free;

	if (iavf_dmamem_alloc(sc, &rxr->rxr_mem,
	    sizeof(struct iavf_rx_rd_desc_32) * sc->sc_rx_ring_ndescs,
	    IAVF_RX_QUEUE_ALIGN) != 0)
		goto freemap;

	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
		rxm = &maps[i];

		if (bus_dmamap_create(sc->sc_dmat,
		    IAVF_HARDMTU, 1, IAVF_HARDMTU, 0,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
		    &rxm->rxm_map) != 0)
			goto uncreate;

		rxm->rxm_m = NULL;
	}

	rxr->rxr_sc = sc;
	if_rxr_init(&rxr->rxr_acct, 17, sc->sc_rx_ring_ndescs - 1);
	timeout_set(&rxr->rxr_refill, iavf_rxrefill, rxr);
	rxr->rxr_cons = rxr->rxr_prod = 0;
	rxr->rxr_m_head = NULL;
	rxr->rxr_m_tail = &rxr->rxr_m_head;
	rxr->rxr_maps = maps;

	rxr->rxr_tail = I40E_QRX_TAIL1(qid);
	rxr->rxr_qid = qid;

	return (rxr);

uncreate:
	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
		rxm = &maps[i];

		if (rxm->rxm_map == NULL)
			continue;

		bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
	}

	iavf_dmamem_free(sc, &rxr->rxr_mem);
freemap:
	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_rx_ring_ndescs);
free:
	free(rxr, M_DEVBUF, sizeof(*rxr));
	return (NULL);
}
1892 
1893 static void
1894 iavf_rxr_clean(struct iavf_softc *sc, struct iavf_rx_ring *rxr)
1895 {
1896 	struct iavf_rx_map *maps, *rxm;
1897 	bus_dmamap_t map;
1898 	unsigned int i;
1899 
1900 	timeout_del_barrier(&rxr->rxr_refill);
1901 
1902 	maps = rxr->rxr_maps;
1903 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
1904 		rxm = &maps[i];
1905 
1906 		if (rxm->rxm_m == NULL)
1907 			continue;
1908 
1909 		map = rxm->rxm_map;
1910 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1911 		    BUS_DMASYNC_POSTWRITE);
1912 		bus_dmamap_unload(sc->sc_dmat, map);
1913 
1914 		m_freem(rxm->rxm_m);
1915 		rxm->rxm_m = NULL;
1916 	}
1917 
1918 	m_freem(rxr->rxr_m_head);
1919 	rxr->rxr_m_head = NULL;
1920 	rxr->rxr_m_tail = &rxr->rxr_m_head;
1921 
1922 	rxr->rxr_prod = rxr->rxr_cons = 0;
1923 }
1924 
1925 static void
1926 iavf_rxr_free(struct iavf_softc *sc, struct iavf_rx_ring *rxr)
1927 {
1928 	struct iavf_rx_map *maps, *rxm;
1929 	unsigned int i;
1930 
1931 	maps = rxr->rxr_maps;
1932 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
1933 		rxm = &maps[i];
1934 
1935 		bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
1936 	}
1937 
1938 	iavf_dmamem_free(sc, &rxr->rxr_mem);
1939 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_rx_ring_ndescs);
1940 	free(rxr, M_DEVBUF, sizeof(*rxr));
1941 }
1942 
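/*
 * Receive completion. Descriptors are consumed while the DD bit is
 * set. Fragments of a multi-descriptor packet are collected on the
 * rxr_m_head/rxr_m_tail chain until a descriptor carrying EOP closes
 * the packet, which is then queued for the stack unless the RXE or
 * OVERSIZE error bits are set.
 */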
1943 static int
1944 iavf_rxeof(struct iavf_softc *sc, struct ifiqueue *ifiq)
1945 {
1946 	struct iavf_rx_ring *rxr = ifiq->ifiq_softc;
1947 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1948 	struct iavf_rx_wb_desc_32 *ring, *rxd;
1949 	struct iavf_rx_map *rxm;
1950 	bus_dmamap_t map;
1951 	unsigned int cons, prod;
1952 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1953 	struct mbuf *m;
1954 	uint64_t word;
1955 	uint16_t vlan;
1956 	unsigned int len;
1957 	unsigned int mask;
1958 	int done = 0;
1959 
1960 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
1961 		return (0);
1962 
1963 	prod = rxr->rxr_prod;
1964 	cons = rxr->rxr_cons;
1965 
1966 	if (cons == prod)
1967 		return (0);
1968 
1969 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&rxr->rxr_mem),
1970 	    0, IAVF_DMA_LEN(&rxr->rxr_mem),
1971 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1972 
1973 	ring = IAVF_DMA_KVA(&rxr->rxr_mem);
1974 	mask = sc->sc_rx_ring_ndescs - 1;
1975 
1976 	do {
1977 		rxd = &ring[cons];
1978 
1979 		word = lemtoh64(&rxd->qword1);
1980 		if (!ISSET(word, IAVF_RX_DESC_DD))
1981 			break;
1982 
1983 		if_rxr_put(&rxr->rxr_acct, 1);
1984 
1985 		rxm = &rxr->rxr_maps[cons];
1986 
1987 		map = rxm->rxm_map;
1988 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1989 		    BUS_DMASYNC_POSTREAD);
1990 		bus_dmamap_unload(sc->sc_dmat, map);
1991 
1992 		m = rxm->rxm_m;
1993 		rxm->rxm_m = NULL;
1994 
1995 		len = (word & IAVF_RX_DESC_PLEN_MASK) >> IAVF_RX_DESC_PLEN_SHIFT;
1996 		m->m_len = len;
1997 		m->m_pkthdr.len = 0;
1998 
1999 		m->m_next = NULL;
2000 		*rxr->rxr_m_tail = m;
2001 		rxr->rxr_m_tail = &m->m_next;
2002 
2003 		m = rxr->rxr_m_head;
2004 		m->m_pkthdr.len += len;
2005 
2006 		if (ISSET(word, IAVF_RX_DESC_EOP)) {
2007 			if (ISSET(word, IAVF_RX_DESC_L2TAG1P)) {
2008 				vlan = (lemtoh64(&rxd->qword0) &
2009 				    IAVF_RX_DESC_L2TAG1_MASK)
2010 				    >> IAVF_RX_DESC_L2TAG1_SHIFT;
2011 				m->m_pkthdr.ether_vtag = vlan;
2012 				m->m_flags |= M_VLANTAG;
2013 			}
2014 			if (!ISSET(word,
2015 			    IAVF_RX_DESC_RXE | IAVF_RX_DESC_OVERSIZE)) {
2016 				ml_enqueue(&ml, m);
2017 			} else {
2018 				ifp->if_ierrors++; /* XXX */
2019 				m_freem(m);
2020 			}
2021 
2022 			rxr->rxr_m_head = NULL;
2023 			rxr->rxr_m_tail = &rxr->rxr_m_head;
2024 		}
2025 
2026 		cons++;
2027 		cons &= mask;
2028 
2029 		done = 1;
2030 	} while (cons != prod);
2031 
2032 	if (done) {
2033 		rxr->rxr_cons = cons;
2034 		if (ifiq_input(ifiq, &ml))
2035 			if_rxr_livelocked(&rxr->rxr_acct);
2036 		iavf_rxfill(sc, rxr);
2037 	}
2038 
2039 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&rxr->rxr_mem),
2040 	    0, IAVF_DMA_LEN(&rxr->rxr_mem),
2041 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2042 
2043 	return (done);
2044 }
2045 
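/*
 * Refill the rx ring with fresh clusters, bounded by the if_rxr(9)
 * accounting. The cluster address is written into the read-format
 * descriptor; the hardware later rewrites the same slot in write-back
 * format, which is why iavf_rxeof() syncs the ring for both
 * directions. If the ring drains completely, a timeout retries the
 * allocation later.
 */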
2046 static void
2047 iavf_rxfill(struct iavf_softc *sc, struct iavf_rx_ring *rxr)
2048 {
2049 	struct iavf_rx_rd_desc_32 *ring, *rxd;
2050 	struct iavf_rx_map *rxm;
2051 	bus_dmamap_t map;
2052 	struct mbuf *m;
2053 	unsigned int prod;
2054 	unsigned int slots;
2055 	unsigned int mask;
2056 	int post = 0;
2057 
2058 	slots = if_rxr_get(&rxr->rxr_acct, sc->sc_rx_ring_ndescs);
2059 	if (slots == 0)
2060 		return;
2061 
2062 	prod = rxr->rxr_prod;
2063 
2064 	ring = IAVF_DMA_KVA(&rxr->rxr_mem);
2065 	mask = sc->sc_rx_ring_ndescs - 1;
2066 
2067 	do {
2068 		rxm = &rxr->rxr_maps[prod];
2069 
2070 		m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES + ETHER_ALIGN);
2071 		if (m == NULL)
2072 			break;
2073 		m->m_data += (m->m_ext.ext_size - (MCLBYTES + ETHER_ALIGN));
2074 		m->m_len = m->m_pkthdr.len = MCLBYTES + ETHER_ALIGN;
2075 
2076 		map = rxm->rxm_map;
2077 
2078 		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
2079 		    BUS_DMA_NOWAIT) != 0) {
2080 			m_freem(m);
2081 			break;
2082 		}
2083 
2084 		rxm->rxm_m = m;
2085 
2086 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2087 		    BUS_DMASYNC_PREREAD);
2088 
2089 		rxd = &ring[prod];
2090 
2091 		htolem64(&rxd->paddr, map->dm_segs[0].ds_addr);
2092 		rxd->haddr = htole64(0);
2093 
2094 		prod++;
2095 		prod &= mask;
2096 
2097 		post = 1;
2098 	} while (--slots);
2099 
2100 	if_rxr_put(&rxr->rxr_acct, slots);
2101 
2102 	if (if_rxr_inuse(&rxr->rxr_acct) == 0)
2103 		timeout_add(&rxr->rxr_refill, 1);
2104 	else if (post) {
2105 		rxr->rxr_prod = prod;
2106 		iavf_wr(sc, rxr->rxr_tail, prod);
2107 	}
2108 }
2109 
2110 void
2111 iavf_rxrefill(void *arg)
2112 {
2113 	struct iavf_rx_ring *rxr = arg;
2114 	struct iavf_softc *sc = rxr->rxr_sc;
2115 
2116 	iavf_rxfill(sc, rxr);
2117 }
2118 
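/*
 * Report per-queue receive ring usage for the SIOCGIFRXR ioctl.
 */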
2119 static int
2120 iavf_rxrinfo(struct iavf_softc *sc, struct if_rxrinfo *ifri)
2121 {
2122 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2123 	struct if_rxring_info *ifr;
2124 	struct iavf_rx_ring *ring;
2125 	int i, rv;
2126 
2127 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2128 		return (ENOTTY);
2129 
2130 	ifr = mallocarray(iavf_nqueues(sc), sizeof(*ifr), M_TEMP,
2131 	    M_WAITOK|M_CANFAIL|M_ZERO);
2132 	if (ifr == NULL)
2133 		return (ENOMEM);
2134 
2135 	for (i = 0; i < iavf_nqueues(sc); i++) {
2136 		ring = ifp->if_iqs[i]->ifiq_softc;
2137 		ifr[i].ifr_size = MCLBYTES;
2138 		ifr[i].ifr_info = ring->rxr_acct;
2139 	}
2140 
2141 	rv = if_rxr_info_ioctl(ifri, iavf_nqueues(sc), ifr);
2142 	free(ifr, M_TEMP, iavf_nqueues(sc) * sizeof(*ifr));
2143 
2144 	return (rv);
2145 }
2146 
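/*
 * Interrupt handler. If ICR01 reads back as IAVF_REG_VFR, the
 * register space has been taken over by a VF reset, so a reset task
 * is scheduled instead of touching the rings. Otherwise admin-queue
 * and queue interrupt causes are dispatched; with a single vector,
 * every rx and tx ring is polled from here.
 */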
2147 static int
2148 iavf_intr(void *xsc)
2149 {
2150 	struct iavf_softc *sc = xsc;
2151 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2152 	uint32_t icr, ena;
2153 	int i, rv = 0;
2154 
2155 	ena = iavf_rd(sc, I40E_VFINT_ICR0_ENA1);
2156 	iavf_intr_enable(sc);
2157 	icr = iavf_rd(sc, I40E_VFINT_ICR01);
2158 
2159 	if (icr == IAVF_REG_VFR) {
2160 		printf("%s: VF reset in progress\n", DEVNAME(sc));
2161 		sc->sc_resetting = 1;
2162 		task_add(systq, &sc->sc_reset_task);
2163 		return (1);
2164 	}
2165 
2166 	if (ISSET(icr, I40E_VFINT_ICR01_ADMINQ_MASK)) {
2167 		iavf_atq_done(sc);
2168 		iavf_process_arq(sc, 0);
2169 		rv = 1;
2170 	}
2171 
2172 	if (ISSET(icr, I40E_VFINT_ICR01_QUEUE_0_MASK)) {
2173 		for (i = 0; i < iavf_nqueues(sc); i++) {
2174 			rv |= iavf_rxeof(sc, ifp->if_iqs[i]);
2175 			rv |= iavf_txeof(sc, ifp->if_ifqs[i]);
2176 		}
2177 	}
2178 
2179 	return (rv);
2180 }
2181 
2182 static void
2183 iavf_process_vf_resources(struct iavf_softc *sc, struct iavf_aq_desc *desc,
2184     struct iavf_aq_buf *buf)
2185 {
2186 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2187 	struct iavf_vc_vf_resource *vf_res;
2188 	struct iavf_vc_vsi_resource *vsi_res;
2189 	int mtu;
2190 
2191 	sc->sc_got_vf_resources = 1;
2192 
2193 	vf_res = buf->aqb_data;
2194 	if (letoh16(vf_res->num_vsis) == 0) {
2195 		printf(", no VSI available\n");
2196 		/* set vsi number to something */
2197 		return;
2198 	}
2199 
2200 	mtu = letoh16(vf_res->max_mtu);
2201 	if (mtu != 0)
2202 		ifp->if_hardmtu = MIN(IAVF_HARDMTU, mtu);
2203 
2204 	/* limit vectors to what we got here? */
2205 
2206 	/* just take the first vsi */
2207 	vsi_res = &vf_res->vsi_res[0];
2208 	sc->sc_vsi_id = letoh16(vsi_res->vsi_id);
2209 	sc->sc_qset_handle = letoh16(vsi_res->qset_handle);
2210 	/* limit number of queues to what we got here */
2211 	/* is vsi type interesting? */
2212 
2213 	sc->sc_vf_id = letoh32(desc->iaq_param[0]);
2214 
2215 	memcpy(sc->sc_ac.ac_enaddr, vsi_res->default_mac, ETHER_ADDR_LEN);
2216 
2217 	if (sc->sc_resetting == 0)
2218 		printf(", VF %d VSI %d", sc->sc_vf_id, sc->sc_vsi_id);
2219 }
2220 
2221 static const struct iavf_link_speed *
2222 iavf_find_link_speed(struct iavf_softc *sc, uint32_t link_speed)
2223 {
2224 	int i;
2225 	for (i = 0; i < nitems(iavf_link_speeds); i++) {
2226 		if (link_speed & (1 << i))
2227 			return (&iavf_link_speeds[i]);
2228 	}
2229 
2230 	return (NULL);
2231 }
2232 
2233 static void
2234 iavf_process_vc_event(struct iavf_softc *sc, struct iavf_aq_desc *desc,
2235     struct iavf_aq_buf *buf)
2236 {
2237 	struct iavf_vc_pf_event *event;
2238 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2239 	const struct iavf_link_speed *speed;
2240 	int link;
2241 
2242 	event = buf->aqb_data;
2243 	switch (event->event) {
2244 	case IAVF_VC_EVENT_LINK_CHANGE:
2245 		sc->sc_media_status = IFM_AVALID;
2246 		sc->sc_media_active = IFM_ETHER;
2247 		link = LINK_STATE_DOWN;
2248 		if (event->link_status) {
2249 			link = LINK_STATE_UP;
2250 			sc->sc_media_status |= IFM_ACTIVE;
2251 
2252 			ifp->if_baudrate = 0;
2253 			speed = iavf_find_link_speed(sc, event->link_speed);
2254 			if (speed != NULL) {
2255 				sc->sc_media_active |= speed->media;
2256 				ifp->if_baudrate = speed->baudrate;
2257 			}
2258 		}
2259 
2260 		if (ifp->if_link_state != link) {
2261 			ifp->if_link_state = link;
2262 			if_link_state_change(ifp);
2263 		}
2264 		break;
2265 
2266 	default:
2267 		break;
2268 	}
2269 }
2270 
2271 static void
2272 iavf_process_irq_map(struct iavf_softc *sc, struct iavf_aq_desc *desc)
2273 {
2274 	if (letoh32(desc->iaq_vc_retval) != IAVF_VC_RC_SUCCESS) {
2275 		printf("%s: config irq map failed: %d\n", DEVNAME(sc),
		    letoh32(desc->iaq_vc_retval));
2276 	}
2277 	sc->sc_got_irq_map = 1;
2278 }
2279 
2280 static void
2281 iavf_init_admin_queue(struct iavf_softc *sc)
2282 {
2283 	iavf_wr(sc, sc->sc_aq_regs->atq_head, 0);
2284 	iavf_wr(sc, sc->sc_aq_regs->arq_head, 0);
2285 	iavf_wr(sc, sc->sc_aq_regs->atq_tail, 0);
2286 
2287 	iavf_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
2288 
2289 	iavf_wr(sc, sc->sc_aq_regs->atq_bal,
2290 	    iavf_dmamem_lo(&sc->sc_atq));
2291 	iavf_wr(sc, sc->sc_aq_regs->atq_bah,
2292 	    iavf_dmamem_hi(&sc->sc_atq));
2293 	iavf_wr(sc, sc->sc_aq_regs->atq_len,
2294 	    sc->sc_aq_regs->atq_len_enable | IAVF_AQ_NUM);
2295 
2296 	iavf_wr(sc, sc->sc_aq_regs->arq_bal,
2297 	    iavf_dmamem_lo(&sc->sc_arq));
2298 	iavf_wr(sc, sc->sc_aq_regs->arq_bah,
2299 	    iavf_dmamem_hi(&sc->sc_arq));
2300 	iavf_wr(sc, sc->sc_aq_regs->arq_len,
2301 	    sc->sc_aq_regs->arq_len_enable | IAVF_AQ_NUM);
2302 
2303 	iavf_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
2304 }
2305 
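/*
 * Drain the admin receive queue up to the hardware head pointer.
 * Unsolicited messages (version replies, VF resources, PF events) are
 * handled inline; completions for synchronous commands store the
 * virtchnl return value and wake the waiter via cond_signal().
 */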
2306 static int
2307 iavf_process_arq(struct iavf_softc *sc, int fill)
2308 {
2309 	struct iavf_aq_desc *arq, *iaq;
2310 	struct iavf_aq_buf *aqb;
2311 	struct iavf_vc_version_info *ver;
2312 	unsigned int cons = sc->sc_arq_cons;
2313 	unsigned int prod;
2314 	int done = 0;
2315 
2316 	prod = iavf_rd(sc, sc->sc_aq_regs->arq_head) &
2317 	    sc->sc_aq_regs->arq_head_mask;
2318 
2319 	if (cons == prod)
2320 		return (0);
2321 
2322 	arq = IAVF_DMA_KVA(&sc->sc_arq);
2323 
2324 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_arq),
2325 	    0, IAVF_DMA_LEN(&sc->sc_arq),
2326 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2327 
2328 	do {
2329 		iaq = &arq[cons];
2330 
2331 		aqb = SIMPLEQ_FIRST(&sc->sc_arq_live);
2332 		SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_live, aqb_entry);
2333 		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IAVF_AQ_BUFLEN,
2334 		    BUS_DMASYNC_POSTREAD);
2335 
2336 		switch (letoh32(iaq->iaq_vc_opcode)) {
2337 		case IAVF_VC_OP_VERSION:
2338 			ver = aqb->aqb_data;
2339 			sc->sc_major_ver = letoh32(ver->major);
2340 			sc->sc_minor_ver = letoh32(ver->minor);
2341 			break;
2342 
2343 		case IAVF_VC_OP_GET_VF_RESOURCES:
2344 			iavf_process_vf_resources(sc, iaq, aqb);
2345 			break;
2346 
2347 		case IAVF_VC_OP_EVENT:
2348 			iavf_process_vc_event(sc, iaq, aqb);
2349 			break;
2350 
2351 		case IAVF_VC_OP_CONFIG_IRQ_MAP:
2352 			iavf_process_irq_map(sc, iaq);
2353 			break;
2354 
2355 		case IAVF_VC_OP_CONFIG_TX_QUEUE:
2356 		case IAVF_VC_OP_CONFIG_RX_QUEUE:
2357 		case IAVF_VC_OP_CONFIG_VSI_QUEUES:
2358 		case IAVF_VC_OP_ENABLE_QUEUES:
2359 		case IAVF_VC_OP_DISABLE_QUEUES:
2360 		case IAVF_VC_OP_GET_RSS_HENA_CAPS:
2361 		case IAVF_VC_OP_SET_RSS_HENA:
2362 		case IAVF_VC_OP_ADD_ETH_ADDR:
2363 		case IAVF_VC_OP_DEL_ETH_ADDR:
2364 		case IAVF_VC_OP_CONFIG_PROMISC:
2365 			sc->sc_admin_result = letoh32(iaq->iaq_vc_retval);
2366 			cond_signal(&sc->sc_admin_cond);
2367 			break;
2368 		}
2369 
2370 		memset(iaq, 0, sizeof(*iaq));
2371 		SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry);
2372 		if_rxr_put(&sc->sc_arq_ring, 1);
2373 
2374 		cons++;
2375 		cons &= IAVF_AQ_MASK;
2376 
2377 		done = 1;
2378 	} while (cons != prod);
2379 
2380 	if (fill)
2381 		iavf_arq_fill(sc, 1);
2382 
2383 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_arq),
2384 	    0, IAVF_DMA_LEN(&sc->sc_arq),
2385 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2386 
2387 	sc->sc_arq_cons = cons;
2388 	return (done);
2389 }
2390 
2391 static void
2392 iavf_atq_done(struct iavf_softc *sc)
2393 {
2394 	struct iavf_aq_desc *atq, *slot;
2395 	unsigned int cons;
2396 	unsigned int prod;
2397 
2398 	prod = sc->sc_atq_prod;
2399 	cons = sc->sc_atq_cons;
2400 
2401 	if (prod == cons)
2402 		return;
2403 
2404 	atq = IAVF_DMA_KVA(&sc->sc_atq);
2405 
2406 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_atq),
2407 	    0, IAVF_DMA_LEN(&sc->sc_atq),
2408 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2409 
2410 	do {
2411 		slot = &atq[cons];
2412 		if (!ISSET(slot->iaq_flags, htole16(IAVF_AQ_DD)))
2413 			break;
2414 
2415 		memset(slot, 0, sizeof(*slot));
2416 
2417 		cons++;
2418 		cons &= IAVF_AQ_MASK;
2419 	} while (cons != prod);
2420 
2421 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_atq),
2422 	    0, IAVF_DMA_LEN(&sc->sc_atq),
2423 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2424 
2425 	sc->sc_atq_cons = cons;
2426 }
2427 
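/*
 * Post one descriptor to the admin transmit queue and bump the tail
 * register. A minimal usage sketch, assuming the caller fills in the
 * descriptor and any external buffer beforehand (compare
 * iavf_get_version() below):
 *
 *	memset(&iaq, 0, sizeof(iaq));
 *	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
 *	iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_VERSION);
 *	iavf_atq_post(sc, &iaq);
 */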
2428 static int
2429 iavf_atq_post(struct iavf_softc *sc, struct iavf_aq_desc *iaq)
2430 {
2431 	struct iavf_aq_desc *atq, *slot;
2432 	unsigned int prod;
2433 
2434 	atq = IAVF_DMA_KVA(&sc->sc_atq);
2435 	prod = sc->sc_atq_prod;
2436 	slot = atq + prod;
2437 
2438 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_atq),
2439 	    0, IAVF_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
2440 
2441 	*slot = *iaq;
2442 	slot->iaq_flags |= htole16(IAVF_AQ_SI);
2443 
2444 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_atq),
2445 	    0, IAVF_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
2446 
2447 	prod++;
2448 	prod &= IAVF_AQ_MASK;
2449 	sc->sc_atq_prod = prod;
2450 	iavf_wr(sc, sc->sc_aq_regs->atq_tail, prod);
2451 	return (prod);
2452 }
2453 
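/*
 * Negotiate the virtchnl API version with the PF. The reply arrives
 * asynchronously on the admin receive queue, so sc_major_ver is
 * primed with UINT_MAX and the queue is polled until the VERSION
 * handler overwrites it or the attempt times out.
 */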
2454 static int
2455 iavf_get_version(struct iavf_softc *sc)
2456 {
2457 	struct iavf_aq_desc iaq;
2458 	struct iavf_vc_version_info *ver;
2459 	int tries;
2460 
2461 	memset(&iaq, 0, sizeof(iaq));
2462 	iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
2463 	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
2464 	iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_VERSION);
2465 	iaq.iaq_datalen = htole16(sizeof(struct iavf_vc_version_info));
2466 	iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));
2467 
2468 	ver = IAVF_DMA_KVA(&sc->sc_scratch);
2469 	ver->major = htole32(IAVF_VF_MAJOR);
2470 	ver->minor = htole32(IAVF_VF_MINOR);
2471 	sc->sc_major_ver = UINT_MAX;
2472 	sc->sc_minor_ver = UINT_MAX;
2473 
2474 	membar_sync();
2475 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_scratch),
2476 	    0, IAVF_DMA_LEN(&sc->sc_scratch), BUS_DMASYNC_PREWRITE);
2477 
2478 	iavf_atq_post(sc, &iaq);
2479 
2480 	for (tries = 0; tries < 100; tries++) {
2481 		iavf_process_arq(sc, 1);
2482 		if (sc->sc_major_ver != UINT_MAX)
2483 			break;
2484 
2485 		delaymsec(1);
2486 	}
2487 	if (tries == 100) {
2488 		printf(", timeout waiting for VF version");
2489 		return (1);
2490 	}
2491 
2492 	if (sc->sc_major_ver != IAVF_VF_MAJOR) {
2493 		printf(", unsupported VF version %d", sc->sc_major_ver);
2494 		return (1);
2495 	}
2496 
2497 	if (sc->sc_resetting == 0) {
2498 		printf(", VF version %d.%d%s", sc->sc_major_ver,
2499 		    sc->sc_minor_ver,
2500 		    (sc->sc_minor_ver > IAVF_VF_MINOR) ? " (minor mismatch)" : "");
2501 	}
2502 
2503 	return (0);
2504 }
2505 
2506 static int
2507 iavf_get_vf_resources(struct iavf_softc *sc)
2508 {
2509 	struct iavf_aq_desc iaq;
2510 	uint32_t *cap;
2511 	int tries;
2512 
2513 	memset(&iaq, 0, sizeof(iaq));
2514 	iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
2515 	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
2516 	iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_GET_VF_RESOURCES);
2517 	iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));
2518 
2519 	if (sc->sc_minor_ver > 0) {
2520 		iaq.iaq_datalen = htole16(sizeof(uint32_t));
2521 		cap = IAVF_DMA_KVA(&sc->sc_scratch);
2522 		*cap = htole32(IAVF_VC_OFFLOAD_L2 | IAVF_VC_OFFLOAD_VLAN |
2523 		    IAVF_VC_OFFLOAD_RSS_PF);
2524 	}
2525 
2526 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_scratch),
2527 	    0, IAVF_DMA_LEN(&sc->sc_scratch), BUS_DMASYNC_PREWRITE);
2528 
2529 	sc->sc_got_vf_resources = 0;
2530 	iavf_atq_post(sc, &iaq);
2531 
2532 	for (tries = 0; tries < 100; tries++) {
2533 		iavf_process_arq(sc, 1);
2534 		if (sc->sc_got_vf_resources != 0)
2535 			return (0);
2536 
2537 		delaymsec(1);
2538 	}
2539 
2540 	return (1);
2541 }
2542 
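/*
 * Tell the PF how queues map onto interrupt vectors. Only one vector
 * map is sent: vector 0 carries every rx and tx queue as well as the
 * admin queue, matching the single-vector iavf_intr() handler.
 */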
2543 static int
2544 iavf_config_irq_map(struct iavf_softc *sc)
2545 {
2546 	struct iavf_aq_desc iaq;
2547 	struct iavf_vc_vector_map *vec;
2548 	struct iavf_vc_irq_map_info *map;
2549 	int tries;
2550 
2551 	memset(&iaq, 0, sizeof(iaq));
2552 	iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
2553 	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
2554 	iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_CONFIG_IRQ_MAP);
2555 	iaq.iaq_datalen = htole16(sizeof(*map) + sizeof(*vec));
2556 	iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));
2557 
2558 	map = IAVF_DMA_KVA(&sc->sc_scratch);
2559 	map->num_vectors = htole16(1);
2560 
2561 	vec = map->vecmap;
2562 	vec[0].vsi_id = htole16(sc->sc_vsi_id);
2563 	vec[0].vector_id = 0;
2564 	vec[0].rxq_map = htole16(iavf_allqueues(sc));
2565 	vec[0].txq_map = htole16(iavf_allqueues(sc));
2566 	vec[0].rxitr_idx = htole16(IAVF_NOITR);
2567 	vec[0].txitr_idx = htole16(IAVF_NOITR);
2568 
2569 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_scratch),
2570 	    0, IAVF_DMA_LEN(&sc->sc_scratch), BUS_DMASYNC_PREWRITE);
2571 
2572 	sc->sc_got_irq_map = 0;
2573 	iavf_atq_post(sc, &iaq);
2574 
2575 	for (tries = 0; tries < 100; tries++) {
2576 		iavf_process_arq(sc, 1);
2577 		if (sc->sc_got_irq_map != 0)
2578 			return (0);
2579 
2580 		delaymsec(1);
2581 	}
2582 
2583 	return (1);
2584 }
2585 
2586 static struct iavf_aq_buf *
2587 iavf_aqb_alloc(struct iavf_softc *sc)
2588 {
2589 	struct iavf_aq_buf *aqb;
2590 
2591 	aqb = malloc(sizeof(*aqb), M_DEVBUF, M_WAITOK|M_CANFAIL);
2592 	if (aqb == NULL)
2593 		return (NULL);
2594 
2595 	aqb->aqb_data = dma_alloc(IAVF_AQ_BUFLEN, PR_WAITOK);
2596 	if (aqb->aqb_data == NULL)
2597 		goto free;
2598 
2599 	if (bus_dmamap_create(sc->sc_dmat, IAVF_AQ_BUFLEN, 1,
2600 	    IAVF_AQ_BUFLEN, 0,
2601 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2602 	    &aqb->aqb_map) != 0)
2603 		goto dma_free;
2604 
2605 	if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,
2606 	    IAVF_AQ_BUFLEN, NULL, BUS_DMA_WAITOK) != 0)
2607 		goto destroy;
2608 
2609 	return (aqb);
2610 
2611 destroy:
2612 	bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
2613 dma_free:
2614 	dma_free(aqb->aqb_data, IAVF_AQ_BUFLEN);
2615 free:
2616 	free(aqb, M_DEVBUF, sizeof(*aqb));
2617 
2618 	return (NULL);
2619 }
2620 
2621 static void
2622 iavf_aqb_free(struct iavf_softc *sc, struct iavf_aq_buf *aqb)
2623 {
2624 	bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map);
2625 	bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
2626 	dma_free(aqb->aqb_data, IAVF_AQ_BUFLEN);
2627 	free(aqb, M_DEVBUF, sizeof(*aqb));
2628 }
2629 
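/*
 * Keep the admin receive queue stocked with buffers the PF can write
 * replies and events into. Idle buffers are recycled from sc_arq_idle
 * before new ones are allocated; each posted slot is described by a
 * cleared descriptor carrying only the buffer flags, length and DMA
 * address.
 */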
2630 static int
2631 iavf_arq_fill(struct iavf_softc *sc, int post)
2632 {
2633 	struct iavf_aq_buf *aqb;
2634 	struct iavf_aq_desc *arq, *iaq;
2635 	unsigned int prod = sc->sc_arq_prod;
2636 	unsigned int n;
2637 	int filled = 0;
2638 
2639 	n = if_rxr_get(&sc->sc_arq_ring, IAVF_AQ_NUM);
2640 	arq = IAVF_DMA_KVA(&sc->sc_arq);
2641 
2642 	while (n > 0) {
2643 		aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle);
2644 		if (aqb != NULL)
2645 			SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_idle, aqb_entry);
2646 		else if ((aqb = iavf_aqb_alloc(sc)) == NULL)
2647 			break;
2648 
2649 		memset(aqb->aqb_data, 0, IAVF_AQ_BUFLEN);
2650 
2651 		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IAVF_AQ_BUFLEN,
2652 		    BUS_DMASYNC_PREREAD);
2653 
2654 		iaq = &arq[prod];
2655 		iaq->iaq_flags = htole16(IAVF_AQ_BUF |
2656 		    (IAVF_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IAVF_AQ_LB : 0));
2657 		iaq->iaq_opcode = 0;
2658 		iaq->iaq_datalen = htole16(IAVF_AQ_BUFLEN);
2659 		iaq->iaq_retval = 0;
2660 		iaq->iaq_vc_opcode = 0;
2661 		iaq->iaq_vc_retval = 0;
2662 		iaq->iaq_param[0] = 0;
2663 		iaq->iaq_param[1] = 0;
2664 		iavf_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);
2665 
2666 		SIMPLEQ_INSERT_TAIL(&sc->sc_arq_live, aqb, aqb_entry);
2667 
2668 		prod++;
2669 		prod &= IAVF_AQ_MASK;
2670 
2671 		filled = 1;
2672 
2673 		n--;
2674 	}
2675 
2676 	if_rxr_put(&sc->sc_arq_ring, n);
2677 	sc->sc_arq_prod = prod;
2678 
2679 	if (filled && post)
2680 		iavf_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
2681 
2682 	return (filled);
2683 }
2684 
2685 static void
2686 iavf_arq_unfill(struct iavf_softc *sc)
2687 {
2688 	struct iavf_aq_buf *aqb;
2689 
2690 	while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_live)) != NULL) {
2691 		SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_live, aqb_entry);
2692 
2693 		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IAVF_AQ_BUFLEN,
2694 		    BUS_DMASYNC_POSTREAD);
2695 		iavf_aqb_free(sc, aqb);
2696 		if_rxr_put(&sc->sc_arq_ring, 1);
2697 	}
2698 }
2699 
2700 static void
2701 iavf_arq_timeout(void *xsc)
2702 {
2703 	struct iavf_softc *sc = xsc;
2704 
2705 	sc->sc_admin_result = -1;
2706 	cond_signal(&sc->sc_admin_cond);
2707 }
2708 
2709 static int
2710 iavf_arq_wait(struct iavf_softc *sc, int msec)
2711 {
2712 	cond_init(&sc->sc_admin_cond);
2713 
2714 	timeout_add_msec(&sc->sc_admin_timeout, msec);
2715 
2716 	cond_wait(&sc->sc_admin_cond, "iavfarq");
2717 	timeout_del(&sc->sc_admin_timeout);
2718 
2719 	iavf_arq_fill(sc, 1);
2720 	return (sc->sc_admin_result);
2721 }
2722 
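/*
 * The usual bus_dma(9) four-step allocation: create a map, allocate
 * the physical segment, map it into kernel virtual address space,
 * then load the map. The unmap/free/destroy labels undo exactly one
 * step each, in reverse.
 */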
2723 static int
2724 iavf_dmamem_alloc(struct iavf_softc *sc, struct iavf_dmamem *ixm,
2725     bus_size_t size, u_int align)
2726 {
2727 	ixm->ixm_size = size;
2728 
2729 	if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
2730 	    ixm->ixm_size, 0,
2731 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2732 	    &ixm->ixm_map) != 0)
2733 		return (1);
2734 	if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
2735 	    align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
2736 	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
2737 		goto destroy;
2738 	if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
2739 	    ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
2740 		goto free;
2741 	if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
2742 	    ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
2743 		goto unmap;
2744 
2745 	return (0);
2746 unmap:
2747 	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
2748 free:
2749 	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
2750 destroy:
2751 	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
2752 	return (1);
2753 }
2754 
2755 static void
2756 iavf_dmamem_free(struct iavf_softc *sc, struct iavf_dmamem *ixm)
2757 {
2758 	bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
2759 	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
2760 	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
2761 	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
2762 }
2763