/*	$OpenBSD: if_ixl.c,v 1.93 2023/11/10 15:51:20 bluhm Exp $ */

/*
 * Copyright (c) 2013-2015, Intel Corporation
 * All rights reserved.

 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  3. Neither the name of the Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2016,2017 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"
#include "kstat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/timeout.h>
#include <sys/task.h>
#include <sys/syslog.h>
#include <sys/intrmap.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/route.h>
#include <net/toeplitz.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#if NKSTAT > 0
#include <sys/kstat.h>
#endif

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/if_ether.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#ifdef __sparc64__
#include <dev/ofw/openfirm.h>
#endif

#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE 64
#endif

#define IXL_MAX_VECTORS			8 /* XXX this is pretty arbitrary */

#define I40E_MASK(mask, shift)		((mask) << (shift))
#define I40E_PF_RESET_WAIT_COUNT	200
#define I40E_AQ_LARGE_BUF		512

/* bitfields for Tx queue mapping in QTX_CTL */
#define I40E_QTX_CTL_VF_QUEUE		0x0
#define I40E_QTX_CTL_VM_QUEUE		0x1
#define I40E_QTX_CTL_PF_QUEUE		0x2

#define I40E_QUEUE_TYPE_EOL		0x7ff
#define I40E_INTR_NOTX_QUEUE		0

#define I40E_QUEUE_TYPE_RX		0x0
#define I40E_QUEUE_TYPE_TX		0x1
#define I40E_QUEUE_TYPE_PE_CEQ		0x2
#define I40E_QUEUE_TYPE_UNKNOWN		0x3

#define I40E_ITR_INDEX_RX		0x0
#define I40E_ITR_INDEX_TX		0x1
#define I40E_ITR_INDEX_OTHER		0x2
#define I40E_ITR_INDEX_NONE		0x3

#include <dev/pci/if_ixlreg.h>

#define I40E_INTR_NOTX_QUEUE		0
#define I40E_INTR_NOTX_INTR		0
#define I40E_INTR_NOTX_RX_QUEUE		0
#define I40E_INTR_NOTX_TX_QUEUE		1
#define I40E_INTR_NOTX_RX_MASK		I40E_PFINT_ICR0_QUEUE_0_MASK
#define I40E_INTR_NOTX_TX_MASK		I40E_PFINT_ICR0_QUEUE_1_MASK

struct ixl_aq_desc {
	uint16_t	iaq_flags;
#define	IXL_AQ_DD		(1U << 0)
#define	IXL_AQ_CMP		(1U << 1)
#define IXL_AQ_ERR		(1U << 2)
#define IXL_AQ_VFE		(1U << 3)
#define IXL_AQ_LB		(1U << 9)
#define IXL_AQ_RD		(1U << 10)
#define IXL_AQ_VFC		(1U << 11)
#define IXL_AQ_BUF		(1U << 12)
#define IXL_AQ_SI		(1U << 13)
#define IXL_AQ_EI		(1U << 14)
#define IXL_AQ_FE		(1U << 15)

#define IXL_AQ_FLAGS_FMT	"\020" "\020FE" "\017EI" "\016SI" "\015BUF" \
				    "\014VFC" "\013RD" "\012LB" "\004VFE" \
				    "\003ERR" "\002CMP" "\001DD"

	uint16_t	iaq_opcode;

	uint16_t	iaq_datalen;
	uint16_t	iaq_retval;

	uint64_t	iaq_cookie;

	uint32_t	iaq_param[4];
/*	iaq_data_hi	iaq_param[2] */
/*	iaq_data_lo	iaq_param[3] */
} __packed __aligned(8);
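
/*
 * Illustrative sketch, not part of the driver: IXL_AQ_FLAGS_FMT is a
 * kernel printf(9) "%b" format string; the leading "\020" asks for hex
 * output and each "\NNN<name>" pair labels (1-indexed) bit NNN of the
 * value. A hypothetical debug helper (the IXL_DEBUG_EXAMPLE guard is
 * ours, not the driver's) could decode a descriptor like this:
 */
#ifdef IXL_DEBUG_EXAMPLE
static void
ixl_aq_desc_print_example(const struct ixl_aq_desc *iaq)
{
	printf("aq: flags %b opcode %04x retval %u\n",
	    lemtoh16(&iaq->iaq_flags), IXL_AQ_FLAGS_FMT,
	    lemtoh16(&iaq->iaq_opcode), lemtoh16(&iaq->iaq_retval));
}
#endif /* IXL_DEBUG_EXAMPLE */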

/* aq commands */
#define IXL_AQ_OP_GET_VERSION		0x0001
#define IXL_AQ_OP_DRIVER_VERSION	0x0002
#define IXL_AQ_OP_QUEUE_SHUTDOWN	0x0003
#define IXL_AQ_OP_SET_PF_CONTEXT	0x0004
#define IXL_AQ_OP_GET_AQ_ERR_REASON	0x0005
#define IXL_AQ_OP_REQUEST_RESOURCE	0x0008
#define IXL_AQ_OP_RELEASE_RESOURCE	0x0009
#define IXL_AQ_OP_LIST_FUNC_CAP		0x000a
#define IXL_AQ_OP_LIST_DEV_CAP		0x000b
#define IXL_AQ_OP_MAC_ADDRESS_READ	0x0107
#define IXL_AQ_OP_CLEAR_PXE_MODE	0x0110
#define IXL_AQ_OP_SWITCH_GET_CONFIG	0x0200
#define IXL_AQ_OP_RX_CTL_READ		0x0206
#define IXL_AQ_OP_RX_CTL_WRITE		0x0207
#define IXL_AQ_OP_ADD_VSI		0x0210
#define IXL_AQ_OP_UPD_VSI_PARAMS	0x0211
#define IXL_AQ_OP_GET_VSI_PARAMS	0x0212
#define IXL_AQ_OP_ADD_VEB		0x0230
#define IXL_AQ_OP_UPD_VEB_PARAMS	0x0231
#define IXL_AQ_OP_GET_VEB_PARAMS	0x0232
#define IXL_AQ_OP_ADD_MACVLAN		0x0250
#define IXL_AQ_OP_REMOVE_MACVLAN	0x0251
#define IXL_AQ_OP_SET_VSI_PROMISC	0x0254
#define IXL_AQ_OP_PHY_GET_ABILITIES	0x0600
#define IXL_AQ_OP_PHY_SET_CONFIG	0x0601
#define IXL_AQ_OP_PHY_SET_MAC_CONFIG	0x0603
#define IXL_AQ_OP_PHY_RESTART_AN	0x0605
#define IXL_AQ_OP_PHY_LINK_STATUS	0x0607
#define IXL_AQ_OP_PHY_SET_EVENT_MASK	0x0613
#define IXL_AQ_OP_PHY_SET_REGISTER	0x0628
#define IXL_AQ_OP_PHY_GET_REGISTER	0x0629
#define IXL_AQ_OP_LLDP_GET_MIB		0x0a00
#define IXL_AQ_OP_LLDP_MIB_CHG_EV	0x0a01
#define IXL_AQ_OP_LLDP_ADD_TLV		0x0a02
#define IXL_AQ_OP_LLDP_UPD_TLV		0x0a03
#define IXL_AQ_OP_LLDP_DEL_TLV		0x0a04
#define IXL_AQ_OP_LLDP_STOP_AGENT	0x0a05
#define IXL_AQ_OP_LLDP_START_AGENT	0x0a06
#define IXL_AQ_OP_LLDP_GET_CEE_DCBX	0x0a07
#define IXL_AQ_OP_LLDP_SPECIFIC_AGENT	0x0a09
#define IXL_AQ_OP_SET_RSS_KEY		0x0b02 /* 722 only */
#define IXL_AQ_OP_SET_RSS_LUT		0x0b03 /* 722 only */
#define IXL_AQ_OP_GET_RSS_KEY		0x0b04 /* 722 only */
#define IXL_AQ_OP_GET_RSS_LUT		0x0b05 /* 722 only */

struct ixl_aq_mac_addresses {
	uint8_t		pf_lan[ETHER_ADDR_LEN];
	uint8_t		pf_san[ETHER_ADDR_LEN];
	uint8_t		port[ETHER_ADDR_LEN];
	uint8_t		pf_wol[ETHER_ADDR_LEN];
} __packed;

#define IXL_AQ_MAC_PF_LAN_VALID		(1U << 4)
#define IXL_AQ_MAC_PF_SAN_VALID		(1U << 5)
#define IXL_AQ_MAC_PORT_VALID		(1U << 6)
#define IXL_AQ_MAC_PF_WOL_VALID		(1U << 7)

struct ixl_aq_capability {
	uint16_t	cap_id;
#define IXL_AQ_CAP_SWITCH_MODE		0x0001
#define IXL_AQ_CAP_MNG_MODE		0x0002
#define IXL_AQ_CAP_NPAR_ACTIVE		0x0003
#define IXL_AQ_CAP_OS2BMC_CAP		0x0004
#define IXL_AQ_CAP_FUNCTIONS_VALID	0x0005
#define IXL_AQ_CAP_ALTERNATE_RAM	0x0006
#define IXL_AQ_CAP_WOL_AND_PROXY	0x0008
#define IXL_AQ_CAP_SRIOV		0x0012
#define IXL_AQ_CAP_VF			0x0013
#define IXL_AQ_CAP_VMDQ			0x0014
#define IXL_AQ_CAP_8021QBG		0x0015
#define IXL_AQ_CAP_8021QBR		0x0016
#define IXL_AQ_CAP_VSI			0x0017
#define IXL_AQ_CAP_DCB			0x0018
#define IXL_AQ_CAP_FCOE			0x0021
#define IXL_AQ_CAP_ISCSI		0x0022
#define IXL_AQ_CAP_RSS			0x0040
#define IXL_AQ_CAP_RXQ			0x0041
#define IXL_AQ_CAP_TXQ			0x0042
#define IXL_AQ_CAP_MSIX			0x0043
#define IXL_AQ_CAP_VF_MSIX		0x0044
#define IXL_AQ_CAP_FLOW_DIRECTOR	0x0045
#define IXL_AQ_CAP_1588			0x0046
#define IXL_AQ_CAP_IWARP		0x0051
#define IXL_AQ_CAP_LED			0x0061
#define IXL_AQ_CAP_SDP			0x0062
#define IXL_AQ_CAP_MDIO			0x0063
#define IXL_AQ_CAP_WSR_PROT		0x0064
#define IXL_AQ_CAP_NVM_MGMT		0x0080
#define IXL_AQ_CAP_FLEX10		0x00F1
#define IXL_AQ_CAP_CEM			0x00F2
	uint8_t		major_rev;
	uint8_t		minor_rev;
	uint32_t	number;
	uint32_t	logical_id;
	uint32_t	phys_id;
	uint8_t		_reserved[16];
} __packed __aligned(4);

#define IXL_LLDP_SHUTDOWN		0x1

struct ixl_aq_switch_config {
	uint16_t	num_reported;
	uint16_t	num_total;
	uint8_t		_reserved[12];
} __packed __aligned(4);

struct ixl_aq_switch_config_element {
	uint8_t		type;
#define IXL_AQ_SW_ELEM_TYPE_MAC		1
#define IXL_AQ_SW_ELEM_TYPE_PF		2
#define IXL_AQ_SW_ELEM_TYPE_VF		3
#define IXL_AQ_SW_ELEM_TYPE_EMP		4
#define IXL_AQ_SW_ELEM_TYPE_BMC		5
#define IXL_AQ_SW_ELEM_TYPE_PV		16
#define IXL_AQ_SW_ELEM_TYPE_VEB		17
#define IXL_AQ_SW_ELEM_TYPE_PA		18
#define IXL_AQ_SW_ELEM_TYPE_VSI		19
	uint8_t		revision;
#define IXL_AQ_SW_ELEM_REV_1		1
	uint16_t	seid;

	uint16_t	uplink_seid;
	uint16_t	downlink_seid;

	uint8_t		_reserved[3];
	uint8_t		connection_type;
#define IXL_AQ_CONN_TYPE_REGULAR	0x1
#define IXL_AQ_CONN_TYPE_DEFAULT	0x2
#define IXL_AQ_CONN_TYPE_CASCADED	0x3

	uint16_t	scheduler_id;
	uint16_t	element_info;
} __packed __aligned(4);

#define IXL_PHY_TYPE_SGMII		0x00
#define IXL_PHY_TYPE_1000BASE_KX	0x01
#define IXL_PHY_TYPE_10GBASE_KX4	0x02
#define IXL_PHY_TYPE_10GBASE_KR		0x03
#define IXL_PHY_TYPE_40GBASE_KR4	0x04
#define IXL_PHY_TYPE_XAUI		0x05
#define IXL_PHY_TYPE_XFI		0x06
#define IXL_PHY_TYPE_SFI		0x07
#define IXL_PHY_TYPE_XLAUI		0x08
#define IXL_PHY_TYPE_XLPPI		0x09
#define IXL_PHY_TYPE_40GBASE_CR4_CU	0x0a
#define IXL_PHY_TYPE_10GBASE_CR1_CU	0x0b
#define IXL_PHY_TYPE_10GBASE_AOC	0x0c
#define IXL_PHY_TYPE_40GBASE_AOC	0x0d
#define IXL_PHY_TYPE_100BASE_TX		0x11
#define IXL_PHY_TYPE_1000BASE_T		0x12
#define IXL_PHY_TYPE_10GBASE_T		0x13
#define IXL_PHY_TYPE_10GBASE_SR		0x14
#define IXL_PHY_TYPE_10GBASE_LR		0x15
#define IXL_PHY_TYPE_10GBASE_SFPP_CU	0x16
#define IXL_PHY_TYPE_10GBASE_CR1	0x17
#define IXL_PHY_TYPE_40GBASE_CR4	0x18
#define IXL_PHY_TYPE_40GBASE_SR4	0x19
#define IXL_PHY_TYPE_40GBASE_LR4	0x1a
#define IXL_PHY_TYPE_1000BASE_SX	0x1b
#define IXL_PHY_TYPE_1000BASE_LX	0x1c
#define IXL_PHY_TYPE_1000BASE_T_OPTICAL	0x1d
#define IXL_PHY_TYPE_20GBASE_KR2	0x1e

#define IXL_PHY_TYPE_25GBASE_KR		0x1f
#define IXL_PHY_TYPE_25GBASE_CR		0x20
#define IXL_PHY_TYPE_25GBASE_SR		0x21
#define IXL_PHY_TYPE_25GBASE_LR		0x22
#define IXL_PHY_TYPE_25GBASE_AOC	0x23
#define IXL_PHY_TYPE_25GBASE_ACC	0x24

struct ixl_aq_module_desc {
	uint8_t		oui[3];
	uint8_t		_reserved1;
	uint8_t		part_number[16];
	uint8_t		revision[4];
	uint8_t		_reserved2[8];
} __packed __aligned(4);

struct ixl_aq_phy_abilities {
	uint32_t	phy_type;

	uint8_t		link_speed;
#define IXL_AQ_PHY_LINK_SPEED_100MB	(1 << 1)
#define IXL_AQ_PHY_LINK_SPEED_1000MB	(1 << 2)
#define IXL_AQ_PHY_LINK_SPEED_10GB	(1 << 3)
#define IXL_AQ_PHY_LINK_SPEED_40GB	(1 << 4)
#define IXL_AQ_PHY_LINK_SPEED_20GB	(1 << 5)
#define IXL_AQ_PHY_LINK_SPEED_25GB	(1 << 6)
	uint8_t		abilities;
	uint16_t	eee_capability;

	uint32_t	eeer_val;

	uint8_t		d3_lpan;
	uint8_t		phy_type_ext;
#define IXL_AQ_PHY_TYPE_EXT_25G_KR	0x01
#define IXL_AQ_PHY_TYPE_EXT_25G_CR	0x02
#define IXL_AQ_PHY_TYPE_EXT_25G_SR	0x04
#define IXL_AQ_PHY_TYPE_EXT_25G_LR	0x08
	uint8_t		fec_cfg_curr_mod_ext_info;
#define IXL_AQ_ENABLE_FEC_KR		0x01
#define IXL_AQ_ENABLE_FEC_RS		0x02
#define IXL_AQ_REQUEST_FEC_KR		0x04
#define IXL_AQ_REQUEST_FEC_RS		0x08
#define IXL_AQ_ENABLE_FEC_AUTO		0x10
#define IXL_AQ_MODULE_TYPE_EXT_MASK	0xe0
#define IXL_AQ_MODULE_TYPE_EXT_SHIFT	5
	uint8_t		ext_comp_code;

	uint8_t		phy_id[4];

	uint8_t		module_type[3];
#define IXL_SFF8024_ID_SFP		0x03
#define IXL_SFF8024_ID_QSFP		0x0c
#define IXL_SFF8024_ID_QSFP_PLUS	0x0d
#define IXL_SFF8024_ID_QSFP28		0x11
	uint8_t		qualified_module_count;
#define IXL_AQ_PHY_MAX_QMS		16
	struct ixl_aq_module_desc
			qualified_module[IXL_AQ_PHY_MAX_QMS];
} __packed __aligned(4);

struct ixl_aq_link_param {
	uint8_t		notify;
#define IXL_AQ_LINK_NOTIFY	0x03
	uint8_t		_reserved1;
	uint8_t		phy;
	uint8_t		speed;
	uint8_t		status;
	uint8_t		_reserved2[11];
} __packed __aligned(4);

struct ixl_aq_vsi_param {
	uint16_t	uplink_seid;
	uint8_t		connect_type;
#define IXL_AQ_VSI_CONN_TYPE_NORMAL	(0x1)
#define IXL_AQ_VSI_CONN_TYPE_DEFAULT	(0x2)
#define IXL_AQ_VSI_CONN_TYPE_CASCADED	(0x3)
	uint8_t		_reserved1;

	uint8_t		vf_id;
	uint8_t		_reserved2;
	uint16_t	vsi_flags;
#define IXL_AQ_VSI_TYPE_SHIFT		0x0
#define IXL_AQ_VSI_TYPE_MASK		(0x3 << IXL_AQ_VSI_TYPE_SHIFT)
#define IXL_AQ_VSI_TYPE_VF		0x0
#define IXL_AQ_VSI_TYPE_VMDQ2		0x1
#define IXL_AQ_VSI_TYPE_PF		0x2
#define IXL_AQ_VSI_TYPE_EMP_MNG		0x3
#define IXL_AQ_VSI_FLAG_CASCADED_PV	0x4

	uint32_t	addr_hi;
	uint32_t	addr_lo;
} __packed __aligned(16);

struct ixl_aq_add_macvlan {
	uint16_t	num_addrs;
	uint16_t	seid0;
	uint16_t	seid1;
	uint16_t	seid2;
	uint32_t	addr_hi;
	uint32_t	addr_lo;
} __packed __aligned(16);

struct ixl_aq_add_macvlan_elem {
	uint8_t		macaddr[6];
	uint16_t	vlan;
	uint16_t	flags;
#define IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH	0x0001
#define IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN	0x0004
	uint16_t	queue;
	uint32_t	_reserved;
} __packed __aligned(16);

struct ixl_aq_remove_macvlan {
	uint16_t	num_addrs;
	uint16_t	seid0;
	uint16_t	seid1;
	uint16_t	seid2;
	uint32_t	addr_hi;
	uint32_t	addr_lo;
} __packed __aligned(16);

struct ixl_aq_remove_macvlan_elem {
	uint8_t		macaddr[6];
	uint16_t	vlan;
	uint8_t		flags;
#define IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH	0x0001
#define IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN	0x0008
	uint8_t		_reserved[7];
} __packed __aligned(16);

struct ixl_aq_vsi_reply {
	uint16_t	seid;
	uint16_t	vsi_number;

	uint16_t	vsis_used;
	uint16_t	vsis_free;

	uint32_t	addr_hi;
	uint32_t	addr_lo;
} __packed __aligned(16);

struct ixl_aq_vsi_data {
	/* first 96 bytes are written by SW */
	uint16_t	valid_sections;
#define IXL_AQ_VSI_VALID_SWITCH		(1 << 0)
#define IXL_AQ_VSI_VALID_SECURITY	(1 << 1)
#define IXL_AQ_VSI_VALID_VLAN		(1 << 2)
#define IXL_AQ_VSI_VALID_CAS_PV		(1 << 3)
#define IXL_AQ_VSI_VALID_INGRESS_UP	(1 << 4)
#define IXL_AQ_VSI_VALID_EGRESS_UP	(1 << 5)
#define IXL_AQ_VSI_VALID_QUEUE_MAP	(1 << 6)
#define IXL_AQ_VSI_VALID_QUEUE_OPT	(1 << 7)
#define IXL_AQ_VSI_VALID_OUTER_UP	(1 << 8)
#define IXL_AQ_VSI_VALID_SCHED		(1 << 9)
	/* switch section */
	uint16_t	switch_id;
#define IXL_AQ_VSI_SWITCH_ID_SHIFT	0
#define IXL_AQ_VSI_SWITCH_ID_MASK	(0xfff << IXL_AQ_VSI_SWITCH_ID_SHIFT)
#define IXL_AQ_VSI_SWITCH_NOT_STAG	(1 << 12)
#define IXL_AQ_VSI_SWITCH_LOCAL_LB	(1 << 14)

	uint8_t		_reserved1[2];
	/* security section */
	uint8_t		sec_flags;
#define IXL_AQ_VSI_SEC_ALLOW_DEST_OVRD	(1 << 0)
#define IXL_AQ_VSI_SEC_ENABLE_VLAN_CHK	(1 << 1)
#define IXL_AQ_VSI_SEC_ENABLE_MAC_CHK	(1 << 2)
	uint8_t		_reserved2;

	/* vlan section */
	uint16_t	pvid;
	uint16_t	fcoe_pvid;

	uint8_t		port_vlan_flags;
#define IXL_AQ_VSI_PVLAN_MODE_SHIFT	0
#define IXL_AQ_VSI_PVLAN_MODE_MASK	(0x3 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_MODE_TAGGED	(0x1 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_MODE_UNTAGGED	(0x2 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_MODE_ALL	(0x3 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_INSERT_PVID	(0x4 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_SHIFT	0x3
#define IXL_AQ_VSI_PVLAN_EMOD_MASK	(0x3 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH	(0x0 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_STR_UP	(0x1 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_STR	(0x2 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_NOTHING	(0x3 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
	uint8_t		_reserved3[3];

	/* ingress egress up section */
	uint32_t	ingress_table;
#define IXL_AQ_VSI_UP_SHIFT(_up)	((_up) * 3)
#define IXL_AQ_VSI_UP_MASK(_up)		(0x7 << IXL_AQ_VSI_UP_SHIFT(_up))
	uint32_t	egress_table;

	/* cascaded pv section */
	uint16_t	cas_pv_tag;
	uint8_t		cas_pv_flags;
#define IXL_AQ_VSI_CAS_PV_TAGX_SHIFT	0
#define IXL_AQ_VSI_CAS_PV_TAGX_MASK	(0x3 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_TAGX_LEAVE	(0x0 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_TAGX_REMOVE	(0x1 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_TAGX_COPY	(0x2 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_INSERT_TAG	(1 << 4)
#define IXL_AQ_VSI_CAS_PV_ETAG_PRUNE	(1 << 5)
#define IXL_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG \
					(1 << 6)
	uint8_t		_reserved4;

	/* queue mapping section */
	uint16_t	mapping_flags;
#define IXL_AQ_VSI_QUE_MAP_MASK		0x1
#define IXL_AQ_VSI_QUE_MAP_CONTIG	0x0
#define IXL_AQ_VSI_QUE_MAP_NONCONTIG	0x1
	uint16_t	queue_mapping[16];
#define IXL_AQ_VSI_QUEUE_SHIFT		0x0
#define IXL_AQ_VSI_QUEUE_MASK		(0x7ff << IXL_AQ_VSI_QUEUE_SHIFT)
	uint16_t	tc_mapping[8];
#define IXL_AQ_VSI_TC_Q_OFFSET_SHIFT	0
#define IXL_AQ_VSI_TC_Q_OFFSET_MASK	(0x1ff << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT)
#define IXL_AQ_VSI_TC_Q_NUMBER_SHIFT	9
#define IXL_AQ_VSI_TC_Q_NUMBER_MASK	(0x7 << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT)

	/* queueing option section */
	uint8_t		queueing_opt_flags;
#define IXL_AQ_VSI_QUE_OPT_MCAST_UDP_EN	(1 << 2)
#define IXL_AQ_VSI_QUE_OPT_UCAST_UDP_EN	(1 << 3)
#define IXL_AQ_VSI_QUE_OPT_TCP_EN	(1 << 4)
#define IXL_AQ_VSI_QUE_OPT_FCOE_EN	(1 << 5)
#define IXL_AQ_VSI_QUE_OPT_RSS_LUT_PF	0
#define IXL_AQ_VSI_QUE_OPT_RSS_LUT_VSI	(1 << 6)
	uint8_t		_reserved5[3];

	/* scheduler section */
	uint8_t		up_enable_bits;
	uint8_t		_reserved6;

	/* outer up section */
	uint32_t	outer_up_table; /* same as ingress/egress tables */
	uint8_t		_reserved7[8];

	/* last 32 bytes are written by FW */
	uint16_t	qs_handle[8];
#define IXL_AQ_VSI_QS_HANDLE_INVALID	0xffff
	uint16_t	stat_counter_idx;
	uint16_t	sched_id;

	uint8_t		_reserved8[12];
} __packed __aligned(8);

CTASSERT(sizeof(struct ixl_aq_vsi_data) == 128);

struct ixl_aq_vsi_promisc_param {
	uint16_t	flags;
	uint16_t	valid_flags;
#define IXL_AQ_VSI_PROMISC_FLAG_UCAST	(1 << 0)
#define IXL_AQ_VSI_PROMISC_FLAG_MCAST	(1 << 1)
#define IXL_AQ_VSI_PROMISC_FLAG_BCAST	(1 << 2)
#define IXL_AQ_VSI_PROMISC_FLAG_DFLT	(1 << 3)
#define IXL_AQ_VSI_PROMISC_FLAG_VLAN	(1 << 4)
#define IXL_AQ_VSI_PROMISC_FLAG_RXONLY	(1 << 15)

	uint16_t	seid;
#define IXL_AQ_VSI_PROMISC_SEID_VALID	(1 << 15)
	uint16_t	vlan;
#define IXL_AQ_VSI_PROMISC_VLAN_VALID	(1 << 15)
	uint32_t	reserved[2];
} __packed __aligned(8);

struct ixl_aq_veb_param {
	uint16_t	uplink_seid;
	uint16_t	downlink_seid;
	uint16_t	veb_flags;
#define IXL_AQ_ADD_VEB_FLOATING		(1 << 0)
#define IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT	1
#define IXL_AQ_ADD_VEB_PORT_TYPE_MASK	(0x3 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT)
#define IXL_AQ_ADD_VEB_PORT_TYPE_DEFAULT \
					(0x2 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT)
#define IXL_AQ_ADD_VEB_PORT_TYPE_DATA	(0x4 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT)
#define IXL_AQ_ADD_VEB_ENABLE_L2_FILTER	(1 << 3) /* deprecated */
#define IXL_AQ_ADD_VEB_DISABLE_STATS	(1 << 4)
	uint8_t		enable_tcs;
	uint8_t		_reserved[9];
} __packed __aligned(16);

struct ixl_aq_veb_reply {
	uint16_t	_reserved1;
	uint16_t	_reserved2;
	uint16_t	_reserved3;
	uint16_t	switch_seid;
	uint16_t	veb_seid;
#define IXL_AQ_VEB_ERR_FLAG_NO_VEB	(1 << 0)
#define IXL_AQ_VEB_ERR_FLAG_NO_SCHED	(1 << 1)
#define IXL_AQ_VEB_ERR_FLAG_NO_COUNTER	(1 << 2)
#define IXL_AQ_VEB_ERR_FLAG_NO_ENTRY	(1 << 3)
	uint16_t	statistic_index;
	uint16_t	vebs_used;
	uint16_t	vebs_free;
} __packed __aligned(16);

/* GET PHY ABILITIES param[0] */
#define IXL_AQ_PHY_REPORT_QUAL		(1 << 0)
#define IXL_AQ_PHY_REPORT_INIT		(1 << 1)

struct ixl_aq_phy_reg_access {
	uint8_t		phy_iface;
#define IXL_AQ_PHY_IF_INTERNAL		0
#define IXL_AQ_PHY_IF_EXTERNAL		1
#define IXL_AQ_PHY_IF_MODULE		2
	uint8_t		dev_addr;
	uint16_t	recall;
#define IXL_AQ_PHY_QSFP_DEV_ADDR	0
#define IXL_AQ_PHY_QSFP_LAST		1
	uint32_t	reg;
	uint32_t	val;
	uint32_t	_reserved2;
} __packed __aligned(16);

/* RESTART_AN param[0] */
#define IXL_AQ_PHY_RESTART_AN		(1 << 1)
#define IXL_AQ_PHY_LINK_ENABLE		(1 << 2)

struct ixl_aq_link_status { /* this occupies the iaq_param space */
	uint16_t	command_flags; /* only field set on command */
#define IXL_AQ_LSE_MASK			0x3
#define IXL_AQ_LSE_NOP			0x0
#define IXL_AQ_LSE_DISABLE		0x2
#define IXL_AQ_LSE_ENABLE		0x3
#define IXL_AQ_LSE_IS_ENABLED		0x1 /* only set in response */
	uint8_t		phy_type;
	uint8_t		link_speed;
#define IXL_AQ_LINK_SPEED_1GB		(1 << 2)
#define IXL_AQ_LINK_SPEED_10GB		(1 << 3)
#define IXL_AQ_LINK_SPEED_40GB		(1 << 4)
#define IXL_AQ_LINK_SPEED_25GB		(1 << 6)
	uint8_t		link_info;
#define IXL_AQ_LINK_UP_FUNCTION		0x01
#define IXL_AQ_LINK_FAULT		0x02
#define IXL_AQ_LINK_FAULT_TX		0x04
#define IXL_AQ_LINK_FAULT_RX		0x08
#define IXL_AQ_LINK_FAULT_REMOTE	0x10
#define IXL_AQ_LINK_UP_PORT		0x20
#define IXL_AQ_MEDIA_AVAILABLE		0x40
#define IXL_AQ_SIGNAL_DETECT		0x80
	uint8_t		an_info;
#define IXL_AQ_AN_COMPLETED		0x01
#define IXL_AQ_LP_AN_ABILITY		0x02
#define IXL_AQ_PD_FAULT			0x04
#define IXL_AQ_FEC_EN			0x08
#define IXL_AQ_PHY_LOW_POWER		0x10
#define IXL_AQ_LINK_PAUSE_TX		0x20
#define IXL_AQ_LINK_PAUSE_RX		0x40
#define IXL_AQ_QUALIFIED_MODULE		0x80

	uint8_t		ext_info;
#define IXL_AQ_LINK_PHY_TEMP_ALARM	0x01
#define IXL_AQ_LINK_XCESSIVE_ERRORS	0x02
#define IXL_AQ_LINK_TX_SHIFT		0x02
#define IXL_AQ_LINK_TX_MASK		(0x03 << IXL_AQ_LINK_TX_SHIFT)
#define IXL_AQ_LINK_TX_ACTIVE		0x00
#define IXL_AQ_LINK_TX_DRAINED		0x01
#define IXL_AQ_LINK_TX_FLUSHED		0x03
#define IXL_AQ_LINK_FORCED_40G		0x10
/* 25G Error Codes */
#define IXL_AQ_25G_NO_ERR		0x00
#define IXL_AQ_25G_NOT_PRESENT		0x01
#define IXL_AQ_25G_NVM_CRC_ERR		0x02
#define IXL_AQ_25G_SBUS_UCODE_ERR	0x03
#define IXL_AQ_25G_SERDES_UCODE_ERR	0x04
#define IXL_AQ_25G_NIMB_UCODE_ERR	0x05
	uint8_t		loopback;
	uint16_t	max_frame_size;

	uint8_t		config;
#define IXL_AQ_CONFIG_FEC_KR_ENA	0x01
#define IXL_AQ_CONFIG_FEC_RS_ENA	0x02
#define IXL_AQ_CONFIG_CRC_ENA		0x04
#define IXL_AQ_CONFIG_PACING_MASK	0x78
	uint8_t		power_desc;
#define IXL_AQ_LINK_POWER_CLASS_1	0x00
#define IXL_AQ_LINK_POWER_CLASS_2	0x01
#define IXL_AQ_LINK_POWER_CLASS_3	0x02
#define IXL_AQ_LINK_POWER_CLASS_4	0x03
#define IXL_AQ_PWR_CLASS_MASK		0x03

	uint8_t		reserved[4];
} __packed __aligned(4);
/* event mask command flags for param[2] */
#define IXL_AQ_PHY_EV_MASK		0x3ff
#define IXL_AQ_PHY_EV_LINK_UPDOWN	(1 << 1)
#define IXL_AQ_PHY_EV_MEDIA_NA		(1 << 2)
#define IXL_AQ_PHY_EV_LINK_FAULT	(1 << 3)
#define IXL_AQ_PHY_EV_PHY_TEMP_ALARM	(1 << 4)
#define IXL_AQ_PHY_EV_EXCESS_ERRORS	(1 << 5)
#define IXL_AQ_PHY_EV_SIGNAL_DETECT	(1 << 6)
#define IXL_AQ_PHY_EV_AN_COMPLETED	(1 << 7)
#define IXL_AQ_PHY_EV_MODULE_QUAL_FAIL	(1 << 8)
#define IXL_AQ_PHY_EV_PORT_TX_SUSPENDED	(1 << 9)

struct ixl_aq_rss_lut { /* 722 */
#define IXL_AQ_SET_RSS_LUT_VSI_VALID	(1 << 15)
#define IXL_AQ_SET_RSS_LUT_VSI_ID_SHIFT	0
#define IXL_AQ_SET_RSS_LUT_VSI_ID_MASK	\
	(0x3FF << IXL_AQ_SET_RSS_LUT_VSI_ID_SHIFT)

	uint16_t	vsi_number;
#define IXL_AQ_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
#define IXL_AQ_SET_RSS_LUT_TABLE_TYPE_MASK \
	(0x1 << IXL_AQ_SET_RSS_LUT_TABLE_TYPE_SHIFT)
#define IXL_AQ_SET_RSS_LUT_TABLE_TYPE_VSI	0
#define IXL_AQ_SET_RSS_LUT_TABLE_TYPE_PF	1
	uint16_t	flags;
	uint8_t		_reserved[4];
	uint32_t	addr_hi;
	uint32_t	addr_lo;
} __packed __aligned(16);

struct ixl_aq_get_set_rss_key { /* 722 */
#define IXL_AQ_SET_RSS_KEY_VSI_VALID	(1 << 15)
#define IXL_AQ_SET_RSS_KEY_VSI_ID_SHIFT	0
#define IXL_AQ_SET_RSS_KEY_VSI_ID_MASK	\
	(0x3FF << IXL_AQ_SET_RSS_KEY_VSI_ID_SHIFT)
	uint16_t	vsi_number;
	uint8_t		_reserved[6];
	uint32_t	addr_hi;
	uint32_t	addr_lo;
} __packed __aligned(16);

/* aq response codes */
#define IXL_AQ_RC_OK			0  /* success */
#define IXL_AQ_RC_EPERM			1  /* Operation not permitted */
#define IXL_AQ_RC_ENOENT		2  /* No such element */
#define IXL_AQ_RC_ESRCH			3  /* Bad opcode */
#define IXL_AQ_RC_EINTR			4  /* operation interrupted */
#define IXL_AQ_RC_EIO			5  /* I/O error */
#define IXL_AQ_RC_ENXIO			6  /* No such resource */
#define IXL_AQ_RC_E2BIG			7  /* Arg too long */
#define IXL_AQ_RC_EAGAIN		8  /* Try again */
#define IXL_AQ_RC_ENOMEM		9  /* Out of memory */
#define IXL_AQ_RC_EACCES		10 /* Permission denied */
#define IXL_AQ_RC_EFAULT		11 /* Bad address */
#define IXL_AQ_RC_EBUSY			12 /* Device or resource busy */
#define IXL_AQ_RC_EEXIST		13 /* object already exists */
#define IXL_AQ_RC_EINVAL		14 /* invalid argument */
#define IXL_AQ_RC_ENOTTY		15 /* not a typewriter */
#define IXL_AQ_RC_ENOSPC		16 /* No space or alloc failure */
#define IXL_AQ_RC_ENOSYS		17 /* function not implemented */
#define IXL_AQ_RC_ERANGE		18 /* parameter out of range */
#define IXL_AQ_RC_EFLUSHED		19 /* cmd flushed due to prev error */
#define IXL_AQ_RC_BAD_ADDR		20 /* contains a bad pointer */
#define IXL_AQ_RC_EMODE			21 /* not allowed in current mode */
#define IXL_AQ_RC_EFBIG			22 /* file too large */

struct ixl_tx_desc {
	uint64_t		addr;
	uint64_t		cmd;
#define IXL_TX_DESC_DTYPE_SHIFT		0
#define IXL_TX_DESC_DTYPE_MASK		(0xfULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_DATA		(0x0ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_NOP		(0x1ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_CONTEXT	(0x1ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FCOE_CTX	(0x2ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FD		(0x8ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_DDP_CTX	(0x9ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FLEX_DATA	(0xbULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FLEX_CTX_1	(0xcULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FLEX_CTX_2	(0xdULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_DONE		(0xfULL << IXL_TX_DESC_DTYPE_SHIFT)

#define IXL_TX_DESC_CMD_SHIFT		4
#define IXL_TX_DESC_CMD_MASK		(0x3ffULL << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_EOP		(0x001 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_RS		(0x002 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_ICRC		(0x004 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IL2TAG1		(0x008 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_DUMMY		(0x010 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_MASK	(0x060 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_NONIP	(0x000 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_IPV6	(0x020 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_IPV4	(0x040 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_IPV4_CSUM	(0x060 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_FCOET		(0x080 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_MASK	(0x300 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_UNK	(0x000 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_TCP	(0x100 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_SCTP	(0x200 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_UDP	(0x300 << IXL_TX_DESC_CMD_SHIFT)

#define IXL_TX_DESC_MACLEN_SHIFT	16
#define IXL_TX_DESC_MACLEN_MASK		(0x7fULL << IXL_TX_DESC_MACLEN_SHIFT)
#define IXL_TX_DESC_IPLEN_SHIFT		23
#define IXL_TX_DESC_IPLEN_MASK		(0x7fULL << IXL_TX_DESC_IPLEN_SHIFT)
#define IXL_TX_DESC_L4LEN_SHIFT		30
#define IXL_TX_DESC_L4LEN_MASK		(0xfULL << IXL_TX_DESC_L4LEN_SHIFT)
#define IXL_TX_DESC_FCLEN_SHIFT		30
#define IXL_TX_DESC_FCLEN_MASK		(0xfULL << IXL_TX_DESC_FCLEN_SHIFT)

#define IXL_TX_DESC_BSIZE_SHIFT		34
#define IXL_TX_DESC_BSIZE_MAX		0x3fffULL
#define IXL_TX_DESC_BSIZE_MASK		\
	(IXL_TX_DESC_BSIZE_MAX << IXL_TX_DESC_BSIZE_SHIFT)

#define IXL_TX_CTX_DESC_CMD_TSO		0x10
#define IXL_TX_CTX_DESC_TLEN_SHIFT	30
#define IXL_TX_CTX_DESC_MSS_SHIFT	50

#define IXL_TX_DESC_L2TAG1_SHIFT	48
} __packed __aligned(16);

struct ixl_rx_rd_desc_16 {
	uint64_t		paddr; /* packet addr */
	uint64_t		haddr; /* header addr */
} __packed __aligned(16);

struct ixl_rx_rd_desc_32 {
	uint64_t		paddr; /* packet addr */
	uint64_t		haddr; /* header addr */
	uint64_t		_reserved1;
	uint64_t		_reserved2;
} __packed __aligned(16);

struct ixl_rx_wb_desc_16 {
	uint16_t		_reserved1;
	uint16_t		l2tag1;
	uint32_t		filter_status;
	uint64_t		qword1;
#define IXL_RX_DESC_DD			(1 << 0)
#define IXL_RX_DESC_EOP			(1 << 1)
#define IXL_RX_DESC_L2TAG1P		(1 << 2)
#define IXL_RX_DESC_L3L4P		(1 << 3)
#define IXL_RX_DESC_CRCP		(1 << 4)
#define IXL_RX_DESC_TSYNINDX_SHIFT	5	/* TSYNINDX */
#define IXL_RX_DESC_TSYNINDX_MASK	(7 << IXL_RX_DESC_TSYNINDX_SHIFT)
#define IXL_RX_DESC_UMB_SHIFT		9
#define IXL_RX_DESC_UMB_MASK		(0x3 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_UCAST		(0x0 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_MCAST		(0x1 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_BCAST		(0x2 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_MIRROR		(0x3 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_FLM			(1 << 11)
#define IXL_RX_DESC_FLTSTAT_SHIFT	12
#define IXL_RX_DESC_FLTSTAT_MASK	(0x3 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_FLTSTAT_NODATA	(0x0 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_FLTSTAT_FDFILTID	(0x1 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_FLTSTAT_RSS		(0x3 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_LPBK		(1 << 14)
#define IXL_RX_DESC_IPV6EXTADD		(1 << 15)
#define IXL_RX_DESC_INT_UDP_0		(1 << 18)

#define IXL_RX_DESC_RXE			(1 << 19)
#define IXL_RX_DESC_HBO			(1 << 21)
#define IXL_RX_DESC_IPE			(1 << 22)
#define IXL_RX_DESC_L4E			(1 << 23)
#define IXL_RX_DESC_EIPE		(1 << 24)
#define IXL_RX_DESC_OVERSIZE		(1 << 25)

#define IXL_RX_DESC_PTYPE_SHIFT		30
#define IXL_RX_DESC_PTYPE_MASK		(0xffULL << IXL_RX_DESC_PTYPE_SHIFT)

#define IXL_RX_DESC_PLEN_SHIFT		38
#define IXL_RX_DESC_PLEN_MASK		(0x3fffULL << IXL_RX_DESC_PLEN_SHIFT)
#define IXL_RX_DESC_HLEN_SHIFT		42
#define IXL_RX_DESC_HLEN_MASK		(0x7ffULL << IXL_RX_DESC_HLEN_SHIFT)
} __packed __aligned(16);

struct ixl_rx_wb_desc_32 {
	uint64_t		qword0;
	uint64_t		qword1;
	uint64_t		qword2;
	uint64_t		qword3;
} __packed __aligned(16);

#define IXL_TX_PKT_DESCS		32
#define IXL_TX_QUEUE_ALIGN		128
#define IXL_RX_QUEUE_ALIGN		128

#define IXL_HARDMTU			9712 /* 9726 - ETHER_HDR_LEN */
#define IXL_TSO_SIZE			((255 * 1024) - 1)
#define IXL_MAX_DMA_SEG_SIZE		((16 * 1024) - 1)

/*
 * Our TCP/IP stack is unable to handle packets greater than MAXMCLBYTES.
 * This interface is unable to handle packets greater than IXL_TSO_SIZE.
 */
CTASSERT(MAXMCLBYTES < IXL_TSO_SIZE);

#define IXL_PCIREG			PCI_MAPREG_START

#define IXL_ITR0			0x0
#define IXL_ITR1			0x1
#define IXL_ITR2			0x2
#define IXL_NOITR			0x2

#define IXL_AQ_NUM			256
#define IXL_AQ_MASK			(IXL_AQ_NUM - 1)
#define IXL_AQ_ALIGN			64 /* AQ rings must be 64-byte aligned */
#define IXL_AQ_BUFLEN			4096
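
/*
 * Illustrative sketch, not part of the driver: IXL_AQ_NUM is a power of
 * two, so admin queue producer/consumer indexes wrap with a cheap mask
 * instead of a modulo. A hypothetical index-advance helper (the
 * IXL_DEBUG_EXAMPLE guard is ours) would look like this:
 */
#ifdef IXL_DEBUG_EXAMPLE
static inline unsigned int
ixl_aq_next_example(unsigned int idx)
{
	return ((idx + 1) & IXL_AQ_MASK);	/* 255 wraps back to 0 */
}
#endif /* IXL_DEBUG_EXAMPLE */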

/* Packet Classifier Types for filters */
/* bits 0-28 are reserved for future use */
#define IXL_PCT_NONF_IPV4_UDP_UCAST	(1ULL << 29)	/* 722 */
#define IXL_PCT_NONF_IPV4_UDP_MCAST	(1ULL << 30)	/* 722 */
#define IXL_PCT_NONF_IPV4_UDP		(1ULL << 31)
#define IXL_PCT_NONF_IPV4_TCP_SYN_NOACK	(1ULL << 32)	/* 722 */
#define IXL_PCT_NONF_IPV4_TCP		(1ULL << 33)
#define IXL_PCT_NONF_IPV4_SCTP		(1ULL << 34)
#define IXL_PCT_NONF_IPV4_OTHER		(1ULL << 35)
#define IXL_PCT_FRAG_IPV4		(1ULL << 36)
/* bits 37-38 are reserved for future use */
#define IXL_PCT_NONF_IPV6_UDP_UCAST	(1ULL << 39)	/* 722 */
#define IXL_PCT_NONF_IPV6_UDP_MCAST	(1ULL << 40)	/* 722 */
#define IXL_PCT_NONF_IPV6_UDP		(1ULL << 41)
#define IXL_PCT_NONF_IPV6_TCP_SYN_NOACK	(1ULL << 42)	/* 722 */
#define IXL_PCT_NONF_IPV6_TCP		(1ULL << 43)
#define IXL_PCT_NONF_IPV6_SCTP		(1ULL << 44)
#define IXL_PCT_NONF_IPV6_OTHER		(1ULL << 45)
#define IXL_PCT_FRAG_IPV6		(1ULL << 46)
/* bit 47 is reserved for future use */
#define IXL_PCT_FCOE_OX			(1ULL << 48)
#define IXL_PCT_FCOE_RX			(1ULL << 49)
#define IXL_PCT_FCOE_OTHER		(1ULL << 50)
/* bits 51-62 are reserved for future use */
#define IXL_PCT_L2_PAYLOAD		(1ULL << 63)

#define IXL_RSS_HENA_BASE_DEFAULT		\
	IXL_PCT_NONF_IPV4_UDP |			\
	IXL_PCT_NONF_IPV4_TCP |			\
	IXL_PCT_NONF_IPV4_SCTP |		\
	IXL_PCT_NONF_IPV4_OTHER |		\
	IXL_PCT_FRAG_IPV4 |			\
	IXL_PCT_NONF_IPV6_UDP |			\
	IXL_PCT_NONF_IPV6_TCP |			\
	IXL_PCT_NONF_IPV6_SCTP |		\
	IXL_PCT_NONF_IPV6_OTHER |		\
	IXL_PCT_FRAG_IPV6 |			\
	IXL_PCT_L2_PAYLOAD

#define IXL_RSS_HENA_BASE_710		IXL_RSS_HENA_BASE_DEFAULT
#define IXL_RSS_HENA_BASE_722		IXL_RSS_HENA_BASE_DEFAULT | \
	IXL_PCT_NONF_IPV4_UDP_UCAST |		\
	IXL_PCT_NONF_IPV4_UDP_MCAST |		\
	IXL_PCT_NONF_IPV6_UDP_UCAST |		\
	IXL_PCT_NONF_IPV6_UDP_MCAST |		\
	IXL_PCT_NONF_IPV4_TCP_SYN_NOACK |	\
	IXL_PCT_NONF_IPV6_TCP_SYN_NOACK

#define IXL_HMC_ROUNDUP			512
#define IXL_HMC_PGSIZE			4096
#define IXL_HMC_DVASZ			sizeof(uint64_t)
#define IXL_HMC_PGS			(IXL_HMC_PGSIZE / IXL_HMC_DVASZ)
#define IXL_HMC_L2SZ			(IXL_HMC_PGSIZE * IXL_HMC_PGS)
#define IXL_HMC_PDVALID			1ULL

struct ixl_aq_regs {
	bus_size_t		atq_tail;
	bus_size_t		atq_head;
	bus_size_t		atq_len;
	bus_size_t		atq_bal;
	bus_size_t		atq_bah;

	bus_size_t		arq_tail;
	bus_size_t		arq_head;
	bus_size_t		arq_len;
	bus_size_t		arq_bal;
	bus_size_t		arq_bah;

	uint32_t		atq_len_enable;
	uint32_t		atq_tail_mask;
	uint32_t		atq_head_mask;

	uint32_t		arq_len_enable;
	uint32_t		arq_tail_mask;
	uint32_t		arq_head_mask;
};

struct ixl_phy_type {
	uint64_t	phy_type;
	uint64_t	ifm_type;
};

struct ixl_speed_type {
	uint8_t		dev_speed;
	uint64_t	net_speed;
};

struct ixl_aq_buf {
	SIMPLEQ_ENTRY(ixl_aq_buf)
				 aqb_entry;
	void			*aqb_data;
	bus_dmamap_t		 aqb_map;
};
SIMPLEQ_HEAD(ixl_aq_bufs, ixl_aq_buf);

struct ixl_dmamem {
	bus_dmamap_t		ixm_map;
	bus_dma_segment_t	ixm_seg;
	int			ixm_nsegs;
	size_t			ixm_size;
	caddr_t			ixm_kva;
};
#define IXL_DMA_MAP(_ixm)	((_ixm)->ixm_map)
#define IXL_DMA_DVA(_ixm)	((_ixm)->ixm_map->dm_segs[0].ds_addr)
#define IXL_DMA_KVA(_ixm)	((void *)(_ixm)->ixm_kva)
#define IXL_DMA_LEN(_ixm)	((_ixm)->ixm_size)

struct ixl_hmc_entry {
	uint64_t		 hmc_base;
	uint32_t		 hmc_count;
	uint32_t		 hmc_size;
};

#define IXL_HMC_LAN_TX		 0
#define IXL_HMC_LAN_RX		 1
#define IXL_HMC_FCOE_CTX	 2
#define IXL_HMC_FCOE_FILTER	 3
#define IXL_HMC_COUNT		 4

struct ixl_hmc_pack {
	uint16_t		offset;
	uint16_t		width;
	uint16_t		lsb;
};

/*
 * these hmc objects have weird sizes and alignments, so these are abstract
 * representations of them that are nice for c to populate.
 *
 * the packing code relies on little-endian values being stored in the fields,
 * no high bits in the fields being set, and the fields must be packed in the
 * same order as they are in the ctx structure.
 */
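
/*
 * Illustrative sketch of the packing rule described above; the driver's
 * real routine is ixl_hmc_pack() later in this file. Each table entry
 * copies `width` bits of the little-endian field at `offset` in the C
 * structure into the hardware context starting at bit `lsb`. A naive
 * bit-copy loop (assuming dst was zeroed by the caller and the fields
 * obey the rules above) could look like this:
 */
#ifdef IXL_DEBUG_EXAMPLE
static void
ixl_hmc_pack_example(uint8_t *dst, const uint8_t *src,
    const struct ixl_hmc_pack *packing, unsigned int npacking)
{
	unsigned int i, b;

	for (i = 0; i < npacking; i++) {
		const struct ixl_hmc_pack *p = &packing[i];

		for (b = 0; b < p->width; b++) {
			unsigned int dbit = p->lsb + b;

			/* bit b of the field -> bit dbit of the context */
			if (src[p->offset + b / 8] & (1 << (b % 8)))
				dst[dbit / 8] |= 1 << (dbit % 8);
		}
	}
}
#endif /* IXL_DEBUG_EXAMPLE */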

struct ixl_hmc_rxq {
	uint16_t		 head;
	uint8_t			 cpuid;
	uint64_t		 base;
#define IXL_HMC_RXQ_BASE_UNIT		128
	uint16_t		 qlen;
	uint16_t		 dbuff;
#define IXL_HMC_RXQ_DBUFF_UNIT		128
	uint8_t			 hbuff;
#define IXL_HMC_RXQ_HBUFF_UNIT		64
	uint8_t			 dtype;
#define IXL_HMC_RXQ_DTYPE_NOSPLIT	0x0
#define IXL_HMC_RXQ_DTYPE_HSPLIT	0x1
#define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS	0x2
	uint8_t			 dsize;
#define IXL_HMC_RXQ_DSIZE_16		0
#define IXL_HMC_RXQ_DSIZE_32		1
	uint8_t			 crcstrip;
	uint8_t			 fc_ena;
	uint8_t			 l2tsel;
#define IXL_HMC_RXQ_L2TSEL_2ND_TAG_TO_L2TAG1 \
					0
#define IXL_HMC_RXQ_L2TSEL_1ST_TAG_TO_L2TAG1 \
					1
	uint8_t			 hsplit_0;
	uint8_t			 hsplit_1;
	uint8_t			 showiv;
	uint16_t		 rxmax;
	uint8_t			 tphrdesc_ena;
	uint8_t			 tphwdesc_ena;
	uint8_t			 tphdata_ena;
	uint8_t			 tphhead_ena;
	uint8_t			 lrxqthresh;
	uint8_t			 prefena;
};

static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = {
	{ offsetof(struct ixl_hmc_rxq, head),		13,	0 },
	{ offsetof(struct ixl_hmc_rxq, cpuid),		8,	13 },
	{ offsetof(struct ixl_hmc_rxq, base),		57,	32 },
	{ offsetof(struct ixl_hmc_rxq, qlen),		13,	89 },
	{ offsetof(struct ixl_hmc_rxq, dbuff),		7,	102 },
	{ offsetof(struct ixl_hmc_rxq, hbuff),		5,	109 },
	{ offsetof(struct ixl_hmc_rxq, dtype),		2,	114 },
	{ offsetof(struct ixl_hmc_rxq, dsize),		1,	116 },
	{ offsetof(struct ixl_hmc_rxq, crcstrip),	1,	117 },
	{ offsetof(struct ixl_hmc_rxq, fc_ena),		1,	118 },
	{ offsetof(struct ixl_hmc_rxq, l2tsel),		1,	119 },
	{ offsetof(struct ixl_hmc_rxq, hsplit_0),	4,	120 },
	{ offsetof(struct ixl_hmc_rxq, hsplit_1),	2,	124 },
	{ offsetof(struct ixl_hmc_rxq, showiv),		1,	127 },
	{ offsetof(struct ixl_hmc_rxq, rxmax),		14,	174 },
	{ offsetof(struct ixl_hmc_rxq, tphrdesc_ena),	1,	193 },
	{ offsetof(struct ixl_hmc_rxq, tphwdesc_ena),	1,	194 },
	{ offsetof(struct ixl_hmc_rxq, tphdata_ena),	1,	195 },
	{ offsetof(struct ixl_hmc_rxq, tphhead_ena),	1,	196 },
	{ offsetof(struct ixl_hmc_rxq, lrxqthresh),	3,	198 },
	{ offsetof(struct ixl_hmc_rxq, prefena),	1,	201 },
};

#define IXL_HMC_RXQ_MINSIZE (201 + 1)

struct ixl_hmc_txq {
	uint16_t		head;
	uint8_t			new_context;
	uint64_t		base;
#define IXL_HMC_TXQ_BASE_UNIT		128
	uint8_t			fc_ena;
	uint8_t			timesync_ena;
	uint8_t			fd_ena;
	uint8_t			alt_vlan_ena;
	uint16_t		thead_wb;
	uint8_t			cpuid;
	uint8_t			head_wb_ena;
#define IXL_HMC_TXQ_DESC_WB		0
#define IXL_HMC_TXQ_HEAD_WB		1
	uint16_t		qlen;
	uint8_t			tphrdesc_ena;
	uint8_t			tphrpacket_ena;
	uint8_t			tphwdesc_ena;
	uint64_t		head_wb_addr;
	uint32_t		crc;
	uint16_t		rdylist;
	uint8_t			rdylist_act;
};

static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = {
	{ offsetof(struct ixl_hmc_txq, head),		13,	0 },
	{ offsetof(struct ixl_hmc_txq, new_context),	1,	30 },
	{ offsetof(struct ixl_hmc_txq, base),		57,	32 },
	{ offsetof(struct ixl_hmc_txq, fc_ena),		1,	89 },
	{ offsetof(struct ixl_hmc_txq, timesync_ena),	1,	90 },
	{ offsetof(struct ixl_hmc_txq, fd_ena),		1,	91 },
	{ offsetof(struct ixl_hmc_txq, alt_vlan_ena),	1,	92 },
	{ offsetof(struct ixl_hmc_txq, cpuid),		8,	96 },
/* line 1 */
	{ offsetof(struct ixl_hmc_txq, thead_wb),	13,	0 + 128 },
	{ offsetof(struct ixl_hmc_txq, head_wb_ena),	1,	32 + 128 },
	{ offsetof(struct ixl_hmc_txq, qlen),		13,	33 + 128 },
	{ offsetof(struct ixl_hmc_txq, tphrdesc_ena),	1,	46 + 128 },
	{ offsetof(struct ixl_hmc_txq, tphrpacket_ena),	1,	47 + 128 },
	{ offsetof(struct ixl_hmc_txq, tphwdesc_ena),	1,	48 + 128 },
	{ offsetof(struct ixl_hmc_txq, head_wb_addr),	64,	64 + 128 },
/* line 7 */
	{ offsetof(struct ixl_hmc_txq, crc),		32,	0 + (7*128) },
	{ offsetof(struct ixl_hmc_txq, rdylist),	10,	84 + (7*128) },
	{ offsetof(struct ixl_hmc_txq, rdylist_act),	1,	94 + (7*128) },
};

#define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1)

struct ixl_rss_key {
	uint32_t		 key[13];
};

struct ixl_rss_lut_128 {
	uint32_t		 entries[128 / sizeof(uint32_t)];
};

struct ixl_rss_lut_512 {
	uint32_t		 entries[512 / sizeof(uint32_t)];
};
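
/*
 * Illustrative sketch, not part of the driver: the RSS lookup table
 * spreads hash buckets over the active rx rings. Assuming the hardware
 * takes one queue index per LUT byte, a hypothetical helper filling a
 * 128-entry LUT round-robin (the IXL_DEBUG_EXAMPLE guard is ours) might
 * look like this:
 */
#ifdef IXL_DEBUG_EXAMPLE
static void
ixl_rss_lut_fill_example(struct ixl_rss_lut_128 *lut, unsigned int nqueues)
{
	uint8_t *entries = (uint8_t *)lut->entries;
	unsigned int i;

	for (i = 0; i < sizeof(lut->entries); i++)
		entries[i] = i % nqueues;	/* rx ring for hash bucket i */
}
#endif /* IXL_DEBUG_EXAMPLE */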

/* driver structures */

struct ixl_vector;
struct ixl_chip;

struct ixl_tx_map {
	struct mbuf		*txm_m;
	bus_dmamap_t		 txm_map;
	unsigned int		 txm_eop;
};

struct ixl_tx_ring {
	struct ixl_softc	*txr_sc;
	struct ixl_vector	*txr_vector;
	struct ifqueue		*txr_ifq;

	unsigned int		 txr_prod;
	unsigned int		 txr_cons;

	struct ixl_tx_map	*txr_maps;
	struct ixl_dmamem	 txr_mem;

	bus_size_t		 txr_tail;
	unsigned int		 txr_qid;
} __aligned(CACHE_LINE_SIZE);

struct ixl_rx_map {
	struct mbuf		*rxm_m;
	bus_dmamap_t		 rxm_map;
};

struct ixl_rx_ring {
	struct ixl_softc	*rxr_sc;
	struct ixl_vector	*rxr_vector;
	struct ifiqueue		*rxr_ifiq;

	struct if_rxring	 rxr_acct;
	struct timeout		 rxr_refill;

	unsigned int		 rxr_prod;
	unsigned int		 rxr_cons;

	struct ixl_rx_map	*rxr_maps;
	struct ixl_dmamem	 rxr_mem;

	struct mbuf		*rxr_m_head;
	struct mbuf		**rxr_m_tail;

	bus_size_t		 rxr_tail;
	unsigned int		 rxr_qid;
} __aligned(CACHE_LINE_SIZE);

struct ixl_atq {
	struct ixl_aq_desc	  iatq_desc;
	void			 *iatq_arg;
	void			(*iatq_fn)(struct ixl_softc *, void *);
};
SIMPLEQ_HEAD(ixl_atq_list, ixl_atq);

struct ixl_vector {
	struct ixl_softc	*iv_sc;
	struct ixl_rx_ring	*iv_rxr;
	struct ixl_tx_ring	*iv_txr;
	int			 iv_qid;
	void			*iv_ihc;
	char			 iv_name[16];
} __aligned(CACHE_LINE_SIZE);

struct ixl_softc {
	struct device		 sc_dev;
	const struct ixl_chip	*sc_chip;
	struct arpcom		 sc_ac;
	struct ifmedia		 sc_media;
	uint64_t		 sc_media_status;
	uint64_t		 sc_media_active;

	pci_chipset_tag_t	 sc_pc;
	pci_intr_handle_t	 sc_ih;
	void			*sc_ihc;
	pcitag_t		 sc_tag;

	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;

	uint16_t		 sc_api_major;
	uint16_t		 sc_api_minor;
	uint8_t			 sc_pf_id;
	uint16_t		 sc_uplink_seid;	/* le */
	uint16_t		 sc_downlink_seid;	/* le */
	uint16_t		 sc_veb_seid;		/* le */
	uint16_t		 sc_vsi_number;		/* le */
	uint16_t		 sc_seid;
	unsigned int		 sc_base_queue;
	unsigned int		 sc_port;

	struct ixl_dmamem	 sc_scratch;

	const struct ixl_aq_regs *
				 sc_aq_regs;

	struct ixl_dmamem	 sc_atq;
	unsigned int		 sc_atq_prod;
	unsigned int		 sc_atq_cons;

	struct mutex		 sc_atq_mtx;
	struct ixl_dmamem	 sc_arq;
	struct task		 sc_arq_task;
	struct ixl_aq_bufs	 sc_arq_idle;
	struct ixl_aq_bufs	 sc_arq_live;
	struct if_rxring	 sc_arq_ring;
	unsigned int		 sc_arq_prod;
	unsigned int		 sc_arq_cons;

	struct mutex		 sc_link_state_mtx;
	struct task		 sc_link_state_task;
	struct ixl_atq		 sc_link_state_atq;

	struct ixl_dmamem	 sc_hmc_sd;
	struct ixl_dmamem	 sc_hmc_pd;
	struct ixl_hmc_entry	 sc_hmc_entries[IXL_HMC_COUNT];

	unsigned int		 sc_tx_ring_ndescs;
	unsigned int		 sc_rx_ring_ndescs;
	unsigned int		 sc_nqueues;	/* 1 << sc_nqueues */

	struct intrmap		*sc_intrmap;
	struct ixl_vector	*sc_vectors;

	struct rwlock		 sc_cfg_lock;
	unsigned int		 sc_dead;

	uint8_t			 sc_enaddr[ETHER_ADDR_LEN];

#if NKSTAT > 0
	struct mutex		 sc_kstat_mtx;
	struct timeout		 sc_kstat_tmo;
	struct kstat		*sc_port_kstat;
	struct kstat		*sc_vsi_kstat;
#endif
};
#define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname)

#define delaymsec(_ms)	delay(1000 * (_ms))

static void	ixl_clear_hw(struct ixl_softc *);
static int	ixl_pf_reset(struct ixl_softc *);

static int	ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *,
		    bus_size_t, u_int);
static void	ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *);

static int	ixl_arq_fill(struct ixl_softc *);
static void	ixl_arq_unfill(struct ixl_softc *);

static int	ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *,
		    unsigned int);
static void	ixl_atq_set(struct ixl_atq *,
		    void (*)(struct ixl_softc *, void *), void *);
static void	ixl_atq_post(struct ixl_softc *, struct ixl_atq *);
static void	ixl_atq_done(struct ixl_softc *);
static void	ixl_atq_exec(struct ixl_softc *, struct ixl_atq *,
		    const char *);
static int	ixl_get_version(struct ixl_softc *);
static int	ixl_pxe_clear(struct ixl_softc *);
static int	ixl_lldp_shut(struct ixl_softc *);
static int	ixl_get_mac(struct ixl_softc *);
static int	ixl_get_switch_config(struct ixl_softc *);
static int	ixl_phy_mask_ints(struct ixl_softc *);
static int	ixl_get_phy_types(struct ixl_softc *, uint64_t *);
static int	ixl_restart_an(struct ixl_softc *);
static int	ixl_hmc(struct ixl_softc *);
static void	ixl_hmc_free(struct ixl_softc *);
static int	ixl_get_vsi(struct ixl_softc *);
static int	ixl_set_vsi(struct ixl_softc *);
static int	ixl_get_link_status(struct ixl_softc *);
static int	ixl_set_link_status(struct ixl_softc *,
		    const struct ixl_aq_desc *);
static int	ixl_add_macvlan(struct ixl_softc *, uint8_t *, uint16_t,
		    uint16_t);
static int	ixl_remove_macvlan(struct ixl_softc *, uint8_t *, uint16_t,
		    uint16_t);
static void	ixl_link_state_update(void *);
static void	ixl_arq(void *);
static void	ixl_hmc_pack(void *, const void *,
		    const struct ixl_hmc_pack *, unsigned int);

static int	ixl_get_sffpage(struct ixl_softc *, struct if_sffpage *);
static int	ixl_sff_get_byte(struct ixl_softc *, uint8_t, uint32_t,
		    uint8_t *);
static int	ixl_sff_set_byte(struct ixl_softc *, uint8_t, uint32_t,
		    uint8_t);

static int	ixl_match(struct device *, void *, void *);
static void	ixl_attach(struct device *, struct device *, void *);

static void	ixl_media_add(struct ixl_softc *, uint64_t);
static int	ixl_media_change(struct ifnet *);
static void	ixl_media_status(struct ifnet *, struct ifmediareq *);
static void	ixl_watchdog(struct ifnet *);
static int	ixl_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixl_start(struct ifqueue *);
static int	ixl_intr0(void *);
static int	ixl_intr_vector(void *);
static int	ixl_up(struct ixl_softc *);
static int	ixl_down(struct ixl_softc *);
static int	ixl_iff(struct ixl_softc *);

static struct ixl_tx_ring *
		ixl_txr_alloc(struct ixl_softc *, unsigned int);
static void	ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int);
static void	ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *);
static int	ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *);
static int	ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *);
static void	ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *);
static void	ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *);
static void	ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *);
static int	ixl_txeof(struct ixl_softc *, struct ixl_tx_ring *);

static struct ixl_rx_ring *
		ixl_rxr_alloc(struct ixl_softc *, unsigned int);
static void	ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *);
static int	ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *);
static int	ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *);
static void	ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *);
static void	ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *);
static void	ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *);
static int	ixl_rxeof(struct ixl_softc *, struct ixl_rx_ring *);
static void	ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *);
static void	ixl_rxrefill(void *);
static int	ixl_rxrinfo(struct ixl_softc *, struct if_rxrinfo *);
static void	ixl_rx_checksum(struct mbuf *, uint64_t);

#if NKSTAT > 0
static void	ixl_kstat_attach(struct ixl_softc *);
#endif

struct cfdriver ixl_cd = {
	NULL,
	"ixl",
	DV_IFNET,
};

const struct cfattach ixl_ca = {
	sizeof(struct ixl_softc),
	ixl_match,
	ixl_attach,
};

static const struct ixl_phy_type ixl_phy_type_map[] = {
	{ 1ULL << IXL_PHY_TYPE_SGMII,		IFM_1000_SGMII },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_KX,	IFM_1000_KX },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_KX4,	IFM_10G_KX4 },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_KR,	IFM_10G_KR },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_KR4,	IFM_40G_KR4 },
	{ 1ULL << IXL_PHY_TYPE_XAUI |
	  1ULL << IXL_PHY_TYPE_XFI,		IFM_10G_CX4 },
	{ 1ULL << IXL_PHY_TYPE_SFI,		IFM_10G_SFI },
	{ 1ULL << IXL_PHY_TYPE_XLAUI |
	  1ULL << IXL_PHY_TYPE_XLPPI,		IFM_40G_XLPPI },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU |
	  1ULL << IXL_PHY_TYPE_40GBASE_CR4,	IFM_40G_CR4 },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU |
	  1ULL << IXL_PHY_TYPE_10GBASE_CR1,	IFM_10G_CR1 },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_AOC,	IFM_10G_AOC },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_AOC,	IFM_40G_AOC },
	{ 1ULL << IXL_PHY_TYPE_100BASE_TX,	IFM_100_TX },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL |
	  1ULL << IXL_PHY_TYPE_1000BASE_T,	IFM_1000_T },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_T,	IFM_10G_T },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_SR,	IFM_10G_SR },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_LR,	IFM_10G_LR },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU,	IFM_10G_SFP_CU },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_SR4,	IFM_40G_SR4 },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_LR4,	IFM_40G_LR4 },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_SX,	IFM_1000_SX },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_LX,	IFM_1000_LX },
	{ 1ULL << IXL_PHY_TYPE_20GBASE_KR2,	IFM_20G_KR2 },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_KR,	IFM_25G_KR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_CR,	IFM_25G_CR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_SR,	IFM_25G_SR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_LR,	IFM_25G_LR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_AOC,	IFM_25G_AOC },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_ACC,	IFM_25G_CR },
};

static const struct ixl_speed_type ixl_speed_type_map[] = {
	{ IXL_AQ_LINK_SPEED_40GB,		IF_Gbps(40) },
	{ IXL_AQ_LINK_SPEED_25GB,		IF_Gbps(25) },
	{ IXL_AQ_LINK_SPEED_10GB,		IF_Gbps(10) },
	{ IXL_AQ_LINK_SPEED_1GB,		IF_Gbps(1) },
};

static const struct ixl_aq_regs ixl_pf_aq_regs = {
	.atq_tail	= I40E_PF_ATQT,
	.atq_tail_mask	= I40E_PF_ATQT_ATQT_MASK,
	.atq_head	= I40E_PF_ATQH,
	.atq_head_mask	= I40E_PF_ATQH_ATQH_MASK,
	.atq_len	= I40E_PF_ATQLEN,
	.atq_bal	= I40E_PF_ATQBAL,
	.atq_bah	= I40E_PF_ATQBAH,
	.atq_len_enable	= I40E_PF_ATQLEN_ATQENABLE_MASK,

	.arq_tail	= I40E_PF_ARQT,
	.arq_tail_mask	= I40E_PF_ARQT_ARQT_MASK,
	.arq_head	= I40E_PF_ARQH,
	.arq_head_mask	= I40E_PF_ARQH_ARQH_MASK,
	.arq_len	= I40E_PF_ARQLEN,
	.arq_bal	= I40E_PF_ARQBAL,
	.arq_bah	= I40E_PF_ARQBAH,
	.arq_len_enable	= I40E_PF_ARQLEN_ARQENABLE_MASK,
};

#define ixl_rd(_s, _r) \
	bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
#define ixl_wr(_s, _r, _v) \
	bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
#define ixl_barrier(_s, _r, _l, _o) \
	bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
#define ixl_intr_enable(_s) \
	ixl_wr((_s), I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_INTENA_MASK | \
	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | \
	    (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT))

#define ixl_nqueues(_sc)	(1 << (_sc)->sc_nqueues)

#ifdef __LP64__
#define ixl_dmamem_hi(_ixm)	(uint32_t)(IXL_DMA_DVA(_ixm) >> 32)
#else
#define ixl_dmamem_hi(_ixm)	0
#endif

#define ixl_dmamem_lo(_ixm)	(uint32_t)IXL_DMA_DVA(_ixm)

static inline void
ixl_aq_dva(struct ixl_aq_desc *iaq, bus_addr_t addr)
{
#ifdef __LP64__
	htolem32(&iaq->iaq_param[2], addr >> 32);
#else
	iaq->iaq_param[2] = htole32(0);
#endif
	htolem32(&iaq->iaq_param[3], addr);
}
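
/*
 * Illustrative sketch, not part of the driver: a command that carries an
 * external buffer sets IXL_AQ_BUF (plus IXL_AQ_LB for buffers larger
 * than I40E_AQ_LARGE_BUF), the buffer length, and the DMA address via
 * ixl_aq_dva(). A hypothetical setup for a buffer held in an ixl_dmamem
 * (the IXL_DEBUG_EXAMPLE guard is ours) could look like this:
 */
#ifdef IXL_DEBUG_EXAMPLE
static void
ixl_aq_buf_example(struct ixl_aq_desc *iaq, const struct ixl_dmamem *ixm)
{
	uint16_t flags = IXL_AQ_BUF;

	if (IXL_DMA_LEN(ixm) > I40E_AQ_LARGE_BUF)
		flags |= IXL_AQ_LB;

	htolem16(&iaq->iaq_flags, flags);
	htolem16(&iaq->iaq_datalen, IXL_DMA_LEN(ixm));
	ixl_aq_dva(iaq, IXL_DMA_DVA(ixm));
}
#endif /* IXL_DEBUG_EXAMPLE */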

#if _BYTE_ORDER == _BIG_ENDIAN
#define HTOLE16(_x)	(uint16_t)(((_x) & 0xff) << 8 | ((_x) & 0xff00) >> 8)
#else
#define HTOLE16(_x)	(_x)
#endif

static struct rwlock ixl_sff_lock = RWLOCK_INITIALIZER("ixlsff");

/* deal with differences between chips */

struct ixl_chip {
	uint64_t		  ic_rss_hena;
	uint32_t		(*ic_rd_ctl)(struct ixl_softc *, uint32_t);
	void			(*ic_wr_ctl)(struct ixl_softc *, uint32_t,
				      uint32_t);

	int			(*ic_set_rss_key)(struct ixl_softc *,
				      const struct ixl_rss_key *);
	int			(*ic_set_rss_lut)(struct ixl_softc *,
				      const struct ixl_rss_lut_128 *);
};

static inline uint64_t
ixl_rss_hena(struct ixl_softc *sc)
{
	return (sc->sc_chip->ic_rss_hena);
}

static inline uint32_t
ixl_rd_ctl(struct ixl_softc *sc, uint32_t r)
{
	return ((*sc->sc_chip->ic_rd_ctl)(sc, r));
}

static inline void
ixl_wr_ctl(struct ixl_softc *sc, uint32_t r, uint32_t v)
{
	(*sc->sc_chip->ic_wr_ctl)(sc, r, v);
}

static inline int
ixl_set_rss_key(struct ixl_softc *sc, const struct ixl_rss_key *rsskey)
{
	return ((*sc->sc_chip->ic_set_rss_key)(sc, rsskey));
}

static inline int
ixl_set_rss_lut(struct ixl_softc *sc, const struct ixl_rss_lut_128 *lut)
{
	return ((*sc->sc_chip->ic_set_rss_lut)(sc, lut));
}

/* 710 chip specifics */

static uint32_t		ixl_710_rd_ctl(struct ixl_softc *, uint32_t);
static void		ixl_710_wr_ctl(struct ixl_softc *, uint32_t, uint32_t);
static int		ixl_710_set_rss_key(struct ixl_softc *,
			    const struct ixl_rss_key *);
static int		ixl_710_set_rss_lut(struct ixl_softc *,
			    const struct ixl_rss_lut_128 *);

static const struct ixl_chip ixl_710 = {
	.ic_rss_hena =		IXL_RSS_HENA_BASE_710,
	.ic_rd_ctl =		ixl_710_rd_ctl,
	.ic_wr_ctl =		ixl_710_wr_ctl,
	.ic_set_rss_key =	ixl_710_set_rss_key,
	.ic_set_rss_lut =	ixl_710_set_rss_lut,
};

/* 722 chip specifics */

static uint32_t		ixl_722_rd_ctl(struct ixl_softc *, uint32_t);
static void		ixl_722_wr_ctl(struct ixl_softc *, uint32_t, uint32_t);
static int		ixl_722_set_rss_key(struct ixl_softc *,
			    const struct ixl_rss_key *);
static int		ixl_722_set_rss_lut(struct ixl_softc *,
			    const struct ixl_rss_lut_128 *);

static const struct ixl_chip ixl_722 = {
	.ic_rss_hena =		IXL_RSS_HENA_BASE_722,
	.ic_rd_ctl =		ixl_722_rd_ctl,
	.ic_wr_ctl =		ixl_722_wr_ctl,
	.ic_set_rss_key =	ixl_722_set_rss_key,
	.ic_set_rss_lut =	ixl_722_set_rss_lut,
};

1618 /*
1619  * 710 chips running an older firmware/API use the same ctl ops as
1620  * 722 chips (or, equivalently, 722 chips use the ctl ops that 710
1621  * chips had in early firmware/API versions).
1622  */
1623 
1624 static const struct ixl_chip ixl_710_decrepit = {
1625 	.ic_rss_hena =		IXL_RSS_HENA_BASE_710,
1626 	.ic_rd_ctl =		ixl_722_rd_ctl,
1627 	.ic_wr_ctl =		ixl_722_wr_ctl,
1628 	.ic_set_rss_key =	ixl_710_set_rss_key,
1629 	.ic_set_rss_lut =	ixl_710_set_rss_lut,
1630 };
1631 
1632 /* driver code */
1633 
1634 struct ixl_device {
1635 	const struct ixl_chip	*id_chip;
1636 	pci_vendor_id_t		 id_vid;
1637 	pci_product_id_t	 id_pid;
1638 };
1639 
1640 static const struct ixl_device ixl_devices[] = {
1641 	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_SFP },
1642 	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_SFP_2 },
1643 	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_40G_BP },
1644 	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_BP, },
1645 	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_1 },
1646 	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_2 },
1647 	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_QSFP },
1648 	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_BASET },
1649 	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
1650 	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
1651 	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_T4_10G },
1652 	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_BP },
1653 	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_SFP28, },
1654 	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_T, },
1655 	{ &ixl_722, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_KX },
1656 	{ &ixl_722, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_QSFP },
1657 	{ &ixl_722, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_SFP_1 },
1658 	{ &ixl_722, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_1G },
1659 	{ &ixl_722, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_T },
1660 	{ &ixl_722, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_SFP_2 },
1661 };
1662 
1663 static const struct ixl_device *
1664 ixl_device_lookup(struct pci_attach_args *pa)
1665 {
1666 	pci_vendor_id_t vid = PCI_VENDOR(pa->pa_id);
1667 	pci_product_id_t pid = PCI_PRODUCT(pa->pa_id);
1668 	const struct ixl_device *id;
1669 	unsigned int i;
1670 
1671 	for (i = 0; i < nitems(ixl_devices); i++) {
1672 		id = &ixl_devices[i];
1673 		if (id->id_vid == vid && id->id_pid == pid)
1674 			return (id);
1675 	}
1676 
1677 	return (NULL);
1678 }
1679 
1680 static int
1681 ixl_match(struct device *parent, void *match, void *aux)
1682 {
1683 	return (ixl_device_lookup(aux) != NULL);
1684 }
1685 
1686 void
1687 ixl_attach(struct device *parent, struct device *self, void *aux)
1688 {
1689 	struct ixl_softc *sc = (struct ixl_softc *)self;
1690 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1691 	struct pci_attach_args *pa = aux;
1692 	pcireg_t memtype;
1693 	uint32_t port, ari, func;
1694 	uint64_t phy_types = 0;
1695 	unsigned int nqueues, i;
1696 	int tries;
1697 
1698 	rw_init(&sc->sc_cfg_lock, "ixlcfg");
1699 
1700 	sc->sc_chip = ixl_device_lookup(pa)->id_chip;
1701 	sc->sc_pc = pa->pa_pc;
1702 	sc->sc_tag = pa->pa_tag;
1703 	sc->sc_dmat = pa->pa_dmat;
1704 	sc->sc_aq_regs = &ixl_pf_aq_regs;
1705 
1706 	sc->sc_nqueues = 0; /* 1 << 0 is 1 queue */
1707 	sc->sc_tx_ring_ndescs = 1024;
1708 	sc->sc_rx_ring_ndescs = 1024;
1709 
1710 	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, IXL_PCIREG);
1711 	if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0,
1712 	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
1713 		printf(": unable to map registers\n");
1714 		return;
1715 	}
1716 
1717 	sc->sc_base_queue = (ixl_rd(sc, I40E_PFLAN_QALLOC) &
1718 	    I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
1719 	    I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1720 
1721 	ixl_clear_hw(sc);
1722 	if (ixl_pf_reset(sc) == -1) {
1723 		/* error printed by ixl_pf_reset */
1724 		goto unmap;
1725 	}
1726 
1727 	port = ixl_rd(sc, I40E_PFGEN_PORTNUM);
1728 	port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK;
1729 	port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
1730 	sc->sc_port = port;
1731 	printf(": port %u", port);
1732 
1733 	ari = ixl_rd(sc, I40E_GLPCI_CAPSUP);
1734 	ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK;
1735 	ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
1736 
1737 	func = ixl_rd(sc, I40E_PF_FUNC_RID);
1738 	sc->sc_pf_id = func & (ari ? 0xff : 0x7);
1739 
1740 	/* initialise the adminq */
1741 
1742 	mtx_init(&sc->sc_atq_mtx, IPL_NET);
1743 
1744 	if (ixl_dmamem_alloc(sc, &sc->sc_atq,
1745 	    sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1746 		printf("\n" "%s: unable to allocate atq\n", DEVNAME(sc));
1747 		goto unmap;
1748 	}
1749 
1750 	SIMPLEQ_INIT(&sc->sc_arq_idle);
1751 	SIMPLEQ_INIT(&sc->sc_arq_live);
1752 	if_rxr_init(&sc->sc_arq_ring, 2, IXL_AQ_NUM - 1);
1753 	task_set(&sc->sc_arq_task, ixl_arq, sc);
1754 	sc->sc_arq_cons = 0;
1755 	sc->sc_arq_prod = 0;
1756 
1757 	if (ixl_dmamem_alloc(sc, &sc->sc_arq,
1758 	    sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1759 		printf("\n" "%s: unable to allocate arq\n", DEVNAME(sc));
1760 		goto free_atq;
1761 	}
1762 
1763 	if (!ixl_arq_fill(sc)) {
1764 		printf("\n" "%s: unable to fill arq descriptors\n",
1765 		    DEVNAME(sc));
1766 		goto free_arq;
1767 	}
1768 
1769 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1770 	    0, IXL_DMA_LEN(&sc->sc_atq),
1771 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1772 
1773 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1774 	    0, IXL_DMA_LEN(&sc->sc_arq),
1775 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1776 
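	/*
	 * program the adminq rings and check that the firmware answers
	 * a get-version command; after a PF reset the firmware may take
	 * a while to come up, so retry a few times.
	 */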
1777 	for (tries = 0; tries < 10; tries++) {
1778 		int rv;
1779 
1780 		sc->sc_atq_cons = 0;
1781 		sc->sc_atq_prod = 0;
1782 
1783 		ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1784 		ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1785 		ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1786 		ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1787 
1788 		ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
1789 
1790 		ixl_wr(sc, sc->sc_aq_regs->atq_bal,
1791 		    ixl_dmamem_lo(&sc->sc_atq));
1792 		ixl_wr(sc, sc->sc_aq_regs->atq_bah,
1793 		    ixl_dmamem_hi(&sc->sc_atq));
1794 		ixl_wr(sc, sc->sc_aq_regs->atq_len,
1795 		    sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM);
1796 
1797 		ixl_wr(sc, sc->sc_aq_regs->arq_bal,
1798 		    ixl_dmamem_lo(&sc->sc_arq));
1799 		ixl_wr(sc, sc->sc_aq_regs->arq_bah,
1800 		    ixl_dmamem_hi(&sc->sc_arq));
1801 		ixl_wr(sc, sc->sc_aq_regs->arq_len,
1802 		    sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM);
1803 
1804 		rv = ixl_get_version(sc);
1805 		if (rv == 0)
1806 			break;
1807 		if (rv != ETIMEDOUT) {
1808 			printf(", unable to get firmware version\n");
1809 			goto shutdown;
1810 		}
1811 
1812 		delaymsec(100);
1813 	}
1814 
1815 	ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
1816 
1817 	if (ixl_pxe_clear(sc) != 0) {
1818 		/* error printed by ixl_pxe_clear */
1819 		goto shutdown;
1820 	}
1821 
1822 	if (ixl_get_mac(sc) != 0) {
1823 		/* error printed by ixl_get_mac */
1824 		goto shutdown;
1825 	}
1826 
1827 	if (pci_intr_map_msix(pa, 0, &sc->sc_ih) == 0) {
1828 		int nmsix = pci_intr_msix_count(pa);
1829 		if (nmsix > 1) { /* vector 0 is used for the adminq */
1830 			nmsix--;
1831 
1832 			sc->sc_intrmap = intrmap_create(&sc->sc_dev,
1833 			    nmsix, IXL_MAX_VECTORS, INTRMAP_POWEROF2);
1834 			nqueues = intrmap_count(sc->sc_intrmap);
1835 			KASSERT(nqueues > 0);
1836 			KASSERT(powerof2(nqueues));
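			/* store log2(nqueues), see ixl_nqueues() */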
1837 			sc->sc_nqueues = fls(nqueues) - 1;
1838 		}
1839 	} else {
1840 		if (pci_intr_map_msi(pa, &sc->sc_ih) != 0 &&
1841 		    pci_intr_map(pa, &sc->sc_ih) != 0) {
1842 			printf(", unable to map interrupt\n");
1843 			goto shutdown;
1844 		}
1845 	}
1846 
1847 	nqueues = ixl_nqueues(sc);
1848 
1849 	printf(", %s, %d queue%s, address %s\n",
1850 	    pci_intr_string(sc->sc_pc, sc->sc_ih), nqueues,
1851 	    (nqueues > 1 ? "s" : ""),
1852 	    ether_sprintf(sc->sc_ac.ac_enaddr));
1853 
1854 	if (ixl_hmc(sc) != 0) {
1855 		/* error printed by ixl_hmc */
1856 		goto shutdown;
1857 	}
1858 
1859 	if (ixl_lldp_shut(sc) != 0) {
1860 		/* error printed by ixl_lldp_shut */
1861 		goto free_hmc;
1862 	}
1863 
1864 	if (ixl_phy_mask_ints(sc) != 0) {
1865 		/* error printed by ixl_phy_mask_ints */
1866 		goto free_hmc;
1867 	}
1868 
1869 	if (ixl_restart_an(sc) != 0) {
1870 		/* error printed by ixl_restart_an */
1871 		goto free_hmc;
1872 	}
1873 
1874 	if (ixl_get_switch_config(sc) != 0) {
1875 		/* error printed by ixl_get_switch_config */
1876 		goto free_hmc;
1877 	}
1878 
1879 	if (ixl_get_phy_types(sc, &phy_types) != 0) {
1880 		/* error printed by ixl_get_phy_abilities */
1881 		goto free_hmc;
1882 	}
1883 
1884 	if (ixl_get_link_status(sc) != 0) {
1885 		/* error printed by ixl_get_link_status */
1886 		goto free_hmc;
1887 	}
1888 
1889 	if (ixl_dmamem_alloc(sc, &sc->sc_scratch,
1890 	    sizeof(struct ixl_aq_vsi_data), 8) != 0) {
1891 		printf("%s: unable to allocate scratch buffer\n", DEVNAME(sc));
1892 		goto free_hmc;
1893 	}
1894 
1895 	if (ixl_get_vsi(sc) != 0) {
1896 		/* error printed by ixl_get_vsi */
1897 		goto free_hmc;
1898 	}
1899 
1900 	if (ixl_set_vsi(sc) != 0) {
1901 		/* error printed by ixl_set_vsi */
1902 		goto free_scratch;
1903 	}
1904 
1905 	sc->sc_ihc = pci_intr_establish(sc->sc_pc, sc->sc_ih,
1906 	    IPL_NET | IPL_MPSAFE, ixl_intr0, sc, DEVNAME(sc));
1907 	if (sc->sc_ihc == NULL) {
1908 		printf("%s: unable to establish interrupt handler\n",
1909 		    DEVNAME(sc));
1910 		goto free_scratch;
1911 	}
1912 
1913 	sc->sc_vectors = mallocarray(sizeof(*sc->sc_vectors), nqueues,
1914 	    M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
1915 	if (sc->sc_vectors == NULL) {
1916 		printf("%s: unable to allocate vectors\n", DEVNAME(sc));
1917 		goto free_scratch;
1918 	}
1919 
1920 	for (i = 0; i < nqueues; i++) {
1921 		struct ixl_vector *iv = &sc->sc_vectors[i];
1922 		iv->iv_sc = sc;
1923 		iv->iv_qid = i;
1924 		snprintf(iv->iv_name, sizeof(iv->iv_name),
1925 		    "%s:%u", DEVNAME(sc), i); /* truncated? */
1926 	}
1927 
1928 	if (sc->sc_intrmap) {
1929 		for (i = 0; i < nqueues; i++) {
1930 			struct ixl_vector *iv = &sc->sc_vectors[i];
1931 			pci_intr_handle_t ih;
1932 			int v = i + 1; /* 0 is used for adminq */
1933 
1934 			if (pci_intr_map_msix(pa, v, &ih)) {
1935 				printf("%s: unable to map msi-x vector %d\n",
1936 				    DEVNAME(sc), v);
1937 				goto free_vectors;
1938 			}
1939 
1940 			iv->iv_ihc = pci_intr_establish_cpu(sc->sc_pc, ih,
1941 			    IPL_NET | IPL_MPSAFE,
1942 			    intrmap_cpu(sc->sc_intrmap, i),
1943 			    ixl_intr_vector, iv, iv->iv_name);
1944 			if (iv->iv_ihc == NULL) {
1945 				printf("%s: unable to establish interrupt %d\n",
1946 				    DEVNAME(sc), v);
1947 				goto free_vectors;
1948 			}
1949 
1950 			ixl_wr(sc, I40E_PFINT_DYN_CTLN(i),
1951 			    I40E_PFINT_DYN_CTLN_INTENA_MASK |
1952 			    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1953 			    (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1954 		}
1955 	}
1956 
1957 	/* fixup the chip ops for older fw releases */
1958 	if (sc->sc_chip == &ixl_710 &&
1959 	    sc->sc_api_major == 1 && sc->sc_api_minor < 5)
1960 		sc->sc_chip = &ixl_710_decrepit;
1961 
1962 	ifp->if_softc = sc;
1963 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1964 	ifp->if_xflags = IFXF_MPSAFE;
1965 	ifp->if_ioctl = ixl_ioctl;
1966 	ifp->if_qstart = ixl_start;
1967 	ifp->if_watchdog = ixl_watchdog;
1968 	ifp->if_hardmtu = IXL_HARDMTU;
1969 	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
1970 	ifq_init_maxlen(&ifp->if_snd, sc->sc_tx_ring_ndescs);
1971 
1972 	ifp->if_capabilities = IFCAP_VLAN_HWTAGGING;
1973 	ifp->if_capabilities |= IFCAP_CSUM_IPv4 |
1974 	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4 |
1975 	    IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;
1976 	ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6;
1977 
1978 	ifmedia_init(&sc->sc_media, 0, ixl_media_change, ixl_media_status);
1979 
1980 	ixl_media_add(sc, phy_types);
1981 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1982 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
1983 
1984 	if_attach(ifp);
1985 	ether_ifattach(ifp);
1986 
1987 	if_attach_queues(ifp, nqueues);
1988 	if_attach_iqueues(ifp, nqueues);
1989 
1990 	mtx_init(&sc->sc_link_state_mtx, IPL_NET);
1991 	task_set(&sc->sc_link_state_task, ixl_link_state_update, sc);
1992 	ixl_wr(sc, I40E_PFINT_ICR0_ENA,
1993 	    I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK |
1994 	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK);
1995 	ixl_wr(sc, I40E_PFINT_STAT_CTL0,
1996 	    IXL_NOITR << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);
1997 
1998 	/* remove default mac filter and replace it so we can see vlans */
1999 	ixl_remove_macvlan(sc, sc->sc_ac.ac_enaddr, 0, 0);
2000 	ixl_remove_macvlan(sc, sc->sc_ac.ac_enaddr, 0,
2001 	    IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
2002 	ixl_add_macvlan(sc, sc->sc_ac.ac_enaddr, 0,
2003 	    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
2004 	ixl_add_macvlan(sc, etherbroadcastaddr, 0,
2005 	    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
2006 	memcpy(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
2007 
2008 	ixl_intr_enable(sc);
2009 
2010 #if NKSTAT > 0
2011 	ixl_kstat_attach(sc);
2012 #endif
2013 
2014 	return;
2015 free_vectors:
2016 	if (sc->sc_intrmap != NULL) {
2017 		for (i = 0; i < nqueues; i++) {
2018 			struct ixl_vector *iv = &sc->sc_vectors[i];
2019 			if (iv->iv_ihc == NULL)
2020 				continue;
2021 			pci_intr_disestablish(sc->sc_pc, iv->iv_ihc);
2022 		}
2023 	}
2024 	free(sc->sc_vectors, M_DEVBUF, nqueues * sizeof(*sc->sc_vectors));
2025 free_scratch:
2026 	ixl_dmamem_free(sc, &sc->sc_scratch);
2027 free_hmc:
2028 	ixl_hmc_free(sc);
2029 shutdown:
2030 	ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
2031 	ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
2032 	ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
2033 	ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
2034 
2035 	ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
2036 	ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
2037 	ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
2038 
2039 	ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
2040 	ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
2041 	ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
2042 
2043 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
2044 	    0, IXL_DMA_LEN(&sc->sc_arq),
2045 	    BUS_DMASYNC_POSTREAD);
2046 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
2047 	    0, IXL_DMA_LEN(&sc->sc_atq),
2048 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2049 
2050 	ixl_arq_unfill(sc);
2051 
2052 free_arq:
2053 	ixl_dmamem_free(sc, &sc->sc_arq);
2054 free_atq:
2055 	ixl_dmamem_free(sc, &sc->sc_atq);
2056 unmap:
2057 	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
2058 	sc->sc_mems = 0;
2059 
2060 	if (sc->sc_intrmap != NULL)
2061 		intrmap_destroy(sc->sc_intrmap);
2062 }
2063 
2064 static void
2065 ixl_media_add(struct ixl_softc *sc, uint64_t phy_types)
2066 {
2067 	struct ifmedia *ifm = &sc->sc_media;
2068 	const struct ixl_phy_type *itype;
2069 	unsigned int i;
2070 
2071 	for (i = 0; i < nitems(ixl_phy_type_map); i++) {
2072 		itype = &ixl_phy_type_map[i];
2073 
2074 		if (ISSET(phy_types, itype->phy_type))
2075 			ifmedia_add(ifm, IFM_ETHER | itype->ifm_type, 0, NULL);
2076 	}
2077 }
2078 
2079 static int
2080 ixl_media_change(struct ifnet *ifp)
2081 {
2082 	/* ignore? */
2083 	return (EOPNOTSUPP);
2084 }
2085 
2086 static void
2087 ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifm)
2088 {
2089 	struct ixl_softc *sc = ifp->if_softc;
2090 
2091 	KERNEL_ASSERT_LOCKED();
2092 
2093 	mtx_enter(&sc->sc_link_state_mtx);
2094 	ifm->ifm_status = sc->sc_media_status;
2095 	ifm->ifm_active = sc->sc_media_active;
2096 	mtx_leave(&sc->sc_link_state_mtx);
2097 }
2098 
2099 static void
2100 ixl_watchdog(struct ifnet *ifp)
2101 {
2102 
2103 }
2104 
2105 int
2106 ixl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2107 {
2108 	struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc;
2109 	struct ifreq *ifr = (struct ifreq *)data;
2110 	uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
2111 	int aqerror, error = 0;
2112 
2113 	switch (cmd) {
2114 	case SIOCSIFADDR:
2115 		ifp->if_flags |= IFF_UP;
2116 		/* FALLTHROUGH */
2117 
2118 	case SIOCSIFFLAGS:
2119 		if (ISSET(ifp->if_flags, IFF_UP)) {
2120 			if (ISSET(ifp->if_flags, IFF_RUNNING))
2121 				error = ENETRESET;
2122 			else
2123 				error = ixl_up(sc);
2124 		} else {
2125 			if (ISSET(ifp->if_flags, IFF_RUNNING))
2126 				error = ixl_down(sc);
2127 		}
2128 		break;
2129 
2130 	case SIOCGIFMEDIA:
2131 	case SIOCSIFMEDIA:
2132 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
2133 		break;
2134 
2135 	case SIOCGIFRXR:
2136 		error = ixl_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
2137 		break;
2138 
2139 	case SIOCADDMULTI:
2140 		if (ether_addmulti(ifr, &sc->sc_ac) == ENETRESET) {
2141 			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
2142 			if (error != 0)
2143 				return (error);
2144 
2145 			aqerror = ixl_add_macvlan(sc, addrlo, 0,
2146 			    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
2147 			if (aqerror == IXL_AQ_RC_ENOSPC) {
2148 				ether_delmulti(ifr, &sc->sc_ac);
2149 				error = ENOSPC;
2150 			}
2151 
2152 			if (sc->sc_ac.ac_multirangecnt > 0) {
2153 				SET(ifp->if_flags, IFF_ALLMULTI);
2154 				error = ENETRESET;
2155 			}
2156 		}
2157 		break;
2158 
2159 	case SIOCDELMULTI:
2160 		if (ether_delmulti(ifr, &sc->sc_ac) == ENETRESET) {
2161 			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
2162 			if (error != 0)
2163 				return (error);
2164 
2165 			ixl_remove_macvlan(sc, addrlo, 0,
2166 			    IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
2167 
2168 			if (ISSET(ifp->if_flags, IFF_ALLMULTI) &&
2169 			    sc->sc_ac.ac_multirangecnt == 0) {
2170 				CLR(ifp->if_flags, IFF_ALLMULTI);
2171 				error = ENETRESET;
2172 			}
2173 		}
2174 		break;
2175 
2176 	case SIOCGIFSFFPAGE:
2177 		error = rw_enter(&ixl_sff_lock, RW_WRITE|RW_INTR);
2178 		if (error != 0)
2179 			break;
2180 
2181 		error = ixl_get_sffpage(sc, (struct if_sffpage *)data);
2182 		rw_exit(&ixl_sff_lock);
2183 		break;
2184 
2185 	default:
2186 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
2187 		break;
2188 	}
2189 
2190 	if (error == ENETRESET)
2191 		error = ixl_iff(sc);
2192 
2193 	return (error);
2194 }
2195 
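/*
 * each HMC object type occupies a contiguous slice of the backing
 * memory: entry i of a type lives at base + i * size within it.
 */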
2196 static inline void *
2197 ixl_hmc_kva(struct ixl_softc *sc, unsigned int type, unsigned int i)
2198 {
2199 	uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
2200 	struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
2201 
2202 	if (i >= e->hmc_count)
2203 		return (NULL);
2204 
2205 	kva += e->hmc_base;
2206 	kva += i * e->hmc_size;
2207 
2208 	return (kva);
2209 }
2210 
2211 static inline size_t
2212 ixl_hmc_len(struct ixl_softc *sc, unsigned int type)
2213 {
2214 	struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
2215 
2216 	return (e->hmc_size);
2217 }
2218 
2219 static int
2220 ixl_configure_rss(struct ixl_softc *sc)
2221 {
2222 	struct ixl_rss_key rsskey;
2223 	struct ixl_rss_lut_128 lut;
2224 	uint8_t *lute = (uint8_t *)&lut;
2225 	uint64_t rss_hena;
2226 	unsigned int i, nqueues;
2227 	int error;
2228 
2229 #if 0
2230 	/* if we want to do a 512 entry LUT, do this. */
2231 	uint32_t v = ixl_rd_ctl(sc, I40E_PFQF_CTL_0);
2232 	SET(v, I40E_PFQF_CTL_0_HASHLUTSIZE_MASK);
2233 	ixl_wr_ctl(sc, I40E_PFQF_CTL_0, v);
2234 #endif
2235 
2236 	stoeplitz_to_key(&rsskey, sizeof(rsskey));
2237 
2238 	nqueues = ixl_nqueues(sc);
2239 	for (i = 0; i < sizeof(lut); i++) {
2240 		/*
2241 		 * ixl always has a power-of-2 number of rings, so
2242 		 * filling the table via mod spreads entries evenly.
2243 		 */
2244 		lute[i] = i % nqueues;
2245 	}
2246 
2247 	error = ixl_set_rss_key(sc, &rsskey);
2248 	if (error != 0)
2249 		return (error);
2250 
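	/*
	 * the hash-enable bits are a 64-bit value split across two
	 * 32-bit registers; keep whatever is already enabled and OR
	 * in this chip's baseline set.
	 */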
2251 	rss_hena = (uint64_t)ixl_rd_ctl(sc, I40E_PFQF_HENA(0));
2252 	rss_hena |= (uint64_t)ixl_rd_ctl(sc, I40E_PFQF_HENA(1)) << 32;
2253 	rss_hena |= ixl_rss_hena(sc);
2254 	ixl_wr_ctl(sc, I40E_PFQF_HENA(0), rss_hena);
2255 	ixl_wr_ctl(sc, I40E_PFQF_HENA(1), rss_hena >> 32);
2256 
2257 	error = ixl_set_rss_lut(sc, &lut);
2258 	if (error != 0)
2259 		return (error);
2260 
2261 	/* nothing to clean up :( */
2262 
2263 	return (0);
2264 }
2265 
2266 static int
2267 ixl_up(struct ixl_softc *sc)
2268 {
2269 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2270 	struct ifqueue *ifq;
2271 	struct ifiqueue *ifiq;
2272 	struct ixl_vector *iv;
2273 	struct ixl_rx_ring *rxr;
2274 	struct ixl_tx_ring *txr;
2275 	unsigned int nqueues, i;
2276 	uint32_t reg;
2277 	int rv = ENOMEM;
2278 
2279 	nqueues = ixl_nqueues(sc);
2280 
2281 	rw_enter_write(&sc->sc_cfg_lock);
2282 	if (sc->sc_dead) {
2283 		rw_exit_write(&sc->sc_cfg_lock);
2284 		return (ENXIO);
2285 	}
2286 
2287 	/* allocation is the only thing that can fail, so do it up front */
2288 	for (i = 0; i < nqueues; i++) {
2289 		rxr = ixl_rxr_alloc(sc, i);
2290 		if (rxr == NULL)
2291 			goto free;
2292 
2293 		txr = ixl_txr_alloc(sc, i);
2294 		if (txr == NULL) {
2295 			ixl_rxr_free(sc, rxr);
2296 			goto free;
2297 		}
2298 
2299 		/* wire everything together */
2300 		iv = &sc->sc_vectors[i];
2301 		iv->iv_rxr = rxr;
2302 		iv->iv_txr = txr;
2303 
2304 		ifq = ifp->if_ifqs[i];
2305 		ifq->ifq_softc = txr;
2306 		txr->txr_ifq = ifq;
2307 
2308 		ifiq = ifp->if_iqs[i];
2309 		ifiq->ifiq_softc = rxr;
2310 		rxr->rxr_ifiq = ifiq;
2311 	}
2312 
2313 	/* XXX wait 50ms from completion of last RX queue disable */
2314 
2315 	for (i = 0; i < nqueues; i++) {
2316 		iv = &sc->sc_vectors[i];
2317 		rxr = iv->iv_rxr;
2318 		txr = iv->iv_txr;
2319 
2320 		ixl_txr_qdis(sc, txr, 1);
2321 
2322 		ixl_rxr_config(sc, rxr);
2323 		ixl_txr_config(sc, txr);
2324 
2325 		ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE |
2326 		    (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT));
2327 
2328 		ixl_wr(sc, rxr->rxr_tail, 0);
2329 		ixl_rxfill(sc, rxr);
2330 
2331 		reg = ixl_rd(sc, I40E_QRX_ENA(i));
2332 		SET(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2333 		ixl_wr(sc, I40E_QRX_ENA(i), reg);
2334 
2335 		reg = ixl_rd(sc, I40E_QTX_ENA(i));
2336 		SET(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2337 		ixl_wr(sc, I40E_QTX_ENA(i), reg);
2338 	}
2339 
2340 	for (i = 0; i < nqueues; i++) {
2341 		iv = &sc->sc_vectors[i];
2342 		rxr = iv->iv_rxr;
2343 		txr = iv->iv_txr;
2344 
2345 		if (ixl_rxr_enabled(sc, rxr) != 0)
2346 			goto down;
2347 
2348 		if (ixl_txr_enabled(sc, txr) != 0)
2349 			goto down;
2350 	}
2351 
2352 	ixl_configure_rss(sc);
2353 
2354 	SET(ifp->if_flags, IFF_RUNNING);
2355 
2356 	if (sc->sc_intrmap == NULL) {
2357 		ixl_wr(sc, I40E_PFINT_LNKLST0,
2358 		    (I40E_INTR_NOTX_QUEUE <<
2359 		     I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
2360 		    (I40E_QUEUE_TYPE_RX <<
2361 		     I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2362 
2363 		ixl_wr(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE),
2364 		    (I40E_INTR_NOTX_INTR << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2365 		    (I40E_ITR_INDEX_RX << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2366 		    (I40E_INTR_NOTX_RX_QUEUE <<
2367 		     I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) |
2368 		    (I40E_INTR_NOTX_QUEUE << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2369 		    (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
2370 		    I40E_QINT_RQCTL_CAUSE_ENA_MASK);
2371 
2372 		ixl_wr(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE),
2373 		    (I40E_INTR_NOTX_INTR << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2374 		    (I40E_ITR_INDEX_TX << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2375 		    (I40E_INTR_NOTX_TX_QUEUE <<
2376 		     I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) |
2377 		    (I40E_QUEUE_TYPE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2378 		    (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |
2379 		    I40E_QINT_TQCTL_CAUSE_ENA_MASK);
2380 	} else {
2381 		/* vector 0 has no queues */
2382 		ixl_wr(sc, I40E_PFINT_LNKLST0,
2383 		    I40E_QUEUE_TYPE_EOL <<
2384 		    I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT);
2385 
2386 		/* queue n is mapped to vector n+1 */
2387 		for (i = 0; i < nqueues; i++) {
2388 			/* LNKLSTN(i) configures vector i+1 */
2389 			ixl_wr(sc, I40E_PFINT_LNKLSTN(i),
2390 			    (i << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
2391 			    (I40E_QUEUE_TYPE_RX <<
2392 			     I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2393 			ixl_wr(sc, I40E_QINT_RQCTL(i),
2394 			    ((i+1) << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2395 			    (I40E_ITR_INDEX_RX <<
2396 			     I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2397 			    (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2398 			    (I40E_QUEUE_TYPE_TX <<
2399 			     I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
2400 			    I40E_QINT_RQCTL_CAUSE_ENA_MASK);
2401 			ixl_wr(sc, I40E_QINT_TQCTL(i),
2402 			    ((i+1) << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2403 			    (I40E_ITR_INDEX_TX <<
2404 			     I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2405 			    (I40E_QUEUE_TYPE_EOL <<
2406 			     I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2407 			    (I40E_QUEUE_TYPE_RX <<
2408 			     I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |
2409 			    I40E_QINT_TQCTL_CAUSE_ENA_MASK);
2410 
2411 			ixl_wr(sc, I40E_PFINT_ITRN(0, i), 0x7a);
2412 			ixl_wr(sc, I40E_PFINT_ITRN(1, i), 0x7a);
2413 			ixl_wr(sc, I40E_PFINT_ITRN(2, i), 0);
2414 		}
2415 	}
2416 
2417 	ixl_wr(sc, I40E_PFINT_ITR0(0), 0x7a);
2418 	ixl_wr(sc, I40E_PFINT_ITR0(1), 0x7a);
2419 	ixl_wr(sc, I40E_PFINT_ITR0(2), 0);
2420 
2421 	rw_exit_write(&sc->sc_cfg_lock);
2422 
2423 	return (ENETRESET);
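	/*
	 * returning ENETRESET makes ixl_ioctl() call ixl_iff() to
	 * finish programming the rx filter state.
	 */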
2424 
2425 free:
2426 	for (i = 0; i < nqueues; i++) {
2427 		iv = &sc->sc_vectors[i];
2428 		rxr = iv->iv_rxr;
2429 		txr = iv->iv_txr;
2430 
2431 		if (rxr == NULL) {
2432 			/*
2433 			 * tx and rx get set at the same time, so if one
2434 			 * is NULL, the other is too.
2435 			 */
2436 			continue;
2437 		}
2438 
2439 		ixl_txr_free(sc, txr);
2440 		ixl_rxr_free(sc, rxr);
2441 	}
2442 	rw_exit_write(&sc->sc_cfg_lock);
2443 	return (rv);
2444 down:
2445 	rw_exit_write(&sc->sc_cfg_lock);
2446 	ixl_down(sc);
2447 	return (ETIMEDOUT);
2448 }
2449 
2450 static int
2451 ixl_iff(struct ixl_softc *sc)
2452 {
2453 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2454 	struct ixl_atq iatq;
2455 	struct ixl_aq_desc *iaq;
2456 	struct ixl_aq_vsi_promisc_param *param;
2457 
2458 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2459 		return (0);
2460 
2461 	memset(&iatq, 0, sizeof(iatq));
2462 
2463 	iaq = &iatq.iatq_desc;
2464 	iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC);
2465 
2466 	param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param;
2467 	param->flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2468 	    IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2469 	if (ISSET(ifp->if_flags, IFF_PROMISC)) {
2470 		param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2471 		    IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2472 	} else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
2473 		param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2474 	}
2475 	param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2476 	    IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2477 	    IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2478 	param->seid = sc->sc_seid;
2479 
2480 	ixl_atq_exec(sc, &iatq, "ixliff");
2481 
2482 	if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK))
2483 		return (EIO);
2484 
2485 	if (memcmp(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN) != 0) {
2486 		ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
2487 		    IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
2488 		ixl_add_macvlan(sc, sc->sc_ac.ac_enaddr, 0,
2489 		    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
2490 		memcpy(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
2491 	}
2492 	return (0);
2493 }
2494 
2495 static int
2496 ixl_down(struct ixl_softc *sc)
2497 {
2498 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2499 	struct ixl_vector *iv;
2500 	struct ixl_rx_ring *rxr;
2501 	struct ixl_tx_ring *txr;
2502 	unsigned int nqueues, i;
2503 	uint32_t reg;
2504 	int error = 0;
2505 
2506 	nqueues = ixl_nqueues(sc);
2507 
2508 	rw_enter_write(&sc->sc_cfg_lock);
2509 
2510 	CLR(ifp->if_flags, IFF_RUNNING);
2511 
2512 	NET_UNLOCK();
2513 
2514 	/* mask interrupts */
2515 	reg = ixl_rd(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE));
2516 	CLR(reg, I40E_QINT_RQCTL_CAUSE_ENA_MASK);
2517 	ixl_wr(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE), reg);
2518 
2519 	reg = ixl_rd(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE));
2520 	CLR(reg, I40E_QINT_TQCTL_CAUSE_ENA_MASK);
2521 	ixl_wr(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE), reg);
2522 
2523 	ixl_wr(sc, I40E_PFINT_LNKLST0, I40E_QUEUE_TYPE_EOL);
2524 
2525 	/* make sure no hw-generated work is still in flight */
2526 	intr_barrier(sc->sc_ihc);
2527 	if (sc->sc_intrmap != NULL) {
2528 		for (i = 0; i < nqueues; i++) {
2529 			iv = &sc->sc_vectors[i];
2530 			rxr = iv->iv_rxr;
2531 			txr = iv->iv_txr;
2532 
2533 			ixl_txr_qdis(sc, txr, 0);
2534 
2535 			ifq_barrier(txr->txr_ifq);
2536 
2537 			timeout_del_barrier(&rxr->rxr_refill);
2538 
2539 			intr_barrier(iv->iv_ihc);
2540 		}
2541 	}
2542 
2543 	/* XXX wait at least 400 usec for all tx queues in one go */
2544 	delay(500);
2545 
2546 	for (i = 0; i < nqueues; i++) {
2547 		reg = ixl_rd(sc, I40E_QTX_ENA(i));
2548 		CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2549 		ixl_wr(sc, I40E_QTX_ENA(i), reg);
2550 
2551 		reg = ixl_rd(sc, I40E_QRX_ENA(i));
2552 		CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2553 		ixl_wr(sc, I40E_QRX_ENA(i), reg);
2554 	}
2555 
2556 	for (i = 0; i < nqueues; i++) {
2557 		iv = &sc->sc_vectors[i];
2558 		rxr = iv->iv_rxr;
2559 		txr = iv->iv_txr;
2560 
2561 		if (ixl_txr_disabled(sc, txr) != 0)
2562 			goto die;
2563 
2564 		if (ixl_rxr_disabled(sc, rxr) != 0)
2565 			goto die;
2566 	}
2567 
2568 	for (i = 0; i < nqueues; i++) {
2569 		iv = &sc->sc_vectors[i];
2570 		rxr = iv->iv_rxr;
2571 		txr = iv->iv_txr;
2572 
2573 		ixl_txr_unconfig(sc, txr);
2574 		ixl_rxr_unconfig(sc, rxr);
2575 
2576 		ixl_txr_clean(sc, txr);
2577 		ixl_rxr_clean(sc, rxr);
2578 
2579 		ixl_txr_free(sc, txr);
2580 		ixl_rxr_free(sc, rxr);
2581 
2582 		ifp->if_iqs[i]->ifiq_softc = NULL;
2583 		ifp->if_ifqs[i]->ifq_softc = NULL;
2584 	}
2585 
2586 out:
2587 	rw_exit_write(&sc->sc_cfg_lock);
2588 	NET_LOCK();
2589 	return (error);
2590 die:
2591 	sc->sc_dead = 1;
2592 	log(LOG_CRIT, "%s: failed to shut down rings", DEVNAME(sc));
2593 	error = ETIMEDOUT;
2594 	goto out;
2595 }
2596 
2597 static struct ixl_tx_ring *
2598 ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid)
2599 {
2600 	struct ixl_tx_ring *txr;
2601 	struct ixl_tx_map *maps, *txm;
2602 	unsigned int i;
2603 
2604 	txr = malloc(sizeof(*txr), M_DEVBUF, M_WAITOK|M_CANFAIL);
2605 	if (txr == NULL)
2606 		return (NULL);
2607 
2608 	maps = mallocarray(sizeof(*maps),
2609 	    sc->sc_tx_ring_ndescs, M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
2610 	if (maps == NULL)
2611 		goto free;
2612 
2613 	if (ixl_dmamem_alloc(sc, &txr->txr_mem,
2614 	    sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
2615 	    IXL_TX_QUEUE_ALIGN) != 0)
2616 		goto freemap;
2617 
2618 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2619 		txm = &maps[i];
2620 
2621 		if (bus_dmamap_create(sc->sc_dmat,
2622 		    MAXMCLBYTES, IXL_TX_PKT_DESCS, IXL_MAX_DMA_SEG_SIZE, 0,
2623 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2624 		    &txm->txm_map) != 0)
2625 			goto uncreate;
2626 
2627 		txm->txm_eop = -1;
2628 		txm->txm_m = NULL;
2629 	}
2630 
2631 	txr->txr_cons = txr->txr_prod = 0;
2632 	txr->txr_maps = maps;
2633 
2634 	txr->txr_tail = I40E_QTX_TAIL(qid);
2635 	txr->txr_qid = qid;
2636 
2637 	return (txr);
2638 
2639 uncreate:
2640 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2641 		txm = &maps[i];
2642 
2643 		if (txm->txm_map == NULL)
2644 			continue;
2645 
2646 		bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2647 	}
2648 
2649 	ixl_dmamem_free(sc, &txr->txr_mem);
2650 freemap:
2651 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_tx_ring_ndescs);
2652 free:
2653 	free(txr, M_DEVBUF, sizeof(*txr));
2654 	return (NULL);
2655 }
2656 
2657 static void
2658 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable)
2659 {
2660 	unsigned int qid;
2661 	bus_size_t reg;
2662 	uint32_t r;
2663 
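	/*
	 * the QDIS registers are banked in groups of 128 queues: pick
	 * the register with qid / 128 and encode qid % 128 in QINDX.
	 */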
2664 	qid = txr->txr_qid + sc->sc_base_queue;
2665 	reg = I40E_GLLAN_TXPRE_QDIS(qid / 128);
2666 	qid %= 128;
2667 
2668 	r = ixl_rd(sc, reg);
2669 	CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK);
2670 	SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
2671 	SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK :
2672 	    I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK);
2673 	ixl_wr(sc, reg, r);
2674 }
2675 
2676 static void
2677 ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2678 {
2679 	struct ixl_hmc_txq txq;
2680 	struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch);
2681 	void *hmc;
2682 
2683 	memset(&txq, 0, sizeof(txq));
2684 	txq.head = htole16(0);
2685 	txq.new_context = 1;
2686 	htolem64(&txq.base,
2687 	    IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT);
2688 	txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB;
2689 	htolem16(&txq.qlen, sc->sc_tx_ring_ndescs);
2690 	txq.tphrdesc_ena = 0;
2691 	txq.tphrpacket_ena = 0;
2692 	txq.tphwdesc_ena = 0;
2693 	txq.rdylist = data->qs_handle[0];
2694 
2695 	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2696 	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2697 	ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq, nitems(ixl_hmc_pack_txq));
2698 }
2699 
2700 static void
2701 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2702 {
2703 	void *hmc;
2704 
2705 	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2706 	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2707 }
2708 
2709 static void
2710 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2711 {
2712 	struct ixl_tx_map *maps, *txm;
2713 	bus_dmamap_t map;
2714 	unsigned int i;
2715 
2716 	maps = txr->txr_maps;
2717 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2718 		txm = &maps[i];
2719 
2720 		if (txm->txm_m == NULL)
2721 			continue;
2722 
2723 		map = txm->txm_map;
2724 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2725 		    BUS_DMASYNC_POSTWRITE);
2726 		bus_dmamap_unload(sc->sc_dmat, map);
2727 
2728 		m_freem(txm->txm_m);
2729 		txm->txm_m = NULL;
2730 	}
2731 }
2732 
2733 static int
2734 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2735 {
2736 	bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2737 	uint32_t reg;
2738 	int i;
2739 
2740 	for (i = 0; i < 10; i++) {
2741 		reg = ixl_rd(sc, ena);
2742 		if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK))
2743 			return (0);
2744 
2745 		delaymsec(10);
2746 	}
2747 
2748 	return (ETIMEDOUT);
2749 }
2750 
2751 static int
2752 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2753 {
2754 	bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2755 	uint32_t reg;
2756 	int i;
2757 
2758 	for (i = 0; i < 20; i++) {
2759 		reg = ixl_rd(sc, ena);
2760 		if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0)
2761 			return (0);
2762 
2763 		delaymsec(10);
2764 	}
2765 
2766 	return (ETIMEDOUT);
2767 }
2768 
2769 static void
2770 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2771 {
2772 	struct ixl_tx_map *maps, *txm;
2773 	unsigned int i;
2774 
2775 	maps = txr->txr_maps;
2776 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2777 		txm = &maps[i];
2778 
2779 		bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2780 	}
2781 
2782 	ixl_dmamem_free(sc, &txr->txr_mem);
2783 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_tx_ring_ndescs);
2784 	free(txr, M_DEVBUF, sizeof(*txr));
2785 }
2786 
2787 static inline int
2788 ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m)
2789 {
2790 	int error;
2791 
2792 	error = bus_dmamap_load_mbuf(dmat, map, m,
2793 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT);
2794 	if (error != EFBIG)
2795 		return (error);
2796 
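	/*
	 * the chain has too many segments for the map; compact it
	 * into fewer mbufs and retry the load once.
	 */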
2797 	error = m_defrag(m, M_DONTWAIT);
2798 	if (error != 0)
2799 		return (error);
2800 
2801 	return (bus_dmamap_load_mbuf(dmat, map, m,
2802 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT));
2803 }
2804 
2805 static uint64_t
2806 ixl_tx_setup_offload(struct mbuf *m0, struct ixl_tx_ring *txr,
2807     unsigned int prod)
2808 {
2809 	struct ether_extracted ext;
2810 	uint64_t hlen;
2811 	uint64_t offload = 0;
2812 
2813 	if (ISSET(m0->m_flags, M_VLANTAG)) {
2814 		uint64_t vtag = m0->m_pkthdr.ether_vtag;
2815 		offload |= IXL_TX_DESC_CMD_IL2TAG1;
2816 		offload |= vtag << IXL_TX_DESC_L2TAG1_SHIFT;
2817 	}
2818 
2819 	if (!ISSET(m0->m_pkthdr.csum_flags,
2820 	    M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT|M_TCP_TSO))
2821 		return (offload);
2822 
2823 	ether_extract_headers(m0, &ext);
2824 
2825 	if (ext.ip4) {
2826 		offload |= ISSET(m0->m_pkthdr.csum_flags, M_IPV4_CSUM_OUT) ?
2827 		    IXL_TX_DESC_CMD_IIPT_IPV4_CSUM :
2828 		    IXL_TX_DESC_CMD_IIPT_IPV4;
2829 
2830 		hlen = ext.ip4->ip_hl << 2;
2831 #ifdef INET6
2832 	} else if (ext.ip6) {
2833 		offload |= IXL_TX_DESC_CMD_IIPT_IPV6;
2834 
2835 		hlen = sizeof(*ext.ip6);
2836 #endif
2837 	} else {
2838 		panic("CSUM_OUT set for non-IP packet");
2839 		/* NOTREACHED */
2840 	}
2841 
2842 	offload |= (ETHER_HDR_LEN >> 1) << IXL_TX_DESC_MACLEN_SHIFT;
2843 	offload |= (hlen >> 2) << IXL_TX_DESC_IPLEN_SHIFT;
2844 
2845 	if (ext.tcp && ISSET(m0->m_pkthdr.csum_flags, M_TCP_CSUM_OUT)) {
2846 		offload |= IXL_TX_DESC_CMD_L4T_EOFT_TCP;
2847 		offload |= (uint64_t)ext.tcp->th_off << IXL_TX_DESC_L4LEN_SHIFT;
2848 	} else if (ext.udp && ISSET(m0->m_pkthdr.csum_flags, M_UDP_CSUM_OUT)) {
2849 		offload |= IXL_TX_DESC_CMD_L4T_EOFT_UDP;
2850 		offload |= (sizeof(*ext.udp) >> 2) << IXL_TX_DESC_L4LEN_SHIFT;
2851 	}
2852 
2853 	if (ISSET(m0->m_pkthdr.csum_flags, M_TCP_TSO)) {
2854 		if (ext.tcp) {
2855 			struct ixl_tx_desc *ring, *txd;
2856 			uint64_t cmd = 0, paylen, outlen;
2857 
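			/*
			 * fill the extra descriptor slot ixl_start()
			 * reserved for TSO with a context descriptor
			 * carrying the L4 payload length and the MSS.
			 */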
2858 			hlen += ext.tcp->th_off << 2;
2859 
2860 			outlen = m0->m_pkthdr.ph_mss;
2861 			paylen = m0->m_pkthdr.len - ETHER_HDR_LEN - hlen;
2862 
2863 			ring = IXL_DMA_KVA(&txr->txr_mem);
2864 			txd = &ring[prod];
2865 
2866 			cmd |= IXL_TX_DESC_DTYPE_CONTEXT;
2867 			cmd |= IXL_TX_CTX_DESC_CMD_TSO;
2868 			cmd |= paylen << IXL_TX_CTX_DESC_TLEN_SHIFT;
2869 			cmd |= outlen << IXL_TX_CTX_DESC_MSS_SHIFT;
2870 
2871 			htolem64(&txd->addr, 0);
2872 			htolem64(&txd->cmd, cmd);
2873 
2874 			tcpstat_add(tcps_outpkttso,
2875 			    (paylen + outlen - 1) / outlen);
2876 		} else
2877 			tcpstat_inc(tcps_outbadtso);
2878 	}
2879 
2880 	return (offload);
2881 }
2882 
2883 static void
2884 ixl_start(struct ifqueue *ifq)
2885 {
2886 	struct ifnet *ifp = ifq->ifq_if;
2887 	struct ixl_softc *sc = ifp->if_softc;
2888 	struct ixl_tx_ring *txr = ifq->ifq_softc;
2889 	struct ixl_tx_desc *ring, *txd;
2890 	struct ixl_tx_map *txm;
2891 	bus_dmamap_t map;
2892 	struct mbuf *m;
2893 	uint64_t cmd;
2894 	unsigned int prod, free, last, i;
2895 	unsigned int mask;
2896 	int post = 0;
2897 	uint64_t offload;
2898 #if NBPFILTER > 0
2899 	caddr_t if_bpf;
2900 #endif
2901 
2902 	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
2903 		ifq_purge(ifq);
2904 		return;
2905 	}
2906 
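	/* count the free descriptors between cons and prod, with wrap */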
2907 	prod = txr->txr_prod;
2908 	free = txr->txr_cons;
2909 	if (free <= prod)
2910 		free += sc->sc_tx_ring_ndescs;
2911 	free -= prod;
2912 
2913 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2914 	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
2915 
2916 	ring = IXL_DMA_KVA(&txr->txr_mem);
2917 	mask = sc->sc_tx_ring_ndescs - 1;
2918 
2919 	for (;;) {
2920 		/* We need one extra descriptor for TSO packets. */
2921 		if (free <= (IXL_TX_PKT_DESCS + 1)) {
2922 			ifq_set_oactive(ifq);
2923 			break;
2924 		}
2925 
2926 		m = ifq_dequeue(ifq);
2927 		if (m == NULL)
2928 			break;
2929 
2930 		offload = ixl_tx_setup_offload(m, txr, prod);
2931 
2932 		txm = &txr->txr_maps[prod];
2933 		map = txm->txm_map;
2934 
2935 		if (ISSET(m->m_pkthdr.csum_flags, M_TCP_TSO)) {
2936 			prod++;
2937 			prod &= mask;
2938 			free--;
2939 		}
2940 
2941 		if (ixl_load_mbuf(sc->sc_dmat, map, m) != 0) {
2942 			ifq->ifq_errors++;
2943 			m_freem(m);
2944 			continue;
2945 		}
2946 
2947 		bus_dmamap_sync(sc->sc_dmat, map, 0,
2948 		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2949 
2950 		for (i = 0; i < map->dm_nsegs; i++) {
2951 			txd = &ring[prod];
2952 
2953 			cmd = (uint64_t)map->dm_segs[i].ds_len <<
2954 			    IXL_TX_DESC_BSIZE_SHIFT;
2955 			cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC;
2956 			cmd |= offload;
2957 
2958 			htolem64(&txd->addr, map->dm_segs[i].ds_addr);
2959 			htolem64(&txd->cmd, cmd);
2960 
2961 			last = prod;
2962 
2963 			prod++;
2964 			prod &= mask;
2965 		}
2966 		cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS;
2967 		htolem64(&txd->cmd, cmd);
2968 
2969 		txm->txm_m = m;
2970 		txm->txm_eop = last;
2971 
2972 #if NBPFILTER > 0
2973 		if_bpf = ifp->if_bpf;
2974 		if (if_bpf)
2975 			bpf_mtap_ether(if_bpf, m, BPF_DIRECTION_OUT);
2976 #endif
2977 
2978 		free -= i;
2979 		post = 1;
2980 	}
2981 
2982 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2983 	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
2984 
2985 	if (post) {
2986 		txr->txr_prod = prod;
2987 		ixl_wr(sc, txr->txr_tail, prod);
2988 	}
2989 }
2990 
2991 static int
2992 ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2993 {
2994 	struct ifqueue *ifq = txr->txr_ifq;
2995 	struct ixl_tx_desc *ring, *txd;
2996 	struct ixl_tx_map *txm;
2997 	bus_dmamap_t map;
2998 	unsigned int cons, prod, last;
2999 	unsigned int mask;
3000 	uint64_t dtype;
3001 	int done = 0;
3002 
3003 	prod = txr->txr_prod;
3004 	cons = txr->txr_cons;
3005 
3006 	if (cons == prod)
3007 		return (0);
3008 
3009 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
3010 	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
3011 
3012 	ring = IXL_DMA_KVA(&txr->txr_mem);
3013 	mask = sc->sc_tx_ring_ndescs - 1;
3014 
3015 	do {
3016 		txm = &txr->txr_maps[cons];
3017 		last = txm->txm_eop;
3018 		txd = &ring[last];
3019 
3020 		dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
3021 		if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
3022 			break;
3023 
3024 		map = txm->txm_map;
3025 
3026 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3027 		    BUS_DMASYNC_POSTWRITE);
3028 		bus_dmamap_unload(sc->sc_dmat, map);
3029 		m_freem(txm->txm_m);
3030 
3031 		txm->txm_m = NULL;
3032 		txm->txm_eop = -1;
3033 
3034 		cons = last + 1;
3035 		cons &= mask;
3036 
3037 		done = 1;
3038 	} while (cons != prod);
3039 
3040 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
3041 	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
3042 
3043 	txr->txr_cons = cons;
3044 
3045 	//ixl_enable(sc, txr->txr_msix);
3046 
3047 	if (ifq_is_oactive(ifq))
3048 		ifq_restart(ifq);
3049 
3050 	return (done);
3051 }
3052 
3053 static struct ixl_rx_ring *
3054 ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid)
3055 {
3056 	struct ixl_rx_ring *rxr;
3057 	struct ixl_rx_map *maps, *rxm;
3058 	unsigned int i;
3059 
3060 	rxr = malloc(sizeof(*rxr), M_DEVBUF, M_WAITOK|M_CANFAIL);
3061 	if (rxr == NULL)
3062 		return (NULL);
3063 
3064 	maps = mallocarray(sizeof(*maps),
3065 	    sc->sc_rx_ring_ndescs, M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
3066 	if (maps == NULL)
3067 		goto free;
3068 
3069 	if (ixl_dmamem_alloc(sc, &rxr->rxr_mem,
3070 	    sizeof(struct ixl_rx_rd_desc_16) * sc->sc_rx_ring_ndescs,
3071 	    IXL_RX_QUEUE_ALIGN) != 0)
3072 		goto freemap;
3073 
3074 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3075 		rxm = &maps[i];
3076 
3077 		if (bus_dmamap_create(sc->sc_dmat,
3078 		    IXL_HARDMTU, 1, IXL_HARDMTU, 0,
3079 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
3080 		    &rxm->rxm_map) != 0)
3081 			goto uncreate;
3082 
3083 		rxm->rxm_m = NULL;
3084 	}
3085 
3086 	rxr->rxr_sc = sc;
3087 	if_rxr_init(&rxr->rxr_acct, 17, sc->sc_rx_ring_ndescs - 1);
3088 	timeout_set(&rxr->rxr_refill, ixl_rxrefill, rxr);
3089 	rxr->rxr_cons = rxr->rxr_prod = 0;
3090 	rxr->rxr_m_head = NULL;
3091 	rxr->rxr_m_tail = &rxr->rxr_m_head;
3092 	rxr->rxr_maps = maps;
3093 
3094 	rxr->rxr_tail = I40E_QRX_TAIL(qid);
3095 	rxr->rxr_qid = qid;
3096 
3097 	return (rxr);
3098 
3099 uncreate:
3100 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3101 		rxm = &maps[i];
3102 
3103 		if (rxm->rxm_map == NULL)
3104 			continue;
3105 
3106 		bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
3107 	}
3108 
3109 	ixl_dmamem_free(sc, &rxr->rxr_mem);
3110 freemap:
3111 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_rx_ring_ndescs);
3112 free:
3113 	free(rxr, M_DEVBUF, sizeof(*rxr));
3114 	return (NULL);
3115 }
3116 
3117 static void
3118 ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3119 {
3120 	struct ixl_rx_map *maps, *rxm;
3121 	bus_dmamap_t map;
3122 	unsigned int i;
3123 
3124 	timeout_del_barrier(&rxr->rxr_refill);
3125 
3126 	maps = rxr->rxr_maps;
3127 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3128 		rxm = &maps[i];
3129 
3130 		if (rxm->rxm_m == NULL)
3131 			continue;
3132 
3133 		map = rxm->rxm_map;
3134 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3135 		    BUS_DMASYNC_POSTWRITE);
3136 		bus_dmamap_unload(sc->sc_dmat, map);
3137 
3138 		m_freem(rxm->rxm_m);
3139 		rxm->rxm_m = NULL;
3140 	}
3141 
3142 	m_freem(rxr->rxr_m_head);
3143 	rxr->rxr_m_head = NULL;
3144 	rxr->rxr_m_tail = &rxr->rxr_m_head;
3145 
3146 	rxr->rxr_prod = rxr->rxr_cons = 0;
3147 }
3148 
3149 static int
3150 ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3151 {
3152 	bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
3153 	uint32_t reg;
3154 	int i;
3155 
3156 	for (i = 0; i < 10; i++) {
3157 		reg = ixl_rd(sc, ena);
3158 		if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK))
3159 			return (0);
3160 
3161 		delaymsec(10);
3162 	}
3163 
3164 	return (ETIMEDOUT);
3165 }
3166 
3167 static int
3168 ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3169 {
3170 	bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
3171 	uint32_t reg;
3172 	int i;
3173 
3174 	for (i = 0; i < 20; i++) {
3175 		reg = ixl_rd(sc, ena);
3176 		if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0)
3177 			return (0);
3178 
3179 		delaymsec(10);
3180 	}
3181 
3182 	return (ETIMEDOUT);
3183 }
3184 
3185 static void
3186 ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3187 {
3188 	struct ixl_hmc_rxq rxq;
3189 	void *hmc;
3190 
3191 	memset(&rxq, 0, sizeof(rxq));
3192 
3193 	rxq.head = htole16(0);
3194 	htolem64(&rxq.base,
3195 	    IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT);
3196 	htolem16(&rxq.qlen, sc->sc_rx_ring_ndescs);
3197 	rxq.dbuff = htole16(MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT);
3198 	rxq.hbuff = 0;
3199 	rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT;
3200 	rxq.dsize = IXL_HMC_RXQ_DSIZE_16;
3201 	rxq.crcstrip = 1;
3202 	rxq.l2tsel = IXL_HMC_RXQ_L2TSEL_1ST_TAG_TO_L2TAG1;
3203 	rxq.showiv = 0;
3204 	rxq.rxmax = htole16(IXL_HARDMTU);
3205 	rxq.tphrdesc_ena = 0;
3206 	rxq.tphwdesc_ena = 0;
3207 	rxq.tphdata_ena = 0;
3208 	rxq.tphhead_ena = 0;
3209 	rxq.lrxqthresh = 0;
3210 	rxq.prefena = 1;
3211 
3212 	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
3213 	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
3214 	ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq, nitems(ixl_hmc_pack_rxq));
3215 }
3216 
3217 static void
3218 ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3219 {
3220 	void *hmc;
3221 
3222 	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
3223 	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
3224 }
3225 
3226 static void
3227 ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3228 {
3229 	struct ixl_rx_map *maps, *rxm;
3230 	unsigned int i;
3231 
3232 	maps = rxr->rxr_maps;
3233 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3234 		rxm = &maps[i];
3235 
3236 		bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
3237 	}
3238 
3239 	ixl_dmamem_free(sc, &rxr->rxr_mem);
3240 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_rx_ring_ndescs);
3241 	free(rxr, M_DEVBUF, sizeof(*rxr));
3242 }
3243 
3244 static int
3245 ixl_rxeof(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3246 {
3247 	struct ifiqueue *ifiq = rxr->rxr_ifiq;
3248 	struct ifnet *ifp = &sc->sc_ac.ac_if;
3249 	struct ixl_rx_wb_desc_16 *ring, *rxd;
3250 	struct ixl_rx_map *rxm;
3251 	bus_dmamap_t map;
3252 	unsigned int cons, prod;
3253 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
3254 	struct mbuf *m;
3255 	uint64_t word;
3256 	unsigned int len;
3257 	unsigned int mask;
3258 	int done = 0;
3259 
3260 	prod = rxr->rxr_prod;
3261 	cons = rxr->rxr_cons;
3262 
3263 	if (cons == prod)
3264 		return (0);
3265 
3266 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3267 	    0, IXL_DMA_LEN(&rxr->rxr_mem),
3268 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3269 
3270 	ring = IXL_DMA_KVA(&rxr->rxr_mem);
3271 	mask = sc->sc_rx_ring_ndescs - 1;
3272 
3273 	do {
3274 		rxd = &ring[cons];
3275 
3276 		word = lemtoh64(&rxd->qword1);
3277 		if (!ISSET(word, IXL_RX_DESC_DD))
3278 			break;
3279 
3280 		if_rxr_put(&rxr->rxr_acct, 1);
3281 
3282 		rxm = &rxr->rxr_maps[cons];
3283 
3284 		map = rxm->rxm_map;
3285 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3286 		    BUS_DMASYNC_POSTREAD);
3287 		bus_dmamap_unload(sc->sc_dmat, map);
3288 
3289 		m = rxm->rxm_m;
3290 		rxm->rxm_m = NULL;
3291 
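		/*
		 * chain this fragment onto the packet being assembled;
		 * a frame larger than one cluster spans multiple
		 * descriptors and is only complete once EOP is set.
		 */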
3292 		len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
3293 		m->m_len = len;
3294 		m->m_pkthdr.len = 0;
3295 
3296 		m->m_next = NULL;
3297 		*rxr->rxr_m_tail = m;
3298 		rxr->rxr_m_tail = &m->m_next;
3299 
3300 		m = rxr->rxr_m_head;
3301 		m->m_pkthdr.len += len;
3302 
3303 		if (ISSET(word, IXL_RX_DESC_EOP)) {
3304 			if (!ISSET(word,
3305 			    IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
3306 				if ((word & IXL_RX_DESC_FLTSTAT_MASK) ==
3307 				    IXL_RX_DESC_FLTSTAT_RSS) {
3308 					m->m_pkthdr.ph_flowid =
3309 					    lemtoh32(&rxd->filter_status);
3310 					m->m_pkthdr.csum_flags |= M_FLOWID;
3311 				}
3312 
3313 				if (ISSET(word, IXL_RX_DESC_L2TAG1P)) {
3314 					m->m_pkthdr.ether_vtag =
3315 					    lemtoh16(&rxd->l2tag1);
3316 					SET(m->m_flags, M_VLANTAG);
3317 				}
3318 
3319 				ixl_rx_checksum(m, word);
3320 				ml_enqueue(&ml, m);
3321 			} else {
3322 				ifp->if_ierrors++; /* XXX */
3323 				m_freem(m);
3324 			}
3325 
3326 			rxr->rxr_m_head = NULL;
3327 			rxr->rxr_m_tail = &rxr->rxr_m_head;
3328 		}
3329 
3330 		cons++;
3331 		cons &= mask;
3332 
3333 		done = 1;
3334 	} while (cons != prod);
3335 
3336 	if (done) {
3337 		rxr->rxr_cons = cons;
3338 		if (ifiq_input(ifiq, &ml))
3339 			if_rxr_livelocked(&rxr->rxr_acct);
3340 		ixl_rxfill(sc, rxr);
3341 	}
3342 
3343 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3344 	    0, IXL_DMA_LEN(&rxr->rxr_mem),
3345 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3346 
3347 	return (done);
3348 }
3349 
3350 static void
3351 ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3352 {
3353 	struct ixl_rx_rd_desc_16 *ring, *rxd;
3354 	struct ixl_rx_map *rxm;
3355 	bus_dmamap_t map;
3356 	struct mbuf *m;
3357 	unsigned int prod;
3358 	unsigned int slots;
3359 	unsigned int mask;
3360 	int post = 0;
3361 
3362 	slots = if_rxr_get(&rxr->rxr_acct, sc->sc_rx_ring_ndescs);
3363 	if (slots == 0)
3364 		return;
3365 
3366 	prod = rxr->rxr_prod;
3367 
3368 	ring = IXL_DMA_KVA(&rxr->rxr_mem);
3369 	mask = sc->sc_rx_ring_ndescs - 1;
3370 
3371 	do {
3372 		rxm = &rxr->rxr_maps[prod];
3373 
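		/*
		 * point m_data at the tail of the cluster so exactly
		 * MCLBYTES + ETHER_ALIGN bytes are offered to the chip.
		 */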
3374 		m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES + ETHER_ALIGN);
3375 		if (m == NULL)
3376 			break;
3377 		m->m_data += (m->m_ext.ext_size - (MCLBYTES + ETHER_ALIGN));
3378 		m->m_len = m->m_pkthdr.len = MCLBYTES + ETHER_ALIGN;
3379 
3380 		map = rxm->rxm_map;
3381 
3382 		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
3383 		    BUS_DMA_NOWAIT) != 0) {
3384 			m_freem(m);
3385 			break;
3386 		}
3387 
3388 		rxm->rxm_m = m;
3389 
3390 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3391 		    BUS_DMASYNC_PREREAD);
3392 
3393 		rxd = &ring[prod];
3394 
3395 		htolem64(&rxd->paddr, map->dm_segs[0].ds_addr);
3396 		rxd->haddr = htole64(0);
3397 
3398 		prod++;
3399 		prod &= mask;
3400 
3401 		post = 1;
3402 	} while (--slots);
3403 
3404 	if_rxr_put(&rxr->rxr_acct, slots);
3405 
3406 	if (if_rxr_inuse(&rxr->rxr_acct) == 0)
3407 		timeout_add(&rxr->rxr_refill, 1);
3408 	else if (post) {
3409 		rxr->rxr_prod = prod;
3410 		ixl_wr(sc, rxr->rxr_tail, prod);
3411 	}
3412 }
3413 
3414 void
3415 ixl_rxrefill(void *arg)
3416 {
3417 	struct ixl_rx_ring *rxr = arg;
3418 	struct ixl_softc *sc = rxr->rxr_sc;
3419 
3420 	ixl_rxfill(sc, rxr);
3421 }
3422 
3423 static int
3424 ixl_rxrinfo(struct ixl_softc *sc, struct if_rxrinfo *ifri)
3425 {
3426 	struct ifnet *ifp = &sc->sc_ac.ac_if;
3427 	struct if_rxring_info *ifr;
3428 	struct ixl_rx_ring *ring;
3429 	int i, rv;
3430 
3431 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
3432 		return (ENOTTY);
3433 
3434 	ifr = mallocarray(sizeof(*ifr), ixl_nqueues(sc), M_TEMP,
3435 	    M_WAITOK|M_CANFAIL|M_ZERO);
3436 	if (ifr == NULL)
3437 		return (ENOMEM);
3438 
3439 	for (i = 0; i < ixl_nqueues(sc); i++) {
3440 		ring = ifp->if_iqs[i]->ifiq_softc;
3441 		ifr[i].ifr_size = MCLBYTES;
3442 		snprintf(ifr[i].ifr_name, sizeof(ifr[i].ifr_name), "%d", i);
3443 		ifr[i].ifr_info = ring->rxr_acct;
3444 	}
3445 
3446 	rv = if_rxr_info_ioctl(ifri, ixl_nqueues(sc), ifr);
3447 	free(ifr, M_TEMP, ixl_nqueues(sc) * sizeof(*ifr));
3448 
3449 	return (rv);
3450 }
3451 
3452 static void
3453 ixl_rx_checksum(struct mbuf *m, uint64_t word)
3454 {
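	/*
	 * the chip only checks l3/l4 checksums when L3L4P is set;
	 * IPE and L4E then flag IP and L4 checksum errors respectively.
	 */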
3455 	if (!ISSET(word, IXL_RX_DESC_L3L4P))
3456 		return;
3457 
3458 	if (ISSET(word, IXL_RX_DESC_IPE))
3459 		return;
3460 
3461 	m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
3462 
3463 	if (ISSET(word, IXL_RX_DESC_L4E))
3464 		return;
3465 
3466 	m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
3467 }
3468 
3469 static int
3470 ixl_intr0(void *xsc)
3471 {
3472 	struct ixl_softc *sc = xsc;
3473 	struct ifnet *ifp = &sc->sc_ac.ac_if;
3474 	uint32_t icr;
3475 	int rv = 0;
3476 
3477 	ixl_intr_enable(sc);
3478 	icr = ixl_rd(sc, I40E_PFINT_ICR0);
3479 
3480 	if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3481 		ixl_atq_done(sc);
3482 		task_add(systq, &sc->sc_arq_task);
3483 		rv = 1;
3484 	}
3485 
3486 	if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3487 		task_add(systq, &sc->sc_link_state_task);
3488 		rv = 1;
3489 	}
3490 
3491 	if (ISSET(ifp->if_flags, IFF_RUNNING)) {
3492 		struct ixl_vector *iv = sc->sc_vectors;
3493 		if (ISSET(icr, I40E_INTR_NOTX_RX_MASK))
3494 			rv |= ixl_rxeof(sc, iv->iv_rxr);
3495 		if (ISSET(icr, I40E_INTR_NOTX_TX_MASK))
3496 			rv |= ixl_txeof(sc, iv->iv_txr);
3497 	}
3498 
3499 	return (rv);
3500 }
3501 
3502 static int
3503 ixl_intr_vector(void *v)
3504 {
3505 	struct ixl_vector *iv = v;
3506 	struct ixl_softc *sc = iv->iv_sc;
3507 	struct ifnet *ifp = &sc->sc_ac.ac_if;
3508 	int rv = 0;
3509 
3510 	if (ISSET(ifp->if_flags, IFF_RUNNING)) {
3511 		rv |= ixl_rxeof(sc, iv->iv_rxr);
3512 		rv |= ixl_txeof(sc, iv->iv_txr);
3513 	}
3514 
3515 	ixl_wr(sc, I40E_PFINT_DYN_CTLN(iv->iv_qid),
3516 	    I40E_PFINT_DYN_CTLN_INTENA_MASK |
3517 	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3518 	    (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
3519 
3520 	return (rv);
3521 }
3522 
3523 static void
3524 ixl_link_state_update_iaq(struct ixl_softc *sc, void *arg)
3525 {
3526 	struct ifnet *ifp = &sc->sc_ac.ac_if;
3527 	struct ixl_aq_desc *iaq = arg;
3528 	uint16_t retval;
3529 	int link_state;
3530 	int change = 0;
3531 
3532 	retval = lemtoh16(&iaq->iaq_retval);
3533 	if (retval != IXL_AQ_RC_OK) {
3534 		printf("%s: LINK STATUS error %u\n", DEVNAME(sc), retval);
3535 		return;
3536 	}
3537 
3538 	link_state = ixl_set_link_status(sc, iaq);
3539 	mtx_enter(&sc->sc_link_state_mtx);
3540 	if (ifp->if_link_state != link_state) {
3541 		ifp->if_link_state = link_state;
3542 		change = 1;
3543 	}
3544 	mtx_leave(&sc->sc_link_state_mtx);
3545 
3546 	if (change)
3547 		if_link_state_change(ifp);
3548 }
3549 
3550 static void
3551 ixl_link_state_update(void *xsc)
3552 {
3553 	struct ixl_softc *sc = xsc;
3554 	struct ixl_aq_desc *iaq;
3555 	struct ixl_aq_link_param *param;
3556 
3557 	memset(&sc->sc_link_state_atq, 0, sizeof(sc->sc_link_state_atq));
3558 	iaq = &sc->sc_link_state_atq.iatq_desc;
3559 	iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
3560 	param = (struct ixl_aq_link_param *)iaq->iaq_param;
3561 	param->notify = IXL_AQ_LINK_NOTIFY;
3562 
3563 	ixl_atq_set(&sc->sc_link_state_atq, ixl_link_state_update_iaq, iaq);
3564 	ixl_atq_post(sc, &sc->sc_link_state_atq);
3565 }
3566 
3567 #if 0
3568 static void
3569 ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3570 {
3571 	printf("%s: flags %b opcode %04x\n", DEVNAME(sc),
3572 	    lemtoh16(&iaq->iaq_flags), IXL_AQ_FLAGS_FMT,
3573 	    lemtoh16(&iaq->iaq_opcode));
3574 	printf("%s: datalen %u retval %u\n", DEVNAME(sc),
3575 	    lemtoh16(&iaq->iaq_datalen), lemtoh16(&iaq->iaq_retval));
3576 	printf("%s: cookie %016llx\n", DEVNAME(sc), iaq->iaq_cookie);
3577 	printf("%s: %08x %08x %08x %08x\n", DEVNAME(sc),
3578 	    lemtoh32(&iaq->iaq_param[0]), lemtoh32(&iaq->iaq_param[1]),
3579 	    lemtoh32(&iaq->iaq_param[2]), lemtoh32(&iaq->iaq_param[3]));
3580 }
3581 #endif
3582 
3583 static void
3584 ixl_arq(void *xsc)
3585 {
3586 	struct ixl_softc *sc = xsc;
3587 	struct ixl_aq_desc *arq, *iaq;
3588 	struct ixl_aq_buf *aqb;
3589 	unsigned int cons = sc->sc_arq_cons;
3590 	unsigned int prod;
3591 	int done = 0;
3592 
3593 	prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) &
3594 	    sc->sc_aq_regs->arq_head_mask;
3595 
3596 	if (cons == prod)
3597 		goto done;
3598 
3599 	arq = IXL_DMA_KVA(&sc->sc_arq);
3600 
3601 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3602 	    0, IXL_DMA_LEN(&sc->sc_arq),
3603 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3604 
3605 	do {
3606 		iaq = &arq[cons];
3607 
3608 		aqb = SIMPLEQ_FIRST(&sc->sc_arq_live);
3609 		SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_live, aqb_entry);
3610 		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
3611 		    BUS_DMASYNC_POSTREAD);
3612 
3613 		switch (iaq->iaq_opcode) {
3614 		case HTOLE16(IXL_AQ_OP_PHY_LINK_STATUS):
3615 			ixl_link_state_update_iaq(sc, iaq);
3616 			break;
3617 		}
3618 
3619 		memset(iaq, 0, sizeof(*iaq));
3620 		SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry);
3621 		if_rxr_put(&sc->sc_arq_ring, 1);
3622 
3623 		cons++;
3624 		cons &= IXL_AQ_MASK;
3625 
3626 		done = 1;
3627 	} while (cons != prod);
3628 
3629 	if (done && ixl_arq_fill(sc))
3630 		ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
3631 
3632 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3633 	    0, IXL_DMA_LEN(&sc->sc_arq),
3634 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3635 
3636 	sc->sc_arq_cons = cons;
3637 
3638 done:
3639 	ixl_intr_enable(sc);
3640 }
3641 
3642 static void
3643 ixl_atq_set(struct ixl_atq *iatq,
3644     void (*fn)(struct ixl_softc *, void *), void *arg)
3645 {
3646 	iatq->iatq_fn = fn;
3647 	iatq->iatq_arg = arg;
3648 }
3649 
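/*
 * Admin transmit queue (ATQ): commands are posted into a small DMA
 * ring, and the struct ixl_atq pointer is stashed in the descriptor
 * cookie so that ixl_atq_done() can find the command and run its
 * completion callback once the hardware sets the DD flag.
 */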
3650 static void
3651 ixl_atq_post(struct ixl_softc *sc, struct ixl_atq *iatq)
3652 {
3653 	struct ixl_aq_desc *atq, *slot;
3654 	unsigned int prod;
3655 
3656 	mtx_enter(&sc->sc_atq_mtx);
3657 
3658 	atq = IXL_DMA_KVA(&sc->sc_atq);
3659 	prod = sc->sc_atq_prod;
3660 	slot = atq + prod;
3661 
3662 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3663 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3664 
3665 	*slot = iatq->iatq_desc;
3666 	slot->iaq_cookie = (uint64_t)iatq;
3667 
3668 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3669 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3670 
3671 	prod++;
3672 	prod &= IXL_AQ_MASK;
3673 	sc->sc_atq_prod = prod;
3674 	ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
3675 
3676 	mtx_leave(&sc->sc_atq_mtx);
3677 }
3678 
3679 static void
3680 ixl_atq_done(struct ixl_softc *sc)
3681 {
3682 	struct ixl_aq_desc *atq, *slot;
3683 	struct ixl_atq *iatq;
3684 	unsigned int cons;
3685 	unsigned int prod;
3686 
3687 	mtx_enter(&sc->sc_atq_mtx);
3688 
3689 	prod = sc->sc_atq_prod;
3690 	cons = sc->sc_atq_cons;
3691 
3692 	if (prod == cons) {
3693 		mtx_leave(&sc->sc_atq_mtx);
3694 		return;
3695 	}
3696 
3697 	atq = IXL_DMA_KVA(&sc->sc_atq);
3698 
3699 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3700 	    0, IXL_DMA_LEN(&sc->sc_atq),
3701 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3702 
3703 	do {
3704 		slot = &atq[cons];
3705 		if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD)))
3706 			break;
3707 
3708 		KASSERT(slot->iaq_cookie != 0);
3709 		iatq = (struct ixl_atq *)slot->iaq_cookie;
3710 		iatq->iatq_desc = *slot;
3711 
3712 		memset(slot, 0, sizeof(*slot));
3713 
3714 		(*iatq->iatq_fn)(sc, iatq->iatq_arg);
3715 
3716 		cons++;
3717 		cons &= IXL_AQ_MASK;
3718 	} while (cons != prod);
3719 
3720 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3721 	    0, IXL_DMA_LEN(&sc->sc_atq),
3722 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3723 
3724 	sc->sc_atq_cons = cons;
3725 
3726 	mtx_leave(&sc->sc_atq_mtx);
3727 }
3728 
3729 static void
3730 ixl_wakeup(struct ixl_softc *sc, void *arg)
3731 {
3732 	struct cond *c = arg;
3733 
3734 	cond_signal(c);
3735 }
3736 
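/*
 * ixl_atq_exec posts a command and sleeps until ixl_atq_done() runs
 * the ixl_wakeup callback.  A condensed sketch of the pattern (this
 * is what ixl_sff_get_byte() below actually does):
 *
 *	struct ixl_atq iatq;
 *	struct ixl_aq_desc *iaq;
 *
 *	memset(&iatq, 0, sizeof(iatq));
 *	iaq = &iatq.iatq_desc;
 *	iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_REGISTER);
 *	(set any opcode-specific iaq_param fields here)
 *	ixl_atq_exec(sc, &iatq, "wmesg");
 *	(then check lemtoh16(&iaq->iaq_retval) against IXL_AQ_RC_OK)
 */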
3737 static void
3738 ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq, const char *wmesg)
3739 {
3740 	struct cond c = COND_INITIALIZER();
3741 
3742 	KASSERT(iatq->iatq_desc.iaq_cookie == 0);
3743 
3744 	ixl_atq_set(iatq, ixl_wakeup, &c);
3745 	ixl_atq_post(sc, iatq);
3746 
3747 	cond_wait(&c, wmesg);
3748 }
3749 
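/*
 * ixl_atq_poll is the pre-interrupt variant: it busy-waits in 1ms
 * steps, up to tm milliseconds, for the firmware to advance atq_head
 * past the posted slot, so it is usable during attach before any
 * interrupt handler has been established.
 */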
3750 static int
3751 ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm)
3752 {
3753 	struct ixl_aq_desc *atq, *slot;
3754 	unsigned int prod;
3755 	unsigned int t = 0;
3756 
3757 	mtx_enter(&sc->sc_atq_mtx);
3758 
3759 	atq = IXL_DMA_KVA(&sc->sc_atq);
3760 	prod = sc->sc_atq_prod;
3761 	slot = atq + prod;
3762 
3763 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3764 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3765 
3766 	*slot = *iaq;
3767 	slot->iaq_flags |= htole16(IXL_AQ_SI);
3768 
3769 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3770 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3771 
3772 	prod++;
3773 	prod &= IXL_AQ_MASK;
3774 	sc->sc_atq_prod = prod;
3775 	ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
3776 
3777 	while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
3778 		delaymsec(1);
3779 
3780 		if (t++ > tm) {
3781 			mtx_leave(&sc->sc_atq_mtx);
3782 			return (ETIMEDOUT);
3783 		}
3784 	}
3785 
3786 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3787 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
3788 	*iaq = *slot;
3789 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3790 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);
3791 
3792 	sc->sc_atq_cons = prod;
3793 
3794 	mtx_leave(&sc->sc_atq_mtx);
3795 	return (0);
3796 }
3797 
3798 static int
3799 ixl_get_version(struct ixl_softc *sc)
3800 {
3801 	struct ixl_aq_desc iaq;
3802 	uint32_t fwbuild, fwver, apiver;
3803 
3804 	memset(&iaq, 0, sizeof(iaq));
3805 	iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);
3806 
3807 	if (ixl_atq_poll(sc, &iaq, 2000) != 0)
3808 		return (ETIMEDOUT);
3809 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
3810 		return (EIO);
3811 
3812 	fwbuild = lemtoh32(&iaq.iaq_param[1]);
3813 	fwver = lemtoh32(&iaq.iaq_param[2]);
3814 	apiver = lemtoh32(&iaq.iaq_param[3]);
3815 
3816 	sc->sc_api_major = apiver & 0xffff;
3817 	sc->sc_api_minor = (apiver >> 16) & 0xffff;
3818 
3819 	printf(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver,
3820 	    (uint16_t)(fwver >> 16), fwbuild,
3821 	    sc->sc_api_major, sc->sc_api_minor);
3822 
3823 	return (0);
3824 }
3825 
3826 static int
3827 ixl_pxe_clear(struct ixl_softc *sc)
3828 {
3829 	struct ixl_aq_desc iaq;
3830 
3831 	memset(&iaq, 0, sizeof(iaq));
3832 	iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE);
3833 	iaq.iaq_param[0] = htole32(0x2);
3834 
3835 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3836 		printf(", CLEAR PXE MODE timeout\n");
3837 		return (-1);
3838 	}
3839 
3840 	switch (iaq.iaq_retval) {
3841 	case HTOLE16(IXL_AQ_RC_OK):
3842 	case HTOLE16(IXL_AQ_RC_EEXIST):
3843 		break;
3844 	default:
3845 		printf(", CLEAR PXE MODE error\n");
3846 		return (-1);
3847 	}
3848 
3849 	return (0);
3850 }
3851 
3852 static int
3853 ixl_lldp_shut(struct ixl_softc *sc)
3854 {
3855 	struct ixl_aq_desc iaq;
3856 
3857 	memset(&iaq, 0, sizeof(iaq));
3858 	iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT);
3859 	iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN);
3860 
3861 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3862 		printf(", STOP LLDP AGENT timeout\n");
3863 		return (-1);
3864 	}
3865 
3866 	switch (iaq.iaq_retval) {
3867 	case HTOLE16(IXL_AQ_RC_EMODE):
3868 	case HTOLE16(IXL_AQ_RC_EPERM):
3869 		/* ignore silently */
3870 	default:
3871 		break;
3872 	}
3873 
3874 	return (0);
3875 }
3876 
3877 static int
3878 ixl_get_mac(struct ixl_softc *sc)
3879 {
3880 	struct ixl_dmamem idm;
3881 	struct ixl_aq_desc iaq;
3882 	struct ixl_aq_mac_addresses *addrs;
3883 	int rv;
3884 
3885 #ifdef __sparc64__
3886 	if (OF_getprop(PCITAG_NODE(sc->sc_tag), "local-mac-address",
3887 	    sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN) == ETHER_ADDR_LEN)
3888 		return (0);
3889 #endif
3890 
3891 	if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) {
3892 		printf(", unable to allocate mac addresses\n");
3893 		return (-1);
3894 	}
3895 
3896 	memset(&iaq, 0, sizeof(iaq));
3897 	iaq.iaq_flags = htole16(IXL_AQ_BUF);
3898 	iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ);
3899 	iaq.iaq_datalen = htole16(sizeof(*addrs));
3900 	ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3901 
3902 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3903 	    BUS_DMASYNC_PREREAD);
3904 
3905 	rv = ixl_atq_poll(sc, &iaq, 250);
3906 
3907 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3908 	    BUS_DMASYNC_POSTREAD);
3909 
3910 	if (rv != 0) {
3911 		printf(", MAC ADDRESS READ timeout\n");
3912 		rv = -1;
3913 		goto done;
3914 	}
3915 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3916 		printf(", MAC ADDRESS READ error\n");
3917 		rv = -1;
3918 		goto done;
3919 	}
3920 
3921 	addrs = IXL_DMA_KVA(&idm);
3922 	if (!ISSET(iaq.iaq_param[0], htole32(IXL_AQ_MAC_PORT_VALID))) {
3923 		printf(", port address is not valid\n");
		rv = -1;
3924 		goto done;
3925 	}
3926 
3927 	memcpy(sc->sc_ac.ac_enaddr, addrs->port, ETHER_ADDR_LEN);
3928 	rv = 0;
3929 
3930 done:
3931 	ixl_dmamem_free(sc, &idm);
3932 	return (rv);
3933 }
3934 
3935 static int
3936 ixl_get_switch_config(struct ixl_softc *sc)
3937 {
3938 	struct ixl_dmamem idm;
3939 	struct ixl_aq_desc iaq;
3940 	struct ixl_aq_switch_config *hdr;
3941 	struct ixl_aq_switch_config_element *elms, *elm;
3942 	unsigned int nelm;
3943 	int rv;
3944 
3945 	if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
3946 		printf("%s: unable to allocate switch config buffer\n",
3947 		    DEVNAME(sc));
3948 		return (-1);
3949 	}
3950 
3951 	memset(&iaq, 0, sizeof(iaq));
3952 	iaq.iaq_flags = htole16(IXL_AQ_BUF |
3953 	    (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3954 	iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG);
3955 	iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN);
3956 	ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3957 
3958 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3959 	    BUS_DMASYNC_PREREAD);
3960 
3961 	rv = ixl_atq_poll(sc, &iaq, 250);
3962 
3963 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3964 	    BUS_DMASYNC_POSTREAD);
3965 
3966 	if (rv != 0) {
3967 		printf("%s: GET SWITCH CONFIG timeout\n", DEVNAME(sc));
3968 		rv = -1;
3969 		goto done;
3970 	}
3971 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3972 		printf("%s: GET SWITCH CONFIG error\n", DEVNAME(sc));
3973 		rv = -1;
3974 		goto done;
3975 	}
3976 
3977 	hdr = IXL_DMA_KVA(&idm);
3978 	elms = (struct ixl_aq_switch_config_element *)(hdr + 1);
3979 
3980 	nelm = lemtoh16(&hdr->num_reported);
3981 	if (nelm < 1) {
3982 		printf("%s: no switch config available\n", DEVNAME(sc));
3983 		rv = -1;
3984 		goto done;
3985 	}
3986 
3987 #if 0
3988 	for (i = 0; i < nelm; i++) {
3989 		elm = &elms[i];
3990 
3991 		printf("%s: type %x revision %u seid %04x\n", DEVNAME(sc),
3992 		    elm->type, elm->revision, lemtoh16(&elm->seid));
3993 		printf("%s: uplink %04x downlink %04x\n", DEVNAME(sc),
3994 		    lemtoh16(&elm->uplink_seid),
3995 		    lemtoh16(&elm->downlink_seid));
3996 		printf("%s: conntype %x scheduler %04x extra %04x\n",
3997 		    DEVNAME(sc), elm->connection_type,
3998 		    lemtoh16(&elm->scheduler_id),
3999 		    lemtoh16(&elm->element_info));
4000 	}
4001 #endif
4002 
4003 	elm = &elms[0];
4004 
4005 	sc->sc_uplink_seid = elm->uplink_seid;
4006 	sc->sc_downlink_seid = elm->downlink_seid;
4007 	sc->sc_seid = elm->seid;
4008 
4009 	if ((sc->sc_uplink_seid == htole16(0)) !=
4010 	    (sc->sc_downlink_seid == htole16(0))) {
4011 		printf("%s: SEIDs are misconfigured\n", DEVNAME(sc));
4012 		rv = -1;
4013 		goto done;
4014 	}
4015 
4016 done:
4017 	ixl_dmamem_free(sc, &idm);
4018 	return (rv);
4019 }
4020 
4021 static int
4022 ixl_phy_mask_ints(struct ixl_softc *sc)
4023 {
4024 	struct ixl_aq_desc iaq;
4025 
4026 	memset(&iaq, 0, sizeof(iaq));
4027 	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK);
4028 	iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK &
4029 	    ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL |
4030 	      IXL_AQ_PHY_EV_MEDIA_NA));
4031 
4032 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4033 		printf("%s: SET PHY EVENT MASK timeout\n", DEVNAME(sc));
4034 		return (-1);
4035 	}
4036 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4037 		printf("%s: SET PHY EVENT MASK error\n", DEVNAME(sc));
4038 		return (-1);
4039 	}
4040 
4041 	return (0);
4042 }
4043 
4044 static int
4045 ixl_get_phy_abilities(struct ixl_softc *sc, struct ixl_dmamem *idm)
4046 {
4047 	struct ixl_aq_desc iaq;
4048 	int rv;
4049 
4050 	memset(&iaq, 0, sizeof(iaq));
4051 	iaq.iaq_flags = htole16(IXL_AQ_BUF |
4052 	    (IXL_DMA_LEN(idm) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4053 	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES);
4054 	htolem16(&iaq.iaq_datalen, IXL_DMA_LEN(idm));
4055 	iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT);
4056 	ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
4057 
4058 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4059 	    BUS_DMASYNC_PREREAD);
4060 
4061 	rv = ixl_atq_poll(sc, &iaq, 250);
4062 
4063 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4064 	    BUS_DMASYNC_POSTREAD);
4065 
4066 	if (rv != 0)
4067 		return (-1);
4068 
4069 	return (lemtoh16(&iaq.iaq_retval));
4070 }
4071 
4072 static int
4073 ixl_get_phy_types(struct ixl_softc *sc, uint64_t *phy_types_ptr)
4074 {
4075 	struct ixl_dmamem idm;
4076 	struct ixl_aq_phy_abilities *phy;
4077 	uint64_t phy_types;
4078 	int rv;
4079 
4080 	if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
4081 		printf("%s: unable to allocate phy abilities buffer\n",
4082 		    DEVNAME(sc));
4083 		return (-1);
4084 	}
4085 
4086 	rv = ixl_get_phy_abilities(sc, &idm);
4087 	switch (rv) {
4088 	case -1:
4089 		printf("%s: GET PHY ABILITIES timeout\n", DEVNAME(sc));
4090 		goto err;
4091 	case IXL_AQ_RC_OK:
4092 		break;
4093 	case IXL_AQ_RC_EIO:
4094 		/* API is too old to handle this command */
4095 		phy_types = 0;
4096 		goto done;
4097 	default:
4098 		printf("%s: GET PHY ABILITIES error %u\n", DEVNAME(sc), rv);
4099 		goto err;
4100 	}
4101 
4102 	phy = IXL_DMA_KVA(&idm);
4103 
4104 	phy_types = lemtoh32(&phy->phy_type);
4105 	phy_types |= (uint64_t)phy->phy_type_ext << 32;
4106 
4107 done:
4108 	*phy_types_ptr = phy_types;
4109 
4110 	rv = 0;
4111 
4112 err:
4113 	ixl_dmamem_free(sc, &idm);
4114 	return (rv);
4115 }
4116 
4117 /*
4118  * This returns -2 on software/driver failure, -1 for problems
4119  * talking to the hardware, or the SFF module type on success.
4120  */
4121 
4122 static int
4123 ixl_get_module_type(struct ixl_softc *sc)
4124 {
4125 	struct ixl_dmamem idm;
4126 	struct ixl_aq_phy_abilities *phy;
4127 	int rv;
4128 
4129 	if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0)
4130 		return (-2);
4131 
4132 	rv = ixl_get_phy_abilities(sc, &idm);
4133 	if (rv != IXL_AQ_RC_OK) {
4134 		rv = -1;
4135 		goto done;
4136 	}
4137 
4138 	phy = IXL_DMA_KVA(&idm);
4139 
4140 	rv = phy->module_type[0];
4141 
4142 done:
4143 	ixl_dmamem_free(sc, &idm);
4144 	return (rv);
4145 }
4146 
4147 static int
4148 ixl_get_link_status(struct ixl_softc *sc)
4149 {
4150 	struct ixl_aq_desc iaq;
4151 	struct ixl_aq_link_param *param;
4152 
4153 	memset(&iaq, 0, sizeof(iaq));
4154 	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
4155 	param = (struct ixl_aq_link_param *)iaq.iaq_param;
4156 	param->notify = IXL_AQ_LINK_NOTIFY;
4157 
4158 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4159 		printf("%s: GET LINK STATUS timeout\n", DEVNAME(sc));
4160 		return (-1);
4161 	}
4162 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4163 		printf("%s: GET LINK STATUS error\n", DEVNAME(sc));
4164 		return (0);
4165 	}
4166 
4167 	sc->sc_ac.ac_if.if_link_state = ixl_set_link_status(sc, &iaq);
4168 
4169 	return (0);
4170 }
4171 
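/*
 * SFF module EEPROM access.  SFP and QSFP modules page their EEPROM
 * differently, so reads go through a small open/get/close ops vector:
 * for SFP the page select lives in byte 127 of the EEPROM (0xa0)
 * device and has to be saved and restored around the read, while
 * QSFP passes the page through as the device address and has no
 * state to restore.
 */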
4172 struct ixl_sff_ops {
4173 	int (*open)(struct ixl_softc *sc, struct if_sffpage *, uint8_t *);
4174 	int (*get)(struct ixl_softc *sc, struct if_sffpage *, size_t);
4175 	int (*close)(struct ixl_softc *sc, struct if_sffpage *, uint8_t);
4176 };
4177 
4178 static int
4179 ixl_sfp_open(struct ixl_softc *sc, struct if_sffpage *sff, uint8_t *page)
4180 {
4181 	int error;
4182 
4183 	if (sff->sff_addr != IFSFF_ADDR_EEPROM)
4184 		return (0);
4185 
4186 	error = ixl_sff_get_byte(sc, IFSFF_ADDR_EEPROM, 127, page);
4187 	if (error != 0)
4188 		return (error);
4189 	if (*page == sff->sff_page)
4190 		return (0);
4191 	error = ixl_sff_set_byte(sc, IFSFF_ADDR_EEPROM, 127, sff->sff_page);
4192 	if (error != 0)
4193 		return (error);
4194 
4195 	return (0);
4196 }
4197 
4198 static int
4199 ixl_sfp_get(struct ixl_softc *sc, struct if_sffpage *sff, size_t i)
4200 {
4201 	return (ixl_sff_get_byte(sc, sff->sff_addr, i, &sff->sff_data[i]));
4202 }
4203 
4204 static int
4205 ixl_sfp_close(struct ixl_softc *sc, struct if_sffpage *sff, uint8_t page)
4206 {
4207 	int error;
4208 
4209 	if (sff->sff_addr != IFSFF_ADDR_EEPROM)
4210 		return (0);
4211 
4212 	if (page == sff->sff_page)
4213 		return (0);
4214 
4215 	error = ixl_sff_set_byte(sc, IFSFF_ADDR_EEPROM, 127, page);
4216 	if (error != 0)
4217 		return (error);
4218 
4219 	return (0);
4220 }
4221 
4222 static const struct ixl_sff_ops ixl_sfp_ops = {
4223 	ixl_sfp_open,
4224 	ixl_sfp_get,
4225 	ixl_sfp_close,
4226 };
4227 
4228 static int
4229 ixl_qsfp_open(struct ixl_softc *sc, struct if_sffpage *sff, uint8_t *page)
4230 {
4231 	if (sff->sff_addr != IFSFF_ADDR_EEPROM)
4232 		return (EIO);
4233 
4234 	return (0);
4235 }
4236 
4237 static int
4238 ixl_qsfp_get(struct ixl_softc *sc, struct if_sffpage *sff, size_t i)
4239 {
4240 	return (ixl_sff_get_byte(sc, sff->sff_page, i, &sff->sff_data[i]));
4241 }
4242 
4243 static int
4244 ixl_qsfp_close(struct ixl_softc *sc, struct if_sffpage *sff, uint8_t page)
4245 {
4246 	return (0);
4247 }
4248 
4249 static const struct ixl_sff_ops ixl_qsfp_ops = {
4250 	ixl_qsfp_open,
4251 	ixl_qsfp_get,
4252 	ixl_qsfp_close,
4253 };
4254 
4255 static int
4256 ixl_get_sffpage(struct ixl_softc *sc, struct if_sffpage *sff)
4257 {
4258 	const struct ixl_sff_ops *ops;
4259 	uint8_t page = 0;	/* not every open op sets this */
4260 	size_t i;
4261 	int error;
4262 
4263 	switch (ixl_get_module_type(sc)) {
4264 	case -2:
4265 		return (ENOMEM);
4266 	case -1:
4267 		return (ENXIO);
4268 	case IXL_SFF8024_ID_SFP:
4269 		ops = &ixl_sfp_ops;
4270 		break;
4271 	case IXL_SFF8024_ID_QSFP:
4272 	case IXL_SFF8024_ID_QSFP_PLUS:
4273 	case IXL_SFF8024_ID_QSFP28:
4274 		ops = &ixl_qsfp_ops;
4275 		break;
4276 	default:
4277 		return (EOPNOTSUPP);
4278 	}
4279 
4280 	error = (*ops->open)(sc, sff, &page);
4281 	if (error != 0)
4282 		return (error);
4283 
4284 	for (i = 0; i < sizeof(sff->sff_data); i++) {
4285 		error = (*ops->get)(sc, sff, i);
4286 		if (error != 0)
4287 			return (error);
4288 	}
4289 
4290 	error = (*ops->close)(sc, sff, page);
4291 
4292 	return (error);
4293 }
4294 
4295 static int
4296 ixl_sff_get_byte(struct ixl_softc *sc, uint8_t dev, uint32_t reg, uint8_t *p)
4297 {
4298 	struct ixl_atq iatq;
4299 	struct ixl_aq_desc *iaq;
4300 	struct ixl_aq_phy_reg_access *param;
4301 
4302 	memset(&iatq, 0, sizeof(iatq));
4303 	iaq = &iatq.iatq_desc;
4304 	iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_REGISTER);
4305 	param = (struct ixl_aq_phy_reg_access *)iaq->iaq_param;
4306 	param->phy_iface = IXL_AQ_PHY_IF_MODULE;
4307 	param->dev_addr = dev;
4308 	htolem32(&param->reg, reg);
4309 
4310 	ixl_atq_exec(sc, &iatq, "ixlsffget");
4311 
4312 	if (ISSET(sc->sc_ac.ac_if.if_flags, IFF_DEBUG)) {
4313 		printf("%s: %s(dev 0x%02x, reg 0x%02x) -> %04x\n",
4314 		    DEVNAME(sc), __func__,
4315 		    dev, reg, lemtoh16(&iaq->iaq_retval));
4316 	}
4317 
4318 	switch (iaq->iaq_retval) {
4319 	case htole16(IXL_AQ_RC_OK):
4320 		break;
4321 	case htole16(IXL_AQ_RC_EBUSY):
4322 		return (EBUSY);
4323 	case htole16(IXL_AQ_RC_ESRCH):
4324 		return (ENODEV);
4325 	case htole16(IXL_AQ_RC_EIO):
4326 	case htole16(IXL_AQ_RC_EINVAL):
4327 	default:
4328 		return (EIO);
4329 	}
4330 
4331 	*p = lemtoh32(&param->val);
4332 
4333 	return (0);
4334 }
4335 
4336 static int
4337 ixl_sff_set_byte(struct ixl_softc *sc, uint8_t dev, uint32_t reg, uint8_t v)
4338 {
4339 	struct ixl_atq iatq;
4340 	struct ixl_aq_desc *iaq;
4341 	struct ixl_aq_phy_reg_access *param;
4342 
4343 	memset(&iatq, 0, sizeof(iatq));
4344 	iaq = &iatq.iatq_desc;
4345 	iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_REGISTER);
4346 	param = (struct ixl_aq_phy_reg_access *)iaq->iaq_param;
4347 	param->phy_iface = IXL_AQ_PHY_IF_MODULE;
4348 	param->dev_addr = dev;
4349 	htolem32(&param->reg, reg);
4350 	htolem32(&param->val, v);
4351 
4352 	ixl_atq_exec(sc, &iatq, "ixlsffset");
4353 
4354 	if (ISSET(sc->sc_ac.ac_if.if_flags, IFF_DEBUG)) {
4355 		printf("%s: %s(dev 0x%02x, reg 0x%02x, val 0x%02x) -> %04x\n",
4356 		    DEVNAME(sc), __func__,
4357 		    dev, reg, v, lemtoh16(&iaq->iaq_retval));
4358 	}
4359 
4360 	switch (iaq->iaq_retval) {
4361 	case htole16(IXL_AQ_RC_OK):
4362 		break;
4363 	case htole16(IXL_AQ_RC_EBUSY):
4364 		return (EBUSY);
4365 	case htole16(IXL_AQ_RC_ESRCH):
4366 		return (ENODEV);
4367 	case htole16(IXL_AQ_RC_EIO):
4368 	case htole16(IXL_AQ_RC_EINVAL):
4369 	default:
4370 		return (EIO);
4371 	}
4372 
4373 	return (0);
4374 }
4375 
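/*
 * ixl_set_vsi() edits the copy of the VSI parameters that
 * ixl_get_vsi() left in sc_scratch and writes it back, flagging only
 * the queue map and VLAN sections as valid.
 */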
4376 static int
4377 ixl_get_vsi(struct ixl_softc *sc)
4378 {
4379 	struct ixl_dmamem *vsi = &sc->sc_scratch;
4380 	struct ixl_aq_desc iaq;
4381 	struct ixl_aq_vsi_param *param;
4382 	struct ixl_aq_vsi_reply *reply;
4383 	int rv;
4384 
4385 	/* grumble, vsi info isn't "known" at compile time */
4386 
4387 	memset(&iaq, 0, sizeof(iaq));
4388 	htolem16(&iaq.iaq_flags, IXL_AQ_BUF |
4389 	    (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4390 	iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS);
4391 	htolem16(&iaq.iaq_datalen, IXL_DMA_LEN(vsi));
4392 	ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4393 
4394 	param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4395 	param->uplink_seid = sc->sc_seid;
4396 
4397 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4398 	    BUS_DMASYNC_PREREAD);
4399 
4400 	rv = ixl_atq_poll(sc, &iaq, 250);
4401 
4402 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4403 	    BUS_DMASYNC_POSTREAD);
4404 
4405 	if (rv != 0) {
4406 		printf("%s: GET VSI timeout\n", DEVNAME(sc));
4407 		return (-1);
4408 	}
4409 
4410 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4411 		printf("%s: GET VSI error %u\n", DEVNAME(sc),
4412 		    lemtoh16(&iaq.iaq_retval));
4413 		return (-1);
4414 	}
4415 
4416 	reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param;
4417 	sc->sc_vsi_number = reply->vsi_number;
4418 
4419 	return (0);
4420 }
4421 
4422 static int
4423 ixl_set_vsi(struct ixl_softc *sc)
4424 {
4425 	struct ixl_dmamem *vsi = &sc->sc_scratch;
4426 	struct ixl_aq_desc iaq;
4427 	struct ixl_aq_vsi_param *param;
4428 	struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi);
4429 	int rv;
4430 
4431 	data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP |
4432 	    IXL_AQ_VSI_VALID_VLAN);
4433 
4434 	CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK));
4435 	SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG));
4436 	data->queue_mapping[0] = htole16(0);
4437 	data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) |
4438 	    (sc->sc_nqueues << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT));
4439 
4440 	CLR(data->port_vlan_flags,
4441 	    htole16(IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK));
4442 	SET(data->port_vlan_flags, htole16(IXL_AQ_VSI_PVLAN_MODE_ALL |
4443 	    IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH));
4444 
4445 	/* grumble, vsi info isn't "known" at compile time */
4446 
4447 	memset(&iaq, 0, sizeof(iaq));
4448 	htolem16(&iaq.iaq_flags, IXL_AQ_BUF | IXL_AQ_RD |
4449 	    (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4450 	iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS);
4451 	htolem16(&iaq.iaq_datalen, IXL_DMA_LEN(vsi));
4452 	ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4453 
4454 	param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4455 	param->uplink_seid = sc->sc_seid;
4456 
4457 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4458 	    BUS_DMASYNC_PREWRITE);
4459 
4460 	rv = ixl_atq_poll(sc, &iaq, 250);
4461 
4462 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4463 	    BUS_DMASYNC_POSTWRITE);
4464 
4465 	if (rv != 0) {
4466 		printf("%s: UPDATE VSI timeout\n", DEVNAME(sc));
4467 		return (-1);
4468 	}
4469 
4470 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4471 		printf("%s: UPDATE VSI error %u\n", DEVNAME(sc),
4472 		    lemtoh16(&iaq.iaq_retval));
4473 		return (-1);
4474 	}
4475 
4476 	return (0);
4477 }
4478 
4479 static const struct ixl_phy_type *
4480 ixl_search_phy_type(uint8_t phy_type)
4481 {
4482 	const struct ixl_phy_type *itype;
4483 	uint64_t mask;
4484 	unsigned int i;
4485 
4486 	if (phy_type >= 64)
4487 		return (NULL);
4488 
4489 	mask = 1ULL << phy_type;
4490 
4491 	for (i = 0; i < nitems(ixl_phy_type_map); i++) {
4492 		itype = &ixl_phy_type_map[i];
4493 
4494 		if (ISSET(itype->phy_type, mask))
4495 			return (itype);
4496 	}
4497 
4498 	return (NULL);
4499 }
4500 
4501 static uint64_t
4502 ixl_search_link_speed(uint8_t link_speed)
4503 {
4504 	const struct ixl_speed_type *type;
4505 	unsigned int i;
4506 
4507 	for (i = 0; i < nitems(ixl_speed_type_map); i++) {
4508 		type = &ixl_speed_type_map[i];
4509 
4510 		if (ISSET(type->dev_speed, link_speed))
4511 			return (type->net_speed);
4512 	}
4513 
4514 	return (0);
4515 }
4516 
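/*
 * Convert an AQ link status reply into ifmedia words, a link state
 * and a baudrate.  The hardware only reports full-duplex link, hence
 * LINK_STATE_FULL_DUPLEX as the only "up" state.
 */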
4517 static int
4518 ixl_set_link_status(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
4519 {
4520 	const struct ixl_aq_link_status *status;
4521 	const struct ixl_phy_type *itype;
4522 	uint64_t ifm_active = IFM_ETHER;
4523 	uint64_t ifm_status = IFM_AVALID;
4524 	int link_state = LINK_STATE_DOWN;
4525 	uint64_t baudrate = 0;
4526 
4527 	status = (const struct ixl_aq_link_status *)iaq->iaq_param;
4528 	if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION))
4529 		goto done;
4530 
4531 	ifm_active |= IFM_FDX;
4532 	ifm_status |= IFM_ACTIVE;
4533 	link_state = LINK_STATE_FULL_DUPLEX;
4534 
4535 	itype = ixl_search_phy_type(status->phy_type);
4536 	if (itype != NULL)
4537 		ifm_active |= itype->ifm_type;
4538 
4539 	if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX))
4540 		ifm_active |= IFM_ETH_TXPAUSE;
4541 	if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX))
4542 		ifm_active |= IFM_ETH_RXPAUSE;
4543 
4544 	baudrate = ixl_search_link_speed(status->link_speed);
4545 
4546 done:
4547 	mtx_enter(&sc->sc_link_state_mtx);
4548 	sc->sc_media_active = ifm_active;
4549 	sc->sc_media_status = ifm_status;
4550 	sc->sc_ac.ac_if.if_baudrate = baudrate;
4551 	mtx_leave(&sc->sc_link_state_mtx);
4552 
4553 	return (link_state);
4554 }
4555 
4556 static int
4557 ixl_restart_an(struct ixl_softc *sc)
4558 {
4559 	struct ixl_aq_desc iaq;
4560 
4561 	memset(&iaq, 0, sizeof(iaq));
4562 	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN);
4563 	iaq.iaq_param[0] =
4564 	    htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE);
4565 
4566 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4567 		printf("%s: RESTART AN timeout\n", DEVNAME(sc));
4568 		return (-1);
4569 	}
4570 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4571 		printf("%s: RESTART AN error\n", DEVNAME(sc));
4572 		return (-1);
4573 	}
4574 
4575 	return (0);
4576 }
4577 
4578 static int
4579 ixl_add_macvlan(struct ixl_softc *sc, uint8_t *macaddr, uint16_t vlan,
    uint16_t flags)
4580 {
4581 	struct ixl_aq_desc iaq;
4582 	struct ixl_aq_add_macvlan *param;
4583 	struct ixl_aq_add_macvlan_elem *elem;
4584 
4585 	memset(&iaq, 0, sizeof(iaq));
4586 	iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4587 	iaq.iaq_opcode = htole16(IXL_AQ_OP_ADD_MACVLAN);
4588 	iaq.iaq_datalen = htole16(sizeof(*elem));
4589 	ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4590 
4591 	param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param;
4592 	param->num_addrs = htole16(1);
4593 	param->seid0 = htole16(0x8000) | sc->sc_seid;
4594 	param->seid1 = 0;
4595 	param->seid2 = 0;
4596 
4597 	elem = IXL_DMA_KVA(&sc->sc_scratch);
4598 	memset(elem, 0, sizeof(*elem));
4599 	memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4600 	elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags);
4601 	elem->vlan = htole16(vlan);
4602 
4603 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4604 		printf("%s: ADD_MACVLAN timeout\n", DEVNAME(sc));
4605 		return (IXL_AQ_RC_EINVAL);
4606 	}
4607 
4608 	return (letoh16(iaq.iaq_retval));
4609 }
4610 
4611 static int
4612 ixl_remove_macvlan(struct ixl_softc *sc, uint8_t *macaddr, uint16_t vlan,
    uint16_t flags)
4613 {
4614 	struct ixl_aq_desc iaq;
4615 	struct ixl_aq_remove_macvlan *param;
4616 	struct ixl_aq_remove_macvlan_elem *elem;
4617 
4618 	memset(&iaq, 0, sizeof(iaq));
4619 	iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4620 	iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN);
4621 	iaq.iaq_datalen = htole16(sizeof(*elem));
4622 	ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4623 
4624 	param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param;
4625 	param->num_addrs = htole16(1);
4626 	param->seid0 = htole16(0x8000) | sc->sc_seid;
4627 	param->seid1 = 0;
4628 	param->seid2 = 0;
4629 
4630 	elem = IXL_DMA_KVA(&sc->sc_scratch);
4631 	memset(elem, 0, sizeof(*elem));
4632 	memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4633 	elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags);
4634 	elem->vlan = htole16(vlan);
4635 
4636 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4637 		printf("%s: REMOVE_MACVLAN timeout\n", DEVNAME(sc));
4638 		return (IXL_AQ_RC_EINVAL);
4639 	}
4640 
4641 	return (letoh16(iaq.iaq_retval));
4642 }
4643 
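/*
 * Host Memory Cache (HMC) setup.  The hardware keeps its queue
 * contexts in host memory: a backing region of IXL_HMC_PGSIZE pages
 * ("pd" pages) is carved up per object type, and a second table of
 * segment descriptors ("sd") points at the pd pages in groups of
 * IXL_HMC_PGS.  Each object's size is read back from its *OBJSZ
 * register as a power of two, so the total backing size is the sum
 * of roundup(size * count, IXL_HMC_ROUNDUP) over all object types,
 * rounded up to a whole page.
 */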
4644 static int
4645 ixl_hmc(struct ixl_softc *sc)
4646 {
4647 	struct {
4648 		uint32_t   count;
4649 		uint32_t   minsize;
4650 		bus_size_t maxcnt;
4651 		bus_size_t setoff;
4652 		bus_size_t setcnt;
4653 	} regs[] = {
4654 		{
4655 			0,
4656 			IXL_HMC_TXQ_MINSIZE,
4657 			I40E_GLHMC_LANTXOBJSZ,
4658 			I40E_GLHMC_LANTXBASE(sc->sc_pf_id),
4659 			I40E_GLHMC_LANTXCNT(sc->sc_pf_id),
4660 		},
4661 		{
4662 			0,
4663 			IXL_HMC_RXQ_MINSIZE,
4664 			I40E_GLHMC_LANRXOBJSZ,
4665 			I40E_GLHMC_LANRXBASE(sc->sc_pf_id),
4666 			I40E_GLHMC_LANRXCNT(sc->sc_pf_id),
4667 		},
4668 		{
4669 			0,
4670 			0,
4671 			I40E_GLHMC_FCOEMAX,
4672 			I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id),
4673 			I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id),
4674 		},
4675 		{
4676 			0,
4677 			0,
4678 			I40E_GLHMC_FCOEFMAX,
4679 			I40E_GLHMC_FCOEFBASE(sc->sc_pf_id),
4680 			I40E_GLHMC_FCOEFCNT(sc->sc_pf_id),
4681 		},
4682 	};
4683 	struct ixl_hmc_entry *e;
4684 	uint64_t size, dva;
4685 	uint8_t *kva;
4686 	uint64_t *sdpage;
4687 	unsigned int i;
4688 	int npages, tables;
4689 
4690 	CTASSERT(nitems(regs) <= nitems(sc->sc_hmc_entries));
4691 
4692 	regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count =
4693 	    ixl_rd(sc, I40E_GLHMC_LANQMAX);
4694 
4695 	size = 0;
4696 	for (i = 0; i < nitems(regs); i++) {
4697 		e = &sc->sc_hmc_entries[i];
4698 
4699 		e->hmc_count = regs[i].count;
4700 		e->hmc_size = 1U << ixl_rd(sc, regs[i].maxcnt);
4701 		e->hmc_base = size;
4702 
4703 		if ((e->hmc_size * 8) < regs[i].minsize) {
4704 			printf("%s: kernel hmc entry is too big\n",
4705 			    DEVNAME(sc));
4706 			return (-1);
4707 		}
4708 
4709 		size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP);
4710 	}
4711 	size = roundup(size, IXL_HMC_PGSIZE);
4712 	npages = size / IXL_HMC_PGSIZE;
4713 
4714 	tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ;
4715 
4716 	if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) {
4717 		printf("%s: unable to allocate hmc pd memory\n", DEVNAME(sc));
4718 		return (-1);
4719 	}
4720 
4721 	if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE,
4722 	    IXL_HMC_PGSIZE) != 0) {
4723 		printf("%s: unable to allocate hmc sd memory\n", DEVNAME(sc));
4724 		ixl_dmamem_free(sc, &sc->sc_hmc_pd);
4725 		return (-1);
4726 	}
4727 
4728 	kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
4729 	memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd));
4730 
4731 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
4732 	    0, IXL_DMA_LEN(&sc->sc_hmc_pd),
4733 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4734 
4735 	dva = IXL_DMA_DVA(&sc->sc_hmc_pd);
4736 	sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd);
4737 	for (i = 0; i < npages; i++) {
4738 		htolem64(sdpage++, dva | IXL_HMC_PDVALID);
4739 
4740 		dva += IXL_HMC_PGSIZE;
4741 	}
4742 
4743 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd),
4744 	    0, IXL_DMA_LEN(&sc->sc_hmc_sd),
4745 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4746 
4747 	dva = IXL_DMA_DVA(&sc->sc_hmc_sd);
4748 	for (i = 0; i < tables; i++) {
4749 		uint32_t count;
4750 
4751 		KASSERT(npages >= 0);
4752 
4753 		count = (npages > IXL_HMC_PGS) ? IXL_HMC_PGS : npages;
4754 
4755 		ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32);
4756 		ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva |
4757 		    (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
4758 		    (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT));
4759 		ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
4760 		ixl_wr(sc, I40E_PFHMC_SDCMD,
4761 		    (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i);
4762 
4763 		npages -= IXL_HMC_PGS;
4764 		dva += IXL_HMC_PGSIZE;
4765 	}
4766 
4767 	for (i = 0; i < nitems(regs); i++) {
4768 		e = &sc->sc_hmc_entries[i];
4769 
4770 		ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP);
4771 		ixl_wr(sc, regs[i].setcnt, e->hmc_count);
4772 	}
4773 
4774 	return (0);
4775 }
4776 
4777 static void
4778 ixl_hmc_free(struct ixl_softc *sc)
4779 {
4780 	ixl_dmamem_free(sc, &sc->sc_hmc_sd);
4781 	ixl_dmamem_free(sc, &sc->sc_hmc_pd);
4782 }
4783 
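/*
 * Pack a host-order shadow structure into the hardware's bit-packed
 * HMC object layout.  Each ixl_hmc_pack entry gives a source byte
 * offset plus the destination lsb/width in bits.  For example, a
 * field with lsb = 11 and width = 13 starts in destination byte
 * 11 / 8 = 1 at bit alignment 11 % 8 = 3, so the first iteration
 * merges 8 - 3 = 5 bits and the loop streams whole bytes after that.
 */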
4784 static void
4785 ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing,
4786     unsigned int npacking)
4787 {
4788 	uint8_t *dst = d;
4789 	const uint8_t *src = s;
4790 	unsigned int i;
4791 
4792 	for (i = 0; i < npacking; i++) {
4793 		const struct ixl_hmc_pack *pack = &packing[i];
4794 		unsigned int offset = pack->lsb / 8;
4795 		unsigned int align = pack->lsb % 8;
4796 		const uint8_t *in = src + pack->offset;
4797 		uint8_t *out = dst + offset;
4798 		int width = pack->width;
4799 		unsigned int inbits = 0;
4800 
4801 		if (align) {
4802 			inbits = (*in++) << align;
4803 			*out++ |= (inbits & 0xff);
4804 			inbits >>= 8;
4805 
4806 			width -= 8 - align;
4807 		}
4808 
4809 		while (width >= 8) {
4810 			inbits |= (*in++) << align;
4811 			*out++ = (inbits & 0xff);
4812 			inbits >>= 8;
4813 
4814 			width -= 8;
4815 		}
4816 
4817 		if (width > 0) {
4818 			inbits |= (*in) << align;
4819 			*out |= (inbits & ((1 << width) - 1));
4820 		}
4821 	}
4822 }
4823 
4824 static struct ixl_aq_buf *
4825 ixl_aqb_alloc(struct ixl_softc *sc)
4826 {
4827 	struct ixl_aq_buf *aqb;
4828 
4829 	aqb = malloc(sizeof(*aqb), M_DEVBUF, M_WAITOK);
4830 	if (aqb == NULL)
4831 		return (NULL);
4832 
4833 	aqb->aqb_data = dma_alloc(IXL_AQ_BUFLEN, PR_WAITOK);
4834 	if (aqb->aqb_data == NULL)
4835 		goto free;
4836 
4837 	if (bus_dmamap_create(sc->sc_dmat, IXL_AQ_BUFLEN, 1,
4838 	    IXL_AQ_BUFLEN, 0,
4839 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
4840 	    &aqb->aqb_map) != 0)
4841 		goto dma_free;
4842 
4843 	if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,
4844 	    IXL_AQ_BUFLEN, NULL, BUS_DMA_WAITOK) != 0)
4845 		goto destroy;
4846 
4847 	return (aqb);
4848 
4849 destroy:
4850 	bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
4851 dma_free:
4852 	dma_free(aqb->aqb_data, IXL_AQ_BUFLEN);
4853 free:
4854 	free(aqb, M_DEVBUF, sizeof(*aqb));
4855 
4856 	return (NULL);
4857 }
4858 
4859 static void
4860 ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb)
4861 {
4862 	bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map);
4863 	bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
4864 	dma_free(aqb->aqb_data, IXL_AQ_BUFLEN);
4865 	free(aqb, M_DEVBUF, sizeof(*aqb));
4866 }
4867 
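/*
 * (Re)populate the admin receive queue with IXL_AQ_BUFLEN DMA
 * buffers.  Idle buffers are recycled from sc_arq_idle before new
 * ones are allocated, and if_rxr accounting bounds how many slots
 * may be filled; the caller writes the tail register only when this
 * returns nonzero.
 */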
4868 static int
4869 ixl_arq_fill(struct ixl_softc *sc)
4870 {
4871 	struct ixl_aq_buf *aqb;
4872 	struct ixl_aq_desc *arq, *iaq;
4873 	unsigned int prod = sc->sc_arq_prod;
4874 	unsigned int n;
4875 	int post = 0;
4876 
4877 	n = if_rxr_get(&sc->sc_arq_ring, IXL_AQ_NUM);
4878 	arq = IXL_DMA_KVA(&sc->sc_arq);
4879 
4880 	while (n > 0) {
4881 		aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle);
4882 		if (aqb != NULL)
4883 			SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_idle, aqb_entry);
4884 		else if ((aqb = ixl_aqb_alloc(sc)) == NULL)
4885 			break;
4886 
4887 		memset(aqb->aqb_data, 0, IXL_AQ_BUFLEN);
4888 
4889 		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
4890 		    BUS_DMASYNC_PREREAD);
4891 
4892 		iaq = &arq[prod];
4893 		iaq->iaq_flags = htole16(IXL_AQ_BUF |
4894 		    (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4895 		iaq->iaq_opcode = 0;
4896 		iaq->iaq_datalen = htole16(IXL_AQ_BUFLEN);
4897 		iaq->iaq_retval = 0;
4898 		iaq->iaq_cookie = 0;
4899 		iaq->iaq_param[0] = 0;
4900 		iaq->iaq_param[1] = 0;
4901 		ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);
4902 
4903 		SIMPLEQ_INSERT_TAIL(&sc->sc_arq_live, aqb, aqb_entry);
4904 
4905 		prod++;
4906 		prod &= IXL_AQ_MASK;
4907 
4908 		post = 1;
4909 
4910 		n--;
4911 	}
4912 
4913 	if_rxr_put(&sc->sc_arq_ring, n);
4914 	sc->sc_arq_prod = prod;
4915 
4916 	return (post);
4917 }
4918 
4919 static void
4920 ixl_arq_unfill(struct ixl_softc *sc)
4921 {
4922 	struct ixl_aq_buf *aqb;
4923 
4924 	while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_live)) != NULL) {
4925 		SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_live, aqb_entry);
4926 
4927 		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
4928 		    BUS_DMASYNC_POSTREAD);
4929 		ixl_aqb_free(sc, aqb);
4930 	}
4931 }
4932 
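/*
 * Quiesce the function before reset: mask all interrupts, park the
 * interrupt linked lists at EOL, pre-warn the scheduler of the Tx
 * queue disables, then disable every queue pair assigned to this PF.
 */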
4933 static void
4934 ixl_clear_hw(struct ixl_softc *sc)
4935 {
4936 	uint32_t num_queues, base_queue;
4937 	uint32_t num_pf_int;
4938 	uint32_t num_vf_int;
4939 	uint32_t num_vfs;
4940 	uint32_t i, j;
4941 	uint32_t val;
4942 
4943 	/* get number of interrupts, queues, and vfs */
4944 	val = ixl_rd(sc, I40E_GLPCI_CNF2);
4945 	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
4946 	    I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
4947 	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
4948 	    I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
4949 
4950 	val = ixl_rd(sc, I40E_PFLAN_QALLOC);
4951 	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
4952 	    I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
4953 	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
4954 	    I40E_PFLAN_QALLOC_LASTQ_SHIFT;
4955 	if (val & I40E_PFLAN_QALLOC_VALID_MASK)
4956 		num_queues = (j - base_queue) + 1;
4957 	else
4958 		num_queues = 0;
4959 
4960 	val = ixl_rd(sc, I40E_PF_VT_PFALLOC);
4961 	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
4962 	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
4963 	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
4964 	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
4965 	if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
4966 		num_vfs = (j - i) + 1;
4967 	else
4968 		num_vfs = 0;
4969 
4970 	/* stop all the interrupts */
4971 	ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
4972 	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
4973 	for (i = 0; i < num_pf_int - 2; i++)
4974 		ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val);
4975 
4976 	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
4977 	val = I40E_QUEUE_TYPE_EOL << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4978 	ixl_wr(sc, I40E_PFINT_LNKLST0, val);
4979 	for (i = 0; i < num_pf_int - 2; i++)
4980 		ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val);
4981 	val = I40E_QUEUE_TYPE_EOL << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4982 	for (i = 0; i < num_vfs; i++)
4983 		ixl_wr(sc, I40E_VPINT_LNKLST0(i), val);
4984 	for (i = 0; i < num_vf_int - 2; i++)
4985 		ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val);
4986 
4987 	/* warn the HW of the coming Tx disables */
4988 	for (i = 0; i < num_queues; i++) {
4989 		uint32_t abs_queue_idx = base_queue + i;
4990 		uint32_t reg_block = 0;
4991 
4992 		if (abs_queue_idx >= 128) {
4993 			reg_block = abs_queue_idx / 128;
4994 			abs_queue_idx %= 128;
4995 		}
4996 
4997 		val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block));
4998 		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
4999 		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
5000 		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
5001 
5002 		ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
5003 	}
5004 	delaymsec(400);
5005 
5006 	/* stop all the queues */
5007 	for (i = 0; i < num_queues; i++) {
5008 		ixl_wr(sc, I40E_QINT_TQCTL(i), 0);
5009 		ixl_wr(sc, I40E_QTX_ENA(i), 0);
5010 		ixl_wr(sc, I40E_QINT_RQCTL(i), 0);
5011 		ixl_wr(sc, I40E_QRX_ENA(i), 0);
5012 	}
5013 
5014 	/* short wait for all queue disables to settle */
5015 	delaymsec(50);
5016 }
5017 
5018 static int
5019 ixl_pf_reset(struct ixl_softc *sc)
5020 {
5021 	uint32_t cnt = 0;
5022 	uint32_t cnt1 = 0;
5023 	uint32_t reg = 0;
5024 	uint32_t grst_del;
5025 
5026 	/*
5027 	 * Poll for Global Reset steady state in case of recent GRST.
5028 	 * The grst delay value is in 100ms units, and we'll wait a
5029 	 * The grst delay value is in 100ms units; wait ten counts
5030 	 * longer (one second) to be sure we don't just miss the end.
5031 	grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL);
5032 	grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK;
5033 	grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
5034 	grst_del += 10;
5035 
5036 	for (cnt = 0; cnt < grst_del; cnt++) {
5037 		reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
5038 		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
5039 			break;
5040 		delaymsec(100);
5041 	}
5042 	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
5043 		printf(", Global reset polling failed to complete\n");
5044 		return (-1);
5045 	}
5046 
5047 	/* Now wait for the FW to be ready */
5048 	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
5049 		reg = ixl_rd(sc, I40E_GLNVM_ULD);
5050 		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5051 		    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
5052 		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5053 		    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))
5054 			break;
5055 
5056 		delaymsec(10);
5057 	}
5058 	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5059 	    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
5060 		printf(", wait for FW Reset complete timed out "
5061 		    "(I40E_GLNVM_ULD = 0x%x)\n", reg);
5062 		return (-1);
5063 	}
5064 
5065 	/*
5066 	 * If there was a Global Reset in progress when we got here,
5067 	 * we don't need to do the PF Reset
5068 	 */
5069 	if (cnt == 0) {
5070 		reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5071 		ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK);
5072 		for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
5073 			reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5074 			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
5075 				break;
5076 			delaymsec(1);
5077 		}
5078 		if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
5079 			printf(", PF reset polling failed to complete "
5080 			    "(I40E_PFGEN_CTRL = 0x%x)\n", reg);
5081 			return (-1);
5082 		}
5083 	}
5084 
5085 	return (0);
5086 }
5087 
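/*
 * On the 710 family, certain receive control registers are not safe
 * to access with plain MMIO while the firmware is active, so reads
 * and writes are proxied through the RX_CTL_READ/RX_CTL_WRITE admin
 * commands; the 722 variants below use direct register access.
 */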
5088 static uint32_t
5089 ixl_710_rd_ctl(struct ixl_softc *sc, uint32_t r)
5090 {
5091 	struct ixl_atq iatq;
5092 	struct ixl_aq_desc *iaq;
5093 	uint16_t retval;
5094 
5095 	memset(&iatq, 0, sizeof(iatq));
5096 	iaq = &iatq.iatq_desc;
5097 	iaq->iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_READ);
5098 	htolem32(&iaq->iaq_param[1], r);
5099 
5100 	ixl_atq_exec(sc, &iatq, "ixl710rd");
5101 
5102 	retval = lemtoh16(&iaq->iaq_retval);
5103 	if (retval != IXL_AQ_RC_OK) {
5104 		printf("%s: %s failed (%u)\n", DEVNAME(sc), __func__, retval);
5105 		return (~0U);
5106 	}
5107 
5108 	return (lemtoh32(&iaq->iaq_param[3]));
5109 }
5110 
5111 static void
5112 ixl_710_wr_ctl(struct ixl_softc *sc, uint32_t r, uint32_t v)
5113 {
5114 	struct ixl_atq iatq;
5115 	struct ixl_aq_desc *iaq;
5116 	uint16_t retval;
5117 
5118 	memset(&iatq, 0, sizeof(iatq));
5119 	iaq = &iatq.iatq_desc;
5120 	iaq->iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_WRITE);
5121 	htolem32(&iaq->iaq_param[1], r);
5122 	htolem32(&iaq->iaq_param[3], v);
5123 
5124 	ixl_atq_exec(sc, &iatq, "ixl710wr");
5125 
5126 	retval = lemtoh16(&iaq->iaq_retval);
5127 	if (retval != IXL_AQ_RC_OK) {
5128 		printf("%s: %s %08x=%08x failed (%u)\n",
5129 		    DEVNAME(sc), __func__, r, v, retval);
5130 	}
5131 }
5132 
5133 static int
5134 ixl_710_set_rss_key(struct ixl_softc *sc, const struct ixl_rss_key *rsskey)
5135 {
5136 	unsigned int i;
5137 
5138 	for (i = 0; i < nitems(rsskey->key); i++)
5139 		ixl_wr_ctl(sc, I40E_PFQF_HKEY(i), rsskey->key[i]);
5140 
5141 	return (0);
5142 }
5143 
5144 static int
5145 ixl_710_set_rss_lut(struct ixl_softc *sc, const struct ixl_rss_lut_128 *lut)
5146 {
5147 	unsigned int i;
5148 
5149 	for (i = 0; i < nitems(lut->entries); i++)
5150 		ixl_wr(sc, I40E_PFQF_HLUT(i), lut->entries[i]);
5151 
5152 	return (0);
5153 }
5154 
5155 static uint32_t
5156 ixl_722_rd_ctl(struct ixl_softc *sc, uint32_t r)
5157 {
5158 	return (ixl_rd(sc, r));
5159 }
5160 
5161 static void
5162 ixl_722_wr_ctl(struct ixl_softc *sc, uint32_t r, uint32_t v)
5163 {
5164 	ixl_wr(sc, r, v);
5165 }
5166 
5167 static int
5168 ixl_722_set_rss_key(struct ixl_softc *sc, const struct ixl_rss_key *rsskey)
5169 {
5170 	/* XXX */
5171 
5172 	return (0);
5173 }
5174 
5175 static int
5176 ixl_722_set_rss_lut(struct ixl_softc *sc, const struct ixl_rss_lut_128 *lut)
5177 {
5178 	/* XXX */
5179 
5180 	return (0);
5181 }
5182 
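/*
 * Single-segment DMA memory helper: create a map, allocate and map
 * one contiguous segment, then load it, unwinding in reverse order
 * on failure.  Returns 0 on success and 1 on any failure.
 */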
5183 static int
5184 ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm,
5185     bus_size_t size, u_int align)
5186 {
5187 	ixm->ixm_size = size;
5188 
5189 	if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
5190 	    ixm->ixm_size, 0,
5191 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
5192 	    &ixm->ixm_map) != 0)
5193 		return (1);
5194 	if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
5195 	    align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
5196 	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
5197 		goto destroy;
5198 	if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
5199 	    ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
5200 		goto free;
5201 	if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
5202 	    ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
5203 		goto unmap;
5204 
5205 	return (0);
5206 unmap:
5207 	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5208 free:
5209 	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5210 destroy:
5211 	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5212 	return (1);
5213 }
5214 
5215 static void
5216 ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm)
5217 {
5218 	bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
5219 	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5220 	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5221 	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5222 }
5223 
5224 #if NKSTAT > 0
5225 
5226 CTASSERT(KSTAT_KV_U_NONE <= 0xffU);
5227 CTASSERT(KSTAT_KV_U_PACKETS <= 0xffU);
5228 CTASSERT(KSTAT_KV_U_BYTES <= 0xffU);
5229 
5230 struct ixl_counter {
5231 	const char		*c_name;
5232 	uint32_t		 c_base;
5233 	uint8_t			 c_width;
5234 	uint8_t			 c_type;
5235 };
5236 
5237 const struct ixl_counter ixl_port_counters[] = {
5238 	/* GORC */
5239 	{ "rx bytes",		0x00300000, 48, KSTAT_KV_U_BYTES },
5240 	/* MLFC */
5241 	{ "mac local errs",	0x00300020, 32, KSTAT_KV_U_NONE },
5242 	/* MRFC */
5243 	{ "mac remote errs",	0x00300040, 32, KSTAT_KV_U_NONE },
5244 	/* MSPDC */
5245 	{ "mac short",		0x00300060, 32, KSTAT_KV_U_PACKETS },
5246 	/* CRCERRS */
5247 	{ "crc errs",		0x00300080, 32, KSTAT_KV_U_PACKETS },
5248 	/* RLEC */
5249 	{ "rx len errs",	0x003000a0, 32, KSTAT_KV_U_PACKETS },
5250 	/* ERRBC */
5251 	{ "byte errs",		0x003000c0, 32, KSTAT_KV_U_PACKETS },
5252 	/* ILLERRC */
5253 	{ "illegal byte",	0x003000d0, 32, KSTAT_KV_U_PACKETS },
5254 	/* RUC */
5255 	{ "rx undersize",	0x00300100, 32, KSTAT_KV_U_PACKETS },
5256 	/* ROC */
5257 	{ "rx oversize",	0x00300120, 32, KSTAT_KV_U_PACKETS },
5258 	/* LXONRXCNT */
5259 	{ "rx link xon",	0x00300140, 32, KSTAT_KV_U_PACKETS },
5260 	/* LXOFFRXCNT */
5261 	{ "rx link xoff",	0x00300160, 32, KSTAT_KV_U_PACKETS },
5262 
5263 	/* Priority XON Received Count */
5264 	/* Priority XOFF Received Count */
5265 	/* Priority XON to XOFF Count */
5266 
5267 	/* PRC64 */
5268 	{ "rx 64B",		0x00300480, 48, KSTAT_KV_U_PACKETS },
5269 	/* PRC127 */
5270 	{ "rx 65-127B",		0x003004A0, 48, KSTAT_KV_U_PACKETS },
5271 	/* PRC255 */
5272 	{ "rx 128-255B",	0x003004C0, 48, KSTAT_KV_U_PACKETS },
5273 	/* PRC511 */
5274 	{ "rx 256-511B",	0x003004E0, 48, KSTAT_KV_U_PACKETS },
5275 	/* PRC1023 */
5276 	{ "rx 512-1023B",	0x00300500, 48, KSTAT_KV_U_PACKETS },
5277 	/* PRC1522 */
5278 	{ "rx 1024-1522B",	0x00300520, 48, KSTAT_KV_U_PACKETS },
5279 	/* PRC9522 */
5280 	{ "rx 1523-9522B",	0x00300540, 48, KSTAT_KV_U_PACKETS },
5281 	/* ROC */
5282 	{ "rx fragment",	0x00300560, 32, KSTAT_KV_U_PACKETS },
5283 	/* RJC */
5284 	{ "rx jabber",		0x00300580, 32, KSTAT_KV_U_PACKETS },
5285 	/* UPRC */
5286 	{ "rx ucasts",		0x003005a0, 48, KSTAT_KV_U_PACKETS },
5287 	/* MPRC */
5288 	{ "rx mcasts",		0x003005c0, 48, KSTAT_KV_U_PACKETS },
5289 	/* BPRC */
5290 	{ "rx bcasts",		0x003005e0, 48, KSTAT_KV_U_PACKETS },
5291 	/* RDPC */
5292 	{ "rx discards",	0x00300600, 32, KSTAT_KV_U_PACKETS },
5293 	/* LDPC */
5294 	{ "rx lo discards",	0x00300620, 32, KSTAT_KV_U_PACKETS },
5295 	/* RUPP */
5296 	{ "rx no dest",		0x00300660, 32, KSTAT_KV_U_PACKETS },
5297 
5298 	/* GOTC */
5299 	{ "tx bytes",		0x00300680, 48, KSTAT_KV_U_BYTES },
5300 	/* PTC64 */
5301 	{ "tx 64B",		0x003006A0, 48, KSTAT_KV_U_PACKETS },
5302 	/* PTC127 */
5303 	{ "tx 65-127B",		0x003006C0, 48, KSTAT_KV_U_PACKETS },
5304 	/* PTC255 */
5305 	{ "tx 128-255B",	0x003006E0, 48, KSTAT_KV_U_PACKETS },
5306 	/* PTC511 */
5307 	{ "tx 256-511B",	0x00300700, 48, KSTAT_KV_U_PACKETS },
5308 	/* PTC1023 */
5309 	{ "tx 512-1023B",	0x00300720, 48, KSTAT_KV_U_PACKETS },
5310 	/* PTC1522 */
5311 	{ "tx 1024-1522B",	0x00300740, 48, KSTAT_KV_U_PACKETS },
5312 	/* PTC9522 */
5313 	{ "tx 1523-9522B",	0x00300760, 48, KSTAT_KV_U_PACKETS },
5314 
5315 	/* Priority XON Transmitted Count */
5316 	/* Priority XOFF Transmitted Count */
5317 
5318 	/* LXONTXC */
5319 	{ "tx link xon",	0x00300980, 48, KSTAT_KV_U_PACKETS },
5320 	/* LXOFFTXC */
5321 	{ "tx link xoff",	0x003009a0, 48, KSTAT_KV_U_PACKETS },
5322 	/* UPTC */
5323 	{ "tx ucasts",		0x003009c0, 48, KSTAT_KV_U_PACKETS },
5324 	/* MPTC */
5325 	{ "tx mcasts",		0x003009e0, 48, KSTAT_KV_U_PACKETS },
5326 	/* BPTC */
5327 	{ "tx bcasts",		0x00300a00, 48, KSTAT_KV_U_PACKETS },
5328 	/* TDOLD */
5329 	{ "tx link down",	0x00300a20, 48, KSTAT_KV_U_PACKETS },
5330 };
5331 
5332 const struct ixl_counter ixl_vsi_counters[] = {
5333 	/* VSI RDPC */
5334 	{ "rx discards",	0x00310000, 32, KSTAT_KV_U_PACKETS },
5335 	/* VSI GOTC */
5336 	{ "tx bytes",		0x00328000, 48, KSTAT_KV_U_BYTES },
5337 	/* VSI UPTC */
5338 	{ "tx ucasts",		0x0033c000, 48, KSTAT_KV_U_PACKETS },
5339 	/* VSI MPTC */
5340 	{ "tx mcasts",		0x0033cc00, 48, KSTAT_KV_U_PACKETS },
5341 	/* VSI BPTC */
5342 	{ "tx bcasts",		0x0033d800, 48, KSTAT_KV_U_PACKETS },
5343 	/* VSI TEPC */
5344 	{ "tx errs",		0x00344000, 48, KSTAT_KV_U_PACKETS },
5345 	/* VSI TDPC */
5346 	{ "tx discards",	0x00348000, 48, KSTAT_KV_U_PACKETS },
5347 	/* VSI GORC */
5348 	{ "rx bytes",		0x00358000, 48, KSTAT_KV_U_BYTES },
5349 	/* VSI UPRC */
5350 	{ "rx ucasts",		0x0036c000, 48, KSTAT_KV_U_PACKETS },
5351 	/* VSI MPRC */
5352 	{ "rx mcasts",		0x0036cc00, 48, KSTAT_KV_U_PACKETS },
5353 	/* VSI BPRC */
5354 	{ "rx bcasts",		0x0036d800, 48, KSTAT_KV_U_PACKETS },
5355 	/* VSI RUPP */
5356 	{ "rx noproto",		0x0036e400, 32, KSTAT_KV_U_PACKETS },
5357 };
5358 
5359 struct ixl_counter_state {
5360 	const struct ixl_counter
5361 				*counters;
5362 	uint64_t		*values;
5363 	size_t			 n;
5364 	uint32_t		 index;
5365 	unsigned int		 gen;
5366 };
5367 
5368 static void
5369 ixl_rd_counters(struct ixl_softc *sc, const struct ixl_counter_state *state,
5370     uint64_t *vs)
5371 {
5372 	const struct ixl_counter *c;
5373 	bus_addr_t r;
5374 	uint64_t v;
5375 	size_t i;
5376 
5377 	for (i = 0; i < state->n; i++) {
5378 		c = &state->counters[i];
5379 
5380 		r = c->c_base + (state->index * 8);
5381 
5382 		if (c->c_width == 32)
5383 			v = bus_space_read_4(sc->sc_memt, sc->sc_memh, r);
5384 		else
5385 			v = bus_space_read_8(sc->sc_memt, sc->sc_memh, r);
5386 
5387 		vs[i] = v;
5388 	}
5389 }
5390 
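/*
 * The hardware counters are free-running and only 32 or 48 bits
 * wide, so each read keeps the previous snapshot (two generations in
 * state->values) and accumulates the difference, adding 2^width when
 * a counter has wrapped (new < old).  This assumes at most one wrap
 * between the 4-second polls from ixl_kstat_tick().
 */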
5391 static int
5392 ixl_kstat_read(struct kstat *ks)
5393 {
5394 	struct ixl_softc *sc = ks->ks_softc;
5395 	struct kstat_kv *kvs = ks->ks_data;
5396 	struct ixl_counter_state *state = ks->ks_ptr;
5397 	unsigned int gen = (state->gen++) & 1;
5398 	uint64_t *ovs = state->values + (gen * state->n);
5399 	uint64_t *nvs = state->values + (!gen * state->n);
5400 	size_t i;
5401 
5402 	ixl_rd_counters(sc, state, nvs);
5403 	getnanouptime(&ks->ks_updated);
5404 
5405 	for (i = 0; i < state->n; i++) {
5406 		const struct ixl_counter *c = &state->counters[i];
5407 		uint64_t n = nvs[i], o = ovs[i];
5408 
5409 		if (c->c_width < 64) {
5410 			if (n < o)
5411 				n += (1ULL << c->c_width);
5412 		}
5413 
5414 		kstat_kv_u64(&kvs[i]) += (n - o);
5415 	}
5416 
5417 	return (0);
5418 }
5419 
5420 static void
5421 ixl_kstat_tick(void *arg)
5422 {
5423 	struct ixl_softc *sc = arg;
5424 
5425 	timeout_add_sec(&sc->sc_kstat_tmo, 4);
5426 
5427 	mtx_enter(&sc->sc_kstat_mtx);
5428 
5429 	ixl_kstat_read(sc->sc_port_kstat);
5430 	ixl_kstat_read(sc->sc_vsi_kstat);
5431 
5432 	mtx_leave(&sc->sc_kstat_mtx);
5433 }
5434 
5435 static struct kstat *
5436 ixl_kstat_create(struct ixl_softc *sc, const char *name,
5437     const struct ixl_counter *counters, size_t n, uint32_t index)
5438 {
5439 	struct kstat *ks;
5440 	struct kstat_kv *kvs;
5441 	struct ixl_counter_state *state;
5442 	const struct ixl_counter *c;
5443 	unsigned int i;
5444 
5445 	ks = kstat_create(DEVNAME(sc), 0, name, 0, KSTAT_T_KV, 0);
5446 	if (ks == NULL) {
5447 		/* unable to create kstats */
5448 		return (NULL);
5449 	}
5450 
5451 	kvs = mallocarray(n, sizeof(*kvs), M_DEVBUF, M_WAITOK|M_ZERO);
5452 	for (i = 0; i < n; i++) {
5453 		c = &counters[i];
5454 
5455 		kstat_kv_unit_init(&kvs[i], c->c_name,
5456 		    KSTAT_KV_T_COUNTER64, c->c_type);
5457 	}
5458 
5459 	ks->ks_data = kvs;
5460 	ks->ks_datalen = n * sizeof(*kvs);
5461 	ks->ks_read = ixl_kstat_read;
5462 
5463 	state = malloc(sizeof(*state), M_DEVBUF, M_WAITOK|M_ZERO);
5464 	state->counters = counters;
5465 	state->n = n;
5466 	state->values = mallocarray(n * 2, sizeof(*state->values),
5467 	    M_DEVBUF, M_WAITOK|M_ZERO);
5468 	state->index = index;
5469 	ks->ks_ptr = state;
5470 
5471 	kstat_set_mutex(ks, &sc->sc_kstat_mtx);
5472 	ks->ks_softc = sc;
5473 	kstat_install(ks);
5474 
5475 	/* fetch a baseline */
5476 	ixl_rd_counters(sc, state, state->values);
5477 
5478 	return (ks);
5479 }
5480 
5481 static void
5482 ixl_kstat_attach(struct ixl_softc *sc)
5483 {
5484 	mtx_init(&sc->sc_kstat_mtx, IPL_SOFTCLOCK);
5485 	timeout_set(&sc->sc_kstat_tmo, ixl_kstat_tick, sc);
5486 
5487 	sc->sc_port_kstat = ixl_kstat_create(sc, "ixl-port",
5488 	    ixl_port_counters, nitems(ixl_port_counters), sc->sc_port);
5489 	sc->sc_vsi_kstat = ixl_kstat_create(sc, "ixl-vsi",
5490 	    ixl_vsi_counters, nitems(ixl_vsi_counters),
5491 	    lemtoh16(&sc->sc_vsi_number));
5492 
5493 	/* ixl counters go up even when the interface is down */
5494 	timeout_add_sec(&sc->sc_kstat_tmo, 4);
5495 }
5496 
5497 #endif /* NKSTAT > 0 */
5498