xref: /openbsd/sys/dev/pci/if_ixl.c (revision 73471bf0)
1 /*	$OpenBSD: if_ixl.c,v 1.77 2021/11/27 16:25:40 deraadt Exp $ */
2 
3 /*
4  * Copyright (c) 2013-2015, Intel Corporation
5  * All rights reserved.
6 
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions are met:
9  *
10  *  1. Redistributions of source code must retain the above copyright notice,
11  *     this list of conditions and the following disclaimer.
12  *
13  *  2. Redistributions in binary form must reproduce the above copyright
14  *     notice, this list of conditions and the following disclaimer in the
15  *     documentation and/or other materials provided with the distribution.
16  *
17  *  3. Neither the name of the Intel Corporation nor the names of its
18  *     contributors may be used to endorse or promote products derived from
19  *     this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * Copyright (c) 2016,2017 David Gwynne <dlg@openbsd.org>
36  *
37  * Permission to use, copy, modify, and distribute this software for any
38  * purpose with or without fee is hereby granted, provided that the above
39  * copyright notice and this permission notice appear in all copies.
40  *
41  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48  */
49 
50 #include "bpfilter.h"
51 #include "kstat.h"
52 
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/proc.h>
56 #include <sys/sockio.h>
57 #include <sys/mbuf.h>
58 #include <sys/kernel.h>
59 #include <sys/socket.h>
60 #include <sys/device.h>
61 #include <sys/pool.h>
62 #include <sys/queue.h>
63 #include <sys/timeout.h>
64 #include <sys/task.h>
65 #include <sys/syslog.h>
66 #include <sys/intrmap.h>
67 
68 #include <machine/bus.h>
69 #include <machine/intr.h>
70 
71 #include <net/if.h>
72 #include <net/if_dl.h>
73 #include <net/if_media.h>
74 #include <net/toeplitz.h>
75 
76 #if NBPFILTER > 0
77 #include <net/bpf.h>
78 #endif
79 
80 #if NKSTAT > 0
81 #include <sys/kstat.h>
82 #endif
83 
84 #include <netinet/in.h>
85 #include <netinet/if_ether.h>
86 
87 #include <dev/pci/pcireg.h>
88 #include <dev/pci/pcivar.h>
89 #include <dev/pci/pcidevs.h>
90 
91 #ifdef __sparc64__
92 #include <dev/ofw/openfirm.h>
93 #endif
94 
95 #ifndef CACHE_LINE_SIZE
96 #define CACHE_LINE_SIZE 64
97 #endif
98 
99 #define IXL_MAX_VECTORS			8 /* XXX this is pretty arbitrary */
100 
101 #define I40E_MASK(mask, shift)		((mask) << (shift))
102 #define I40E_PF_RESET_WAIT_COUNT	200
103 #define I40E_AQ_LARGE_BUF		512
104 
105 /* bitfields for Tx queue mapping in QTX_CTL */
106 #define I40E_QTX_CTL_VF_QUEUE		0x0
107 #define I40E_QTX_CTL_VM_QUEUE		0x1
108 #define I40E_QTX_CTL_PF_QUEUE		0x2
109 
110 #define I40E_QUEUE_TYPE_EOL		0x7ff
112 
113 #define I40E_QUEUE_TYPE_RX		0x0
114 #define I40E_QUEUE_TYPE_TX		0x1
115 #define I40E_QUEUE_TYPE_PE_CEQ		0x2
116 #define I40E_QUEUE_TYPE_UNKNOWN		0x3
117 
118 #define I40E_ITR_INDEX_RX		0x0
119 #define I40E_ITR_INDEX_TX		0x1
120 #define I40E_ITR_INDEX_OTHER		0x2
121 #define I40E_ITR_INDEX_NONE		0x3
122 
123 #include <dev/pci/if_ixlreg.h>
124 
125 #define I40E_INTR_NOTX_QUEUE		0
126 #define I40E_INTR_NOTX_INTR		0
127 #define I40E_INTR_NOTX_RX_QUEUE		0
128 #define I40E_INTR_NOTX_TX_QUEUE		1
129 #define I40E_INTR_NOTX_RX_MASK		I40E_PFINT_ICR0_QUEUE_0_MASK
130 #define I40E_INTR_NOTX_TX_MASK		I40E_PFINT_ICR0_QUEUE_1_MASK
131 
132 struct ixl_aq_desc {
133 	uint16_t	iaq_flags;
134 #define	IXL_AQ_DD		(1U << 0)
135 #define	IXL_AQ_CMP		(1U << 1)
136 #define IXL_AQ_ERR		(1U << 2)
137 #define IXL_AQ_VFE		(1U << 3)
138 #define IXL_AQ_LB		(1U << 9)
139 #define IXL_AQ_RD		(1U << 10)
140 #define IXL_AQ_VFC		(1U << 11)
141 #define IXL_AQ_BUF		(1U << 12)
142 #define IXL_AQ_SI		(1U << 13)
143 #define IXL_AQ_EI		(1U << 14)
144 #define IXL_AQ_FE		(1U << 15)
145 
146 #define IXL_AQ_FLAGS_FMT	"\020" "\020FE" "\017EI" "\016SI" "\015BUF" \
147 				    "\014VFC" "\013RD" "\012LB" "\004VFE" \
148 				    "\003ERR" "\002CMP" "\001DD"
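/*
 * IXL_AQ_FLAGS_FMT is a printf(9) "%b" format string: the leading
 * "\020" selects hexadecimal output and each following "\nnn" is a
 * 1-indexed bit number followed by that bit's name, e.g.
 *
 *	printf("flags %b\n", lemtoh16(&iaq->iaq_flags), IXL_AQ_FLAGS_FMT);
 */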
149 
150 	uint16_t	iaq_opcode;
151 
152 	uint16_t	iaq_datalen;
153 	uint16_t	iaq_retval;
154 
155 	uint64_t	iaq_cookie;
156 
157 	uint32_t	iaq_param[4];
158 /*	iaq_data_hi	iaq_param[2] */
159 /*	iaq_data_lo	iaq_param[3] */
160 } __packed __aligned(8);
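/*
 * Illustrative sketch (not part of the driver): a command that moves
 * data through an external buffer sets the buffer flags, the length,
 * and the buffer DMA address in iaq_param[2]/[3], roughly:
 *
 *	struct ixl_aq_desc iaq;
 *
 *	memset(&iaq, 0, sizeof(iaq));
 *	iaq.iaq_flags = htole16(IXL_AQ_BUF |
 *	    (len > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
 *	iaq.iaq_opcode = htole16(opcode);
 *	iaq.iaq_datalen = htole16(len);
 *	ixl_aq_dva(&iaq, buf_dva);
 *
 * "opcode", "len", and "buf_dva" are hypothetical names; ixl_aq_dva()
 * is defined further down in this file.  Host-to-device commands also
 * set IXL_AQ_RD.
 */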
161 
162 /* aq commands */
163 #define IXL_AQ_OP_GET_VERSION		0x0001
164 #define IXL_AQ_OP_DRIVER_VERSION	0x0002
165 #define IXL_AQ_OP_QUEUE_SHUTDOWN	0x0003
166 #define IXL_AQ_OP_SET_PF_CONTEXT	0x0004
167 #define IXL_AQ_OP_GET_AQ_ERR_REASON	0x0005
168 #define IXL_AQ_OP_REQUEST_RESOURCE	0x0008
169 #define IXL_AQ_OP_RELEASE_RESOURCE	0x0009
170 #define IXL_AQ_OP_LIST_FUNC_CAP		0x000a
171 #define IXL_AQ_OP_LIST_DEV_CAP		0x000b
172 #define IXL_AQ_OP_MAC_ADDRESS_READ	0x0107
173 #define IXL_AQ_OP_CLEAR_PXE_MODE	0x0110
174 #define IXL_AQ_OP_SWITCH_GET_CONFIG	0x0200
175 #define IXL_AQ_OP_RX_CTL_READ		0x0206
176 #define IXL_AQ_OP_RX_CTL_WRITE		0x0207
177 #define IXL_AQ_OP_ADD_VSI		0x0210
178 #define IXL_AQ_OP_UPD_VSI_PARAMS	0x0211
179 #define IXL_AQ_OP_GET_VSI_PARAMS	0x0212
180 #define IXL_AQ_OP_ADD_VEB		0x0230
181 #define IXL_AQ_OP_UPD_VEB_PARAMS	0x0231
182 #define IXL_AQ_OP_GET_VEB_PARAMS	0x0232
183 #define IXL_AQ_OP_ADD_MACVLAN		0x0250
184 #define IXL_AQ_OP_REMOVE_MACVLAN	0x0251
185 #define IXL_AQ_OP_SET_VSI_PROMISC	0x0254
186 #define IXL_AQ_OP_PHY_GET_ABILITIES	0x0600
187 #define IXL_AQ_OP_PHY_SET_CONFIG	0x0601
188 #define IXL_AQ_OP_PHY_SET_MAC_CONFIG	0x0603
189 #define IXL_AQ_OP_PHY_RESTART_AN	0x0605
190 #define IXL_AQ_OP_PHY_LINK_STATUS	0x0607
191 #define IXL_AQ_OP_PHY_SET_EVENT_MASK	0x0613
192 #define IXL_AQ_OP_PHY_SET_REGISTER	0x0628
193 #define IXL_AQ_OP_PHY_GET_REGISTER	0x0629
194 #define IXL_AQ_OP_LLDP_GET_MIB		0x0a00
195 #define IXL_AQ_OP_LLDP_MIB_CHG_EV	0x0a01
196 #define IXL_AQ_OP_LLDP_ADD_TLV		0x0a02
197 #define IXL_AQ_OP_LLDP_UPD_TLV		0x0a03
198 #define IXL_AQ_OP_LLDP_DEL_TLV		0x0a04
199 #define IXL_AQ_OP_LLDP_STOP_AGENT	0x0a05
200 #define IXL_AQ_OP_LLDP_START_AGENT	0x0a06
201 #define IXL_AQ_OP_LLDP_GET_CEE_DCBX	0x0a07
202 #define IXL_AQ_OP_LLDP_SPECIFIC_AGENT	0x0a09
203 #define IXL_AQ_OP_SET_RSS_KEY		0x0b02 /* 722 only */
204 #define IXL_AQ_OP_SET_RSS_LUT		0x0b03 /* 722 only */
205 #define IXL_AQ_OP_GET_RSS_KEY		0x0b04 /* 722 only */
206 #define IXL_AQ_OP_GET_RSS_LUT		0x0b05 /* 722 only */
207 
208 struct ixl_aq_mac_addresses {
209 	uint8_t		pf_lan[ETHER_ADDR_LEN];
210 	uint8_t		pf_san[ETHER_ADDR_LEN];
211 	uint8_t		port[ETHER_ADDR_LEN];
212 	uint8_t		pf_wol[ETHER_ADDR_LEN];
213 } __packed;
214 
215 #define IXL_AQ_MAC_PF_LAN_VALID		(1U << 4)
216 #define IXL_AQ_MAC_PF_SAN_VALID		(1U << 5)
217 #define IXL_AQ_MAC_PORT_VALID		(1U << 6)
218 #define IXL_AQ_MAC_PF_WOL_VALID		(1U << 7)
219 
220 struct ixl_aq_capability {
221 	uint16_t	cap_id;
222 #define IXL_AQ_CAP_SWITCH_MODE		0x0001
223 #define IXL_AQ_CAP_MNG_MODE		0x0002
224 #define IXL_AQ_CAP_NPAR_ACTIVE		0x0003
225 #define IXL_AQ_CAP_OS2BMC_CAP		0x0004
226 #define IXL_AQ_CAP_FUNCTIONS_VALID	0x0005
227 #define IXL_AQ_CAP_ALTERNATE_RAM	0x0006
228 #define IXL_AQ_CAP_WOL_AND_PROXY	0x0008
229 #define IXL_AQ_CAP_SRIOV		0x0012
230 #define IXL_AQ_CAP_VF			0x0013
231 #define IXL_AQ_CAP_VMDQ			0x0014
232 #define IXL_AQ_CAP_8021QBG		0x0015
233 #define IXL_AQ_CAP_8021QBR		0x0016
234 #define IXL_AQ_CAP_VSI			0x0017
235 #define IXL_AQ_CAP_DCB			0x0018
236 #define IXL_AQ_CAP_FCOE			0x0021
237 #define IXL_AQ_CAP_ISCSI		0x0022
238 #define IXL_AQ_CAP_RSS			0x0040
239 #define IXL_AQ_CAP_RXQ			0x0041
240 #define IXL_AQ_CAP_TXQ			0x0042
241 #define IXL_AQ_CAP_MSIX			0x0043
242 #define IXL_AQ_CAP_VF_MSIX		0x0044
243 #define IXL_AQ_CAP_FLOW_DIRECTOR	0x0045
244 #define IXL_AQ_CAP_1588			0x0046
245 #define IXL_AQ_CAP_IWARP		0x0051
246 #define IXL_AQ_CAP_LED			0x0061
247 #define IXL_AQ_CAP_SDP			0x0062
248 #define IXL_AQ_CAP_MDIO			0x0063
249 #define IXL_AQ_CAP_WSR_PROT		0x0064
250 #define IXL_AQ_CAP_NVM_MGMT		0x0080
251 #define IXL_AQ_CAP_FLEX10		0x00F1
252 #define IXL_AQ_CAP_CEM			0x00F2
253 	uint8_t		major_rev;
254 	uint8_t		minor_rev;
255 	uint32_t	number;
256 	uint32_t	logical_id;
257 	uint32_t	phys_id;
258 	uint8_t		_reserved[16];
259 } __packed __aligned(4);
260 
261 #define IXL_LLDP_SHUTDOWN		0x1
262 
263 struct ixl_aq_switch_config {
264 	uint16_t	num_reported;
265 	uint16_t	num_total;
266 	uint8_t		_reserved[12];
267 } __packed __aligned(4);
268 
269 struct ixl_aq_switch_config_element {
270 	uint8_t		type;
271 #define IXL_AQ_SW_ELEM_TYPE_MAC		1
272 #define IXL_AQ_SW_ELEM_TYPE_PF		2
273 #define IXL_AQ_SW_ELEM_TYPE_VF		3
274 #define IXL_AQ_SW_ELEM_TYPE_EMP		4
275 #define IXL_AQ_SW_ELEM_TYPE_BMC		5
276 #define IXL_AQ_SW_ELEM_TYPE_PV		16
277 #define IXL_AQ_SW_ELEM_TYPE_VEB		17
278 #define IXL_AQ_SW_ELEM_TYPE_PA		18
279 #define IXL_AQ_SW_ELEM_TYPE_VSI		19
280 	uint8_t		revision;
281 #define IXL_AQ_SW_ELEM_REV_1		1
282 	uint16_t	seid;
283 
284 	uint16_t	uplink_seid;
285 	uint16_t	downlink_seid;
286 
287 	uint8_t		_reserved[3];
288 	uint8_t		connection_type;
289 #define IXL_AQ_CONN_TYPE_REGULAR	0x1
290 #define IXL_AQ_CONN_TYPE_DEFAULT	0x2
291 #define IXL_AQ_CONN_TYPE_CASCADED	0x3
292 
293 	uint16_t	scheduler_id;
294 	uint16_t	element_info;
295 } __packed __aligned(4);
296 
297 #define IXL_PHY_TYPE_SGMII		0x00
298 #define IXL_PHY_TYPE_1000BASE_KX	0x01
299 #define IXL_PHY_TYPE_10GBASE_KX4	0x02
300 #define IXL_PHY_TYPE_10GBASE_KR		0x03
301 #define IXL_PHY_TYPE_40GBASE_KR4	0x04
302 #define IXL_PHY_TYPE_XAUI		0x05
303 #define IXL_PHY_TYPE_XFI		0x06
304 #define IXL_PHY_TYPE_SFI		0x07
305 #define IXL_PHY_TYPE_XLAUI		0x08
306 #define IXL_PHY_TYPE_XLPPI		0x09
307 #define IXL_PHY_TYPE_40GBASE_CR4_CU	0x0a
308 #define IXL_PHY_TYPE_10GBASE_CR1_CU	0x0b
309 #define IXL_PHY_TYPE_10GBASE_AOC	0x0c
310 #define IXL_PHY_TYPE_40GBASE_AOC	0x0d
311 #define IXL_PHY_TYPE_100BASE_TX		0x11
312 #define IXL_PHY_TYPE_1000BASE_T		0x12
313 #define IXL_PHY_TYPE_10GBASE_T		0x13
314 #define IXL_PHY_TYPE_10GBASE_SR		0x14
315 #define IXL_PHY_TYPE_10GBASE_LR		0x15
316 #define IXL_PHY_TYPE_10GBASE_SFPP_CU	0x16
317 #define IXL_PHY_TYPE_10GBASE_CR1	0x17
318 #define IXL_PHY_TYPE_40GBASE_CR4	0x18
319 #define IXL_PHY_TYPE_40GBASE_SR4	0x19
320 #define IXL_PHY_TYPE_40GBASE_LR4	0x1a
321 #define IXL_PHY_TYPE_1000BASE_SX	0x1b
322 #define IXL_PHY_TYPE_1000BASE_LX	0x1c
323 #define IXL_PHY_TYPE_1000BASE_T_OPTICAL	0x1d
324 #define IXL_PHY_TYPE_20GBASE_KR2	0x1e
325 
326 #define IXL_PHY_TYPE_25GBASE_KR		0x1f
327 #define IXL_PHY_TYPE_25GBASE_CR		0x20
328 #define IXL_PHY_TYPE_25GBASE_SR		0x21
329 #define IXL_PHY_TYPE_25GBASE_LR		0x22
330 #define IXL_PHY_TYPE_25GBASE_AOC	0x23
331 #define IXL_PHY_TYPE_25GBASE_ACC	0x24
332 
333 struct ixl_aq_module_desc {
334 	uint8_t		oui[3];
335 	uint8_t		_reserved1;
336 	uint8_t		part_number[16];
337 	uint8_t		revision[4];
338 	uint8_t		_reserved2[8];
339 } __packed __aligned(4);
340 
341 struct ixl_aq_phy_abilities {
342 	uint32_t	phy_type;
343 
344 	uint8_t		link_speed;
345 #define IXL_AQ_PHY_LINK_SPEED_100MB	(1 << 1)
346 #define IXL_AQ_PHY_LINK_SPEED_1000MB	(1 << 2)
347 #define IXL_AQ_PHY_LINK_SPEED_10GB	(1 << 3)
348 #define IXL_AQ_PHY_LINK_SPEED_40GB	(1 << 4)
349 #define IXL_AQ_PHY_LINK_SPEED_20GB	(1 << 5)
350 #define IXL_AQ_PHY_LINK_SPEED_25GB	(1 << 6)
351 	uint8_t		abilities;
352 	uint16_t	eee_capability;
353 
354 	uint32_t	eeer_val;
355 
356 	uint8_t		d3_lpan;
357 	uint8_t		phy_type_ext;
358 #define IXL_AQ_PHY_TYPE_EXT_25G_KR	0x01
359 #define IXL_AQ_PHY_TYPE_EXT_25G_CR	0x02
360 #define IXL_AQ_PHY_TYPE_EXT_25G_SR	0x04
361 #define IXL_AQ_PHY_TYPE_EXT_25G_LR	0x08
362 	uint8_t		fec_cfg_curr_mod_ext_info;
363 #define IXL_AQ_ENABLE_FEC_KR		0x01
364 #define IXL_AQ_ENABLE_FEC_RS		0x02
365 #define IXL_AQ_REQUEST_FEC_KR		0x04
366 #define IXL_AQ_REQUEST_FEC_RS		0x08
367 #define IXL_AQ_ENABLE_FEC_AUTO		0x10
368 #define IXL_AQ_MODULE_TYPE_EXT_MASK	0xe0
369 #define IXL_AQ_MODULE_TYPE_EXT_SHIFT	5
370 	uint8_t		ext_comp_code;
371 
372 	uint8_t		phy_id[4];
373 
374 	uint8_t		module_type[3];
375 #define IXL_SFF8024_ID_SFP		0x03
376 #define IXL_SFF8024_ID_QSFP		0x0c
377 #define IXL_SFF8024_ID_QSFP_PLUS	0x0d
378 #define IXL_SFF8024_ID_QSFP28		0x11
379 	uint8_t		qualified_module_count;
380 #define IXL_AQ_PHY_MAX_QMS		16
381 	struct ixl_aq_module_desc
382 			qualified_module[IXL_AQ_PHY_MAX_QMS];
383 } __packed __aligned(4);
384 
385 struct ixl_aq_link_param {
386 	uint8_t		notify;
387 #define IXL_AQ_LINK_NOTIFY	0x03
388 	uint8_t		_reserved1;
389 	uint8_t		phy;
390 	uint8_t		speed;
391 	uint8_t		status;
392 	uint8_t		_reserved2[11];
393 } __packed __aligned(4);
394 
395 struct ixl_aq_vsi_param {
396 	uint16_t	uplink_seid;
397 	uint8_t		connect_type;
398 #define IXL_AQ_VSI_CONN_TYPE_NORMAL	(0x1)
399 #define IXL_AQ_VSI_CONN_TYPE_DEFAULT	(0x2)
400 #define IXL_AQ_VSI_CONN_TYPE_CASCADED	(0x3)
401 	uint8_t		_reserved1;
402 
403 	uint8_t		vf_id;
404 	uint8_t		_reserved2;
405 	uint16_t	vsi_flags;
406 #define IXL_AQ_VSI_TYPE_SHIFT		0x0
407 #define IXL_AQ_VSI_TYPE_MASK		(0x3 << IXL_AQ_VSI_TYPE_SHIFT)
408 #define IXL_AQ_VSI_TYPE_VF		0x0
409 #define IXL_AQ_VSI_TYPE_VMDQ2		0x1
410 #define IXL_AQ_VSI_TYPE_PF		0x2
411 #define IXL_AQ_VSI_TYPE_EMP_MNG		0x3
412 #define IXL_AQ_VSI_FLAG_CASCADED_PV	0x4
413 
414 	uint32_t	addr_hi;
415 	uint32_t	addr_lo;
416 } __packed __aligned(16);
417 
418 struct ixl_aq_add_macvlan {
419 	uint16_t	num_addrs;
420 	uint16_t	seid0;
421 	uint16_t	seid1;
422 	uint16_t	seid2;
423 	uint32_t	addr_hi;
424 	uint32_t	addr_lo;
425 } __packed __aligned(16);
426 
427 struct ixl_aq_add_macvlan_elem {
428 	uint8_t		macaddr[6];
429 	uint16_t	vlan;
430 	uint16_t	flags;
431 #define IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH	0x0001
432 #define IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN	0x0004
433 	uint16_t	queue;
434 	uint32_t	_reserved;
435 } __packed __aligned(16);
436 
437 struct ixl_aq_remove_macvlan {
438 	uint16_t	num_addrs;
439 	uint16_t	seid0;
440 	uint16_t	seid1;
441 	uint16_t	seid2;
442 	uint32_t	addr_hi;
443 	uint32_t	addr_lo;
444 } __packed __aligned(16);
445 
446 struct ixl_aq_remove_macvlan_elem {
447 	uint8_t		macaddr[6];
448 	uint16_t	vlan;
449 	uint8_t		flags;
450 #define IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH	0x0001
451 #define IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN	0x0008
452 	uint8_t		_reserved[7];
453 } __packed __aligned(16);
454 
455 struct ixl_aq_vsi_reply {
456 	uint16_t	seid;
457 	uint16_t	vsi_number;
458 
459 	uint16_t	vsis_used;
460 	uint16_t	vsis_free;
461 
462 	uint32_t	addr_hi;
463 	uint32_t	addr_lo;
464 } __packed __aligned(16);
465 
466 	/* first 96 bytes are written by SW */
467 	/* first 96 byte are written by SW */
468 	uint16_t	valid_sections;
469 #define IXL_AQ_VSI_VALID_SWITCH		(1 << 0)
470 #define IXL_AQ_VSI_VALID_SECURITY	(1 << 1)
471 #define IXL_AQ_VSI_VALID_VLAN		(1 << 2)
472 #define IXL_AQ_VSI_VALID_CAS_PV		(1 << 3)
473 #define IXL_AQ_VSI_VALID_INGRESS_UP	(1 << 4)
474 #define IXL_AQ_VSI_VALID_EGRESS_UP	(1 << 5)
475 #define IXL_AQ_VSI_VALID_QUEUE_MAP	(1 << 6)
476 #define IXL_AQ_VSI_VALID_QUEUE_OPT	(1 << 7)
477 #define IXL_AQ_VSI_VALID_OUTER_UP	(1 << 8)
478 #define IXL_AQ_VSI_VALID_SCHED		(1 << 9)
479 	/* switch section */
480 	uint16_t	switch_id;
481 #define IXL_AQ_VSI_SWITCH_ID_SHIFT	0
482 #define IXL_AQ_VSI_SWITCH_ID_MASK	(0xfff << IXL_AQ_VSI_SWITCH_ID_SHIFT)
483 #define IXL_AQ_VSI_SWITCH_NOT_STAG	(1 << 12)
484 #define IXL_AQ_VSI_SWITCH_LOCAL_LB	(1 << 14)
485 
486 	uint8_t		_reserved1[2];
487 	/* security section */
488 	uint8_t		sec_flags;
489 #define IXL_AQ_VSI_SEC_ALLOW_DEST_OVRD	(1 << 0)
490 #define IXL_AQ_VSI_SEC_ENABLE_VLAN_CHK	(1 << 1)
491 #define IXL_AQ_VSI_SEC_ENABLE_MAC_CHK	(1 << 2)
492 	uint8_t		_reserved2;
493 
494 	/* vlan section */
495 	uint16_t	pvid;
496 	uint16_t	fcoe_pvid;
497 
498 	uint8_t		port_vlan_flags;
499 #define IXL_AQ_VSI_PVLAN_MODE_SHIFT	0
500 #define IXL_AQ_VSI_PVLAN_MODE_MASK	(0x3 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
501 #define IXL_AQ_VSI_PVLAN_MODE_TAGGED	(0x1 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
502 #define IXL_AQ_VSI_PVLAN_MODE_UNTAGGED	(0x2 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
503 #define IXL_AQ_VSI_PVLAN_MODE_ALL	(0x3 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
504 #define IXL_AQ_VSI_PVLAN_INSERT_PVID	(0x4 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
505 #define IXL_AQ_VSI_PVLAN_EMOD_SHIFT	0x3
506 #define IXL_AQ_VSI_PVLAN_EMOD_MASK	(0x3 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
507 #define IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH	(0x0 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
508 #define IXL_AQ_VSI_PVLAN_EMOD_STR_UP	(0x1 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
509 #define IXL_AQ_VSI_PVLAN_EMOD_STR	(0x2 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
510 #define IXL_AQ_VSI_PVLAN_EMOD_NOTHING	(0x3 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
511 	uint8_t		_reserved3[3];
512 
513 	/* ingress egress up section */
514 	uint32_t	ingress_table;
515 #define IXL_AQ_VSI_UP_SHIFT(_up)	((_up) * 3)
516 #define IXL_AQ_VSI_UP_MASK(_up)		(0x7 << IXL_AQ_VSI_UP_SHIFT(_up))
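/*
 * Both UP tables pack eight 3-bit user priority entries into one
 * 32-bit word, so e.g. the entry for UP 2 occupies bits 8:6:
 *
 *	up = (table & IXL_AQ_VSI_UP_MASK(2)) >> IXL_AQ_VSI_UP_SHIFT(2);
 */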
517 	uint32_t	egress_table;
518 
519 	/* cascaded pv section */
520 	uint16_t	cas_pv_tag;
521 	uint8_t		cas_pv_flags;
522 #define IXL_AQ_VSI_CAS_PV_TAGX_SHIFT	0
523 #define IXL_AQ_VSI_CAS_PV_TAGX_MASK	(0x3 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
524 #define IXL_AQ_VSI_CAS_PV_TAGX_LEAVE	(0x0 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
525 #define IXL_AQ_VSI_CAS_PV_TAGX_REMOVE	(0x1 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
526 #define IXL_AQ_VSI_CAS_PV_TAGX_COPY	(0x2 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
527 #define IXL_AQ_VSI_CAS_PV_INSERT_TAG	(1 << 4)
528 #define IXL_AQ_VSI_CAS_PV_ETAG_PRUNE	(1 << 5)
529 #define IXL_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG \
530 					(1 << 6)
531 	uint8_t		_reserved4;
532 
533 	/* queue mapping section */
534 	uint16_t	mapping_flags;
535 #define IXL_AQ_VSI_QUE_MAP_MASK		0x1
536 #define IXL_AQ_VSI_QUE_MAP_CONTIG	0x0
537 #define IXL_AQ_VSI_QUE_MAP_NONCONTIG	0x1
538 	uint16_t	queue_mapping[16];
539 #define IXL_AQ_VSI_QUEUE_SHIFT		0x0
540 #define IXL_AQ_VSI_QUEUE_MASK		(0x7ff << IXL_AQ_VSI_QUEUE_SHIFT)
541 	uint16_t	tc_mapping[8];
542 #define IXL_AQ_VSI_TC_Q_OFFSET_SHIFT	0
543 #define IXL_AQ_VSI_TC_Q_OFFSET_MASK	(0x1ff << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT)
544 #define IXL_AQ_VSI_TC_Q_NUMBER_SHIFT	9
545 #define IXL_AQ_VSI_TC_Q_NUMBER_MASK	(0x7 << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT)
546 
547 	/* queueing option section */
548 	uint8_t		queueing_opt_flags;
549 #define IXL_AQ_VSI_QUE_OPT_MCAST_UDP_EN	(1 << 2)
550 #define IXL_AQ_VSI_QUE_OPT_UCAST_UDP_EN	(1 << 3)
551 #define IXL_AQ_VSI_QUE_OPT_TCP_EN	(1 << 4)
552 #define IXL_AQ_VSI_QUE_OPT_FCOE_EN	(1 << 5)
553 #define IXL_AQ_VSI_QUE_OPT_RSS_LUT_PF	0
554 #define IXL_AQ_VSI_QUE_OPT_RSS_LUT_VSI	(1 << 6)
555 	uint8_t		_reserved5[3];
556 
557 	/* scheduler section */
558 	uint8_t		up_enable_bits;
559 	uint8_t		_reserved6;
560 
561 	/* outer up section */
562 	uint32_t	outer_up_table; /* same as ingress/egress tables */
563 	uint8_t		_reserved7[8];
564 
565 	/* last 32 bytes are written by FW */
566 	uint16_t	qs_handle[8];
567 #define IXL_AQ_VSI_QS_HANDLE_INVALID	0xffff
568 	uint16_t	stat_counter_idx;
569 	uint16_t	sched_id;
570 
571 	uint8_t		_reserved8[12];
572 } __packed __aligned(8);
573 
574 CTASSERT(sizeof(struct ixl_aq_vsi_data) == 128);
575 
576 struct ixl_aq_vsi_promisc_param {
577 	uint16_t	flags;
578 	uint16_t	valid_flags;
579 #define IXL_AQ_VSI_PROMISC_FLAG_UCAST	(1 << 0)
580 #define IXL_AQ_VSI_PROMISC_FLAG_MCAST	(1 << 1)
581 #define IXL_AQ_VSI_PROMISC_FLAG_BCAST	(1 << 2)
582 #define IXL_AQ_VSI_PROMISC_FLAG_DFLT	(1 << 3)
583 #define IXL_AQ_VSI_PROMISC_FLAG_VLAN	(1 << 4)
584 #define IXL_AQ_VSI_PROMISC_FLAG_RXONLY	(1 << 15)
585 
586 	uint16_t	seid;
587 #define IXL_AQ_VSI_PROMISC_SEID_VALID	(1 << 15)
588 	uint16_t	vlan;
589 #define IXL_AQ_VSI_PROMISC_VLAN_VALID	(1 << 15)
590 	uint32_t	reserved[2];
591 } __packed __aligned(8);
592 
593 struct ixl_aq_veb_param {
594 	uint16_t	uplink_seid;
595 	uint16_t	downlink_seid;
596 	uint16_t	veb_flags;
597 #define IXL_AQ_ADD_VEB_FLOATING		(1 << 0)
598 #define IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT	1
599 #define IXL_AQ_ADD_VEB_PORT_TYPE_MASK	(0x3 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT)
600 #define IXL_AQ_ADD_VEB_PORT_TYPE_DEFAULT \
601 					(0x2 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT)
602 #define IXL_AQ_ADD_VEB_PORT_TYPE_DATA	(0x4 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT)
603 #define IXL_AQ_ADD_VEB_ENABLE_L2_FILTER	(1 << 3) /* deprecated */
604 #define IXL_AQ_ADD_VEB_DISABLE_STATS	(1 << 4)
605 	uint8_t		enable_tcs;
606 	uint8_t		_reserved[9];
607 } __packed __aligned(16);
608 
609 struct ixl_aq_veb_reply {
610 	uint16_t	_reserved1;
611 	uint16_t	_reserved2;
612 	uint16_t	_reserved3;
613 	uint16_t	switch_seid;
614 	uint16_t	veb_seid;
615 #define IXL_AQ_VEB_ERR_FLAG_NO_VEB	(1 << 0)
616 #define IXL_AQ_VEB_ERR_FLAG_NO_SCHED	(1 << 1)
617 #define IXL_AQ_VEB_ERR_FLAG_NO_COUNTER	(1 << 2)
618 #define IXL_AQ_VEB_ERR_FLAG_NO_ENTRY	(1 << 3)
619 	uint16_t	statistic_index;
620 	uint16_t	vebs_used;
621 	uint16_t	vebs_free;
622 } __packed __aligned(16);
623 
624 /* GET PHY ABILITIES param[0] */
625 #define IXL_AQ_PHY_REPORT_QUAL		(1 << 0)
626 #define IXL_AQ_PHY_REPORT_INIT		(1 << 1)
627 
628 struct ixl_aq_phy_reg_access {
629 	uint8_t		phy_iface;
630 #define IXL_AQ_PHY_IF_INTERNAL		0
631 #define IXL_AQ_PHY_IF_EXTERNAL		1
632 #define IXL_AQ_PHY_IF_MODULE		2
633 	uint8_t		dev_addr;
634 	uint16_t	recall;
635 #define IXL_AQ_PHY_QSFP_DEV_ADDR	0
636 #define IXL_AQ_PHY_QSFP_LAST		1
637 	uint32_t	reg;
638 	uint32_t	val;
639 	uint32_t	_reserved2;
640 } __packed __aligned(16);
641 
642 /* RESTART_AN param[0] */
643 #define IXL_AQ_PHY_RESTART_AN		(1 << 1)
644 #define IXL_AQ_PHY_LINK_ENABLE		(1 << 2)
645 
646 struct ixl_aq_link_status { /* this occupies the iaq_param space */
647 	uint16_t	command_flags; /* only field set on command */
648 #define IXL_AQ_LSE_MASK			0x3
649 #define IXL_AQ_LSE_NOP			0x0
650 #define IXL_AQ_LSE_DISABLE		0x2
651 #define IXL_AQ_LSE_ENABLE		0x3
652 #define IXL_AQ_LSE_IS_ENABLED		0x1 /* only set in response */
653 	uint8_t		phy_type;
654 	uint8_t		link_speed;
655 #define IXL_AQ_LINK_SPEED_1GB		(1 << 2)
656 #define IXL_AQ_LINK_SPEED_10GB		(1 << 3)
657 #define IXL_AQ_LINK_SPEED_40GB		(1 << 4)
658 #define IXL_AQ_LINK_SPEED_25GB		(1 << 6)
659 	uint8_t		link_info;
660 #define IXL_AQ_LINK_UP_FUNCTION		0x01
661 #define IXL_AQ_LINK_FAULT		0x02
662 #define IXL_AQ_LINK_FAULT_TX		0x04
663 #define IXL_AQ_LINK_FAULT_RX		0x08
664 #define IXL_AQ_LINK_FAULT_REMOTE	0x10
665 #define IXL_AQ_LINK_UP_PORT		0x20
666 #define IXL_AQ_MEDIA_AVAILABLE		0x40
667 #define IXL_AQ_SIGNAL_DETECT		0x80
668 	uint8_t		an_info;
669 #define IXL_AQ_AN_COMPLETED		0x01
670 #define IXL_AQ_LP_AN_ABILITY		0x02
671 #define IXL_AQ_PD_FAULT			0x04
672 #define IXL_AQ_FEC_EN			0x08
673 #define IXL_AQ_PHY_LOW_POWER		0x10
674 #define IXL_AQ_LINK_PAUSE_TX		0x20
675 #define IXL_AQ_LINK_PAUSE_RX		0x40
676 #define IXL_AQ_QUALIFIED_MODULE		0x80
677 
678 	uint8_t		ext_info;
679 #define IXL_AQ_LINK_PHY_TEMP_ALARM	0x01
680 #define IXL_AQ_LINK_XCESSIVE_ERRORS	0x02
681 #define IXL_AQ_LINK_TX_SHIFT		0x02
682 #define IXL_AQ_LINK_TX_MASK		(0x03 << IXL_AQ_LINK_TX_SHIFT)
683 #define IXL_AQ_LINK_TX_ACTIVE		0x00
684 #define IXL_AQ_LINK_TX_DRAINED		0x01
685 #define IXL_AQ_LINK_TX_FLUSHED		0x03
686 #define IXL_AQ_LINK_FORCED_40G		0x10
687 /* 25G Error Codes */
688 #define IXL_AQ_25G_NO_ERR		0x00
689 #define IXL_AQ_25G_NOT_PRESENT		0x01
690 #define IXL_AQ_25G_NVM_CRC_ERR		0x02
691 #define IXL_AQ_25G_SBUS_UCODE_ERR	0x03
692 #define IXL_AQ_25G_SERDES_UCODE_ERR	0x04
693 #define IXL_AQ_25G_NIMB_UCODE_ERR	0x05
694 	uint8_t		loopback;
695 	uint16_t	max_frame_size;
696 
697 	uint8_t		config;
698 #define IXL_AQ_CONFIG_FEC_KR_ENA	0x01
699 #define IXL_AQ_CONFIG_FEC_RS_ENA	0x02
700 #define IXL_AQ_CONFIG_CRC_ENA		0x04
701 #define IXL_AQ_CONFIG_PACING_MASK	0x78
702 	uint8_t		power_desc;
703 #define IXL_AQ_LINK_POWER_CLASS_1	0x00
704 #define IXL_AQ_LINK_POWER_CLASS_2	0x01
705 #define IXL_AQ_LINK_POWER_CLASS_3	0x02
706 #define IXL_AQ_LINK_POWER_CLASS_4	0x03
707 #define IXL_AQ_PWR_CLASS_MASK		0x03
708 
709 	uint8_t		reserved[4];
710 } __packed __aligned(4);
711 /* event mask command flags for param[2] */
712 #define IXL_AQ_PHY_EV_MASK		0x3ff
713 #define IXL_AQ_PHY_EV_LINK_UPDOWN	(1 << 1)
714 #define IXL_AQ_PHY_EV_MEDIA_NA		(1 << 2)
715 #define IXL_AQ_PHY_EV_LINK_FAULT	(1 << 3)
716 #define IXL_AQ_PHY_EV_PHY_TEMP_ALARM	(1 << 4)
717 #define IXL_AQ_PHY_EV_EXCESS_ERRORS	(1 << 5)
718 #define IXL_AQ_PHY_EV_SIGNAL_DETECT	(1 << 6)
719 #define IXL_AQ_PHY_EV_AN_COMPLETED	(1 << 7)
720 #define IXL_AQ_PHY_EV_MODULE_QUAL_FAIL	(1 << 8)
721 #define IXL_AQ_PHY_EV_PORT_TX_SUSPENDED	(1 << 9)
722 
723 struct ixl_aq_rss_lut { /* 722 */
724 #define IXL_AQ_SET_RSS_LUT_VSI_VALID	(1 << 15)
725 #define IXL_AQ_SET_RSS_LUT_VSI_ID_SHIFT	0
726 #define IXL_AQ_SET_RSS_LUT_VSI_ID_MASK	\
727 	(0x3FF << IXL_AQ_SET_RSS_LUT_VSI_ID_SHIFT)
728 
729 	uint16_t	vsi_number;
730 #define IXL_AQ_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
731 #define IXL_AQ_SET_RSS_LUT_TABLE_TYPE_MASK \
732 	(0x1 << IXL_AQ_SET_RSS_LUT_TABLE_TYPE_SHIFT)
733 #define IXL_AQ_SET_RSS_LUT_TABLE_TYPE_VSI	0
734 #define IXL_AQ_SET_RSS_LUT_TABLE_TYPE_PF	1
735 	uint16_t	flags;
736 	uint8_t		_reserved[4];
737 	uint32_t	addr_hi;
738 	uint32_t	addr_lo;
739 } __packed __aligned(16);
740 
741 struct ixl_aq_get_set_rss_key { /* 722 */
742 #define IXL_AQ_SET_RSS_KEY_VSI_VALID	(1 << 15)
743 #define IXL_AQ_SET_RSS_KEY_VSI_ID_SHIFT	0
744 #define IXL_AQ_SET_RSS_KEY_VSI_ID_MASK	\
745 	(0x3FF << IXL_AQ_SET_RSS_KEY_VSI_ID_SHIFT)
746 	uint16_t	vsi_number;
747 	uint8_t		_reserved[6];
748 	uint32_t	addr_hi;
749 	uint32_t	addr_lo;
750 } __packed __aligned(16);
751 
752 /* aq response codes */
753 #define IXL_AQ_RC_OK			0  /* success */
754 #define IXL_AQ_RC_EPERM			1  /* Operation not permitted */
755 #define IXL_AQ_RC_ENOENT		2  /* No such element */
756 #define IXL_AQ_RC_ESRCH			3  /* Bad opcode */
757 #define IXL_AQ_RC_EINTR			4  /* operation interrupted */
758 #define IXL_AQ_RC_EIO			5  /* I/O error */
759 #define IXL_AQ_RC_ENXIO			6  /* No such resource */
760 #define IXL_AQ_RC_E2BIG			7  /* Arg too long */
761 #define IXL_AQ_RC_EAGAIN		8  /* Try again */
762 #define IXL_AQ_RC_ENOMEM		9  /* Out of memory */
763 #define IXL_AQ_RC_EACCES		10 /* Permission denied */
764 #define IXL_AQ_RC_EFAULT		11 /* Bad address */
765 #define IXL_AQ_RC_EBUSY			12 /* Device or resource busy */
766 #define IXL_AQ_RC_EEXIST		13 /* object already exists */
767 #define IXL_AQ_RC_EINVAL		14 /* invalid argument */
768 #define IXL_AQ_RC_ENOTTY		15 /* not a typewriter */
769 #define IXL_AQ_RC_ENOSPC		16 /* No space or alloc failure */
770 #define IXL_AQ_RC_ENOSYS		17 /* function not implemented */
771 #define IXL_AQ_RC_ERANGE		18 /* parameter out of range */
772 #define IXL_AQ_RC_EFLUSHED		19 /* cmd flushed due to prev error */
773 #define IXL_AQ_RC_BAD_ADDR		20 /* contains a bad pointer */
774 #define IXL_AQ_RC_EMODE			21 /* not allowed in current mode */
775 #define IXL_AQ_RC_EFBIG			22 /* file too large */
776 
777 struct ixl_tx_desc {
778 	uint64_t		addr;
779 	uint64_t		cmd;
780 #define IXL_TX_DESC_DTYPE_SHIFT		0
781 #define IXL_TX_DESC_DTYPE_MASK		(0xfULL << IXL_TX_DESC_DTYPE_SHIFT)
782 #define IXL_TX_DESC_DTYPE_DATA		(0x0ULL << IXL_TX_DESC_DTYPE_SHIFT)
783 #define IXL_TX_DESC_DTYPE_NOP		(0x1ULL << IXL_TX_DESC_DTYPE_SHIFT)
784 #define IXL_TX_DESC_DTYPE_CONTEXT	(0x1ULL << IXL_TX_DESC_DTYPE_SHIFT)
785 #define IXL_TX_DESC_DTYPE_FCOE_CTX	(0x2ULL << IXL_TX_DESC_DTYPE_SHIFT)
786 #define IXL_TX_DESC_DTYPE_FD		(0x8ULL << IXL_TX_DESC_DTYPE_SHIFT)
787 #define IXL_TX_DESC_DTYPE_DDP_CTX	(0x9ULL << IXL_TX_DESC_DTYPE_SHIFT)
788 #define IXL_TX_DESC_DTYPE_FLEX_DATA	(0xbULL << IXL_TX_DESC_DTYPE_SHIFT)
789 #define IXL_TX_DESC_DTYPE_FLEX_CTX_1	(0xcULL << IXL_TX_DESC_DTYPE_SHIFT)
790 #define IXL_TX_DESC_DTYPE_FLEX_CTX_2	(0xdULL << IXL_TX_DESC_DTYPE_SHIFT)
791 #define IXL_TX_DESC_DTYPE_DONE		(0xfULL << IXL_TX_DESC_DTYPE_SHIFT)
792 
793 #define IXL_TX_DESC_CMD_SHIFT		4
794 #define IXL_TX_DESC_CMD_MASK		(0x3ffULL << IXL_TX_DESC_CMD_SHIFT)
795 #define IXL_TX_DESC_CMD_EOP		(0x001 << IXL_TX_DESC_CMD_SHIFT)
796 #define IXL_TX_DESC_CMD_RS		(0x002 << IXL_TX_DESC_CMD_SHIFT)
797 #define IXL_TX_DESC_CMD_ICRC		(0x004 << IXL_TX_DESC_CMD_SHIFT)
798 #define IXL_TX_DESC_CMD_IL2TAG1		(0x008 << IXL_TX_DESC_CMD_SHIFT)
799 #define IXL_TX_DESC_CMD_DUMMY		(0x010 << IXL_TX_DESC_CMD_SHIFT)
800 #define IXL_TX_DESC_CMD_IIPT_MASK	(0x060 << IXL_TX_DESC_CMD_SHIFT)
801 #define IXL_TX_DESC_CMD_IIPT_NONIP	(0x000 << IXL_TX_DESC_CMD_SHIFT)
802 #define IXL_TX_DESC_CMD_IIPT_IPV6	(0x020 << IXL_TX_DESC_CMD_SHIFT)
803 #define IXL_TX_DESC_CMD_IIPT_IPV4	(0x040 << IXL_TX_DESC_CMD_SHIFT)
804 #define IXL_TX_DESC_CMD_IIPT_IPV4_CSUM	(0x060 << IXL_TX_DESC_CMD_SHIFT)
805 #define IXL_TX_DESC_CMD_FCOET		(0x080 << IXL_TX_DESC_CMD_SHIFT)
806 #define IXL_TX_DESC_CMD_L4T_EOFT_MASK	(0x300 << IXL_TX_DESC_CMD_SHIFT)
807 #define IXL_TX_DESC_CMD_L4T_EOFT_UNK	(0x000 << IXL_TX_DESC_CMD_SHIFT)
808 #define IXL_TX_DESC_CMD_L4T_EOFT_TCP	(0x100 << IXL_TX_DESC_CMD_SHIFT)
809 #define IXL_TX_DESC_CMD_L4T_EOFT_SCTP	(0x200 << IXL_TX_DESC_CMD_SHIFT)
810 #define IXL_TX_DESC_CMD_L4T_EOFT_UDP	(0x300 << IXL_TX_DESC_CMD_SHIFT)
811 
812 #define IXL_TX_DESC_MACLEN_SHIFT	16
813 #define IXL_TX_DESC_MACLEN_MASK		(0x7fULL << IXL_TX_DESC_MACLEN_SHIFT)
814 #define IXL_TX_DESC_IPLEN_SHIFT		23
815 #define IXL_TX_DESC_IPLEN_MASK		(0x7fULL << IXL_TX_DESC_IPLEN_SHIFT)
816 #define IXL_TX_DESC_L4LEN_SHIFT		30
817 #define IXL_TX_DESC_L4LEN_MASK		(0xfULL << IXL_TX_DESC_L4LEN_SHIFT)
818 #define IXL_TX_DESC_FCLEN_SHIFT		30
819 #define IXL_TX_DESC_FCLEN_MASK		(0xfULL << IXL_TX_DESC_FCLEN_SHIFT)
820 
821 #define IXL_TX_DESC_BSIZE_SHIFT		34
822 #define IXL_TX_DESC_BSIZE_MAX		0x3fffULL
823 #define IXL_TX_DESC_BSIZE_MASK		\
824 	(IXL_TX_DESC_BSIZE_MAX << IXL_TX_DESC_BSIZE_SHIFT)
825 } __packed __aligned(16);
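/*
 * Sketch of how a data descriptor is composed from the fields above
 * ("seg_dva" and "seg_len" stand in for a DMA segment); the last
 * segment of a packet also carries EOP and RS:
 *
 *	txd->addr = htole64(seg_dva);
 *	txd->cmd = htole64(IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC |
 *	    IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS |
 *	    (uint64_t)seg_len << IXL_TX_DESC_BSIZE_SHIFT);
 */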
826 
827 struct ixl_rx_rd_desc_16 {
828 	uint64_t		paddr; /* packet addr */
829 	uint64_t		haddr; /* header addr */
830 } __packed __aligned(16);
831 
832 struct ixl_rx_rd_desc_32 {
833 	uint64_t		paddr; /* packet addr */
834 	uint64_t		haddr; /* header addr */
835 	uint64_t		_reserved1;
836 	uint64_t		_reserved2;
837 } __packed __aligned(16);
838 
839 struct ixl_rx_wb_desc_16 {
840 	uint32_t		_reserved1;
841 	uint32_t		filter_status;
842 	uint64_t		qword1;
843 #define IXL_RX_DESC_DD			(1 << 0)
844 #define IXL_RX_DESC_EOP			(1 << 1)
845 #define IXL_RX_DESC_L2TAG1P		(1 << 2)
846 #define IXL_RX_DESC_L3L4P		(1 << 3)
847 #define IXL_RX_DESC_CRCP		(1 << 4)
848 #define IXL_RX_DESC_TSYNINDX_SHIFT	5	/* TSYNINDX */
849 #define IXL_RX_DESC_TSYNINDX_MASK	(7 << IXL_RX_DESC_TSYNINDX_SHIFT)
850 #define IXL_RX_DESC_UMB_SHIFT		9
851 #define IXL_RX_DESC_UMB_MASK		(0x3 << IXL_RX_DESC_UMB_SHIFT)
852 #define IXL_RX_DESC_UMB_UCAST		(0x0 << IXL_RX_DESC_UMB_SHIFT)
853 #define IXL_RX_DESC_UMB_MCAST		(0x1 << IXL_RX_DESC_UMB_SHIFT)
854 #define IXL_RX_DESC_UMB_BCAST		(0x2 << IXL_RX_DESC_UMB_SHIFT)
855 #define IXL_RX_DESC_UMB_MIRROR		(0x3 << IXL_RX_DESC_UMB_SHIFT)
856 #define IXL_RX_DESC_FLM			(1 << 11)
857 #define IXL_RX_DESC_FLTSTAT_SHIFT	12
858 #define IXL_RX_DESC_FLTSTAT_MASK	(0x3 << IXL_RX_DESC_FLTSTAT_SHIFT)
859 #define IXL_RX_DESC_FLTSTAT_NODATA	(0x0 << IXL_RX_DESC_FLTSTAT_SHIFT)
860 #define IXL_RX_DESC_FLTSTAT_FDFILTID	(0x1 << IXL_RX_DESC_FLTSTAT_SHIFT)
861 #define IXL_RX_DESC_FLTSTAT_RSS		(0x3 << IXL_RX_DESC_FLTSTAT_SHIFT)
862 #define IXL_RX_DESC_LPBK		(1 << 14)
863 #define IXL_RX_DESC_IPV6EXTADD		(1 << 15)
864 #define IXL_RX_DESC_INT_UDP_0		(1 << 18)
865 
866 #define IXL_RX_DESC_RXE			(1 << 19)
867 #define IXL_RX_DESC_HBO			(1 << 21)
868 #define IXL_RX_DESC_IPE			(1 << 22)
869 #define IXL_RX_DESC_L4E			(1 << 23)
870 #define IXL_RX_DESC_EIPE		(1 << 24)
871 #define IXL_RX_DESC_OVERSIZE		(1 << 25)
872 
873 #define IXL_RX_DESC_PTYPE_SHIFT		30
874 #define IXL_RX_DESC_PTYPE_MASK		(0xffULL << IXL_RX_DESC_PTYPE_SHIFT)
875 
876 #define IXL_RX_DESC_PLEN_SHIFT		38
877 #define IXL_RX_DESC_PLEN_MASK		(0x3fffULL << IXL_RX_DESC_PLEN_SHIFT)
878 #define IXL_RX_DESC_HLEN_SHIFT		42
879 #define IXL_RX_DESC_HLEN_MASK		(0x7ffULL << IXL_RX_DESC_HLEN_SHIFT)
880 } __packed __aligned(16);
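/*
 * Sketch: completion handling reads qword1 once and pulls status bits
 * and lengths out of it, along the lines of:
 *
 *	uint64_t word = lemtoh64(&rxd->qword1);
 *
 *	if (!ISSET(word, IXL_RX_DESC_DD))
 *		return;		(descriptor not written back yet)
 *	plen = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
 */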
881 
882 struct ixl_rx_wb_desc_32 {
883 	uint64_t		qword0;
884 	uint64_t		qword1;
885 	uint64_t		qword2;
886 	uint64_t		qword3;
887 } __packed __aligned(16);
888 
889 #define IXL_TX_PKT_DESCS		8
890 #define IXL_TX_QUEUE_ALIGN		128
891 #define IXL_RX_QUEUE_ALIGN		128
892 
893 #define IXL_HARDMTU			9712 /* 9726 - ETHER_HDR_LEN */
894 
895 #define IXL_PCIREG			PCI_MAPREG_START
896 
897 #define IXL_ITR0			0x0
898 #define IXL_ITR1			0x1
899 #define IXL_ITR2			0x2
900 #define IXL_NOITR			0x2
901 
902 #define IXL_AQ_NUM			256
903 #define IXL_AQ_MASK			(IXL_AQ_NUM - 1)
904 #define IXL_AQ_ALIGN			64 /* AQ rings must be 64-byte aligned */
905 #define IXL_AQ_BUFLEN			4096
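/*
 * IXL_AQ_NUM is a power of two, so producer/consumer indexes wrap with
 * a mask instead of a modulo, e.g.
 *
 *	prod = (prod + 1) & IXL_AQ_MASK;
 */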
906 
907 /* Packet Classifier Types for filters */
908 /* bits 0-28 are reserved for future use */
909 #define IXL_PCT_NONF_IPV4_UDP_UCAST	(1ULL << 29)	/* 722 */
910 #define IXL_PCT_NONF_IPV4_UDP_MCAST	(1ULL << 30)	/* 722 */
911 #define IXL_PCT_NONF_IPV4_UDP		(1ULL << 31)
912 #define IXL_PCT_NONF_IPV4_TCP_SYN_NOACK	(1ULL << 32)	/* 722 */
913 #define IXL_PCT_NONF_IPV4_TCP		(1ULL << 33)
914 #define IXL_PCT_NONF_IPV4_SCTP		(1ULL << 34)
915 #define IXL_PCT_NONF_IPV4_OTHER		(1ULL << 35)
916 #define IXL_PCT_FRAG_IPV4		(1ULL << 36)
917 /* bits 37-38 are reserved for future use */
918 #define IXL_PCT_NONF_IPV6_UDP_UCAST	(1ULL << 39)	/* 722 */
919 #define IXL_PCT_NONF_IPV6_UDP_MCAST	(1ULL << 40)	/* 722 */
920 #define IXL_PCT_NONF_IPV6_UDP		(1ULL << 41)
921 #define IXL_PCT_NONF_IPV6_TCP_SYN_NOACK	(1ULL << 42)	/* 722 */
922 #define IXL_PCT_NONF_IPV6_TCP		(1ULL << 43)
923 #define IXL_PCT_NONF_IPV6_SCTP		(1ULL << 44)
924 #define IXL_PCT_NONF_IPV6_OTHER		(1ULL << 45)
925 #define IXL_PCT_FRAG_IPV6		(1ULL << 46)
926 /* bit 47 is reserved for future use */
927 #define IXL_PCT_FCOE_OX			(1ULL << 48)
928 #define IXL_PCT_FCOE_RX			(1ULL << 49)
929 #define IXL_PCT_FCOE_OTHER		(1ULL << 50)
930 /* bits 51-62 are reserved for future use */
931 #define IXL_PCT_L2_PAYLOAD		(1ULL << 63)
932 
933 #define IXL_RSS_HENA_BASE_DEFAULT	(	\
934 	IXL_PCT_NONF_IPV4_UDP |			\
935 	IXL_PCT_NONF_IPV4_TCP |			\
936 	IXL_PCT_NONF_IPV4_SCTP |		\
937 	IXL_PCT_NONF_IPV4_OTHER |		\
938 	IXL_PCT_FRAG_IPV4 |			\
939 	IXL_PCT_NONF_IPV6_UDP |			\
940 	IXL_PCT_NONF_IPV6_TCP |			\
941 	IXL_PCT_NONF_IPV6_SCTP |		\
942 	IXL_PCT_NONF_IPV6_OTHER |		\
943 	IXL_PCT_FRAG_IPV6 |			\
944 	IXL_PCT_L2_PAYLOAD)
945 
946 #define IXL_RSS_HENA_BASE_710		IXL_RSS_HENA_BASE_DEFAULT
947 #define IXL_RSS_HENA_BASE_722		(IXL_RSS_HENA_BASE_DEFAULT | \
948 	IXL_PCT_NONF_IPV4_UDP_UCAST |		\
949 	IXL_PCT_NONF_IPV4_UDP_MCAST |		\
950 	IXL_PCT_NONF_IPV6_UDP_UCAST |		\
951 	IXL_PCT_NONF_IPV6_UDP_MCAST |		\
952 	IXL_PCT_NONF_IPV4_TCP_SYN_NOACK |	\
953 	IXL_PCT_NONF_IPV6_TCP_SYN_NOACK)
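/*
 * Sketch (assuming the I40E_PFQF_HENA() register macro from
 * if_ixlreg.h): the 64-bit hash enable set is programmed as two 32-bit
 * control register writes, roughly:
 *
 *	uint64_t hena = ixl_rss_hena(sc);
 *
 *	ixl_wr_ctl(sc, I40E_PFQF_HENA(0), hena);
 *	ixl_wr_ctl(sc, I40E_PFQF_HENA(1), hena >> 32);
 */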
954 
955 #define IXL_HMC_ROUNDUP			512
956 #define IXL_HMC_PGSIZE			4096
957 #define IXL_HMC_DVASZ			sizeof(uint64_t)
958 #define IXL_HMC_PGS			(IXL_HMC_PGSIZE / IXL_HMC_DVASZ)
959 #define IXL_HMC_L2SZ			(IXL_HMC_PGSIZE * IXL_HMC_PGS)
960 #define IXL_HMC_PDVALID			1ULL
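/*
 * Worked out: IXL_HMC_PGS is 4096 / 8 = 512 page addresses per page of
 * descriptors, so one second-level segment covers IXL_HMC_L2SZ =
 * 4096 * 512 = 2MB of backing memory.
 */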
961 
962 struct ixl_aq_regs {
963 	bus_size_t		atq_tail;
964 	bus_size_t		atq_head;
965 	bus_size_t		atq_len;
966 	bus_size_t		atq_bal;
967 	bus_size_t		atq_bah;
968 
969 	bus_size_t		arq_tail;
970 	bus_size_t		arq_head;
971 	bus_size_t		arq_len;
972 	bus_size_t		arq_bal;
973 	bus_size_t		arq_bah;
974 
975 	uint32_t		atq_len_enable;
976 	uint32_t		atq_tail_mask;
977 	uint32_t		atq_head_mask;
978 
979 	uint32_t		arq_len_enable;
980 	uint32_t		arq_tail_mask;
981 	uint32_t		arq_head_mask;
982 };
983 
984 struct ixl_phy_type {
985 	uint64_t	phy_type;
986 	uint64_t	ifm_type;
987 };
988 
989 struct ixl_speed_type {
990 	uint8_t		dev_speed;
991 	uint64_t	net_speed;
992 };
993 
994 struct ixl_aq_buf {
995 	SIMPLEQ_ENTRY(ixl_aq_buf)
996 				 aqb_entry;
997 	void			*aqb_data;
998 	bus_dmamap_t		 aqb_map;
999 };
1000 SIMPLEQ_HEAD(ixl_aq_bufs, ixl_aq_buf);
1001 
1002 struct ixl_dmamem {
1003 	bus_dmamap_t		ixm_map;
1004 	bus_dma_segment_t	ixm_seg;
1005 	int			ixm_nsegs;
1006 	size_t			ixm_size;
1007 	caddr_t			ixm_kva;
1008 };
1009 #define IXL_DMA_MAP(_ixm)	((_ixm)->ixm_map)
1010 #define IXL_DMA_DVA(_ixm)	((_ixm)->ixm_map->dm_segs[0].ds_addr)
1011 #define IXL_DMA_KVA(_ixm)	((void *)(_ixm)->ixm_kva)
1012 #define IXL_DMA_LEN(_ixm)	((_ixm)->ixm_size)
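/*
 * Sketch: code handing a dma region to the chip splits the device
 * address into 32-bit halves with ixl_dmamem_lo()/ixl_dmamem_hi()
 * (defined below), e.g.
 *
 *	ixl_wr(sc, sc->sc_aq_regs->atq_bal, ixl_dmamem_lo(&sc->sc_atq));
 *	ixl_wr(sc, sc->sc_aq_regs->atq_bah, ixl_dmamem_hi(&sc->sc_atq));
 */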
1013 
1014 struct ixl_hmc_entry {
1015 	uint64_t		 hmc_base;
1016 	uint32_t		 hmc_count;
1017 	uint32_t		 hmc_size;
1018 };
1019 
1020 #define IXL_HMC_LAN_TX		 0
1021 #define IXL_HMC_LAN_RX		 1
1022 #define IXL_HMC_FCOE_CTX	 2
1023 #define IXL_HMC_FCOE_FILTER	 3
1024 #define IXL_HMC_COUNT		 4
1025 
1026 struct ixl_hmc_pack {
1027 	uint16_t		offset;
1028 	uint16_t		width;
1029 	uint16_t		lsb;
1030 };
1031 
1032 /*
1033  * These HMC objects have awkward sizes and alignments, so these are abstract
1034  * representations of them that are convenient for C code to populate.
1035  *
1036  * The packing code relies on little-endian values being stored in the fields,
1037  * on no high bits being set above each field's width, and on the fields being
1038  * packed in the same order as they appear in the hardware context structure.
1039  */
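/*
 * For example, the { offset, width, lsb } triple
 * { offsetof(struct ixl_hmc_rxq, qlen), 13, 89 } below means: take the
 * low 13 bits of the little-endian qlen field and store them at bits
 * 101:89 of the packed hardware context.
 */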
1040 
1041 struct ixl_hmc_rxq {
1042 	uint16_t		 head;
1043 	uint8_t			 cpuid;
1044 	uint64_t		 base;
1045 #define IXL_HMC_RXQ_BASE_UNIT		128
1046 	uint16_t		 qlen;
1047 	uint16_t		 dbuff;
1048 #define IXL_HMC_RXQ_DBUFF_UNIT		128
1049 	uint8_t			 hbuff;
1050 #define IXL_HMC_RXQ_HBUFF_UNIT		64
1051 	uint8_t			 dtype;
1052 #define IXL_HMC_RXQ_DTYPE_NOSPLIT	0x0
1053 #define IXL_HMC_RXQ_DTYPE_HSPLIT	0x1
1054 #define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS	0x2
1055 	uint8_t			 dsize;
1056 #define IXL_HMC_RXQ_DSIZE_16		0
1057 #define IXL_HMC_RXQ_DSIZE_32		1
1058 	uint8_t			 crcstrip;
1059 	uint8_t			 fc_ena;
1060 	uint8_t			 l2sel;
1061 	uint8_t			 hsplit_0;
1062 	uint8_t			 hsplit_1;
1063 	uint8_t			 showiv;
1064 	uint16_t		 rxmax;
1065 	uint8_t			 tphrdesc_ena;
1066 	uint8_t			 tphwdesc_ena;
1067 	uint8_t			 tphdata_ena;
1068 	uint8_t			 tphhead_ena;
1069 	uint8_t			 lrxqthresh;
1070 	uint8_t			 prefena;
1071 };
1072 
1073 static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = {
1074 	{ offsetof(struct ixl_hmc_rxq, head),		13,	0 },
1075 	{ offsetof(struct ixl_hmc_rxq, cpuid),		8,	13 },
1076 	{ offsetof(struct ixl_hmc_rxq, base),		57,	32 },
1077 	{ offsetof(struct ixl_hmc_rxq, qlen),		13,	89 },
1078 	{ offsetof(struct ixl_hmc_rxq, dbuff),		7,	102 },
1079 	{ offsetof(struct ixl_hmc_rxq, hbuff),		5,	109 },
1080 	{ offsetof(struct ixl_hmc_rxq, dtype),		2,	114 },
1081 	{ offsetof(struct ixl_hmc_rxq, dsize),		1,	116 },
1082 	{ offsetof(struct ixl_hmc_rxq, crcstrip),	1,	117 },
1083 	{ offsetof(struct ixl_hmc_rxq, fc_ena),		1,	118 },
1084 	{ offsetof(struct ixl_hmc_rxq, l2sel),		1,	119 },
1085 	{ offsetof(struct ixl_hmc_rxq, hsplit_0),	4,	120 },
1086 	{ offsetof(struct ixl_hmc_rxq, hsplit_1),	2,	124 },
1087 	{ offsetof(struct ixl_hmc_rxq, showiv),		1,	127 },
1088 	{ offsetof(struct ixl_hmc_rxq, rxmax),		14,	174 },
1089 	{ offsetof(struct ixl_hmc_rxq, tphrdesc_ena),	1,	193 },
1090 	{ offsetof(struct ixl_hmc_rxq, tphwdesc_ena),	1,	194 },
1091 	{ offsetof(struct ixl_hmc_rxq, tphdata_ena),	1,	195 },
1092 	{ offsetof(struct ixl_hmc_rxq, tphhead_ena),	1,	196 },
1093 	{ offsetof(struct ixl_hmc_rxq, lrxqthresh),	3,	198 },
1094 	{ offsetof(struct ixl_hmc_rxq, prefena),	1,	201 },
1095 };
1096 
1097 #define IXL_HMC_RXQ_MINSIZE (201 + 1)
1098 
1099 struct ixl_hmc_txq {
1100 	uint16_t		head;
1101 	uint8_t			new_context;
1102 	uint64_t		base;
1103 #define IXL_HMC_TXQ_BASE_UNIT		128
1104 	uint8_t			fc_ena;
1105 	uint8_t			timesync_ena;
1106 	uint8_t			fd_ena;
1107 	uint8_t			alt_vlan_ena;
1108 	uint16_t		thead_wb;
1109 	uint8_t			cpuid;
1110 	uint8_t			head_wb_ena;
1111 #define IXL_HMC_TXQ_DESC_WB		0
1112 #define IXL_HMC_TXQ_HEAD_WB		1
1113 	uint16_t		qlen;
1114 	uint8_t			tphrdesc_ena;
1115 	uint8_t			tphrpacket_ena;
1116 	uint8_t			tphwdesc_ena;
1117 	uint64_t		head_wb_addr;
1118 	uint32_t		crc;
1119 	uint16_t		rdylist;
1120 	uint8_t			rdylist_act;
1121 };
1122 
1123 static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = {
1124 	{ offsetof(struct ixl_hmc_txq, head),		13,	0 },
1125 	{ offsetof(struct ixl_hmc_txq, new_context),	1,	30 },
1126 	{ offsetof(struct ixl_hmc_txq, base),		57,	32 },
1127 	{ offsetof(struct ixl_hmc_txq, fc_ena),		1,	89 },
1128 	{ offsetof(struct ixl_hmc_txq, timesync_ena),	1,	90 },
1129 	{ offsetof(struct ixl_hmc_txq, fd_ena),		1,	91 },
1130 	{ offsetof(struct ixl_hmc_txq, alt_vlan_ena),	1,	92 },
1131 	{ offsetof(struct ixl_hmc_txq, cpuid),		8,	96 },
1132 /* line 1 */
1133 	{ offsetof(struct ixl_hmc_txq, thead_wb),	13,	0 + 128 },
1134 	{ offsetof(struct ixl_hmc_txq, head_wb_ena),	1,	32 + 128 },
1135 	{ offsetof(struct ixl_hmc_txq, qlen),		13,	33 + 128 },
1136 	{ offsetof(struct ixl_hmc_txq, tphrdesc_ena),	1,	46 + 128 },
1137 	{ offsetof(struct ixl_hmc_txq, tphrpacket_ena),	1,	47 + 128 },
1138 	{ offsetof(struct ixl_hmc_txq, tphwdesc_ena),	1,	48 + 128 },
1139 	{ offsetof(struct ixl_hmc_txq, head_wb_addr),	64,	64 + 128 },
1140 /* line 7 */
1141 	{ offsetof(struct ixl_hmc_txq, crc),		32,	0 + (7*128) },
1142 	{ offsetof(struct ixl_hmc_txq, rdylist),	10,	84 + (7*128) },
1143 	{ offsetof(struct ixl_hmc_txq, rdylist_act),	1,	94 + (7*128) },
1144 };
1145 
1146 #define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1)
1147 
1148 struct ixl_rss_key {
1149 	uint32_t		 key[13];
1150 };
1151 
1152 struct ixl_rss_lut_128 {
1153 	uint32_t		 entries[128 / sizeof(uint32_t)];
1154 };
1155 
1156 struct ixl_rss_lut_512 {
1157 	uint32_t		 entries[512 / sizeof(uint32_t)];
1158 };
1159 
1160 /* driver structures */
1161 
1162 struct ixl_vector;
1163 struct ixl_chip;
1164 
1165 struct ixl_tx_map {
1166 	struct mbuf		*txm_m;
1167 	bus_dmamap_t		 txm_map;
1168 	unsigned int		 txm_eop;
1169 };
1170 
1171 struct ixl_tx_ring {
1172 	struct ixl_softc	*txr_sc;
1173 	struct ixl_vector	*txr_vector;
1174 	struct ifqueue		*txr_ifq;
1175 
1176 	unsigned int		 txr_prod;
1177 	unsigned int		 txr_cons;
1178 
1179 	struct ixl_tx_map	*txr_maps;
1180 	struct ixl_dmamem	 txr_mem;
1181 
1182 	bus_size_t		 txr_tail;
1183 	unsigned int		 txr_qid;
1184 } __aligned(CACHE_LINE_SIZE);
1185 
1186 struct ixl_rx_map {
1187 	struct mbuf		*rxm_m;
1188 	bus_dmamap_t		 rxm_map;
1189 };
1190 
1191 struct ixl_rx_ring {
1192 	struct ixl_softc	*rxr_sc;
1193 	struct ixl_vector	*rxr_vector;
1194 	struct ifiqueue		*rxr_ifiq;
1195 
1196 	struct if_rxring	 rxr_acct;
1197 	struct timeout		 rxr_refill;
1198 
1199 	unsigned int		 rxr_prod;
1200 	unsigned int		 rxr_cons;
1201 
1202 	struct ixl_rx_map	*rxr_maps;
1203 	struct ixl_dmamem	 rxr_mem;
1204 
1205 	struct mbuf		*rxr_m_head;
1206 	struct mbuf		**rxr_m_tail;
1207 
1208 	bus_size_t		 rxr_tail;
1209 	unsigned int		 rxr_qid;
1210 } __aligned(CACHE_LINE_SIZE);
1211 
1212 struct ixl_atq {
1213 	struct ixl_aq_desc	  iatq_desc;
1214 	void			 *iatq_arg;
1215 	void			(*iatq_fn)(struct ixl_softc *, void *);
1216 };
1217 SIMPLEQ_HEAD(ixl_atq_list, ixl_atq);
1218 
1219 struct ixl_vector {
1220 	struct ixl_softc	*iv_sc;
1221 	struct ixl_rx_ring	*iv_rxr;
1222 	struct ixl_tx_ring	*iv_txr;
1223 	int			 iv_qid;
1224 	void			*iv_ihc;
1225 	char			 iv_name[16];
1226 } __aligned(CACHE_LINE_SIZE);
1227 
1228 struct ixl_softc {
1229 	struct device		 sc_dev;
1230 	const struct ixl_chip	*sc_chip;
1231 	struct arpcom		 sc_ac;
1232 	struct ifmedia		 sc_media;
1233 	uint64_t		 sc_media_status;
1234 	uint64_t		 sc_media_active;
1235 
1236 	pci_chipset_tag_t	 sc_pc;
1237 	pci_intr_handle_t	 sc_ih;
1238 	void			*sc_ihc;
1239 	pcitag_t		 sc_tag;
1240 
1241 	bus_dma_tag_t		 sc_dmat;
1242 	bus_space_tag_t		 sc_memt;
1243 	bus_space_handle_t	 sc_memh;
1244 	bus_size_t		 sc_mems;
1245 
1246 	uint16_t		 sc_api_major;
1247 	uint16_t		 sc_api_minor;
1248 	uint8_t			 sc_pf_id;
1249 	uint16_t		 sc_uplink_seid;	/* le */
1250 	uint16_t		 sc_downlink_seid;	/* le */
1251 	uint16_t		 sc_veb_seid;		/* le */
1252 	uint16_t		 sc_vsi_number;		/* le */
1253 	uint16_t		 sc_seid;
1254 	unsigned int		 sc_base_queue;
1255 	unsigned int		 sc_port;
1256 
1257 	struct ixl_dmamem	 sc_scratch;
1258 
1259 	const struct ixl_aq_regs *
1260 				 sc_aq_regs;
1261 
1262 	struct ixl_dmamem	 sc_atq;
1263 	unsigned int		 sc_atq_prod;
1264 	unsigned int		 sc_atq_cons;
1265 
1266 	struct ixl_dmamem	 sc_arq;
1267 	struct task		 sc_arq_task;
1268 	struct ixl_aq_bufs	 sc_arq_idle;
1269 	struct ixl_aq_bufs	 sc_arq_live;
1270 	struct if_rxring	 sc_arq_ring;
1271 	unsigned int		 sc_arq_prod;
1272 	unsigned int		 sc_arq_cons;
1273 
1274 	struct mutex		 sc_link_state_mtx;
1275 	struct task		 sc_link_state_task;
1276 	struct ixl_atq		 sc_link_state_atq;
1277 
1278 	struct ixl_dmamem	 sc_hmc_sd;
1279 	struct ixl_dmamem	 sc_hmc_pd;
1280 	struct ixl_hmc_entry	 sc_hmc_entries[IXL_HMC_COUNT];
1281 
1282 	unsigned int		 sc_tx_ring_ndescs;
1283 	unsigned int		 sc_rx_ring_ndescs;
1284 	unsigned int		 sc_nqueues;	/* 1 << sc_nqueues */
1285 
1286 	struct intrmap		*sc_intrmap;
1287 	struct ixl_vector	*sc_vectors;
1288 
1289 	struct rwlock		 sc_cfg_lock;
1290 	unsigned int		 sc_dead;
1291 
1292 	uint8_t			 sc_enaddr[ETHER_ADDR_LEN];
1293 
1294 #if NKSTAT > 0
1295 	struct mutex		 sc_kstat_mtx;
1296 	struct timeout		 sc_kstat_tmo;
1297 	struct kstat		*sc_port_kstat;
1298 	struct kstat		*sc_vsi_kstat;
1299 #endif
1300 };
1301 #define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname)
1302 
1303 #define delaymsec(_ms)	delay(1000 * (_ms))
1304 
1305 static void	ixl_clear_hw(struct ixl_softc *);
1306 static int	ixl_pf_reset(struct ixl_softc *);
1307 
1308 static int	ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *,
1309 		    bus_size_t, u_int);
1310 static void	ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *);
1311 
1312 static int	ixl_arq_fill(struct ixl_softc *);
1313 static void	ixl_arq_unfill(struct ixl_softc *);
1314 
1315 static int	ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *,
1316 		    unsigned int);
1317 static void	ixl_atq_set(struct ixl_atq *,
1318 		    void (*)(struct ixl_softc *, void *), void *);
1319 static void	ixl_atq_post(struct ixl_softc *, struct ixl_atq *);
1320 static void	ixl_atq_done(struct ixl_softc *);
1321 static void	ixl_atq_exec(struct ixl_softc *, struct ixl_atq *,
1322 		    const char *);
1323 static int	ixl_get_version(struct ixl_softc *);
1324 static int	ixl_pxe_clear(struct ixl_softc *);
1325 static int	ixl_lldp_shut(struct ixl_softc *);
1326 static int	ixl_get_mac(struct ixl_softc *);
1327 static int	ixl_get_switch_config(struct ixl_softc *);
1328 static int	ixl_phy_mask_ints(struct ixl_softc *);
1329 static int	ixl_get_phy_types(struct ixl_softc *, uint64_t *);
1330 static int	ixl_restart_an(struct ixl_softc *);
1331 static int	ixl_hmc(struct ixl_softc *);
1332 static void	ixl_hmc_free(struct ixl_softc *);
1333 static int	ixl_get_vsi(struct ixl_softc *);
1334 static int	ixl_set_vsi(struct ixl_softc *);
1335 static int	ixl_get_link_status(struct ixl_softc *);
1336 static int	ixl_set_link_status(struct ixl_softc *,
1337 		    const struct ixl_aq_desc *);
1338 static int	ixl_add_macvlan(struct ixl_softc *, uint8_t *, uint16_t,
1339 		    uint16_t);
1340 static int	ixl_remove_macvlan(struct ixl_softc *, uint8_t *, uint16_t,
1341 		    uint16_t);
1342 static void	ixl_link_state_update(void *);
1343 static void	ixl_arq(void *);
1344 static void	ixl_hmc_pack(void *, const void *,
1345 		    const struct ixl_hmc_pack *, unsigned int);
1346 
1347 static int	ixl_get_sffpage(struct ixl_softc *, struct if_sffpage *);
1348 static int	ixl_sff_get_byte(struct ixl_softc *, uint8_t, uint32_t,
1349 		    uint8_t *);
1350 static int	ixl_sff_set_byte(struct ixl_softc *, uint8_t, uint32_t,
1351 		    uint8_t);
1352 
1353 static int	ixl_match(struct device *, void *, void *);
1354 static void	ixl_attach(struct device *, struct device *, void *);
1355 
1356 static void	ixl_media_add(struct ixl_softc *, uint64_t);
1357 static int	ixl_media_change(struct ifnet *);
1358 static void	ixl_media_status(struct ifnet *, struct ifmediareq *);
1359 static void	ixl_watchdog(struct ifnet *);
1360 static int	ixl_ioctl(struct ifnet *, u_long, caddr_t);
1361 static void	ixl_start(struct ifqueue *);
1362 static int	ixl_intr0(void *);
1363 static int	ixl_intr_vector(void *);
1364 static int	ixl_up(struct ixl_softc *);
1365 static int	ixl_down(struct ixl_softc *);
1366 static int	ixl_iff(struct ixl_softc *);
1367 
1368 static struct ixl_tx_ring *
1369 		ixl_txr_alloc(struct ixl_softc *, unsigned int);
1370 static void	ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int);
1371 static void	ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *);
1372 static int	ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *);
1373 static int	ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *);
1374 static void	ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *);
1375 static void	ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *);
1376 static void	ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *);
1377 static int	ixl_txeof(struct ixl_softc *, struct ixl_tx_ring *);
1378 
1379 static struct ixl_rx_ring *
1380 		ixl_rxr_alloc(struct ixl_softc *, unsigned int);
1381 static void	ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *);
1382 static int	ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *);
1383 static int	ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *);
1384 static void	ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *);
1385 static void	ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *);
1386 static void	ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *);
1387 static int	ixl_rxeof(struct ixl_softc *, struct ixl_rx_ring *);
1388 static void	ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *);
1389 static void	ixl_rxrefill(void *);
1390 static int	ixl_rxrinfo(struct ixl_softc *, struct if_rxrinfo *);
1391 
1392 #if NKSTAT > 0
1393 static void	ixl_kstat_attach(struct ixl_softc *);
1394 #endif
1395 
1396 struct cfdriver ixl_cd = {
1397 	NULL,
1398 	"ixl",
1399 	DV_IFNET,
1400 };
1401 
1402 struct cfattach ixl_ca = {
1403 	sizeof(struct ixl_softc),
1404 	ixl_match,
1405 	ixl_attach,
1406 };
1407 
1408 static const struct ixl_phy_type ixl_phy_type_map[] = {
1409 	{ 1ULL << IXL_PHY_TYPE_SGMII,		IFM_1000_SGMII },
1410 	{ 1ULL << IXL_PHY_TYPE_1000BASE_KX,	IFM_1000_KX },
1411 	{ 1ULL << IXL_PHY_TYPE_10GBASE_KX4,	IFM_10G_KX4 },
1412 	{ 1ULL << IXL_PHY_TYPE_10GBASE_KR,	IFM_10G_KR },
1413 	{ 1ULL << IXL_PHY_TYPE_40GBASE_KR4,	IFM_40G_KR4 },
1414 	{ 1ULL << IXL_PHY_TYPE_XAUI |
1415 	  1ULL << IXL_PHY_TYPE_XFI,		IFM_10G_CX4 },
1416 	{ 1ULL << IXL_PHY_TYPE_SFI,		IFM_10G_SFI },
1417 	{ 1ULL << IXL_PHY_TYPE_XLAUI |
1418 	  1ULL << IXL_PHY_TYPE_XLPPI,		IFM_40G_XLPPI },
1419 	{ 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU |
1420 	  1ULL << IXL_PHY_TYPE_40GBASE_CR4,	IFM_40G_CR4 },
1421 	{ 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU |
1422 	  1ULL << IXL_PHY_TYPE_10GBASE_CR1,	IFM_10G_CR1 },
1423 	{ 1ULL << IXL_PHY_TYPE_10GBASE_AOC,	IFM_10G_AOC },
1424 	{ 1ULL << IXL_PHY_TYPE_40GBASE_AOC,	IFM_40G_AOC },
1425 	{ 1ULL << IXL_PHY_TYPE_100BASE_TX,	IFM_100_TX },
1426 	{ 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL |
1427 	  1ULL << IXL_PHY_TYPE_1000BASE_T,	IFM_1000_T },
1428 	{ 1ULL << IXL_PHY_TYPE_10GBASE_T,	IFM_10G_T },
1429 	{ 1ULL << IXL_PHY_TYPE_10GBASE_SR,	IFM_10G_SR },
1430 	{ 1ULL << IXL_PHY_TYPE_10GBASE_LR,	IFM_10G_LR },
1431 	{ 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU,	IFM_10G_SFP_CU },
1432 	{ 1ULL << IXL_PHY_TYPE_40GBASE_SR4,	IFM_40G_SR4 },
1433 	{ 1ULL << IXL_PHY_TYPE_40GBASE_LR4,	IFM_40G_LR4 },
1434 	{ 1ULL << IXL_PHY_TYPE_1000BASE_SX,	IFM_1000_SX },
1435 	{ 1ULL << IXL_PHY_TYPE_1000BASE_LX,	IFM_1000_LX },
1436 	{ 1ULL << IXL_PHY_TYPE_20GBASE_KR2,	IFM_20G_KR2 },
1437 	{ 1ULL << IXL_PHY_TYPE_25GBASE_KR,	IFM_25G_KR },
1438 	{ 1ULL << IXL_PHY_TYPE_25GBASE_CR,	IFM_25G_CR },
1439 	{ 1ULL << IXL_PHY_TYPE_25GBASE_SR,	IFM_25G_SR },
1440 	{ 1ULL << IXL_PHY_TYPE_25GBASE_LR,	IFM_25G_LR },
1441 	{ 1ULL << IXL_PHY_TYPE_25GBASE_AOC,	IFM_25G_AOC },
1442 	{ 1ULL << IXL_PHY_TYPE_25GBASE_ACC,	IFM_25G_CR },
1443 };
1444 
1445 static const struct ixl_speed_type ixl_speed_type_map[] = {
1446 	{ IXL_AQ_LINK_SPEED_40GB,		IF_Gbps(40) },
1447 	{ IXL_AQ_LINK_SPEED_25GB,		IF_Gbps(25) },
1448 	{ IXL_AQ_LINK_SPEED_10GB,		IF_Gbps(10) },
1449 	{ IXL_AQ_LINK_SPEED_1GB,		IF_Gbps(1) },
1450 };
1451 
1452 static const struct ixl_aq_regs ixl_pf_aq_regs = {
1453 	.atq_tail	= I40E_PF_ATQT,
1454 	.atq_tail_mask	= I40E_PF_ATQT_ATQT_MASK,
1455 	.atq_head	= I40E_PF_ATQH,
1456 	.atq_head_mask	= I40E_PF_ATQH_ATQH_MASK,
1457 	.atq_len	= I40E_PF_ATQLEN,
1458 	.atq_bal	= I40E_PF_ATQBAL,
1459 	.atq_bah	= I40E_PF_ATQBAH,
1460 	.atq_len_enable	= I40E_PF_ATQLEN_ATQENABLE_MASK,
1461 
1462 	.arq_tail	= I40E_PF_ARQT,
1463 	.arq_tail_mask	= I40E_PF_ARQT_ARQT_MASK,
1464 	.arq_head	= I40E_PF_ARQH,
1465 	.arq_head_mask	= I40E_PF_ARQH_ARQH_MASK,
1466 	.arq_len	= I40E_PF_ARQLEN,
1467 	.arq_bal	= I40E_PF_ARQBAL,
1468 	.arq_bah	= I40E_PF_ARQBAH,
1469 	.arq_len_enable	= I40E_PF_ARQLEN_ARQENABLE_MASK,
1470 };
1471 
1472 #define ixl_rd(_s, _r) \
1473 	bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
1474 #define ixl_wr(_s, _r, _v) \
1475 	bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
1476 #define ixl_barrier(_s, _r, _l, _o) \
1477 	bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
1478 #define ixl_intr_enable(_s) \
1479 	ixl_wr((_s), I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_INTENA_MASK | \
1480 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | \
1481 	    (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT))
1482 
1483 #define ixl_nqueues(_sc)	(1 << (_sc)->sc_nqueues)
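/*
 * sc_nqueues holds the log2 of the queue count, so sc_nqueues == 3
 * means ixl_nqueues() == 8 rx/tx queue pairs.
 */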
1484 
1485 #ifdef __LP64__
1486 #define ixl_dmamem_hi(_ixm)	(uint32_t)(IXL_DMA_DVA(_ixm) >> 32)
1487 #else
1488 #define ixl_dmamem_hi(_ixm)	0
1489 #endif
1490 
1491 #define ixl_dmamem_lo(_ixm)	(uint32_t)IXL_DMA_DVA(_ixm)
1492 
1493 static inline void
1494 ixl_aq_dva(struct ixl_aq_desc *iaq, bus_addr_t addr)
1495 {
1496 #ifdef __LP64__
1497 	htolem32(&iaq->iaq_param[2], addr >> 32);
1498 #else
1499 	iaq->iaq_param[2] = htole32(0);
1500 #endif
1501 	htolem32(&iaq->iaq_param[3], addr);
1502 }
1503 
1504 #if _BYTE_ORDER == _BIG_ENDIAN
1505 #define HTOLE16(_x)	(uint16_t)(((_x) & 0xff) << 8 | ((_x) & 0xff00) >> 8)
1506 #else
1507 #define HTOLE16(_x)	(_x)
1508 #endif
1509 
1510 static struct rwlock ixl_sff_lock = RWLOCK_INITIALIZER("ixlsff");
1511 
1512 /* deal with differences between chips */
1513 
1514 struct ixl_chip {
1515 	uint64_t		  ic_rss_hena;
1516 	uint32_t		(*ic_rd_ctl)(struct ixl_softc *, uint32_t);
1517 	void			(*ic_wr_ctl)(struct ixl_softc *, uint32_t,
1518 				      uint32_t);
1519 
1520 	int			(*ic_set_rss_key)(struct ixl_softc *,
1521 				      const struct ixl_rss_key *);
1522 	int			(*ic_set_rss_lut)(struct ixl_softc *,
1523 				      const struct ixl_rss_lut_128 *);
1524 };
1525 
1526 static inline uint64_t
1527 ixl_rss_hena(struct ixl_softc *sc)
1528 {
1529 	return (sc->sc_chip->ic_rss_hena);
1530 }
1531 
1532 static inline uint32_t
1533 ixl_rd_ctl(struct ixl_softc *sc, uint32_t r)
1534 {
1535 	return ((*sc->sc_chip->ic_rd_ctl)(sc, r));
1536 }
1537 
1538 static inline void
1539 ixl_wr_ctl(struct ixl_softc *sc, uint32_t r, uint32_t v)
1540 {
1541 	(*sc->sc_chip->ic_wr_ctl)(sc, r, v);
1542 }
1543 
1544 static inline int
1545 ixl_set_rss_key(struct ixl_softc *sc, const struct ixl_rss_key *rsskey)
1546 {
1547 	return ((*sc->sc_chip->ic_set_rss_key)(sc, rsskey));
1548 }
1549 
1550 static inline int
1551 ixl_set_rss_lut(struct ixl_softc *sc, const struct ixl_rss_lut_128 *lut)
1552 {
1553 	return ((*sc->sc_chip->ic_set_rss_lut)(sc, lut));
1554 }
1555 
1556 /* 710 chip specifics */
1557 
1558 static uint32_t		ixl_710_rd_ctl(struct ixl_softc *, uint32_t);
1559 static void		ixl_710_wr_ctl(struct ixl_softc *, uint32_t, uint32_t);
1560 static int		ixl_710_set_rss_key(struct ixl_softc *,
1561 			    const struct ixl_rss_key *);
1562 static int		ixl_710_set_rss_lut(struct ixl_softc *,
1563 			    const struct ixl_rss_lut_128 *);
1564 
1565 static const struct ixl_chip ixl_710 = {
1566 	.ic_rss_hena =		IXL_RSS_HENA_BASE_710,
1567 	.ic_rd_ctl =		ixl_710_rd_ctl,
1568 	.ic_wr_ctl =		ixl_710_wr_ctl,
1569 	.ic_set_rss_key =	ixl_710_set_rss_key,
1570 	.ic_set_rss_lut =	ixl_710_set_rss_lut,
1571 };
1572 
1573 /* 722 chip specifics */
1574 
1575 static uint32_t		ixl_722_rd_ctl(struct ixl_softc *, uint32_t);
1576 static void		ixl_722_wr_ctl(struct ixl_softc *, uint32_t, uint32_t);
1577 static int		ixl_722_set_rss_key(struct ixl_softc *,
1578 			    const struct ixl_rss_key *);
1579 static int		ixl_722_set_rss_lut(struct ixl_softc *,
1580 			    const struct ixl_rss_lut_128 *);
1581 
1582 static const struct ixl_chip ixl_722 = {
1583 	.ic_rss_hena =		IXL_RSS_HENA_BASE_722,
1584 	.ic_rd_ctl =		ixl_722_rd_ctl,
1585 	.ic_wr_ctl =		ixl_722_wr_ctl,
1586 	.ic_set_rss_key =	ixl_722_set_rss_key,
1587 	.ic_set_rss_lut =	ixl_722_set_rss_lut,
1588 };
1589 
1590 /*
1591  * 710 chips running an older firmware/API use the same ctl ops as
1592  * 722 chips, so they are pointed at the 722 register handlers while
1593  * keeping the 710 RSS configuration.
1594  */
1595 
1596 static const struct ixl_chip ixl_710_decrepit = {
1597 	.ic_rss_hena =		IXL_RSS_HENA_BASE_710,
1598 	.ic_rd_ctl =		ixl_722_rd_ctl,
1599 	.ic_wr_ctl =		ixl_722_wr_ctl,
1600 	.ic_set_rss_key =	ixl_710_set_rss_key,
1601 	.ic_set_rss_lut =	ixl_710_set_rss_lut,
1602 };
1603 
1604 /* driver code */
1605 
1606 struct ixl_device {
1607 	const struct ixl_chip	*id_chip;
1608 	pci_vendor_id_t		 id_vid;
1609 	pci_product_id_t	 id_pid;
1610 };
1611 
1612 static const struct ixl_device ixl_devices[] = {
1613 	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_SFP },
1614 	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_SFP_2 },
1615 	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_40G_BP },
1616 	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_BP, },
1617 	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_1 },
1618 	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_2 },
1619 	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_QSFP },
1620 	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_BASET },
1621 	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
1622 	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
1623 	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_T4_10G },
1624 	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_BP },
1625 	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_SFP28, },
1626 	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_T, },
1627 	{ &ixl_722, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_KX },
1628 	{ &ixl_722, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_QSFP },
1629 	{ &ixl_722, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_SFP_1 },
1630 	{ &ixl_722, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_1G },
1631 	{ &ixl_722, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_T },
1632 	{ &ixl_722, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_SFP_2 },
1633 };
1634 
1635 static const struct ixl_device *
1636 ixl_device_lookup(struct pci_attach_args *pa)
1637 {
1638 	pci_vendor_id_t vid = PCI_VENDOR(pa->pa_id);
1639 	pci_product_id_t pid = PCI_PRODUCT(pa->pa_id);
1640 	const struct ixl_device *id;
1641 	unsigned int i;
1642 
1643 	for (i = 0; i < nitems(ixl_devices); i++) {
1644 		id = &ixl_devices[i];
1645 		if (id->id_vid == vid && id->id_pid == pid)
1646 			return (id);
1647 	}
1648 
1649 	return (NULL);
1650 }
1651 
1652 static int
1653 ixl_match(struct device *parent, void *match, void *aux)
1654 {
1655 	return (ixl_device_lookup(aux) != NULL);
1656 }
1657 
1658 void
1659 ixl_attach(struct device *parent, struct device *self, void *aux)
1660 {
1661 	struct ixl_softc *sc = (struct ixl_softc *)self;
1662 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1663 	struct pci_attach_args *pa = aux;
1664 	pcireg_t memtype;
1665 	uint32_t port, ari, func;
1666 	uint64_t phy_types = 0;
1667 	unsigned int nqueues, i;
1668 	int tries;
1669 
1670 	rw_init(&sc->sc_cfg_lock, "ixlcfg");
1671 
1672 	sc->sc_chip = ixl_device_lookup(pa)->id_chip;
1673 	sc->sc_pc = pa->pa_pc;
1674 	sc->sc_tag = pa->pa_tag;
1675 	sc->sc_dmat = pa->pa_dmat;
1676 	sc->sc_aq_regs = &ixl_pf_aq_regs;
1677 
1678 	sc->sc_nqueues = 0; /* 1 << 0 is 1 queue */
1679 	sc->sc_tx_ring_ndescs = 1024;
1680 	sc->sc_rx_ring_ndescs = 1024;
1681 
1682 	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, IXL_PCIREG);
1683 	if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0,
1684 	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
1685 		printf(": unable to map registers\n");
1686 		return;
1687 	}
1688 
1689 	sc->sc_base_queue = (ixl_rd(sc, I40E_PFLAN_QALLOC) &
1690 	    I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
1691 	    I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1692 
1693 	ixl_clear_hw(sc);
1694 	if (ixl_pf_reset(sc) == -1) {
1695 		/* error printed by ixl_pf_reset */
1696 		goto unmap;
1697 	}
1698 
1699 	port = ixl_rd(sc, I40E_PFGEN_PORTNUM);
1700 	port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK;
1701 	port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
1702 	sc->sc_port = port;
1703 	printf(": port %u", port);
1704 
1705 	ari = ixl_rd(sc, I40E_GLPCI_CAPSUP);
1706 	ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK;
1707 	ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
1708 
1709 	func = ixl_rd(sc, I40E_PF_FUNC_RID);
1710 	sc->sc_pf_id = func & (ari ? 0xff : 0x7);
1711 
1712 	/* initialise the adminq */
1713 
1714 	if (ixl_dmamem_alloc(sc, &sc->sc_atq,
1715 	    sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1716 		printf("\n" "%s: unable to allocate atq\n", DEVNAME(sc));
1717 		goto unmap;
1718 	}
1719 
1720 	SIMPLEQ_INIT(&sc->sc_arq_idle);
1721 	SIMPLEQ_INIT(&sc->sc_arq_live);
1722 	if_rxr_init(&sc->sc_arq_ring, 2, IXL_AQ_NUM - 1);
1723 	task_set(&sc->sc_arq_task, ixl_arq, sc);
1724 	sc->sc_arq_cons = 0;
1725 	sc->sc_arq_prod = 0;
1726 
1727 	if (ixl_dmamem_alloc(sc, &sc->sc_arq,
1728 	    sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1729 		printf("\n" "%s: unable to allocate arq\n", DEVNAME(sc));
1730 		goto free_atq;
1731 	}
1732 
1733 	if (!ixl_arq_fill(sc)) {
1734 		printf("\n" "%s: unable to fill arq descriptors\n",
1735 		    DEVNAME(sc));
1736 		goto free_arq;
1737 	}
1738 
1739 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1740 	    0, IXL_DMA_LEN(&sc->sc_atq),
1741 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1742 
1743 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1744 	    0, IXL_DMA_LEN(&sc->sc_arq),
1745 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1746 
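	/*
	 * Program the admin queue registers and probe the firmware.  A
	 * timeout from GET_VERSION is taken to mean the firmware is still
	 * coming up after the PF reset, so the registers are reprogrammed
	 * and the probe retried, up to ten attempts 100ms apart.
	 */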
1747 	for (tries = 0; tries < 10; tries++) {
1748 		int rv;
1749 
1750 		sc->sc_atq_cons = 0;
1751 		sc->sc_atq_prod = 0;
1752 
1753 		ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1754 		ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1755 		ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1756 		ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1757 
1758 		ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
1759 
1760 		ixl_wr(sc, sc->sc_aq_regs->atq_bal,
1761 		    ixl_dmamem_lo(&sc->sc_atq));
1762 		ixl_wr(sc, sc->sc_aq_regs->atq_bah,
1763 		    ixl_dmamem_hi(&sc->sc_atq));
1764 		ixl_wr(sc, sc->sc_aq_regs->atq_len,
1765 		    sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM);
1766 
1767 		ixl_wr(sc, sc->sc_aq_regs->arq_bal,
1768 		    ixl_dmamem_lo(&sc->sc_arq));
1769 		ixl_wr(sc, sc->sc_aq_regs->arq_bah,
1770 		    ixl_dmamem_hi(&sc->sc_arq));
1771 		ixl_wr(sc, sc->sc_aq_regs->arq_len,
1772 		    sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM);
1773 
1774 		rv = ixl_get_version(sc);
1775 		if (rv == 0)
1776 			break;
1777 		if (rv != ETIMEDOUT) {
1778 			printf(", unable to get firmware version\n");
1779 			goto shutdown;
1780 		}
1781 
1782 		delaymsec(100);
1783 	}
1784 
1785 	ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
1786 
1787 	if (ixl_pxe_clear(sc) != 0) {
1788 		/* error printed by ixl_pxe_clear */
1789 		goto shutdown;
1790 	}
1791 
1792 	if (ixl_get_mac(sc) != 0) {
1793 		/* error printed by ixl_get_mac */
1794 		goto shutdown;
1795 	}
1796 
1797 	if (pci_intr_map_msix(pa, 0, &sc->sc_ih) == 0) {
1798 		int nmsix = pci_intr_msix_count(pa);
1799 		if (nmsix > 1) { /* we used 1 (the 0th) for the adminq */
1800 			nmsix--;
1801 
1802 			sc->sc_intrmap = intrmap_create(&sc->sc_dev,
1803 			    nmsix, IXL_MAX_VECTORS, INTRMAP_POWEROF2);
1804 			nqueues = intrmap_count(sc->sc_intrmap);
1805 			KASSERT(nqueues > 0);
1806 			KASSERT(powerof2(nqueues));
1807 			sc->sc_nqueues = fls(nqueues) - 1;
1808 		}
1809 	} else {
1810 		if (pci_intr_map_msi(pa, &sc->sc_ih) != 0 &&
1811 		    pci_intr_map(pa, &sc->sc_ih) != 0) {
1812 			printf(", unable to map interrupt\n");
1813 			goto shutdown;
1814 		}
1815 	}
1816 
1817 	nqueues = ixl_nqueues(sc);
1818 
1819 	printf(", %s, %d queue%s, address %s\n",
1820 	    pci_intr_string(sc->sc_pc, sc->sc_ih), ixl_nqueues(sc),
1821 	    (nqueues > 1 ? "s" : ""),
1822 	    ether_sprintf(sc->sc_ac.ac_enaddr));
1823 
1824 	if (ixl_hmc(sc) != 0) {
1825 		/* error printed by ixl_hmc */
1826 		goto shutdown;
1827 	}
1828 
1829 	if (ixl_lldp_shut(sc) != 0) {
1830 		/* error printed by ixl_lldp_shut */
1831 		goto free_hmc;
1832 	}
1833 
1834 	if (ixl_phy_mask_ints(sc) != 0) {
1835 		/* error printed by ixl_phy_mask_ints */
1836 		goto free_hmc;
1837 	}
1838 
1839 	if (ixl_restart_an(sc) != 0) {
1840 		/* error printed by ixl_restart_an */
1841 		goto free_hmc;
1842 	}
1843 
1844 	if (ixl_get_switch_config(sc) != 0) {
1845 		/* error printed by ixl_get_switch_config */
1846 		goto free_hmc;
1847 	}
1848 
1849 	if (ixl_get_phy_types(sc, &phy_types) != 0) {
1850 		/* error printed by ixl_get_phy_abilities */
1851 		goto free_hmc;
1852 	}
1853 
1854 	if (ixl_get_link_status(sc) != 0) {
1855 		/* error printed by ixl_get_link_status */
1856 		goto free_hmc;
1857 	}
1858 
1859 	if (ixl_dmamem_alloc(sc, &sc->sc_scratch,
1860 	    sizeof(struct ixl_aq_vsi_data), 8) != 0) {
1861 		printf("%s: unable to allocate scratch buffer\n", DEVNAME(sc));
1862 		goto free_hmc;
1863 	}
1864 
1865 	if (ixl_get_vsi(sc) != 0) {
1866 		/* error printed by ixl_get_vsi */
1867 		goto free_hmc;
1868 	}
1869 
1870 	if (ixl_set_vsi(sc) != 0) {
1871 		/* error printed by ixl_set_vsi */
1872 		goto free_scratch;
1873 	}
1874 
1875 	sc->sc_ihc = pci_intr_establish(sc->sc_pc, sc->sc_ih,
1876 	    IPL_NET | IPL_MPSAFE, ixl_intr0, sc, DEVNAME(sc));
1877 	if (sc->sc_ihc == NULL) {
1878 		printf("%s: unable to establish interrupt handler\n",
1879 		    DEVNAME(sc));
1880 		goto free_scratch;
1881 	}
1882 
1883 	sc->sc_vectors = mallocarray(sizeof(*sc->sc_vectors), nqueues,
1884 	    M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
1885 	if (sc->sc_vectors == NULL) {
1886 		printf("%s: unable to allocate vectors\n", DEVNAME(sc));
1887 		goto free_scratch;
1888 	}
1889 
1890 	for (i = 0; i < nqueues; i++) {
1891 		struct ixl_vector *iv = &sc->sc_vectors[i];
1892 		iv->iv_sc = sc;
1893 		iv->iv_qid = i;
1894 		snprintf(iv->iv_name, sizeof(iv->iv_name),
1895 		    "%s:%u", DEVNAME(sc), i); /* name may be truncated */
1896 	}
1897 
1898 	if (sc->sc_intrmap) {
1899 		for (i = 0; i < nqueues; i++) {
1900 			struct ixl_vector *iv = &sc->sc_vectors[i];
1901 			pci_intr_handle_t ih;
1902 			int v = i + 1; /* 0 is used for adminq */
1903 
1904 			if (pci_intr_map_msix(pa, v, &ih)) {
1905 				printf("%s: unable to map msi-x vector %d\n",
1906 				    DEVNAME(sc), v);
1907 				goto free_vectors;
1908 			}
1909 
1910 			iv->iv_ihc = pci_intr_establish_cpu(sc->sc_pc, ih,
1911 			    IPL_NET | IPL_MPSAFE,
1912 			    intrmap_cpu(sc->sc_intrmap, i),
1913 			    ixl_intr_vector, iv, iv->iv_name);
1914 			if (iv->iv_ihc == NULL) {
1915 				printf("%s: unable to establish interrupt %d\n",
1916 				    DEVNAME(sc), v);
1917 				goto free_vectors;
1918 			}
1919 
1920 			ixl_wr(sc, I40E_PFINT_DYN_CTLN(i),
1921 			    I40E_PFINT_DYN_CTLN_INTENA_MASK |
1922 			    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1923 			    (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1924 		}
1925 	}
1926 
1927 	/* fixup the chip ops for older fw releases */
1928 	if (sc->sc_chip == &ixl_710 &&
1929 	    sc->sc_api_major == 1 && sc->sc_api_minor < 5)
1930 		sc->sc_chip = &ixl_710_decrepit;
1931 
1932 	ifp->if_softc = sc;
1933 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1934 	ifp->if_xflags = IFXF_MPSAFE;
1935 	ifp->if_ioctl = ixl_ioctl;
1936 	ifp->if_qstart = ixl_start;
1937 	ifp->if_watchdog = ixl_watchdog;
1938 	ifp->if_hardmtu = IXL_HARDMTU;
1939 	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
1940 	ifq_set_maxlen(&ifp->if_snd, sc->sc_tx_ring_ndescs);
1941 
1942 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1943 #if 0
1944 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1945 	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
1946 	    IFCAP_CSUM_UDPv4;
1947 #endif
1948 
1949 	ifmedia_init(&sc->sc_media, 0, ixl_media_change, ixl_media_status);
1950 
1951 	ixl_media_add(sc, phy_types);
1952 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1953 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
1954 
1955 	if_attach(ifp);
1956 	ether_ifattach(ifp);
1957 
1958 	if_attach_queues(ifp, nqueues);
1959 	if_attach_iqueues(ifp, nqueues);
1960 
1961 	mtx_init(&sc->sc_link_state_mtx, IPL_NET);
1962 	task_set(&sc->sc_link_state_task, ixl_link_state_update, sc);
1963 	ixl_wr(sc, I40E_PFINT_ICR0_ENA,
1964 	    I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK |
1965 	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK);
1966 	ixl_wr(sc, I40E_PFINT_STAT_CTL0,
1967 	    IXL_NOITR << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);
1968 
1969 	/* remove default mac filter and replace it so we can see vlans */
1970 	ixl_remove_macvlan(sc, sc->sc_ac.ac_enaddr, 0, 0);
1971 	ixl_remove_macvlan(sc, sc->sc_ac.ac_enaddr, 0,
1972 	    IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1973 	ixl_add_macvlan(sc, sc->sc_ac.ac_enaddr, 0,
1974 	    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1975 	ixl_add_macvlan(sc, etherbroadcastaddr, 0,
1976 	    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1977 	memcpy(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
1978 
1979 	ixl_intr_enable(sc);
1980 
1981 #if NKSTAT > 0
1982 	ixl_kstat_attach(sc);
1983 #endif
1984 
1985 	return;
1986 free_vectors:
1987 	if (sc->sc_intrmap != NULL) {
1988 		for (i = 0; i < nqueues; i++) {
1989 			struct ixl_vector *iv = &sc->sc_vectors[i];
1990 			if (iv->iv_ihc == NULL)
1991 				continue;
1992 			pci_intr_disestablish(sc->sc_pc, iv->iv_ihc);
1993 		}
1994 	}
1995 	free(sc->sc_vectors, M_DEVBUF, nqueues * sizeof(*sc->sc_vectors));
1996 free_scratch:
1997 	ixl_dmamem_free(sc, &sc->sc_scratch);
1998 free_hmc:
1999 	ixl_hmc_free(sc);
2000 shutdown:
2001 	ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
2002 	ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
2003 	ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
2004 	ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
2005 
2006 	ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
2007 	ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
2008 	ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
2009 
2010 	ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
2011 	ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
2012 	ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
2013 
2014 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
2015 	    0, IXL_DMA_LEN(&sc->sc_arq),
2016 	    BUS_DMASYNC_POSTREAD);
2017 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
2018 	    0, IXL_DMA_LEN(&sc->sc_atq),
2019 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2020 
2021 	ixl_arq_unfill(sc);
2022 
2023 free_arq:
2024 	ixl_dmamem_free(sc, &sc->sc_arq);
2025 free_atq:
2026 	ixl_dmamem_free(sc, &sc->sc_atq);
2027 unmap:
2028 	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
2029 	sc->sc_mems = 0;
2030 
2031 	if (sc->sc_intrmap != NULL)
2032 		intrmap_destroy(sc->sc_intrmap);
2033 }
2034 
2035 static void
2036 ixl_media_add(struct ixl_softc *sc, uint64_t phy_types)
2037 {
2038 	struct ifmedia *ifm = &sc->sc_media;
2039 	const struct ixl_phy_type *itype;
2040 	unsigned int i;
2041 
2042 	for (i = 0; i < nitems(ixl_phy_type_map); i++) {
2043 		itype = &ixl_phy_type_map[i];
2044 
2045 		if (ISSET(phy_types, itype->phy_type))
2046 			ifmedia_add(ifm, IFM_ETHER | itype->ifm_type, 0, NULL);
2047 	}
2048 }
2049 
2050 static int
2051 ixl_media_change(struct ifnet *ifp)
2052 {
2053 	/* media changes are not supported */
2054 	return (EOPNOTSUPP);
2055 }
2056 
2057 static void
2058 ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifm)
2059 {
2060 	struct ixl_softc *sc = ifp->if_softc;
2061 
2062 	NET_ASSERT_LOCKED();
2063 
2064 	ifm->ifm_status = sc->sc_media_status;
2065 	ifm->ifm_active = sc->sc_media_active;
2066 }
2067 
2068 static void
2069 ixl_watchdog(struct ifnet *ifp)
2070 {
2071 
2072 }
2073 
2074 int
2075 ixl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2076 {
2077 	struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc;
2078 	struct ifreq *ifr = (struct ifreq *)data;
2079 	uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
2080 	int aqerror, error = 0;
2081 
2082 	switch (cmd) {
2083 	case SIOCSIFADDR:
2084 		ifp->if_flags |= IFF_UP;
2085 		/* FALLTHROUGH */
2086 
2087 	case SIOCSIFFLAGS:
2088 		if (ISSET(ifp->if_flags, IFF_UP)) {
2089 			if (ISSET(ifp->if_flags, IFF_RUNNING))
2090 				error = ENETRESET;
2091 			else
2092 				error = ixl_up(sc);
2093 		} else {
2094 			if (ISSET(ifp->if_flags, IFF_RUNNING))
2095 				error = ixl_down(sc);
2096 		}
2097 		break;
2098 
2099 	case SIOCGIFMEDIA:
2100 	case SIOCSIFMEDIA:
2101 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
2102 		break;
2103 
2104 	case SIOCGIFRXR:
2105 		error = ixl_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
2106 		break;
2107 
2108 	case SIOCADDMULTI:
2109 		if (ether_addmulti(ifr, &sc->sc_ac) == ENETRESET) {
2110 			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
2111 			if (error != 0)
2112 				return (error);
2113 
2114 			aqerror = ixl_add_macvlan(sc, addrlo, 0,
2115 			    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
2116 			if (aqerror == IXL_AQ_RC_ENOSPC) {
2117 				ether_delmulti(ifr, &sc->sc_ac);
2118 				error = ENOSPC;
2119 			}
2120 
2121 			if (sc->sc_ac.ac_multirangecnt > 0) {
2122 				SET(ifp->if_flags, IFF_ALLMULTI);
2123 				error = ENETRESET;
2124 			}
2125 		}
2126 		break;
2127 
2128 	case SIOCDELMULTI:
2129 		if (ether_delmulti(ifr, &sc->sc_ac) == ENETRESET) {
2130 			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
2131 			if (error != 0)
2132 				return (error);
2133 
2134 			ixl_remove_macvlan(sc, addrlo, 0,
2135 			    IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
2136 
2137 			if (ISSET(ifp->if_flags, IFF_ALLMULTI) &&
2138 			    sc->sc_ac.ac_multirangecnt == 0) {
2139 				CLR(ifp->if_flags, IFF_ALLMULTI);
2140 				error = ENETRESET;
2141 			}
2142 		}
2143 		break;
2144 
2145 	case SIOCGIFSFFPAGE:
2146 		error = rw_enter(&ixl_sff_lock, RW_WRITE|RW_INTR);
2147 		if (error != 0)
2148 			break;
2149 
2150 		error = ixl_get_sffpage(sc, (struct if_sffpage *)data);
2151 		rw_exit(&ixl_sff_lock);
2152 		break;
2153 
2154 	default:
2155 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
2156 		break;
2157 	}
2158 
2159 	if (error == ENETRESET)
2160 		error = ixl_iff(sc);
2161 
2162 	return (error);
2163 }
2164 
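/*
 * The hardware's HMC backing store is a single DMA allocation carved
 * into per-object regions; the context for instance i of an object
 * type lives at base + i * size within its region.
 */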
2165 static inline void *
2166 ixl_hmc_kva(struct ixl_softc *sc, unsigned int type, unsigned int i)
2167 {
2168 	uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
2169 	struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
2170 
2171 	if (i >= e->hmc_count)
2172 		return (NULL);
2173 
2174 	kva += e->hmc_base;
2175 	kva += i * e->hmc_size;
2176 
2177 	return (kva);
2178 }
2179 
2180 static inline size_t
2181 ixl_hmc_len(struct ixl_softc *sc, unsigned int type)
2182 {
2183 	struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
2184 
2185 	return (e->hmc_size);
2186 }
2187 
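/*
 * RSS setup: the Toeplitz key is taken from the stack so hardware
 * and software flow hashes agree, and the 128-entry LUT spreads hash
 * values round-robin across the rings (with 4 rings the pattern is
 * 0,1,2,3,0,1,...).
 */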
2188 static int
2189 ixl_configure_rss(struct ixl_softc *sc)
2190 {
2191 	struct ixl_rss_key rsskey;
2192 	struct ixl_rss_lut_128 lut;
2193 	uint8_t *lute = (uint8_t *)&lut;
2194 	uint64_t rss_hena;
2195 	unsigned int i, nqueues;
2196 	int error;
2197 
2198 #if 0
2199 	/* if we want to do a 512 entry LUT, do this. */
2200 	uint32_t v = ixl_rd_ctl(sc, I40E_PFQF_CTL_0);
2201 	SET(v, I40E_PFQF_CTL_0_HASHLUTSIZE_MASK);
2202 	ixl_wr_ctl(sc, I40E_PFQF_CTL_0, v);
2203 #endif
2204 
2205 	stoeplitz_to_key(&rsskey, sizeof(rsskey));
2206 
2207 	nqueues = ixl_nqueues(sc);
2208 	for (i = 0; i < sizeof(lut); i++) {
2209 		/*
2210 		 * ixl must have a power of 2 number of rings, so
2211 		 * using mod to populate the table is fine.
2212 		 */
2213 		lute[i] = i % nqueues;
2214 	}
2215 
2216 	error = ixl_set_rss_key(sc, &rsskey);
2217 	if (error != 0)
2218 		return (error);
2219 
2220 	rss_hena = (uint64_t)ixl_rd_ctl(sc, I40E_PFQF_HENA(0));
2221 	rss_hena |= (uint64_t)ixl_rd_ctl(sc, I40E_PFQF_HENA(1)) << 32;
2222 	rss_hena |= ixl_rss_hena(sc);
2223 	ixl_wr_ctl(sc, I40E_PFQF_HENA(0), rss_hena);
2224 	ixl_wr_ctl(sc, I40E_PFQF_HENA(1), rss_hena >> 32);
2225 
2226 	error = ixl_set_rss_lut(sc, &lut);
2227 	if (error != 0)
2228 		return (error);
2229 
2230 	/* nothing to clean up :( */
2231 
2232 	return (0);
2233 }
2234 
2235 static int
2236 ixl_up(struct ixl_softc *sc)
2237 {
2238 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2239 	struct ifqueue *ifq;
2240 	struct ifiqueue *ifiq;
2241 	struct ixl_vector *iv;
2242 	struct ixl_rx_ring *rxr;
2243 	struct ixl_tx_ring *txr;
2244 	unsigned int nqueues, i;
2245 	uint32_t reg;
2246 	int rv = ENOMEM;
2247 
2248 	nqueues = ixl_nqueues(sc);
2249 
2250 	rw_enter_write(&sc->sc_cfg_lock);
2251 	if (sc->sc_dead) {
2252 		rw_exit_write(&sc->sc_cfg_lock);
2253 		return (ENXIO);
2254 	}
2255 
2256 	/* allocation is the only thing that can fail, so do it up front */
2257 	for (i = 0; i < nqueues; i++) {
2258 		rxr = ixl_rxr_alloc(sc, i);
2259 		if (rxr == NULL)
2260 			goto free;
2261 
2262 		txr = ixl_txr_alloc(sc, i);
2263 		if (txr == NULL) {
2264 			ixl_rxr_free(sc, rxr);
2265 			goto free;
2266 		}
2267 
2268 		/* wire everything together */
2269 		iv = &sc->sc_vectors[i];
2270 		iv->iv_rxr = rxr;
2271 		iv->iv_txr = txr;
2272 
2273 		ifq = ifp->if_ifqs[i];
2274 		ifq->ifq_softc = txr;
2275 		txr->txr_ifq = ifq;
2276 
2277 		ifiq = ifp->if_iqs[i];
2278 		ifiq->ifiq_softc = rxr;
2279 		rxr->rxr_ifiq = ifiq;
2280 	}
2281 
2282 	/* XXX wait 50ms from completion of last RX queue disable */
2283 
2284 	for (i = 0; i < nqueues; i++) {
2285 		iv = &sc->sc_vectors[i];
2286 		rxr = iv->iv_rxr;
2287 		txr = iv->iv_txr;
2288 
2289 		ixl_txr_qdis(sc, txr, 1);
2290 
2291 		ixl_rxr_config(sc, rxr);
2292 		ixl_txr_config(sc, txr);
2293 
2294 		ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE |
2295 		    (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT));
2296 
2297 		ixl_wr(sc, rxr->rxr_tail, 0);
2298 		ixl_rxfill(sc, rxr);
2299 
2300 		reg = ixl_rd(sc, I40E_QRX_ENA(i));
2301 		SET(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2302 		ixl_wr(sc, I40E_QRX_ENA(i), reg);
2303 
2304 		reg = ixl_rd(sc, I40E_QTX_ENA(i));
2305 		SET(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2306 		ixl_wr(sc, I40E_QTX_ENA(i), reg);
2307 	}
2308 
2309 	for (i = 0; i < nqueues; i++) {
2310 		iv = &sc->sc_vectors[i];
2311 		rxr = iv->iv_rxr;
2312 		txr = iv->iv_txr;
2313 
2314 		if (ixl_rxr_enabled(sc, rxr) != 0)
2315 			goto down;
2316 
2317 		if (ixl_txr_enabled(sc, txr) != 0)
2318 			goto down;
2319 	}
2320 
2321 	ixl_configure_rss(sc);
2322 
2323 	SET(ifp->if_flags, IFF_RUNNING);
2324 
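	/*
	 * Interrupt causes are linked lists hung off each vector via the
	 * LNKLST/RQCTL/TQCTL registers.  Without MSI-X a single list on
	 * vector 0 carries both the rx and tx causes; with MSI-X, queue
	 * pair n is chained to vector n + 1.
	 */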
2325 	if (sc->sc_intrmap == NULL) {
2326 		ixl_wr(sc, I40E_PFINT_LNKLST0,
2327 		    (I40E_INTR_NOTX_QUEUE <<
2328 		     I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
2329 		    (I40E_QUEUE_TYPE_RX <<
2330 		     I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2331 
2332 		ixl_wr(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE),
2333 		    (I40E_INTR_NOTX_INTR << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2334 		    (I40E_ITR_INDEX_RX << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2335 		    (I40E_INTR_NOTX_RX_QUEUE <<
2336 		     I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) |
2337 		    (I40E_INTR_NOTX_QUEUE << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2338 		    (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
2339 		    I40E_QINT_RQCTL_CAUSE_ENA_MASK);
2340 
2341 		ixl_wr(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE),
2342 		    (I40E_INTR_NOTX_INTR << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2343 		    (I40E_ITR_INDEX_TX << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2344 		    (I40E_INTR_NOTX_TX_QUEUE <<
2345 		     I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) |
2346 		    (I40E_QUEUE_TYPE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2347 		    (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |
2348 		    I40E_QINT_TQCTL_CAUSE_ENA_MASK);
2349 	} else {
2350 		/* vector 0 has no queues */
2351 		ixl_wr(sc, I40E_PFINT_LNKLST0,
2352 		    I40E_QUEUE_TYPE_EOL <<
2353 		    I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT);
2354 
2355 		/* queue n is mapped to vector n+1 */
2356 		for (i = 0; i < nqueues; i++) {
2357 			/* LNKLSTN(i) configures vector i+1 */
2358 			ixl_wr(sc, I40E_PFINT_LNKLSTN(i),
2359 			    (i << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
2360 			    (I40E_QUEUE_TYPE_RX <<
2361 			     I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2362 			ixl_wr(sc, I40E_QINT_RQCTL(i),
2363 			    ((i+1) << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2364 			    (I40E_ITR_INDEX_RX <<
2365 			     I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2366 			    (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2367 			    (I40E_QUEUE_TYPE_TX <<
2368 			     I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
2369 			    I40E_QINT_RQCTL_CAUSE_ENA_MASK);
2370 			ixl_wr(sc, I40E_QINT_TQCTL(i),
2371 			    ((i+1) << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2372 			    (I40E_ITR_INDEX_TX <<
2373 			     I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2374 			    (I40E_QUEUE_TYPE_EOL <<
2375 			     I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2376 			    (I40E_QUEUE_TYPE_RX <<
2377 			     I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |
2378 			    I40E_QINT_TQCTL_CAUSE_ENA_MASK);
2379 
2380 			ixl_wr(sc, I40E_PFINT_ITRN(0, i), 0x7a);
2381 			ixl_wr(sc, I40E_PFINT_ITRN(1, i), 0x7a);
2382 			ixl_wr(sc, I40E_PFINT_ITRN(2, i), 0);
2383 		}
2384 	}
2385 
2386 	ixl_wr(sc, I40E_PFINT_ITR0(0), 0x7a);
2387 	ixl_wr(sc, I40E_PFINT_ITR0(1), 0x7a);
2388 	ixl_wr(sc, I40E_PFINT_ITR0(2), 0);
2389 
2390 	rw_exit_write(&sc->sc_cfg_lock);
2391 
2392 	return (ENETRESET);
2393 
2394 free:
2395 	for (i = 0; i < nqueues; i++) {
2396 		iv = &sc->sc_vectors[i];
2397 		rxr = iv->iv_rxr;
2398 		txr = iv->iv_txr;
2399 
2400 		if (rxr == NULL) {
2401 			/*
2402 			 * tx and rx get set at the same time, so if one
2403 			 * is NULL, the other is too.
2404 			 */
2405 			continue;
2406 		}
2407 
2408 		ixl_txr_free(sc, txr);
2409 		ixl_rxr_free(sc, rxr);
2410 	}
2411 	rw_exit_write(&sc->sc_cfg_lock);
2412 	return (rv);
2413 down:
2414 	rw_exit_write(&sc->sc_cfg_lock);
2415 	ixl_down(sc);
2416 	return (ETIMEDOUT);
2417 }
2418 
2419 static int
2420 ixl_iff(struct ixl_softc *sc)
2421 {
2422 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2423 	struct ixl_atq iatq;
2424 	struct ixl_aq_desc *iaq;
2425 	struct ixl_aq_vsi_promisc_param *param;
2426 
2427 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2428 		return (0);
2429 
2430 	memset(&iatq, 0, sizeof(iatq));
2431 
2432 	iaq = &iatq.iatq_desc;
2433 	iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC);
2434 
2435 	param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param;
2436 	param->flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2437 	    IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2438 	if (ISSET(ifp->if_flags, IFF_PROMISC)) {
2439 		param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2440 		    IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2441 	} else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
2442 		param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2443 	}
2444 	param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2445 	    IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2446 	    IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2447 	param->seid = sc->sc_seid;
2448 
2449 	ixl_atq_exec(sc, &iatq, "ixliff");
2450 
2451 	if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK))
2452 		return (EIO);
2453 
2454 	if (memcmp(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN) != 0) {
2455 		ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
2456 		    IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
2457 		ixl_add_macvlan(sc, sc->sc_ac.ac_enaddr, 0,
2458 		    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
2459 		memcpy(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
2460 	}
2461 	return (0);
2462 }
2463 
2464 static int
2465 ixl_down(struct ixl_softc *sc)
2466 {
2467 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2468 	struct ixl_vector *iv;
2469 	struct ixl_rx_ring *rxr;
2470 	struct ixl_tx_ring *txr;
2471 	unsigned int nqueues, i;
2472 	uint32_t reg;
2473 	int error = 0;
2474 
2475 	nqueues = ixl_nqueues(sc);
2476 
2477 	rw_enter_write(&sc->sc_cfg_lock);
2478 
2479 	CLR(ifp->if_flags, IFF_RUNNING);
2480 
2481 	NET_UNLOCK();
2482 
2483 	/* mask interrupts */
2484 	reg = ixl_rd(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE));
2485 	CLR(reg, I40E_QINT_RQCTL_CAUSE_ENA_MASK);
2486 	ixl_wr(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE), reg);
2487 
2488 	reg = ixl_rd(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE));
2489 	CLR(reg, I40E_QINT_TQCTL_CAUSE_ENA_MASK);
2490 	ixl_wr(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE), reg);
2491 
2492 	ixl_wr(sc, I40E_PFINT_LNKLST0, I40E_QUEUE_TYPE_EOL);
2493 
2494 	/* make sure no hw generated work is still in flight */
2495 	intr_barrier(sc->sc_ihc);
2496 	if (sc->sc_intrmap != NULL) {
2497 		for (i = 0; i < nqueues; i++) {
2498 			iv = &sc->sc_vectors[i];
2499 			rxr = iv->iv_rxr;
2500 			txr = iv->iv_txr;
2501 
2502 			ixl_txr_qdis(sc, txr, 0);
2503 
2504 			ifq_barrier(txr->txr_ifq);
2505 
2506 			timeout_del_barrier(&rxr->rxr_refill);
2507 
2508 			intr_barrier(iv->iv_ihc);
2509 		}
2510 	}
2511 
2512 	/* XXX wait at least 400 usec for all tx queues in one go */
2513 	delay(500);
2514 
2515 	for (i = 0; i < nqueues; i++) {
2516 		reg = ixl_rd(sc, I40E_QTX_ENA(i));
2517 		CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2518 		ixl_wr(sc, I40E_QTX_ENA(i), reg);
2519 
2520 		reg = ixl_rd(sc, I40E_QRX_ENA(i));
2521 		CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2522 		ixl_wr(sc, I40E_QRX_ENA(i), reg);
2523 	}
2524 
2525 	for (i = 0; i < nqueues; i++) {
2526 		iv = &sc->sc_vectors[i];
2527 		rxr = iv->iv_rxr;
2528 		txr = iv->iv_txr;
2529 
2530 		if (ixl_txr_disabled(sc, txr) != 0)
2531 			goto die;
2532 
2533 		if (ixl_rxr_disabled(sc, rxr) != 0)
2534 			goto die;
2535 	}
2536 
2537 	for (i = 0; i < nqueues; i++) {
2538 		iv = &sc->sc_vectors[i];
2539 		rxr = iv->iv_rxr;
2540 		txr = iv->iv_txr;
2541 
2542 		ixl_txr_unconfig(sc, txr);
2543 		ixl_rxr_unconfig(sc, rxr);
2544 
2545 		ixl_txr_clean(sc, txr);
2546 		ixl_rxr_clean(sc, rxr);
2547 
2548 		ixl_txr_free(sc, txr);
2549 		ixl_rxr_free(sc, rxr);
2550 
2551 		ifp->if_iqs[i]->ifiq_softc = NULL;
2552 		ifp->if_ifqs[i]->ifq_softc = NULL;
2553 	}
2554 
2555 out:
2556 	rw_exit_write(&sc->sc_cfg_lock);
2557 	NET_LOCK();
2558 	return (error);
2559 die:
2560 	sc->sc_dead = 1;
2561 	log(LOG_CRIT, "%s: failed to shut down rings\n", DEVNAME(sc));
2562 	error = ETIMEDOUT;
2563 	goto out;
2564 }
2565 
2566 static struct ixl_tx_ring *
2567 ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid)
2568 {
2569 	struct ixl_tx_ring *txr;
2570 	struct ixl_tx_map *maps, *txm;
2571 	unsigned int i;
2572 
2573 	txr = malloc(sizeof(*txr), M_DEVBUF, M_WAITOK|M_CANFAIL);
2574 	if (txr == NULL)
2575 		return (NULL);
2576 
2577 	maps = mallocarray(sizeof(*maps),
2578 	    sc->sc_tx_ring_ndescs, M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
2579 	if (maps == NULL)
2580 		goto free;
2581 
2582 	if (ixl_dmamem_alloc(sc, &txr->txr_mem,
2583 	    sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
2584 	    IXL_TX_QUEUE_ALIGN) != 0)
2585 		goto freemap;
2586 
2587 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2588 		txm = &maps[i];
2589 
2590 		if (bus_dmamap_create(sc->sc_dmat,
2591 		    IXL_HARDMTU, IXL_TX_PKT_DESCS, IXL_HARDMTU, 0,
2592 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2593 		    &txm->txm_map) != 0)
2594 			goto uncreate;
2595 
2596 		txm->txm_eop = -1;
2597 		txm->txm_m = NULL;
2598 	}
2599 
2600 	txr->txr_cons = txr->txr_prod = 0;
2601 	txr->txr_maps = maps;
2602 
2603 	txr->txr_tail = I40E_QTX_TAIL(qid);
2604 	txr->txr_qid = qid;
2605 
2606 	return (txr);
2607 
2608 uncreate:
2609 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2610 		txm = &maps[i];
2611 
2612 		if (txm->txm_map == NULL)
2613 			continue;
2614 
2615 		bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2616 	}
2617 
2618 	ixl_dmamem_free(sc, &txr->txr_mem);
2619 freemap:
2620 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_tx_ring_ndescs);
2621 free:
2622 	free(txr, M_DEVBUF, sizeof(*txr));
2623 	return (NULL);
2624 }
2625 
2626 static void
2627 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable)
2628 {
2629 	unsigned int qid;
2630 	bus_size_t reg;
2631 	uint32_t r;
2632 
2633 	qid = txr->txr_qid + sc->sc_base_queue;
2634 	reg = I40E_GLLAN_TXPRE_QDIS(qid / 128);
2635 	qid %= 128;
2636 
2637 	r = ixl_rd(sc, reg);
2638 	CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK);
2639 	SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
2640 	SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK :
2641 	    I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK);
2642 	ixl_wr(sc, reg, r);
2643 }
2644 
2645 static void
2646 ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2647 {
2648 	struct ixl_hmc_txq txq;
2649 	struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch);
2650 	void *hmc;
2651 
2652 	memset(&txq, 0, sizeof(txq));
2653 	txq.head = htole16(0);
2654 	txq.new_context = 1;
2655 	htolem64(&txq.base,
2656 	    IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT);
2657 	txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB;
2658 	htolem16(&txq.qlen, sc->sc_tx_ring_ndescs);
2659 	txq.tphrdesc_ena = 0;
2660 	txq.tphrpacket_ena = 0;
2661 	txq.tphwdesc_ena = 0;
2662 	txq.rdylist = data->qs_handle[0];
2663 
2664 	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2665 	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2666 	ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq, nitems(ixl_hmc_pack_txq));
2667 }
2668 
2669 static void
2670 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2671 {
2672 	void *hmc;
2673 
2674 	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2675 	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2676 }
2677 
2678 static void
2679 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2680 {
2681 	struct ixl_tx_map *maps, *txm;
2682 	bus_dmamap_t map;
2683 	unsigned int i;
2684 
2685 	maps = txr->txr_maps;
2686 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2687 		txm = &maps[i];
2688 
2689 		if (txm->txm_m == NULL)
2690 			continue;
2691 
2692 		map = txm->txm_map;
2693 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2694 		    BUS_DMASYNC_POSTWRITE);
2695 		bus_dmamap_unload(sc->sc_dmat, map);
2696 
2697 		m_freem(txm->txm_m);
2698 		txm->txm_m = NULL;
2699 	}
2700 }
2701 
2702 static int
2703 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2704 {
2705 	bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2706 	uint32_t reg;
2707 	int i;
2708 
2709 	for (i = 0; i < 10; i++) {
2710 		reg = ixl_rd(sc, ena);
2711 		if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK))
2712 			return (0);
2713 
2714 		delaymsec(10);
2715 	}
2716 
2717 	return (ETIMEDOUT);
2718 }
2719 
2720 static int
2721 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2722 {
2723 	bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2724 	uint32_t reg;
2725 	int i;
2726 
2727 	for (i = 0; i < 20; i++) {
2728 		reg = ixl_rd(sc, ena);
2729 		if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0)
2730 			return (0);
2731 
2732 		delaymsec(10);
2733 	}
2734 
2735 	return (ETIMEDOUT);
2736 }
2737 
2738 static void
2739 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2740 {
2741 	struct ixl_tx_map *maps, *txm;
2742 	unsigned int i;
2743 
2744 	maps = txr->txr_maps;
2745 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2746 		txm = &maps[i];
2747 
2748 		bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2749 	}
2750 
2751 	ixl_dmamem_free(sc, &txr->txr_mem);
2752 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_tx_ring_ndescs);
2753 	free(txr, M_DEVBUF, sizeof(*txr));
2754 }
2755 
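/*
 * Load an mbuf chain for transmit.  If it has more segments than the
 * dma map allows, the load fails with EFBIG; the chain is then
 * compacted with m_defrag() and the load retried once.
 */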
2756 static inline int
2757 ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m)
2758 {
2759 	int error;
2760 
2761 	error = bus_dmamap_load_mbuf(dmat, map, m,
2762 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT);
2763 	if (error != EFBIG)
2764 		return (error);
2765 
2766 	error = m_defrag(m, M_DONTWAIT);
2767 	if (error != 0)
2768 		return (error);
2769 
2770 	return (bus_dmamap_load_mbuf(dmat, map, m,
2771 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT));
2772 }
2773 
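/*
 * Enqueue packets onto the tx ring.  Each dma segment becomes one
 * data descriptor with the length and command bits packed into a
 * single 64-bit word; EOP and RS are set only on a packet's last
 * descriptor, so the hardware reports one completion per packet.
 */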
2774 static void
2775 ixl_start(struct ifqueue *ifq)
2776 {
2777 	struct ifnet *ifp = ifq->ifq_if;
2778 	struct ixl_softc *sc = ifp->if_softc;
2779 	struct ixl_tx_ring *txr = ifq->ifq_softc;
2780 	struct ixl_tx_desc *ring, *txd;
2781 	struct ixl_tx_map *txm;
2782 	bus_dmamap_t map;
2783 	struct mbuf *m;
2784 	uint64_t cmd;
2785 	unsigned int prod, free, last, i;
2786 	unsigned int mask;
2787 	int post = 0;
2788 #if NBPFILTER > 0
2789 	caddr_t if_bpf;
2790 #endif
2791 
2792 	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
2793 		ifq_purge(ifq);
2794 		return;
2795 	}
2796 
2797 	prod = txr->txr_prod;
2798 	free = txr->txr_cons;
2799 	if (free <= prod)
2800 		free += sc->sc_tx_ring_ndescs;
2801 	free -= prod;
2802 
2803 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2804 	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
2805 
2806 	ring = IXL_DMA_KVA(&txr->txr_mem);
2807 	mask = sc->sc_tx_ring_ndescs - 1;
2808 
2809 	for (;;) {
2810 		if (free <= IXL_TX_PKT_DESCS) {
2811 			ifq_set_oactive(ifq);
2812 			break;
2813 		}
2814 
2815 		m = ifq_dequeue(ifq);
2816 		if (m == NULL)
2817 			break;
2818 
2819 		txm = &txr->txr_maps[prod];
2820 		map = txm->txm_map;
2821 
2822 		if (ixl_load_mbuf(sc->sc_dmat, map, m) != 0) {
2823 			ifq->ifq_errors++;
2824 			m_freem(m);
2825 			continue;
2826 		}
2827 
2828 		bus_dmamap_sync(sc->sc_dmat, map, 0,
2829 		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2830 
2831 		for (i = 0; i < map->dm_nsegs; i++) {
2832 			txd = &ring[prod];
2833 
2834 			cmd = (uint64_t)map->dm_segs[i].ds_len <<
2835 			    IXL_TX_DESC_BSIZE_SHIFT;
2836 			cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC;
2837 
2838 			htolem64(&txd->addr, map->dm_segs[i].ds_addr);
2839 			htolem64(&txd->cmd, cmd);
2840 
2841 			last = prod;
2842 
2843 			prod++;
2844 			prod &= mask;
2845 		}
2846 		cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS;
2847 		htolem64(&txd->cmd, cmd);
2848 
2849 		txm->txm_m = m;
2850 		txm->txm_eop = last;
2851 
2852 #if NBPFILTER > 0
2853 		if_bpf = ifp->if_bpf;
2854 		if (if_bpf)
2855 			bpf_mtap_ether(if_bpf, m, BPF_DIRECTION_OUT);
2856 #endif
2857 
2858 		free -= i;
2859 		post = 1;
2860 	}
2861 
2862 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2863 	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
2864 
2865 	if (post) {
2866 		txr->txr_prod = prod;
2867 		ixl_wr(sc, txr->txr_tail, prod);
2868 	}
2869 }
2870 
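/*
 * Reclaim completed tx descriptors.  The hardware rewrites the dtype
 * field to DONE when it finishes a packet, so completions are found
 * by walking the EOP descriptors from the consumer index until one
 * has not been written back yet.
 */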
2871 static int
2872 ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2873 {
2874 	struct ifqueue *ifq = txr->txr_ifq;
2875 	struct ixl_tx_desc *ring, *txd;
2876 	struct ixl_tx_map *txm;
2877 	bus_dmamap_t map;
2878 	unsigned int cons, prod, last;
2879 	unsigned int mask;
2880 	uint64_t dtype;
2881 	int done = 0;
2882 
2883 	prod = txr->txr_prod;
2884 	cons = txr->txr_cons;
2885 
2886 	if (cons == prod)
2887 		return (0);
2888 
2889 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2890 	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
2891 
2892 	ring = IXL_DMA_KVA(&txr->txr_mem);
2893 	mask = sc->sc_tx_ring_ndescs - 1;
2894 
2895 	do {
2896 		txm = &txr->txr_maps[cons];
2897 		last = txm->txm_eop;
2898 		txd = &ring[last];
2899 
2900 		dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
2901 		if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
2902 			break;
2903 
2904 		map = txm->txm_map;
2905 
2906 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2907 		    BUS_DMASYNC_POSTWRITE);
2908 		bus_dmamap_unload(sc->sc_dmat, map);
2909 		m_freem(txm->txm_m);
2910 
2911 		txm->txm_m = NULL;
2912 		txm->txm_eop = -1;
2913 
2914 		cons = last + 1;
2915 		cons &= mask;
2916 
2917 		done = 1;
2918 	} while (cons != prod);
2919 
2920 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2921 	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
2922 
2923 	txr->txr_cons = cons;
2924 
2925 	/* ixl_enable(sc, txr->txr_msix); */
2926 
2927 	if (ifq_is_oactive(ifq))
2928 		ifq_restart(ifq);
2929 
2930 	return (done);
2931 }
2932 
2933 static struct ixl_rx_ring *
2934 ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid)
2935 {
2936 	struct ixl_rx_ring *rxr;
2937 	struct ixl_rx_map *maps, *rxm;
2938 	unsigned int i;
2939 
2940 	rxr = malloc(sizeof(*rxr), M_DEVBUF, M_WAITOK|M_CANFAIL);
2941 	if (rxr == NULL)
2942 		return (NULL);
2943 
2944 	maps = mallocarray(sizeof(*maps),
2945 	    sc->sc_rx_ring_ndescs, M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
2946 	if (maps == NULL)
2947 		goto free;
2948 
2949 	if (ixl_dmamem_alloc(sc, &rxr->rxr_mem,
2950 	    sizeof(struct ixl_rx_rd_desc_16) * sc->sc_rx_ring_ndescs,
2951 	    IXL_RX_QUEUE_ALIGN) != 0)
2952 		goto freemap;
2953 
2954 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2955 		rxm = &maps[i];
2956 
2957 		if (bus_dmamap_create(sc->sc_dmat,
2958 		    IXL_HARDMTU, 1, IXL_HARDMTU, 0,
2959 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2960 		    &rxm->rxm_map) != 0)
2961 			goto uncreate;
2962 
2963 		rxm->rxm_m = NULL;
2964 	}
2965 
2966 	rxr->rxr_sc = sc;
2967 	if_rxr_init(&rxr->rxr_acct, 17, sc->sc_rx_ring_ndescs - 1);
2968 	timeout_set(&rxr->rxr_refill, ixl_rxrefill, rxr);
2969 	rxr->rxr_cons = rxr->rxr_prod = 0;
2970 	rxr->rxr_m_head = NULL;
2971 	rxr->rxr_m_tail = &rxr->rxr_m_head;
2972 	rxr->rxr_maps = maps;
2973 
2974 	rxr->rxr_tail = I40E_QRX_TAIL(qid);
2975 	rxr->rxr_qid = qid;
2976 
2977 	return (rxr);
2978 
2979 uncreate:
2980 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2981 		rxm = &maps[i];
2982 
2983 		if (rxm->rxm_map == NULL)
2984 			continue;
2985 
2986 		bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
2987 	}
2988 
2989 	ixl_dmamem_free(sc, &rxr->rxr_mem);
2990 freemap:
2991 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_rx_ring_ndescs);
2992 free:
2993 	free(rxr, M_DEVBUF, sizeof(*rxr));
2994 	return (NULL);
2995 }
2996 
2997 static void
2998 ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2999 {
3000 	struct ixl_rx_map *maps, *rxm;
3001 	bus_dmamap_t map;
3002 	unsigned int i;
3003 
3004 	timeout_del_barrier(&rxr->rxr_refill);
3005 
3006 	maps = rxr->rxr_maps;
3007 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3008 		rxm = &maps[i];
3009 
3010 		if (rxm->rxm_m == NULL)
3011 			continue;
3012 
3013 		map = rxm->rxm_map;
3014 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3015 		    BUS_DMASYNC_POSTWRITE);
3016 		bus_dmamap_unload(sc->sc_dmat, map);
3017 
3018 		m_freem(rxm->rxm_m);
3019 		rxm->rxm_m = NULL;
3020 	}
3021 
3022 	m_freem(rxr->rxr_m_head);
3023 	rxr->rxr_m_head = NULL;
3024 	rxr->rxr_m_tail = &rxr->rxr_m_head;
3025 
3026 	rxr->rxr_prod = rxr->rxr_cons = 0;
3027 }
3028 
3029 static int
3030 ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3031 {
3032 	bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
3033 	uint32_t reg;
3034 	int i;
3035 
3036 	for (i = 0; i < 10; i++) {
3037 		reg = ixl_rd(sc, ena);
3038 		if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK))
3039 			return (0);
3040 
3041 		delaymsec(10);
3042 	}
3043 
3044 	return (ETIMEDOUT);
3045 }
3046 
3047 static int
3048 ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3049 {
3050 	bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
3051 	uint32_t reg;
3052 	int i;
3053 
3054 	for (i = 0; i < 20; i++) {
3055 		reg = ixl_rd(sc, ena);
3056 		if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0)
3057 			return (0);
3058 
3059 		delaymsec(10);
3060 	}
3061 
3062 	return (ETIMEDOUT);
3063 }
3064 
3065 static void
3066 ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3067 {
3068 	struct ixl_hmc_rxq rxq;
3069 	void *hmc;
3070 
3071 	memset(&rxq, 0, sizeof(rxq));
3072 
3073 	rxq.head = htole16(0);
3074 	htolem64(&rxq.base,
3075 	    IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT);
3076 	htolem16(&rxq.qlen, sc->sc_rx_ring_ndescs);
3077 	rxq.dbuff = htole16(MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT);
3078 	rxq.hbuff = 0;
3079 	rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT;
3080 	rxq.dsize = IXL_HMC_RXQ_DSIZE_16;
3081 	rxq.crcstrip = 1;
3082 	rxq.l2sel = 0;
3083 	rxq.showiv = 0;
3084 	rxq.rxmax = htole16(IXL_HARDMTU);
3085 	rxq.tphrdesc_ena = 0;
3086 	rxq.tphwdesc_ena = 0;
3087 	rxq.tphdata_ena = 0;
3088 	rxq.tphhead_ena = 0;
3089 	rxq.lrxqthresh = 0;
3090 	rxq.prefena = 1;
3091 
3092 	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
3093 	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
3094 	ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq, nitems(ixl_hmc_pack_rxq));
3095 }
3096 
3097 static void
3098 ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3099 {
3100 	void *hmc;
3101 
3102 	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
3103 	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
3104 }
3105 
3106 static void
3107 ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3108 {
3109 	struct ixl_rx_map *maps, *rxm;
3110 	unsigned int i;
3111 
3112 	maps = rxr->rxr_maps;
3113 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3114 		rxm = &maps[i];
3115 
3116 		bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
3117 	}
3118 
3119 	ixl_dmamem_free(sc, &rxr->rxr_mem);
3120 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_rx_ring_ndescs);
3121 	free(rxr, M_DEVBUF, sizeof(*rxr));
3122 }
3123 
3124 static int
3125 ixl_rxeof(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3126 {
3127 	struct ifiqueue *ifiq = rxr->rxr_ifiq;
3128 	struct ifnet *ifp = &sc->sc_ac.ac_if;
3129 	struct ixl_rx_wb_desc_16 *ring, *rxd;
3130 	struct ixl_rx_map *rxm;
3131 	bus_dmamap_t map;
3132 	unsigned int cons, prod;
3133 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
3134 	struct mbuf *m;
3135 	uint64_t word;
3136 	unsigned int len;
3137 	unsigned int mask;
3138 	int done = 0;
3139 
3140 	prod = rxr->rxr_prod;
3141 	cons = rxr->rxr_cons;
3142 
3143 	if (cons == prod)
3144 		return (0);
3145 
3146 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3147 	    0, IXL_DMA_LEN(&rxr->rxr_mem),
3148 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3149 
3150 	ring = IXL_DMA_KVA(&rxr->rxr_mem);
3151 	mask = sc->sc_rx_ring_ndescs - 1;
3152 
3153 	do {
3154 		rxd = &ring[cons];
3155 
3156 		word = lemtoh64(&rxd->qword1);
3157 		if (!ISSET(word, IXL_RX_DESC_DD))
3158 			break;
3159 
3160 		if_rxr_put(&rxr->rxr_acct, 1);
3161 
3162 		rxm = &rxr->rxr_maps[cons];
3163 
3164 		map = rxm->rxm_map;
3165 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3166 		    BUS_DMASYNC_POSTREAD);
3167 		bus_dmamap_unload(sc->sc_dmat, map);
3168 
3169 		m = rxm->rxm_m;
3170 		rxm->rxm_m = NULL;
3171 
3172 		len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
3173 		m->m_len = len;
3174 		m->m_pkthdr.len = 0;
3175 
3176 		m->m_next = NULL;
3177 		*rxr->rxr_m_tail = m;
3178 		rxr->rxr_m_tail = &m->m_next;
3179 
3180 		m = rxr->rxr_m_head;
3181 		m->m_pkthdr.len += len;
3182 
3183 		if (ISSET(word, IXL_RX_DESC_EOP)) {
3184 			if (!ISSET(word,
3185 			    IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
3186 				if ((word & IXL_RX_DESC_FLTSTAT_MASK) ==
3187 				    IXL_RX_DESC_FLTSTAT_RSS) {
3188 					m->m_pkthdr.ph_flowid =
3189 					    lemtoh32(&rxd->filter_status);
3190 					m->m_pkthdr.csum_flags |= M_FLOWID;
3191 				}
3192 
3193 				ml_enqueue(&ml, m);
3194 			} else {
3195 				ifp->if_ierrors++; /* XXX */
3196 				m_freem(m);
3197 			}
3198 
3199 			rxr->rxr_m_head = NULL;
3200 			rxr->rxr_m_tail = &rxr->rxr_m_head;
3201 		}
3202 
3203 		cons++;
3204 		cons &= mask;
3205 
3206 		done = 1;
3207 	} while (cons != prod);
3208 
3209 	if (done) {
3210 		rxr->rxr_cons = cons;
3211 		if (ifiq_input(ifiq, &ml))
3212 			if_rxr_livelocked(&rxr->rxr_acct);
3213 		ixl_rxfill(sc, rxr);
3214 	}
3215 
3216 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3217 	    0, IXL_DMA_LEN(&rxr->rxr_mem),
3218 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3219 
3220 	return (done);
3221 }
3222 
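/*
 * Refill the rx ring with cluster mbufs.  The data pointer is pushed
 * to the tail of the cluster so exactly MCLBYTES + ETHER_ALIGN bytes
 * remain; if the ring runs completely dry, a timeout retries the
 * refill later.
 */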
3223 static void
3224 ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3225 {
3226 	struct ixl_rx_rd_desc_16 *ring, *rxd;
3227 	struct ixl_rx_map *rxm;
3228 	bus_dmamap_t map;
3229 	struct mbuf *m;
3230 	unsigned int prod;
3231 	unsigned int slots;
3232 	unsigned int mask;
3233 	int post = 0;
3234 
3235 	slots = if_rxr_get(&rxr->rxr_acct, sc->sc_rx_ring_ndescs);
3236 	if (slots == 0)
3237 		return;
3238 
3239 	prod = rxr->rxr_prod;
3240 
3241 	ring = IXL_DMA_KVA(&rxr->rxr_mem);
3242 	mask = sc->sc_rx_ring_ndescs - 1;
3243 
3244 	do {
3245 		rxm = &rxr->rxr_maps[prod];
3246 
3247 		m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES + ETHER_ALIGN);
3248 		if (m == NULL)
3249 			break;
3250 		m->m_data += (m->m_ext.ext_size - (MCLBYTES + ETHER_ALIGN));
3251 		m->m_len = m->m_pkthdr.len = MCLBYTES + ETHER_ALIGN;
3252 
3253 		map = rxm->rxm_map;
3254 
3255 		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
3256 		    BUS_DMA_NOWAIT) != 0) {
3257 			m_freem(m);
3258 			break;
3259 		}
3260 
3261 		rxm->rxm_m = m;
3262 
3263 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3264 		    BUS_DMASYNC_PREREAD);
3265 
3266 		rxd = &ring[prod];
3267 
3268 		htolem64(&rxd->paddr, map->dm_segs[0].ds_addr);
3269 		rxd->haddr = htole64(0);
3270 
3271 		prod++;
3272 		prod &= mask;
3273 
3274 		post = 1;
3275 	} while (--slots);
3276 
3277 	if_rxr_put(&rxr->rxr_acct, slots);
3278 
3279 	if (if_rxr_inuse(&rxr->rxr_acct) == 0)
3280 		timeout_add(&rxr->rxr_refill, 1);
3281 	else if (post) {
3282 		rxr->rxr_prod = prod;
3283 		ixl_wr(sc, rxr->rxr_tail, prod);
3284 	}
3285 }
3286 
3287 void
3288 ixl_rxrefill(void *arg)
3289 {
3290 	struct ixl_rx_ring *rxr = arg;
3291 	struct ixl_softc *sc = rxr->rxr_sc;
3292 
3293 	ixl_rxfill(sc, rxr);
3294 }
3295 
3296 static int
3297 ixl_rxrinfo(struct ixl_softc *sc, struct if_rxrinfo *ifri)
3298 {
3299 	struct ifnet *ifp = &sc->sc_ac.ac_if;
3300 	struct if_rxring_info *ifr;
3301 	struct ixl_rx_ring *ring;
3302 	int i, rv;
3303 
3304 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
3305 		return (ENOTTY);
3306 
3307 	ifr = mallocarray(sizeof(*ifr), ixl_nqueues(sc), M_TEMP,
3308 	    M_WAITOK|M_CANFAIL|M_ZERO);
3309 	if (ifr == NULL)
3310 		return (ENOMEM);
3311 
3312 	for (i = 0; i < ixl_nqueues(sc); i++) {
3313 		ring = ifp->if_iqs[i]->ifiq_softc;
3314 		ifr[i].ifr_size = MCLBYTES;
3315 		snprintf(ifr[i].ifr_name, sizeof(ifr[i].ifr_name), "%d", i);
3316 		ifr[i].ifr_info = ring->rxr_acct;
3317 	}
3318 
3319 	rv = if_rxr_info_ioctl(ifri, ixl_nqueues(sc), ifr);
3320 	free(ifr, M_TEMP, ixl_nqueues(sc) * sizeof(*ifr));
3321 
3322 	return (rv);
3323 }
3324 
3325 static int
3326 ixl_intr0(void *xsc)
3327 {
3328 	struct ixl_softc *sc = xsc;
3329 	struct ifnet *ifp = &sc->sc_ac.ac_if;
3330 	uint32_t icr;
3331 	int rv = 0;
3332 
3333 	ixl_intr_enable(sc);
3334 	icr = ixl_rd(sc, I40E_PFINT_ICR0);
3335 
3336 	if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3337 		ixl_atq_done(sc);
3338 		task_add(systq, &sc->sc_arq_task);
3339 		rv = 1;
3340 	}
3341 
3342 	if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3343 		task_add(systq, &sc->sc_link_state_task);
3344 		rv = 1;
3345 	}
3346 
3347 	if (ISSET(ifp->if_flags, IFF_RUNNING)) {
3348 		struct ixl_vector *iv = sc->sc_vectors;
3349 		if (ISSET(icr, I40E_INTR_NOTX_RX_MASK))
3350 			rv |= ixl_rxeof(sc, iv->iv_rxr);
3351 		if (ISSET(icr, I40E_INTR_NOTX_TX_MASK))
3352 			rv |= ixl_txeof(sc, iv->iv_txr);
3353 	}
3354 
3355 	return (rv);
3356 }
3357 
3358 static int
3359 ixl_intr_vector(void *v)
3360 {
3361 	struct ixl_vector *iv = v;
3362 	struct ixl_softc *sc = iv->iv_sc;
3363 	struct ifnet *ifp = &sc->sc_ac.ac_if;
3364 	int rv = 0;
3365 
3366 	if (ISSET(ifp->if_flags, IFF_RUNNING)) {
3367 		rv |= ixl_rxeof(sc, iv->iv_rxr);
3368 		rv |= ixl_txeof(sc, iv->iv_txr);
3369 	}
3370 
3371 	ixl_wr(sc, I40E_PFINT_DYN_CTLN(iv->iv_qid),
3372 	    I40E_PFINT_DYN_CTLN_INTENA_MASK |
3373 	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3374 	    (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
3375 
3376 	return (rv);
3377 }
3378 
3379 static void
3380 ixl_link_state_update_iaq(struct ixl_softc *sc, void *arg)
3381 {
3382 	struct ifnet *ifp = &sc->sc_ac.ac_if;
3383 	struct ixl_aq_desc *iaq = arg;
3384 	uint16_t retval;
3385 	int link_state;
3386 	int change = 0;
3387 
3388 	retval = lemtoh16(&iaq->iaq_retval);
3389 	if (retval != IXL_AQ_RC_OK) {
3390 		printf("%s: LINK STATUS error %u\n", DEVNAME(sc), retval);
3391 		return;
3392 	}
3393 
3394 	link_state = ixl_set_link_status(sc, iaq);
3395 	mtx_enter(&sc->sc_link_state_mtx);
3396 	if (ifp->if_link_state != link_state) {
3397 		ifp->if_link_state = link_state;
3398 		change = 1;
3399 	}
3400 	mtx_leave(&sc->sc_link_state_mtx);
3401 
3402 	if (change)
3403 		if_link_state_change(ifp);
3404 }
3405 
3406 static void
3407 ixl_link_state_update(void *xsc)
3408 {
3409 	struct ixl_softc *sc = xsc;
3410 	struct ixl_aq_desc *iaq;
3411 	struct ixl_aq_link_param *param;
3412 
3413 	memset(&sc->sc_link_state_atq, 0, sizeof(sc->sc_link_state_atq));
3414 	iaq = &sc->sc_link_state_atq.iatq_desc;
3415 	iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
3416 	param = (struct ixl_aq_link_param *)iaq->iaq_param;
3417 	param->notify = IXL_AQ_LINK_NOTIFY;
3418 
3419 	ixl_atq_set(&sc->sc_link_state_atq, ixl_link_state_update_iaq, iaq);
3420 	ixl_atq_post(sc, &sc->sc_link_state_atq);
3421 }
3422 
3423 #if 0
3424 static void
3425 ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3426 {
3427 	printf("%s: flags %b opcode %04x\n", DEVNAME(sc),
3428 	    lemtoh16(&iaq->iaq_flags), IXL_AQ_FLAGS_FMT,
3429 	    lemtoh16(&iaq->iaq_opcode));
3430 	printf("%s: datalen %u retval %u\n", DEVNAME(sc),
3431 	    lemtoh16(&iaq->iaq_datalen), lemtoh16(&iaq->iaq_retval));
3432 	printf("%s: cookie %016llx\n", DEVNAME(sc), iaq->iaq_cookie);
3433 	printf("%s: %08x %08x %08x %08x\n", DEVNAME(sc),
3434 	    lemtoh32(&iaq->iaq_param[0]), lemtoh32(&iaq->iaq_param[1]),
3435 	    lemtoh32(&iaq->iaq_param[2]), lemtoh32(&iaq->iaq_param[3]));
3436 }
3437 #endif
3438 
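/*
 * Drain the admin receive queue.  Consumed events have their buffers
 * recycled onto the idle list, and the tail register is only bumped
 * once replacement buffers have been posted.
 */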
3439 static void
3440 ixl_arq(void *xsc)
3441 {
3442 	struct ixl_softc *sc = xsc;
3443 	struct ixl_aq_desc *arq, *iaq;
3444 	struct ixl_aq_buf *aqb;
3445 	unsigned int cons = sc->sc_arq_cons;
3446 	unsigned int prod;
3447 	int done = 0;
3448 
3449 	prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) &
3450 	    sc->sc_aq_regs->arq_head_mask;
3451 
3452 	if (cons == prod)
3453 		goto done;
3454 
3455 	arq = IXL_DMA_KVA(&sc->sc_arq);
3456 
3457 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3458 	    0, IXL_DMA_LEN(&sc->sc_arq),
3459 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3460 
3461 	do {
3462 		iaq = &arq[cons];
3463 
3464 		aqb = SIMPLEQ_FIRST(&sc->sc_arq_live);
3465 		SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_live, aqb_entry);
3466 		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
3467 		    BUS_DMASYNC_POSTREAD);
3468 
3469 		switch (iaq->iaq_opcode) {
3470 		case HTOLE16(IXL_AQ_OP_PHY_LINK_STATUS):
3471 			ixl_link_state_update_iaq(sc, iaq);
3472 			break;
3473 		}
3474 
3475 		memset(iaq, 0, sizeof(*iaq));
3476 		SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry);
3477 		if_rxr_put(&sc->sc_arq_ring, 1);
3478 
3479 		cons++;
3480 		cons &= IXL_AQ_MASK;
3481 
3482 		done = 1;
3483 	} while (cons != prod);
3484 
3485 	if (done && ixl_arq_fill(sc))
3486 		ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
3487 
3488 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3489 	    0, IXL_DMA_LEN(&sc->sc_arq),
3490 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3491 
3492 	sc->sc_arq_cons = cons;
3493 
3494 done:
3495 	ixl_intr_enable(sc);
3496 }
3497 
3498 static void
3499 ixl_atq_set(struct ixl_atq *iatq,
3500     void (*fn)(struct ixl_softc *, void *), void *arg)
3501 {
3502 	iatq->iatq_fn = fn;
3503 	iatq->iatq_arg = arg;
3504 }
3505 
3506 static void
3507 ixl_atq_post(struct ixl_softc *sc, struct ixl_atq *iatq)
3508 {
3509 	struct ixl_aq_desc *atq, *slot;
3510 	unsigned int prod;
3511 
3512 	/* assert locked */
3513 
3514 	atq = IXL_DMA_KVA(&sc->sc_atq);
3515 	prod = sc->sc_atq_prod;
3516 	slot = atq + prod;
3517 
3518 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3519 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3520 
3521 	*slot = iatq->iatq_desc;
3522 	slot->iaq_cookie = (uint64_t)iatq;
3523 
3524 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3525 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3526 
3527 	prod++;
3528 	prod &= IXL_AQ_MASK;
3529 	sc->sc_atq_prod = prod;
3530 	ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
3531 }
3532 
3533 static void
3534 ixl_atq_done(struct ixl_softc *sc)
3535 {
3536 	struct ixl_aq_desc *atq, *slot;
3537 	struct ixl_atq *iatq;
3538 	unsigned int cons;
3539 	unsigned int prod;
3540 
3541 	prod = sc->sc_atq_prod;
3542 	cons = sc->sc_atq_cons;
3543 
3544 	if (prod == cons)
3545 		return;
3546 
3547 	atq = IXL_DMA_KVA(&sc->sc_atq);
3548 
3549 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3550 	    0, IXL_DMA_LEN(&sc->sc_atq),
3551 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3552 
3553 	do {
3554 		slot = &atq[cons];
3555 		if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD)))
3556 			break;
3557 
3558 		iatq = (struct ixl_atq *)slot->iaq_cookie;
3559 		iatq->iatq_desc = *slot;
3560 
3561 		memset(slot, 0, sizeof(*slot));
3562 
3563 		(*iatq->iatq_fn)(sc, iatq->iatq_arg);
3564 
3565 		cons++;
3566 		cons &= IXL_AQ_MASK;
3567 	} while (cons != prod);
3568 
3569 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3570 	    0, IXL_DMA_LEN(&sc->sc_atq),
3571 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3572 
3573 	sc->sc_atq_cons = cons;
3574 }
3575 
3576 static void
3577 ixl_wakeup(struct ixl_softc *sc, void *arg)
3578 {
3579 	struct cond *c = arg;
3580 
3581 	cond_signal(c);
3582 }
3583 
3584 static void
3585 ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq, const char *wmesg)
3586 {
3587 	struct cond c = COND_INITIALIZER();
3588 
3589 	KASSERT(iatq->iatq_desc.iaq_cookie == 0);
3590 
3591 	ixl_atq_set(iatq, ixl_wakeup, &c);
3592 	ixl_atq_post(sc, iatq);
3593 
3594 	cond_wait(&c, wmesg);
3595 }
3596 
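/*
 * ixl_atq_exec() turns the asynchronous atq machinery into a
 * synchronous call: ixl_wakeup() runs from ixl_atq_done() and signals
 * the condition the poster is sleeping on.  A hedged sketch of a
 * caller, reusing the GET_VERSION opcode purely for illustration:
 */
#if 0
	struct ixl_atq iatq;
	struct ixl_aq_desc *iaq;

	memset(&iatq, 0, sizeof(iatq));
	iaq = &iatq.iatq_desc;
	iaq->iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);

	ixl_atq_exec(sc, &iatq, "ixlexample");
	/* on return, iatq.iatq_desc holds the completed descriptor */
#endif
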
3597 static int
3598 ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm)
3599 {
3600 	struct ixl_aq_desc *atq, *slot;
3601 	unsigned int prod;
3602 	unsigned int t = 0;
3603 
3604 	atq = IXL_DMA_KVA(&sc->sc_atq);
3605 	prod = sc->sc_atq_prod;
3606 	slot = atq + prod;
3607 
3608 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3609 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3610 
3611 	*slot = *iaq;
3612 	slot->iaq_flags |= htole16(IXL_AQ_SI);
3613 
3614 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3615 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3616 
3617 	prod++;
3618 	prod &= IXL_AQ_MASK;
3619 	sc->sc_atq_prod = prod;
3620 	ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
3621 
3622 	while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
3623 		delaymsec(1);
3624 
3625 		if (t++ > tm)
3626 			return (ETIMEDOUT);
3627 	}
3628 
3629 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3630 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
3631 	*iaq = *slot;
3632 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3633 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);
3634 
3635 	sc->sc_atq_cons = prod;
3636 
3637 	return (0);
3638 }
3639 
3640 static int
3641 ixl_get_version(struct ixl_softc *sc)
3642 {
3643 	struct ixl_aq_desc iaq;
3644 	uint32_t fwbuild, fwver, apiver;
3645 
3646 	memset(&iaq, 0, sizeof(iaq));
3647 	iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);
3648 
3649 	if (ixl_atq_poll(sc, &iaq, 2000) != 0)
3650 		return (ETIMEDOUT);
3651 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
3652 		return (EIO);
3653 
3654 	fwbuild = lemtoh32(&iaq.iaq_param[1]);
3655 	fwver = lemtoh32(&iaq.iaq_param[2]);
3656 	apiver = lemtoh32(&iaq.iaq_param[3]);
3657 
3658 	sc->sc_api_major = apiver & 0xffff;
3659 	sc->sc_api_minor = (apiver >> 16) & 0xffff;
3660 
3661 	printf(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver,
3662 	    (uint16_t)(fwver >> 16), fwbuild,
3663 	    sc->sc_api_major, sc->sc_api_minor);
3664 
3665 	return (0);
3666 }
3667 
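/*
 * The API version word packs the major revision in the low 16 bits
 * and the minor revision in the high 16 bits, as decoded above.  A
 * worked example with an illustrative register value:
 */
#if 0
	uint32_t apiver = 0x00080001;			/* illustrative */
	uint16_t major = apiver & 0xffff;		/* 1 */
	uint16_t minor = (apiver >> 16) & 0xffff;	/* 8 */
#endif
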
3668 static int
3669 ixl_pxe_clear(struct ixl_softc *sc)
3670 {
3671 	struct ixl_aq_desc iaq;
3672 
3673 	memset(&iaq, 0, sizeof(iaq));
3674 	iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE);
3675 	iaq.iaq_param[0] = htole32(0x2);
3676 
3677 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3678 		printf(", CLEAR PXE MODE timeout\n");
3679 		return (-1);
3680 	}
3681 
3682 	switch (iaq.iaq_retval) {
3683 	case HTOLE16(IXL_AQ_RC_OK):
3684 	case HTOLE16(IXL_AQ_RC_EEXIST):
3685 		break;
3686 	default:
3687 		printf(", CLEAR PXE MODE error\n");
3688 		return (-1);
3689 	}
3690 
3691 	return (0);
3692 }
3693 
3694 static int
3695 ixl_lldp_shut(struct ixl_softc *sc)
3696 {
3697 	struct ixl_aq_desc iaq;
3698 
3699 	memset(&iaq, 0, sizeof(iaq));
3700 	iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT);
3701 	iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN);
3702 
3703 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3704 		printf(", STOP LLDP AGENT timeout\n");
3705 		return (-1);
3706 	}
3707 
3708 	switch (iaq.iaq_retval) {
3709 	case HTOLE16(IXL_AQ_RC_EMODE):
3710 	case HTOLE16(IXL_AQ_RC_EPERM):
3711 		/* ignore silently */
3712 	default:
3713 		break;
3714 	}
3715 
3716 	return (0);
3717 }
3718 
3719 static int
3720 ixl_get_mac(struct ixl_softc *sc)
3721 {
3722 	struct ixl_dmamem idm;
3723 	struct ixl_aq_desc iaq;
3724 	struct ixl_aq_mac_addresses *addrs;
3725 	int rv;
3726 
3727 #ifdef __sparc64__
3728 	if (OF_getprop(PCITAG_NODE(sc->sc_tag), "local-mac-address",
3729 	    sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN) == ETHER_ADDR_LEN)
3730 		return (0);
3731 #endif
3732 
3733 	if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) {
3734 		printf(", unable to allocate mac addresses\n");
3735 		return (-1);
3736 	}
3737 
3738 	memset(&iaq, 0, sizeof(iaq));
3739 	iaq.iaq_flags = htole16(IXL_AQ_BUF);
3740 	iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ);
3741 	iaq.iaq_datalen = htole16(sizeof(*addrs));
3742 	ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3743 
3744 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3745 	    BUS_DMASYNC_PREREAD);
3746 
3747 	rv = ixl_atq_poll(sc, &iaq, 250);
3748 
3749 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3750 	    BUS_DMASYNC_POSTREAD);
3751 
3752 	if (rv != 0) {
3753 		printf(", MAC ADDRESS READ timeout\n");
3754 		rv = -1;
3755 		goto done;
3756 	}
3757 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3758 		printf(", MAC ADDRESS READ error\n");
3759 		rv = -1;
3760 		goto done;
3761 	}
3762 
3763 	addrs = IXL_DMA_KVA(&idm);
3764 	if (!ISSET(iaq.iaq_param[0], htole32(IXL_AQ_MAC_PORT_VALID))) {
3765 		printf(", port address is not valid\n");
		rv = -1;
3766 		goto done;
3767 	}
3768 
3769 	memcpy(sc->sc_ac.ac_enaddr, addrs->port, ETHER_ADDR_LEN);
3770 	rv = 0;
3771 
3772 done:
3773 	ixl_dmamem_free(sc, &idm);
3774 	return (rv);
3775 }
3776 
3777 static int
3778 ixl_get_switch_config(struct ixl_softc *sc)
3779 {
3780 	struct ixl_dmamem idm;
3781 	struct ixl_aq_desc iaq;
3782 	struct ixl_aq_switch_config *hdr;
3783 	struct ixl_aq_switch_config_element *elms, *elm;
3784 	unsigned int nelm;
3785 	int rv;
3786 
3787 	if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
3788 		printf("%s: unable to allocate switch config buffer\n",
3789 		    DEVNAME(sc));
3790 		return (-1);
3791 	}
3792 
3793 	memset(&iaq, 0, sizeof(iaq));
3794 	iaq.iaq_flags = htole16(IXL_AQ_BUF |
3795 	    (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3796 	iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG);
3797 	iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN);
3798 	ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3799 
3800 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3801 	    BUS_DMASYNC_PREREAD);
3802 
3803 	rv = ixl_atq_poll(sc, &iaq, 250);
3804 
3805 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3806 	    BUS_DMASYNC_POSTREAD);
3807 
3808 	if (rv != 0) {
3809 		printf("%s: GET SWITCH CONFIG timeout\n", DEVNAME(sc));
3810 		rv = -1;
3811 		goto done;
3812 	}
3813 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3814 		printf("%s: GET SWITCH CONFIG error\n", DEVNAME(sc));
3815 		rv = -1;
3816 		goto done;
3817 	}
3818 
3819 	hdr = IXL_DMA_KVA(&idm);
3820 	elms = (struct ixl_aq_switch_config_element *)(hdr + 1);
3821 
3822 	nelm = lemtoh16(&hdr->num_reported);
3823 	if (nelm < 1) {
3824 		printf("%s: no switch config available\n", DEVNAME(sc));
3825 		rv = -1;
3826 		goto done;
3827 	}
3828 
3829 #if 0
3830 	for (i = 0; i < nelm; i++) {
3831 		elm = &elms[i];
3832 
3833 		printf("%s: type %x revision %u seid %04x\n", DEVNAME(sc),
3834 		    elm->type, elm->revision, lemtoh16(&elm->seid));
3835 		printf("%s: uplink %04x downlink %04x\n", DEVNAME(sc),
3836 		    lemtoh16(&elm->uplink_seid),
3837 		    lemtoh16(&elm->downlink_seid));
3838 		printf("%s: conntype %x scheduler %04x extra %04x\n",
3839 		    DEVNAME(sc), elm->connection_type,
3840 		    lemtoh16(&elm->scheduler_id),
3841 		    lemtoh16(&elm->element_info));
3842 	}
3843 #endif
3844 
3845 	elm = &elms[0];
3846 
3847 	sc->sc_uplink_seid = elm->uplink_seid;
3848 	sc->sc_downlink_seid = elm->downlink_seid;
3849 	sc->sc_seid = elm->seid;
3850 
3851 	if ((sc->sc_uplink_seid == htole16(0)) !=
3852 	    (sc->sc_downlink_seid == htole16(0))) {
3853 		printf("%s: SEIDs are misconfigured\n", DEVNAME(sc));
3854 		rv = -1;
3855 		goto done;
3856 	}
3857 
3858 done:
3859 	ixl_dmamem_free(sc, &idm);
3860 	return (rv);
3861 }
3862 
3863 static int
3864 ixl_phy_mask_ints(struct ixl_softc *sc)
3865 {
3866 	struct ixl_aq_desc iaq;
3867 
3868 	memset(&iaq, 0, sizeof(iaq));
3869 	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK);
3870 	iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK &
3871 	    ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL |
3872 	      IXL_AQ_PHY_EV_MEDIA_NA));
3873 
3874 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3875 		printf("%s: SET PHY EVENT MASK timeout\n", DEVNAME(sc));
3876 		return (-1);
3877 	}
3878 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3879 		printf("%s: SET PHY EVENT MASK error\n", DEVNAME(sc));
3880 		return (-1);
3881 	}
3882 
3883 	return (0);
3884 }
3885 
3886 static int
3887 ixl_get_phy_abilities(struct ixl_softc *sc, struct ixl_dmamem *idm)
3888 {
3889 	struct ixl_aq_desc iaq;
3890 	int rv;
3891 
3892 	memset(&iaq, 0, sizeof(iaq));
3893 	iaq.iaq_flags = htole16(IXL_AQ_BUF |
3894 	    (IXL_DMA_LEN(idm) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3895 	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES);
3896 	htolem16(&iaq.iaq_datalen, IXL_DMA_LEN(idm));
3897 	iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT);
3898 	ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
3899 
3900 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
3901 	    BUS_DMASYNC_PREREAD);
3902 
3903 	rv = ixl_atq_poll(sc, &iaq, 250);
3904 
3905 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
3906 	    BUS_DMASYNC_POSTREAD);
3907 
3908 	if (rv != 0)
3909 		return (-1);
3910 
3911 	return (lemtoh16(&iaq.iaq_retval));
3912 }
3913 
3914 static int
3915 ixl_get_phy_types(struct ixl_softc *sc, uint64_t *phy_types_ptr)
3916 {
3917 	struct ixl_dmamem idm;
3918 	struct ixl_aq_phy_abilities *phy;
3919 	uint64_t phy_types;
3920 	int rv;
3921 
3922 	if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
3923 		printf("%s: unable to allocate phy abilities buffer\n",
3924 		    DEVNAME(sc));
3925 		return (-1);
3926 	}
3927 
3928 	rv = ixl_get_phy_abilities(sc, &idm);
3929 	switch (rv) {
3930 	case -1:
3931 		printf("%s: GET PHY ABILITIES timeout\n", DEVNAME(sc));
3932 		goto err;
3933 	case IXL_AQ_RC_OK:
3934 		break;
3935 	case IXL_AQ_RC_EIO:
3936 		/* API is too old to handle this command */
3937 		phy_types = 0;
3938 		goto done;
3939 	default:
3940 		printf("%s: GET PHY ABILITIES error %u\n", DEVNAME(sc), rv);
3941 		goto err;
3942 	}
3943 
3944 	phy = IXL_DMA_KVA(&idm);
3945 
3946 	phy_types = lemtoh32(&phy->phy_type);
3947 	phy_types |= (uint64_t)phy->phy_type_ext << 32;
3948 
3949 done:
3950 	*phy_types_ptr = phy_types;
3951 
3952 	rv = 0;
3953 
3954 err:
3955 	ixl_dmamem_free(sc, &idm);
3956 	return (rv);
3957 }
3958 
3959 /*
3960  * This returns -2 on software/driver failure, -1 for problems
3961  * talking to the hardware, or the SFF module type on success.
3962  */
3963 
3964 static int
3965 ixl_get_module_type(struct ixl_softc *sc)
3966 {
3967 	struct ixl_dmamem idm;
3968 	struct ixl_aq_phy_abilities *phy;
3969 	int rv;
3970 
3971 	if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0)
3972 		return (-2);
3973 
3974 	rv = ixl_get_phy_abilities(sc, &idm);
3975 	if (rv != IXL_AQ_RC_OK) {
3976 		rv = -1;
3977 		goto done;
3978 	}
3979 
3980 	phy = IXL_DMA_KVA(&idm);
3981 
3982 	rv = phy->module_type[0];
3983 
3984 done:
3985 	ixl_dmamem_free(sc, &idm);
3986 	return (rv);
3987 }
3988 
3989 static int
3990 ixl_get_link_status(struct ixl_softc *sc)
3991 {
3992 	struct ixl_aq_desc iaq;
3993 	struct ixl_aq_link_param *param;
3994 
3995 	memset(&iaq, 0, sizeof(iaq));
3996 	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
3997 	param = (struct ixl_aq_link_param *)iaq.iaq_param;
3998 	param->notify = IXL_AQ_LINK_NOTIFY;
3999 
4000 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4001 		printf("%s: GET LINK STATUS timeout\n", DEVNAME(sc));
4002 		return (-1);
4003 	}
4004 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4005 		printf("%s: GET LINK STATUS error\n", DEVNAME(sc));
4006 		return (0);
4007 	}
4008 
4009 	sc->sc_ac.ac_if.if_link_state = ixl_set_link_status(sc, &iaq);
4010 
4011 	return (0);
4012 }
4013 
4014 struct ixl_sff_ops {
4015 	int (*open)(struct ixl_softc *sc, struct if_sffpage *, uint8_t *);
4016 	int (*get)(struct ixl_softc *sc, struct if_sffpage *, size_t);
4017 	int (*close)(struct ixl_softc *sc, struct if_sffpage *, uint8_t);
4018 };
4019 
4020 static int
4021 ixl_sfp_open(struct ixl_softc *sc, struct if_sffpage *sff, uint8_t *page)
4022 {
4023 	int error;
4024 
4025 	if (sff->sff_addr != IFSFF_ADDR_EEPROM)
4026 		return (0);
4027 
4028 	error = ixl_sff_get_byte(sc, IFSFF_ADDR_EEPROM, 127, page);
4029 	if (error != 0)
4030 		return (error);
4031 	if (*page == sff->sff_page)
4032 		return (0);
4033 	error = ixl_sff_set_byte(sc, IFSFF_ADDR_EEPROM, 127, sff->sff_page);
4034 	if (error != 0)
4035 		return (error);
4036 
4037 	return (0);
4038 }
4039 
4040 static int
4041 ixl_sfp_get(struct ixl_softc *sc, struct if_sffpage *sff, size_t i)
4042 {
4043 	return (ixl_sff_get_byte(sc, sff->sff_addr, i, &sff->sff_data[i]));
4044 }
4045 
4046 static int
4047 ixl_sfp_close(struct ixl_softc *sc, struct if_sffpage *sff, uint8_t page)
4048 {
4049 	int error;
4050 
4051 	if (sff->sff_addr != IFSFF_ADDR_EEPROM)
4052 		return (0);
4053 
4054 	if (page == sff->sff_page)
4055 		return (0);
4056 
4057 	error = ixl_sff_set_byte(sc, IFSFF_ADDR_EEPROM, 127, page);
4058 	if (error != 0)
4059 		return (error);
4060 
4061 	return (0);
4062 }
4063 
4064 static const struct ixl_sff_ops ixl_sfp_ops = {
4065 	ixl_sfp_open,
4066 	ixl_sfp_get,
4067 	ixl_sfp_close,
4068 };
4069 
4070 static int
4071 ixl_qsfp_open(struct ixl_softc *sc, struct if_sffpage *sff, uint8_t *page)
4072 {
4073 	if (sff->sff_addr != IFSFF_ADDR_EEPROM)
4074 		return (EIO);
4075 
4076 	return (0);
4077 }
4078 
4079 static int
4080 ixl_qsfp_get(struct ixl_softc *sc, struct if_sffpage *sff, size_t i)
4081 {
4082 	return (ixl_sff_get_byte(sc, sff->sff_page, i, &sff->sff_data[i]));
4083 }
4084 
4085 static int
4086 ixl_qsfp_close(struct ixl_softc *sc, struct if_sffpage *sff, uint8_t page)
4087 {
4088 	return (0);
4089 }
4090 
4091 static const struct ixl_sff_ops ixl_qsfp_ops = {
4092 	ixl_qsfp_open,
4093 	ixl_qsfp_get,
4094 	ixl_qsfp_close,
4095 };
4096 
4097 static int
4098 ixl_get_sffpage(struct ixl_softc *sc, struct if_sffpage *sff)
4099 {
4100 	const struct ixl_sff_ops *ops;
4101 	uint8_t page;
4102 	size_t i;
4103 	int error;
4104 
4105 	switch (ixl_get_module_type(sc)) {
4106 	case -2:
4107 		return (ENOMEM);
4108 	case -1:
4109 		return (ENXIO);
4110 	case IXL_SFF8024_ID_SFP:
4111 		ops = &ixl_sfp_ops;
4112 		break;
4113 	case IXL_SFF8024_ID_QSFP:
4114 	case IXL_SFF8024_ID_QSFP_PLUS:
4115 	case IXL_SFF8024_ID_QSFP28:
4116 		ops = &ixl_qsfp_ops;
4117 		break;
4118 	default:
4119 		return (EOPNOTSUPP);
4120 	}
4121 
4122 	error = (*ops->open)(sc, sff, &page);
4123 	if (error != 0)
4124 		return (error);
4125 
4126 	for (i = 0; i < sizeof(sff->sff_data); i++) {
4127 		error = (*ops->get)(sc, sff, i);
4128 		if (error != 0)
4129 			return (error);
4130 	}
4131 
4132 	error = (*ops->close)(sc, sff, page);
4133 
4134 	return (error);
4135 }
4136 
4137 static int
4138 ixl_sff_get_byte(struct ixl_softc *sc, uint8_t dev, uint32_t reg, uint8_t *p)
4139 {
4140 	struct ixl_atq iatq;
4141 	struct ixl_aq_desc *iaq;
4142 	struct ixl_aq_phy_reg_access *param;
4143 
4144 	memset(&iatq, 0, sizeof(iatq));
4145 	iaq = &iatq.iatq_desc;
4146 	iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_REGISTER);
4147 	param = (struct ixl_aq_phy_reg_access *)iaq->iaq_param;
4148 	param->phy_iface = IXL_AQ_PHY_IF_MODULE;
4149 	param->dev_addr = dev;
4150 	htolem32(&param->reg, reg);
4151 
4152 	ixl_atq_exec(sc, &iatq, "ixlsffget");
4153 
4154 	if (ISSET(sc->sc_ac.ac_if.if_flags, IFF_DEBUG)) {
4155 		printf("%s: %s(dev 0x%02x, reg 0x%02x) -> %04x\n",
4156 		    DEVNAME(sc), __func__,
4157 		    dev, reg, lemtoh16(&iaq->iaq_retval));
4158 	}
4159 
4160 	switch (iaq->iaq_retval) {
4161 	case HTOLE16(IXL_AQ_RC_OK):
4162 		break;
4163 	case HTOLE16(IXL_AQ_RC_EBUSY):
4164 		return (EBUSY);
4165 	case HTOLE16(IXL_AQ_RC_ESRCH):
4166 		return (ENODEV);
4167 	case HTOLE16(IXL_AQ_RC_EIO):
4168 	case HTOLE16(IXL_AQ_RC_EINVAL):
4169 	default:
4170 		return (EIO);
4171 	}
4172 
4173 	*p = lemtoh32(&param->val);
4174 
4175 	return (0);
4176 }
4177 
4178 static int
4179 ixl_sff_set_byte(struct ixl_softc *sc, uint8_t dev, uint32_t reg, uint8_t v)
4180 {
4181 	struct ixl_atq iatq;
4182 	struct ixl_aq_desc *iaq;
4183 	struct ixl_aq_phy_reg_access *param;
4184 
4185 	memset(&iatq, 0, sizeof(iatq));
4186 	iaq = &iatq.iatq_desc;
4187 	iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_REGISTER);
4188 	param = (struct ixl_aq_phy_reg_access *)iaq->iaq_param;
4189 	param->phy_iface = IXL_AQ_PHY_IF_MODULE;
4190 	param->dev_addr = dev;
4191 	htolem32(&param->reg, reg);
4192 	htolem32(&param->val, v);
4193 
4194 	ixl_atq_exec(sc, &iatq, "ixlsffset");
4195 
4196 	if (ISSET(sc->sc_ac.ac_if.if_flags, IFF_DEBUG)) {
4197 		printf("%s: %s(dev 0x%02x, reg 0x%02x, val 0x%02x) -> %04x\n",
4198 		    DEVNAME(sc), __func__,
4199 		    dev, reg, v, lemtoh16(&iaq->iaq_retval));
4200 	}
4201 
4202 	switch (iaq->iaq_retval) {
4203 	case HTOLE16(IXL_AQ_RC_OK):
4204 		break;
4205 	case HTOLE16(IXL_AQ_RC_EBUSY):
4206 		return (EBUSY);
4207 	case HTOLE16(IXL_AQ_RC_ESRCH):
4208 		return (ENODEV);
4209 	case HTOLE16(IXL_AQ_RC_EIO):
4210 	case HTOLE16(IXL_AQ_RC_EINVAL):
4211 	default:
4212 		return (EIO);
4213 	}
4214 
4215 	return (0);
4216 }
4217 
4218 static int
4219 ixl_get_vsi(struct ixl_softc *sc)
4220 {
4221 	struct ixl_dmamem *vsi = &sc->sc_scratch;
4222 	struct ixl_aq_desc iaq;
4223 	struct ixl_aq_vsi_param *param;
4224 	struct ixl_aq_vsi_reply *reply;
4225 	int rv;
4226 
4227 	/* grumble, vsi info isn't "known" at compile time */
4228 
4229 	memset(&iaq, 0, sizeof(iaq));
4230 	htolem16(&iaq.iaq_flags, IXL_AQ_BUF |
4231 	    (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4232 	iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS);
4233 	htolem16(&iaq.iaq_datalen, IXL_DMA_LEN(vsi));
4234 	ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4235 
4236 	param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4237 	param->uplink_seid = sc->sc_seid;
4238 
4239 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4240 	    BUS_DMASYNC_PREREAD);
4241 
4242 	rv = ixl_atq_poll(sc, &iaq, 250);
4243 
4244 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4245 	    BUS_DMASYNC_POSTREAD);
4246 
4247 	if (rv != 0) {
4248 		printf("%s: GET VSI timeout\n", DEVNAME(sc));
4249 		return (-1);
4250 	}
4251 
4252 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4253 		printf("%s: GET VSI error %u\n", DEVNAME(sc),
4254 		    lemtoh16(&iaq.iaq_retval));
4255 		return (-1);
4256 	}
4257 
4258 	reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param;
4259 	sc->sc_vsi_number = reply->vsi_number;
4260 
4261 	return (0);
4262 }
4263 
4264 static int
4265 ixl_set_vsi(struct ixl_softc *sc)
4266 {
4267 	struct ixl_dmamem *vsi = &sc->sc_scratch;
4268 	struct ixl_aq_desc iaq;
4269 	struct ixl_aq_vsi_param *param;
4270 	struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi);
4271 	int rv;
4272 
4273 	data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP |
4274 	    IXL_AQ_VSI_VALID_VLAN);
4275 
4276 	CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK));
4277 	SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG));
4278 	data->queue_mapping[0] = htole16(0);
4279 	data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) |
4280 	    (sc->sc_nqueues << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT));
4281 
4282 	CLR(data->port_vlan_flags,
4283 	    htole16(IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK));
4284 	SET(data->port_vlan_flags,
4285 	    htole16(IXL_AQ_VSI_PVLAN_MODE_ALL | IXL_AQ_VSI_PVLAN_EMOD_NOTHING));
4286 
4287 	/* grumble, vsi info isn't "known" at compile time */
4288 
4289 	memset(&iaq, 0, sizeof(iaq));
4290 	htolem16(&iaq.iaq_flags, IXL_AQ_BUF | IXL_AQ_RD |
4291 	    (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4292 	iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS);
4293 	htolem16(&iaq.iaq_datalen, IXL_DMA_LEN(vsi));
4294 	ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4295 
4296 	param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4297 	param->uplink_seid = sc->sc_seid;
4298 
4299 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4300 	    BUS_DMASYNC_PREWRITE);
4301 
4302 	rv = ixl_atq_poll(sc, &iaq, 250);
4303 
4304 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4305 	    BUS_DMASYNC_POSTWRITE);
4306 
4307 	if (rv != 0) {
4308 		printf("%s: UPDATE VSI timeout\n", DEVNAME(sc));
4309 		return (-1);
4310 	}
4311 
4312 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4313 		printf("%s: UPDATE VSI error %u\n", DEVNAME(sc),
4314 		    lemtoh16(&iaq.iaq_retval));
4315 		return (-1);
4316 	}
4317 
4318 	return (0);
4319 }
4320 
4321 static const struct ixl_phy_type *
4322 ixl_search_phy_type(uint8_t phy_type)
4323 {
4324 	const struct ixl_phy_type *itype;
4325 	uint64_t mask;
4326 	unsigned int i;
4327 
4328 	if (phy_type >= 64)
4329 		return (NULL);
4330 
4331 	mask = 1ULL << phy_type;
4332 
4333 	for (i = 0; i < nitems(ixl_phy_type_map); i++) {
4334 		itype = &ixl_phy_type_map[i];
4335 
4336 		if (ISSET(itype->phy_type, mask))
4337 			return (itype);
4338 	}
4339 
4340 	return (NULL);
4341 }
4342 
4343 static uint64_t
4344 ixl_search_link_speed(uint8_t link_speed)
4345 {
4346 	const struct ixl_speed_type *type;
4347 	unsigned int i;
4348 
4349 	for (i = 0; i < nitems(ixl_speed_type_map); i++) {
4350 		type = &ixl_speed_type_map[i];
4351 
4352 		if (ISSET(type->dev_speed, link_speed))
4353 			return (type->net_speed);
4354 	}
4355 
4356 	return (0);
4357 }
4358 
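/*
 * The link_speed byte from the firmware is a bitfield rather than an
 * enum, so the lookup above tests each entry's dev_speed mask with
 * ISSET().  A hedged sketch, assuming IXL_AQ_LINK_SPEED_10GB is one
 * of the bit flags carried by ixl_speed_type_map:
 */
#if 0
	uint64_t baudrate;

	baudrate = ixl_search_link_speed(IXL_AQ_LINK_SPEED_10GB);
	/* expected to be IF_Gbps(10) if that entry is in the map */
#endif
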
4359 static int
4360 ixl_set_link_status(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
4361 {
4362 	const struct ixl_aq_link_status *status;
4363 	const struct ixl_phy_type *itype;
4364 
4365 	uint64_t ifm_active = IFM_ETHER;
4366 	uint64_t ifm_status = IFM_AVALID;
4367 	int link_state = LINK_STATE_DOWN;
4368 	uint64_t baudrate = 0;
4369 
4370 	status = (const struct ixl_aq_link_status *)iaq->iaq_param;
4371 	if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION))
4372 		goto done;
4373 
4374 	ifm_active |= IFM_FDX;
4375 	ifm_status |= IFM_ACTIVE;
4376 	link_state = LINK_STATE_FULL_DUPLEX;
4377 
4378 	itype = ixl_search_phy_type(status->phy_type);
4379 	if (itype != NULL)
4380 		ifm_active |= itype->ifm_type;
4381 
4382 	if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX))
4383 		ifm_active |= IFM_ETH_TXPAUSE;
4384 	if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX))
4385 		ifm_active |= IFM_ETH_RXPAUSE;
4386 
4387 	baudrate = ixl_search_link_speed(status->link_speed);
4388 
4389 done:
4390 	/* NET_ASSERT_LOCKED() except during attach */
4391 	sc->sc_media_active = ifm_active;
4392 	sc->sc_media_status = ifm_status;
4393 	sc->sc_ac.ac_if.if_baudrate = baudrate;
4394 
4395 	return (link_state);
4396 }
4397 
4398 static int
4399 ixl_restart_an(struct ixl_softc *sc)
4400 {
4401 	struct ixl_aq_desc iaq;
4402 
4403 	memset(&iaq, 0, sizeof(iaq));
4404 	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN);
4405 	iaq.iaq_param[0] =
4406 	    htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE);
4407 
4408 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4409 		printf("%s: RESTART AN timeout\n", DEVNAME(sc));
4410 		return (-1);
4411 	}
4412 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4413 		printf("%s: RESTART AN error\n", DEVNAME(sc));
4414 		return (-1);
4415 	}
4416 
4417 	return (0);
4418 }
4419 
4420 static int
4421 ixl_add_macvlan(struct ixl_softc *sc, uint8_t *macaddr, uint16_t vlan,
    uint16_t flags)
4422 {
4423 	struct ixl_aq_desc iaq;
4424 	struct ixl_aq_add_macvlan *param;
4425 	struct ixl_aq_add_macvlan_elem *elem;
4426 
4427 	memset(&iaq, 0, sizeof(iaq));
4428 	iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4429 	iaq.iaq_opcode = htole16(IXL_AQ_OP_ADD_MACVLAN);
4430 	iaq.iaq_datalen = htole16(sizeof(*elem));
4431 	ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4432 
4433 	param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param;
4434 	param->num_addrs = htole16(1);
4435 	param->seid0 = htole16(0x8000) | sc->sc_seid;
4436 	param->seid1 = 0;
4437 	param->seid2 = 0;
4438 
4439 	elem = IXL_DMA_KVA(&sc->sc_scratch);
4440 	memset(elem, 0, sizeof(*elem));
4441 	memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4442 	elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags);
4443 	elem->vlan = htole16(vlan);
4444 
4445 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4446 		printf("%s: ADD_MACVLAN timeout\n", DEVNAME(sc));
4447 		return (IXL_AQ_RC_EINVAL);
4448 	}
4449 
4450 	return (letoh16(iaq.iaq_retval));
4451 }
4452 
4453 static int
4454 ixl_remove_macvlan(struct ixl_softc *sc, uint8_t *macaddr, uint16_t vlan,
    uint16_t flags)
4455 {
4456 	struct ixl_aq_desc iaq;
4457 	struct ixl_aq_remove_macvlan *param;
4458 	struct ixl_aq_remove_macvlan_elem *elem;
4459 
4460 	memset(&iaq, 0, sizeof(iaq));
4461 	iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4462 	iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN);
4463 	iaq.iaq_datalen = htole16(sizeof(*elem));
4464 	ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4465 
4466 	param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param;
4467 	param->num_addrs = htole16(1);
4468 	param->seid0 = htole16(0x8000) | sc->sc_seid;
4469 	param->seid1 = 0;
4470 	param->seid2 = 0;
4471 
4472 	elem = IXL_DMA_KVA(&sc->sc_scratch);
4473 	memset(elem, 0, sizeof(*elem));
4474 	memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4475 	elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags);
4476 	elem->vlan = htole16(vlan);
4477 
4478 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4479 		printf("%s: REMOVE_MACVLAN timeout\n", DEVNAME(sc));
4480 		return (IXL_AQ_RC_EINVAL);
4481 	}
4482 
4483 	return (letoh16(iaq.iaq_retval));
4484 }
4485 
4486 static int
4487 ixl_hmc(struct ixl_softc *sc)
4488 {
4489 	struct {
4490 		uint32_t   count;
4491 		uint32_t   minsize;
4492 		bus_size_t maxcnt;
4493 		bus_size_t setoff;
4494 		bus_size_t setcnt;
4495 	} regs[] = {
4496 		{
4497 			0,
4498 			IXL_HMC_TXQ_MINSIZE,
4499 			I40E_GLHMC_LANTXOBJSZ,
4500 			I40E_GLHMC_LANTXBASE(sc->sc_pf_id),
4501 			I40E_GLHMC_LANTXCNT(sc->sc_pf_id),
4502 		},
4503 		{
4504 			0,
4505 			IXL_HMC_RXQ_MINSIZE,
4506 			I40E_GLHMC_LANRXOBJSZ,
4507 			I40E_GLHMC_LANRXBASE(sc->sc_pf_id),
4508 			I40E_GLHMC_LANRXCNT(sc->sc_pf_id),
4509 		},
4510 		{
4511 			0,
4512 			0,
4513 			I40E_GLHMC_FCOEMAX,
4514 			I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id),
4515 			I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id),
4516 		},
4517 		{
4518 			0,
4519 			0,
4520 			I40E_GLHMC_FCOEFMAX,
4521 			I40E_GLHMC_FCOEFBASE(sc->sc_pf_id),
4522 			I40E_GLHMC_FCOEFCNT(sc->sc_pf_id),
4523 		},
4524 	};
4525 	struct ixl_hmc_entry *e;
4526 	uint64_t size, dva;
4527 	uint8_t *kva;
4528 	uint64_t *sdpage;
4529 	unsigned int i;
4530 	int npages, tables;
4531 
4532 	CTASSERT(nitems(regs) <= nitems(sc->sc_hmc_entries));
4533 
4534 	regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count =
4535 	    ixl_rd(sc, I40E_GLHMC_LANQMAX);
4536 
4537 	size = 0;
4538 	for (i = 0; i < nitems(regs); i++) {
4539 		e = &sc->sc_hmc_entries[i];
4540 
4541 		e->hmc_count = regs[i].count;
4542 		e->hmc_size = 1U << ixl_rd(sc, regs[i].maxcnt);
4543 		e->hmc_base = size;
4544 
4545 		if ((e->hmc_size * 8) < regs[i].minsize) {
4546 			printf("%s: kernel hmc entry is too big\n",
4547 			    DEVNAME(sc));
4548 			return (-1);
4549 		}
4550 
4551 		size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP);
4552 	}
4553 	size = roundup(size, IXL_HMC_PGSIZE);
4554 	npages = size / IXL_HMC_PGSIZE;
4555 
4556 	tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ;
4557 
4558 	if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) {
4559 		printf("%s: unable to allocate hmc pd memory\n", DEVNAME(sc));
4560 		return (-1);
4561 	}
4562 
4563 	if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE,
4564 	    IXL_HMC_PGSIZE) != 0) {
4565 		printf("%s: unable to allocate hmc sd memory\n", DEVNAME(sc));
4566 		ixl_dmamem_free(sc, &sc->sc_hmc_pd);
4567 		return (-1);
4568 	}
4569 
4570 	kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
4571 	memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd));
4572 
4573 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
4574 	    0, IXL_DMA_LEN(&sc->sc_hmc_pd),
4575 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4576 
4577 	dva = IXL_DMA_DVA(&sc->sc_hmc_pd);
4578 	sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd);
4579 	for (i = 0; i < npages; i++) {
4580 		htolem64(sdpage++, dva | IXL_HMC_PDVALID);
4581 
4582 		dva += IXL_HMC_PGSIZE;
4583 	}
4584 
4585 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd),
4586 	    0, IXL_DMA_LEN(&sc->sc_hmc_sd),
4587 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4588 
4589 	dva = IXL_DMA_DVA(&sc->sc_hmc_sd);
4590 	for (i = 0; i < tables; i++) {
4591 		uint32_t count;
4592 
4593 		KASSERT(npages >= 0);
4594 
4595 		count = (npages > IXL_HMC_PGS) ? IXL_HMC_PGS : npages;
4596 
4597 		ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32);
4598 		ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva |
4599 		    (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
4600 		    (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT));
4601 		ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
4602 		ixl_wr(sc, I40E_PFHMC_SDCMD,
4603 		    (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i);
4604 
4605 		npages -= IXL_HMC_PGS;
4606 		dva += IXL_HMC_PGSIZE;
4607 	}
4608 
4609 	for (i = 0; i < nitems(regs); i++) {
4610 		e = &sc->sc_hmc_entries[i];
4611 
4612 		ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP);
4613 		ixl_wr(sc, regs[i].setcnt, e->hmc_count);
4614 	}
4615 
4616 	return (0);
4617 }
4618 
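/*
 * A hedged worked example of the sizing above, with illustrative
 * register values: if I40E_GLHMC_LANQMAX reads 1536 and the tx
 * object-size register reads 7, each tx queue context occupies
 * 1 << 7 == 128 bytes, so the tx entries need 1536 * 128 == 192KiB.
 * Rounded to IXL_HMC_PGSIZE pages (assumed 4KiB), that is 48 backing
 * pages, each of which gets a PDVALID descriptor in the sd table.
 */
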
4619 static void
4620 ixl_hmc_free(struct ixl_softc *sc)
4621 {
4622 	ixl_dmamem_free(sc, &sc->sc_hmc_sd);
4623 	ixl_dmamem_free(sc, &sc->sc_hmc_pd);
4624 }
4625 
4626 static void
4627 ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing,
4628     unsigned int npacking)
4629 {
4630 	uint8_t *dst = d;
4631 	const uint8_t *src = s;
4632 	unsigned int i;
4633 
4634 	for (i = 0; i < npacking; i++) {
4635 		const struct ixl_hmc_pack *pack = &packing[i];
4636 		unsigned int offset = pack->lsb / 8;
4637 		unsigned int align = pack->lsb % 8;
4638 		const uint8_t *in = src + pack->offset;
4639 		uint8_t *out = dst + offset;
4640 		int width = pack->width;
4641 		unsigned int inbits = 0;
4642 
4643 		if (align) {
4644 			inbits = (*in++) << align;
4645 			*out++ |= (inbits & 0xff);
4646 			inbits >>= 8;
4647 
4648 			width -= 8 - align;
4649 		}
4650 
4651 		while (width >= 8) {
4652 			inbits |= (*in++) << align;
4653 			*out++ = (inbits & 0xff);
4654 			inbits >>= 8;
4655 
4656 			width -= 8;
4657 		}
4658 
4659 		if (width > 0) {
4660 			inbits |= (*in) << align;
4661 			*out |= (inbits & ((1 << width) - 1));
4662 		}
4663 	}
4664 }
4665 
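/*
 * A worked example of the packing above for a hypothetical field at
 * lsb 4 with width 12: packing the value 0xcab (stored little-endian
 * in src) ORs 0xb0 into dst[0] and writes 0xca to dst[1], leaving
 * bits 0-3 of dst[0] untouched for neighbouring fields.
 */
#if 0
	static const struct ixl_hmc_pack example[] = {
		{ .offset = 0, .width = 12, .lsb = 4 },
	};
	uint8_t src[2] = { 0xab, 0x0c };	/* 0xcab, little-endian */
	uint8_t dst[2] = { 0x00, 0x00 };

	ixl_hmc_pack(dst, src, example, nitems(example));
	/* now dst[0] == 0xb0 and dst[1] == 0xca */
#endif
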
4666 static struct ixl_aq_buf *
4667 ixl_aqb_alloc(struct ixl_softc *sc)
4668 {
4669 	struct ixl_aq_buf *aqb;
4670 
4671 	aqb = malloc(sizeof(*aqb), M_DEVBUF, M_WAITOK);
4672 	if (aqb == NULL)
4673 		return (NULL);
4674 
4675 	aqb->aqb_data = dma_alloc(IXL_AQ_BUFLEN, PR_WAITOK);
4676 	if (aqb->aqb_data == NULL)
4677 		goto free;
4678 
4679 	if (bus_dmamap_create(sc->sc_dmat, IXL_AQ_BUFLEN, 1,
4680 	    IXL_AQ_BUFLEN, 0,
4681 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
4682 	    &aqb->aqb_map) != 0)
4683 		goto dma_free;
4684 
4685 	if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,
4686 	    IXL_AQ_BUFLEN, NULL, BUS_DMA_WAITOK) != 0)
4687 		goto destroy;
4688 
4689 	return (aqb);
4690 
4691 destroy:
4692 	bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
4693 dma_free:
4694 	dma_free(aqb->aqb_data, IXL_AQ_BUFLEN);
4695 free:
4696 	free(aqb, M_DEVBUF, sizeof(*aqb));
4697 
4698 	return (NULL);
4699 }
4700 
4701 static void
4702 ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb)
4703 {
4704 	bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map);
4705 	bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
4706 	dma_free(aqb->aqb_data, IXL_AQ_BUFLEN);
4707 	free(aqb, M_DEVBUF, sizeof(*aqb));
4708 }
4709 
4710 static int
4711 ixl_arq_fill(struct ixl_softc *sc)
4712 {
4713 	struct ixl_aq_buf *aqb;
4714 	struct ixl_aq_desc *arq, *iaq;
4715 	unsigned int prod = sc->sc_arq_prod;
4716 	unsigned int n;
4717 	int post = 0;
4718 
4719 	n = if_rxr_get(&sc->sc_arq_ring, IXL_AQ_NUM);
4720 	arq = IXL_DMA_KVA(&sc->sc_arq);
4721 
4722 	while (n > 0) {
4723 		aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle);
4724 		if (aqb != NULL)
4725 			SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_idle, aqb_entry);
4726 		else if ((aqb = ixl_aqb_alloc(sc)) == NULL)
4727 			break;
4728 
4729 		memset(aqb->aqb_data, 0, IXL_AQ_BUFLEN);
4730 
4731 		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
4732 		    BUS_DMASYNC_PREREAD);
4733 
4734 		iaq = &arq[prod];
4735 		iaq->iaq_flags = htole16(IXL_AQ_BUF |
4736 		    (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4737 		iaq->iaq_opcode = 0;
4738 		iaq->iaq_datalen = htole16(IXL_AQ_BUFLEN);
4739 		iaq->iaq_retval = 0;
4740 		iaq->iaq_cookie = 0;
4741 		iaq->iaq_param[0] = 0;
4742 		iaq->iaq_param[1] = 0;
4743 		ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);
4744 
4745 		SIMPLEQ_INSERT_TAIL(&sc->sc_arq_live, aqb, aqb_entry);
4746 
4747 		prod++;
4748 		prod &= IXL_AQ_MASK;
4749 
4750 		post = 1;
4751 
4752 		n--;
4753 	}
4754 
4755 	if_rxr_put(&sc->sc_arq_ring, n);
4756 	sc->sc_arq_prod = prod;
4757 
4758 	return (post);
4759 }
4760 
4761 static void
4762 ixl_arq_unfill(struct ixl_softc *sc)
4763 {
4764 	struct ixl_aq_buf *aqb;
4765 
4766 	while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_live)) != NULL) {
4767 		SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_live, aqb_entry);
4768 
4769 		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
4770 		    BUS_DMASYNC_POSTREAD);
4771 		ixl_aqb_free(sc, aqb);
4772 	}
4773 }
4774 
4775 static void
4776 ixl_clear_hw(struct ixl_softc *sc)
4777 {
4778 	uint32_t num_queues, base_queue;
4779 	uint32_t num_pf_int;
4780 	uint32_t num_vf_int;
4781 	uint32_t num_vfs;
4782 	uint32_t i, j;
4783 	uint32_t val;
4784 
4785 	/* get number of interrupts, queues, and vfs */
4786 	val = ixl_rd(sc, I40E_GLPCI_CNF2);
4787 	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
4788 	    I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
4789 	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
4790 	    I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
4791 
4792 	val = ixl_rd(sc, I40E_PFLAN_QALLOC);
4793 	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
4794 	    I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
4795 	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
4796 	    I40E_PFLAN_QALLOC_LASTQ_SHIFT;
4797 	if (val & I40E_PFLAN_QALLOC_VALID_MASK)
4798 		num_queues = (j - base_queue) + 1;
4799 	else
4800 		num_queues = 0;
4801 
4802 	val = ixl_rd(sc, I40E_PF_VT_PFALLOC);
4803 	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
4804 	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
4805 	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
4806 	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
4807 	if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
4808 		num_vfs = (j - i) + 1;
4809 	else
4810 		num_vfs = 0;
4811 
4812 	/* stop all the interrupts */
4813 	ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
4814 	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
4815 	for (i = 0; i < num_pf_int - 2; i++)
4816 		ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val);
4817 
4818 	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
4819 	val = I40E_QUEUE_TYPE_EOL << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4820 	ixl_wr(sc, I40E_PFINT_LNKLST0, val);
4821 	for (i = 0; i < num_pf_int - 2; i++)
4822 		ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val);
4823 	val = I40E_QUEUE_TYPE_EOL << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4824 	for (i = 0; i < num_vfs; i++)
4825 		ixl_wr(sc, I40E_VPINT_LNKLST0(i), val);
4826 	for (i = 0; i < num_vf_int - 2; i++)
4827 		ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val);
4828 
4829 	/* warn the HW of the coming Tx disables */
4830 	for (i = 0; i < num_queues; i++) {
4831 		uint32_t abs_queue_idx = base_queue + i;
4832 		uint32_t reg_block = 0;
4833 
4834 		if (abs_queue_idx >= 128) {
4835 			reg_block = abs_queue_idx / 128;
4836 			abs_queue_idx %= 128;
4837 		}
4838 
4839 		val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block));
4840 		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
4841 		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
4842 		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
4843 
4844 		ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
4845 	}
4846 	delaymsec(400);
4847 
4848 	/* stop all the queues */
4849 	for (i = 0; i < num_queues; i++) {
4850 		ixl_wr(sc, I40E_QINT_TQCTL(i), 0);
4851 		ixl_wr(sc, I40E_QTX_ENA(i), 0);
4852 		ixl_wr(sc, I40E_QINT_RQCTL(i), 0);
4853 		ixl_wr(sc, I40E_QRX_ENA(i), 0);
4854 	}
4855 
4856 	/* short wait for all queue disables to settle */
4857 	delaymsec(50);
4858 }
4859 
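/*
 * The Tx pre-disable registers above are banked in blocks of 128
 * queues, so the absolute queue index is split accordingly; e.g.
 * absolute queue 300 lands in reg_block 2 at index 44, since
 * 300 == 2 * 128 + 44.
 */
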
4860 static int
4861 ixl_pf_reset(struct ixl_softc *sc)
4862 {
4863 	uint32_t cnt = 0;
4864 	uint32_t cnt1 = 0;
4865 	uint32_t reg = 0;
4866 	uint32_t grst_del;
4867 
4868 	/*
4869 	 * Poll for Global Reset steady state in case of recent GRST.
4870 	 * The grst delay value is in 100ms units, and we wait ten
4871 	 * extra counts to be sure we don't just miss the end; e.g. a
	 * GRSTDEL of 3 means polling up to 13 times, or 1.3 seconds.
4872 	 */
4873 	grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL);
4874 	grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK;
4875 	grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
4876 	grst_del += 10;
4877 
4878 	for (cnt = 0; cnt < grst_del; cnt++) {
4879 		reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
4880 		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
4881 			break;
4882 		delaymsec(100);
4883 	}
4884 	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
4885 		printf(", Global reset polling failed to complete\n");
4886 		return (-1);
4887 	}
4888 
4889 	/* Now wait for the FW to be ready */
4890 	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
4891 		reg = ixl_rd(sc, I40E_GLNVM_ULD);
4892 		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
4893 		    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
4894 		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
4895 		    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))
4896 			break;
4897 
4898 		delaymsec(10);
4899 	}
4900 	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
4901 	    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
4902 		printf(", wait for FW Reset complete timed out "
4903 		    "(I40E_GLNVM_ULD = 0x%x)\n", reg);
4904 		return (-1);
4905 	}
4906 
4907 	/*
4908 	 * If there was a Global Reset in progress when we got here,
4909 	 * we don't need to do the PF Reset
4910 	 */
4911 	if (cnt == 0) {
4912 		reg = ixl_rd(sc, I40E_PFGEN_CTRL);
4913 		ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK);
4914 		for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
4915 			reg = ixl_rd(sc, I40E_PFGEN_CTRL);
4916 			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
4917 				break;
4918 			delaymsec(1);
4919 		}
4920 		if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
4921 			printf(", PF reset polling failed to complete "
4922 			    "(I40E_PFGEN_CTRL = 0x%x)\n", reg);
4923 			return (-1);
4924 		}
4925 	}
4926 
4927 	return (0);
4928 }
4929 
4930 static uint32_t
4931 ixl_710_rd_ctl(struct ixl_softc *sc, uint32_t r)
4932 {
4933 	struct ixl_atq iatq;
4934 	struct ixl_aq_desc *iaq;
4935 	uint16_t retval;
4936 
4937 	memset(&iatq, 0, sizeof(iatq));
4938 	iaq = &iatq.iatq_desc;
4939 	iaq->iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_READ);
4940 	htolem32(&iaq->iaq_param[1], r);
4941 
4942 	ixl_atq_exec(sc, &iatq, "ixl710rd");
4943 
4944 	retval = lemtoh16(&iaq->iaq_retval);
4945 	if (retval != IXL_AQ_RC_OK) {
4946 		printf("%s: %s failed (%u)\n", DEVNAME(sc), __func__, retval);
4947 		return (~0U);
4948 	}
4949 
4950 	return (lemtoh32(&iaq->iaq_param[3]));
4951 }
4952 
4953 static void
4954 ixl_710_wr_ctl(struct ixl_softc *sc, uint32_t r, uint32_t v)
4955 {
4956 	struct ixl_atq iatq;
4957 	struct ixl_aq_desc *iaq;
4958 	uint16_t retval;
4959 
4960 	memset(&iatq, 0, sizeof(iatq));
4961 	iaq = &iatq.iatq_desc;
4962 	iaq->iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_WRITE);
4963 	htolem32(&iaq->iaq_param[1], r);
4964 	htolem32(&iaq->iaq_param[3], v);
4965 
4966 	ixl_atq_exec(sc, &iatq, "ixl710wr");
4967 
4968 	retval = lemtoh16(&iaq->iaq_retval);
4969 	if (retval != IXL_AQ_RC_OK) {
4970 		printf("%s: %s %08x=%08x failed (%u)\n",
4971 		    DEVNAME(sc), __func__, r, v, retval);
4972 	}
4973 }
4974 
4975 static int
4976 ixl_710_set_rss_key(struct ixl_softc *sc, const struct ixl_rss_key *rsskey)
4977 {
4978 	unsigned int i;
4979 
4980 	for (i = 0; i < nitems(rsskey->key); i++)
4981 		ixl_wr_ctl(sc, I40E_PFQF_HKEY(i), rsskey->key[i]);
4982 
4983 	return (0);
4984 }
4985 
4986 static int
4987 ixl_710_set_rss_lut(struct ixl_softc *sc, const struct ixl_rss_lut_128 *lut)
4988 {
4989 	unsigned int i;
4990 
4991 	for (i = 0; i < nitems(lut->entries); i++)
4992 		ixl_wr(sc, I40E_PFQF_HLUT(i), lut->entries[i]);
4993 
4994 	return (0);
4995 }
4996 
4997 static uint32_t
4998 ixl_722_rd_ctl(struct ixl_softc *sc, uint32_t r)
4999 {
5000 	return (ixl_rd(sc, r));
5001 }
5002 
5003 static void
5004 ixl_722_wr_ctl(struct ixl_softc *sc, uint32_t r, uint32_t v)
5005 {
5006 	ixl_wr(sc, r, v);
5007 }
5008 
5009 static int
5010 ixl_722_set_rss_key(struct ixl_softc *sc, const struct ixl_rss_key *rsskey)
5011 {
5012 	/* XXX */
5013 
5014 	return (0);
5015 }
5016 
5017 static int
5018 ixl_722_set_rss_lut(struct ixl_softc *sc, const struct ixl_rss_lut_128 *lut)
5019 {
5020 	/* XXX */
5021 
5022 	return (0);
5023 }
5024 
5025 static int
5026 ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm,
5027     bus_size_t size, u_int align)
5028 {
5029 	ixm->ixm_size = size;
5030 
5031 	if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
5032 	    ixm->ixm_size, 0,
5033 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
5034 	    &ixm->ixm_map) != 0)
5035 		return (1);
5036 	if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
5037 	    align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
5038 	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
5039 		goto destroy;
5040 	if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
5041 	    ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
5042 		goto free;
5043 	if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
5044 	    ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
5045 		goto unmap;
5046 
5047 	return (0);
5048 unmap:
5049 	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5050 free:
5051 	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5052 destroy:
5053 	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5054 	return (1);
5055 }
5056 
5057 static void
5058 ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm)
5059 {
5060 	bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
5061 	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5062 	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5063 	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5064 }
5065 
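/*
 * ixl_dmamem_alloc() bundles the usual four-step bus_dma dance
 * (create the map, allocate the segment, map it, load it) behind a
 * single call, unwound in reverse by ixl_dmamem_free().  A hedged
 * usage sketch; the size and alignment are illustrative:
 */
#if 0
	struct ixl_dmamem idm;

	if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0)
		return (-1);

	/* IXL_DMA_KVA() for the cpu, IXL_DMA_DVA() for the device */
	memset(IXL_DMA_KVA(&idm), 0, IXL_DMA_LEN(&idm));

	ixl_dmamem_free(sc, &idm);
#endif
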
5066 #if NKSTAT > 0
5067 
5068 CTASSERT(KSTAT_KV_U_NONE <= 0xffU);
5069 CTASSERT(KSTAT_KV_U_PACKETS <= 0xffU);
5070 CTASSERT(KSTAT_KV_U_BYTES <= 0xffU);
5071 
5072 struct ixl_counter {
5073 	const char		*c_name;
5074 	uint32_t		 c_base;
5075 	uint8_t			 c_width;
5076 	uint8_t			 c_type;
5077 };
5078 
5079 const struct ixl_counter ixl_port_counters[] = {
5080 	/* GORC */
5081 	{ "rx bytes",		0x00300000, 48, KSTAT_KV_U_BYTES },
5082 	/* MLFC */
5083 	{ "mac local errs",	0x00300020, 32, KSTAT_KV_U_NONE },
5084 	/* MRFC */
5085 	{ "mac remote errs",	0x00300040, 32, KSTAT_KV_U_NONE },
5086 	/* MSPDC */
5087 	{ "mac short",		0x00300060, 32, KSTAT_KV_U_PACKETS },
5088 	/* CRCERRS */
5089 	{ "crc errs",		0x00300080, 32, KSTAT_KV_U_PACKETS },
5090 	/* RLEC */
5091 	{ "rx len errs",	0x003000a0, 32, KSTAT_KV_U_PACKETS },
5092 	/* ERRBC */
5093 	{ "byte errs",		0x003000c0, 32, KSTAT_KV_U_PACKETS },
5094 	/* ILLERRC */
5095 	{ "illegal byte",	0x003000d0, 32, KSTAT_KV_U_PACKETS },
5096 	/* RUC */
5097 	{ "rx undersize",	0x00300100, 32, KSTAT_KV_U_PACKETS },
5098 	/* ROC */
5099 	{ "rx oversize",	0x00300120, 32, KSTAT_KV_U_PACKETS },
5100 	/* LXONRXCNT */
5101 	{ "rx link xon",	0x00300140, 32, KSTAT_KV_U_PACKETS },
5102 	/* LXOFFRXCNT */
5103 	{ "rx link xoff",	0x00300160, 32, KSTAT_KV_U_PACKETS },
5104 
5105 	/* Priority XON Received Count */
5106 	/* Priority XOFF Received Count */
5107 	/* Priority XON to XOFF Count */
5108 
5109 	/* PRC64 */
5110 	{ "rx 64B",		0x00300480, 48, KSTAT_KV_U_PACKETS },
5111 	/* PRC127 */
5112 	{ "rx 65-127B",		0x003004A0, 48, KSTAT_KV_U_PACKETS },
5113 	/* PRC255 */
5114 	{ "rx 128-255B",	0x003004C0, 48, KSTAT_KV_U_PACKETS },
5115 	/* PRC511 */
5116 	{ "rx 256-511B",	0x003004E0, 48, KSTAT_KV_U_PACKETS },
5117 	/* PRC1023 */
5118 	{ "rx 512-1023B",	0x00300500, 48, KSTAT_KV_U_PACKETS },
5119 	/* PRC1522 */
5120 	{ "rx 1024-1522B",	0x00300520, 48, KSTAT_KV_U_PACKETS },
5121 	/* PRC9522 */
5122 	{ "rx 1523-9522B",	0x00300540, 48, KSTAT_KV_U_PACKETS },
5123 	/* RFC */
5124 	{ "rx fragment",	0x00300560, 32, KSTAT_KV_U_PACKETS },
5125 	/* RJC */
5126 	{ "rx jabber",		0x00300580, 32, KSTAT_KV_U_PACKETS },
5127 	/* UPRC */
5128 	{ "rx ucasts",		0x003005a0, 48, KSTAT_KV_U_PACKETS },
5129 	/* MPRC */
5130 	{ "rx mcasts",		0x003005c0, 48, KSTAT_KV_U_PACKETS },
5131 	/* BPRC */
5132 	{ "rx bcasts",		0x003005e0, 48, KSTAT_KV_U_PACKETS },
5133 	/* RDPC */
5134 	{ "rx discards",	0x00300600, 32, KSTAT_KV_U_PACKETS },
5135 	/* LDPC */
5136 	{ "rx lo discards",	0x00300620, 32, KSTAT_KV_U_PACKETS },
5137 	/* RUPP */
5138 	{ "rx no dest",		0x00300660, 32, KSTAT_KV_U_PACKETS },
5139 
5140 	/* GOTC */
5141 	{ "tx bytes",		0x00300680, 48, KSTAT_KV_U_BYTES },
5142 	/* PTC64 */
5143 	{ "tx 64B",		0x003006A0, 48, KSTAT_KV_U_PACKETS },
5144 	/* PTC127 */
5145 	{ "tx 65-127B",		0x003006C0, 48, KSTAT_KV_U_PACKETS },
5146 	/* PTC255 */
5147 	{ "tx 128-255B",	0x003006E0, 48, KSTAT_KV_U_PACKETS },
5148 	/* PTC511 */
5149 	{ "tx 256-511B",	0x00300700, 48, KSTAT_KV_U_PACKETS },
5150 	/* PTC1023 */
5151 	{ "tx 512-1023B",	0x00300720, 48, KSTAT_KV_U_PACKETS },
5152 	/* PTC1522 */
5153 	{ "tx 1024-1522B",	0x00300740, 48, KSTAT_KV_U_PACKETS },
5154 	/* PTC9522 */
5155 	{ "tx 1523-9522B",	0x00300760, 48, KSTAT_KV_U_PACKETS },
5156 
5157 	/* Priority XON Transmitted Count */
5158 	/* Priority XOFF Transmitted Count */
5159 
5160 	/* LXONTXC */
5161 	{ "tx link xon",	0x00300980, 48, KSTAT_KV_U_PACKETS },
5162 	/* LXOFFTXC */
5163 	{ "tx link xoff",	0x003009a0, 48, KSTAT_KV_U_PACKETS },
5164 	/* UPTC */
5165 	{ "tx ucasts",		0x003009c0, 48, KSTAT_KV_U_PACKETS },
5166 	/* MPTC */
5167 	{ "tx mcasts",		0x003009e0, 48, KSTAT_KV_U_PACKETS },
5168 	/* BPTC */
5169 	{ "tx bcasts",		0x00300a00, 48, KSTAT_KV_U_PACKETS },
5170 	/* TDOLD */
5171 	{ "tx link down",	0x00300a20, 48, KSTAT_KV_U_PACKETS },
5172 };
5173 
5174 const struct ixl_counter ixl_vsi_counters[] = {
5175 	/* VSI RDPC */
5176 	{ "rx discards",	0x00310000, 32, KSTAT_KV_U_PACKETS },
5177 	/* VSI GOTC */
5178 	{ "tx bytes",		0x00328000, 48, KSTAT_KV_U_BYTES },
5179 	/* VSI UPTC */
5180 	{ "tx ucasts",		0x0033c000, 48, KSTAT_KV_U_PACKETS },
5181 	/* VSI MPTC */
5182 	{ "tx mcasts",		0x0033cc00, 48, KSTAT_KV_U_PACKETS },
5183 	/* VSI BPTC */
5184 	{ "tx bcasts",		0x0033d800, 48, KSTAT_KV_U_PACKETS },
5185 	/* VSI TEPC */
5186 	{ "tx errs",		0x00344000, 48, KSTAT_KV_U_PACKETS },
5187 	/* VSI TDPC */
5188 	{ "tx discards",	0x00348000, 48, KSTAT_KV_U_PACKETS },
5189 	/* VSI GORC */
5190 	{ "rx bytes",		0x00358000, 48, KSTAT_KV_U_BYTES },
5191 	/* VSI UPRC */
5192 	{ "rx ucasts",		0x0036c000, 48, KSTAT_KV_U_PACKETS },
5193 	/* VSI MPRC */
5194 	{ "rx mcasts",		0x0036cc00, 48, KSTAT_KV_U_PACKETS },
5195 	/* VSI BPRC */
5196 	{ "rx bcasts",		0x0036d800, 48, KSTAT_KV_U_PACKETS },
5197 	/* VSI RUPP */
5198 	{ "rx noproto",		0x0036e400, 32, KSTAT_KV_U_PACKETS },
5199 };
5200 
5201 struct ixl_counter_state {
5202 	const struct ixl_counter
5203 				*counters;
5204 	uint64_t		*values;
5205 	size_t			 n;
5206 	uint32_t		 index;
5207 	unsigned int		 gen;
5208 };
5209 
5210 static void
5211 ixl_rd_counters(struct ixl_softc *sc, const struct ixl_counter_state *state,
5212     uint64_t *vs)
5213 {
5214 	const struct ixl_counter *c;
5215 	bus_addr_t r;
5216 	uint64_t v;
5217 	size_t i;
5218 
5219 	for (i = 0; i < state->n; i++) {
5220 		c = &state->counters[i];
5221 
5222 		r = c->c_base + (state->index * 8);
5223 
5224 		if (c->c_width == 32)
5225 			v = bus_space_read_4(sc->sc_memt, sc->sc_memh, r);
5226 		else
5227 			v = bus_space_read_8(sc->sc_memt, sc->sc_memh, r);
5228 
5229 		vs[i] = v;
5230 	}
5231 }
5232 
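/*
 * The statistics registers use an 8-byte stride per port/VSI index,
 * hence c_base + index * 8 above; e.g. the GORC "rx bytes" counter
 * at base 0x00300000 is read at 0x00300008 for index 1.
 */
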
5233 static int
5234 ixl_kstat_read(struct kstat *ks)
5235 {
5236 	struct ixl_softc *sc = ks->ks_softc;
5237 	struct kstat_kv *kvs = ks->ks_data;
5238 	struct ixl_counter_state *state = ks->ks_ptr;
5239 	unsigned int gen = (state->gen++) & 1;
5240 	uint64_t *ovs = state->values + (gen * state->n);
5241 	uint64_t *nvs = state->values + (!gen * state->n);
5242 	size_t i;
5243 
5244 	ixl_rd_counters(sc, state, nvs);
5245 	getnanouptime(&ks->ks_updated);
5246 
5247 	for (i = 0; i < state->n; i++) {
5248 		const struct ixl_counter *c = &state->counters[i];
5249 		uint64_t n = nvs[i], o = ovs[i];
5250 
5251 		if (c->c_width < 64) {
5252 			if (n < o)
5253 				n += (1ULL << c->c_width);
5254 		}
5255 
5256 		kstat_kv_u64(&kvs[i]) += (n - o);
5257 	}
5258 
5259 	return (0);
5260 }
5261 
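/*
 * Hardware counters narrower than 64 bits wrap, so the read above
 * widens the new sample before differencing.  A worked example for a
 * 32-bit counter: if the previous sample was 0xfffffff0 and the new
 * one is 0x00000010, then n < o, n becomes 0x100000010, and the
 * accumulated delta is 0x20.
 */
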
5262 static void
5263 ixl_kstat_tick(void *arg)
5264 {
5265 	struct ixl_softc *sc = arg;
5266 
5267 	timeout_add_sec(&sc->sc_kstat_tmo, 4);
5268 
5269 	mtx_enter(&sc->sc_kstat_mtx);
5270 
5271 	ixl_kstat_read(sc->sc_port_kstat);
5272 	ixl_kstat_read(sc->sc_vsi_kstat);
5273 
5274 	mtx_leave(&sc->sc_kstat_mtx);
5275 }
5276 
5277 static struct kstat *
5278 ixl_kstat_create(struct ixl_softc *sc, const char *name,
5279     const struct ixl_counter *counters, size_t n, uint32_t index)
5280 {
5281 	struct kstat *ks;
5282 	struct kstat_kv *kvs;
5283 	struct ixl_counter_state *state;
5284 	const struct ixl_counter *c;
5285 	unsigned int i;
5286 
5287 	ks = kstat_create(DEVNAME(sc), 0, name, 0, KSTAT_T_KV, 0);
5288 	if (ks == NULL) {
5289 		/* unable to create kstats */
5290 		return (NULL);
5291 	}
5292 
5293 	kvs = mallocarray(n, sizeof(*kvs), M_DEVBUF, M_WAITOK|M_ZERO);
5294 	for (i = 0; i < n; i++) {
5295 		c = &counters[i];
5296 
5297 		kstat_kv_unit_init(&kvs[i], c->c_name,
5298 		    KSTAT_KV_T_COUNTER64, c->c_type);
5299 	}
5300 
5301 	ks->ks_data = kvs;
5302 	ks->ks_datalen = n * sizeof(*kvs);
5303 	ks->ks_read = ixl_kstat_read;
5304 
5305 	state = malloc(sizeof(*state), M_DEVBUF, M_WAITOK|M_ZERO);
5306 	state->counters = counters;
5307 	state->n = n;
5308 	state->values = mallocarray(n * 2, sizeof(*state->values),
5309 	    M_DEVBUF, M_WAITOK|M_ZERO);
5310 	state->index = index;
5311 	ks->ks_ptr = state;
5312 
5313 	kstat_set_mutex(ks, &sc->sc_kstat_mtx);
5314 	ks->ks_softc = sc;
5315 	kstat_install(ks);
5316 
5317 	/* fetch a baseline */
5318 	ixl_rd_counters(sc, state, state->values);
5319 
5320 	return (ks);
5321 }
5322 
5323 static void
5324 ixl_kstat_attach(struct ixl_softc *sc)
5325 {
5326 	mtx_init(&sc->sc_kstat_mtx, IPL_SOFTCLOCK);
5327 	timeout_set(&sc->sc_kstat_tmo, ixl_kstat_tick, sc);
5328 
5329 	sc->sc_port_kstat = ixl_kstat_create(sc, "ixl-port",
5330 	    ixl_port_counters, nitems(ixl_port_counters), sc->sc_port);
5331 	sc->sc_vsi_kstat = ixl_kstat_create(sc, "ixl-vsi",
5332 	    ixl_vsi_counters, nitems(ixl_vsi_counters),
5333 	    lemtoh16(&sc->sc_vsi_number));
5334 
5335 	/* ixl counters go up even when the interface is down */
5336 	timeout_add_sec(&sc->sc_kstat_tmo, 4);
5337 }
5338 
5339 #endif /* NKSTAT > 0 */
5340