/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "ixl.h"
#include "ixl_pf.h"

#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

/*********************************************************************
 *  Driver version
 *********************************************************************/
#define IXL_DRIVER_VERSION_MAJOR	2
#define IXL_DRIVER_VERSION_MINOR	2
#define IXL_DRIVER_VERSION_BUILD	0

#define IXL_DRIVER_VERSION_STRING			\
    __XSTRING(IXL_DRIVER_VERSION_MAJOR) "."		\
    __XSTRING(IXL_DRIVER_VERSION_MINOR) "."		\
    __XSTRING(IXL_DRIVER_VERSION_BUILD) "-k"

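/*
 * With the component values above, IXL_DRIVER_VERSION_STRING expands
 * to "2.2.0-k"; __XSTRING() stringifies each numeric macro.
 */
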
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select the devices this driver loads on
 *
 *  ( Vendor ID, Device ID, Branding String )
 *********************************************************************/

static pci_vendor_info_t ixl_vendor_info_array[] =
{
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Controller XL710 for 40GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Controller X710 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, "Intel(R) Ethernet Controller X710/X557-AT 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, "Intel(R) Ethernet Connection X722 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 1GbE"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, "Intel(R) Ethernet Controller XXV710 for 25GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, "Intel(R) Ethernet Controller XXV710 for 25GbE SFP28"),
	/* required last entry */
	PVID_END
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
/*** IFLIB interface ***/
static void	*ixl_register(device_t dev);
static int	 ixl_if_attach_pre(if_ctx_t ctx);
static int	 ixl_if_attach_post(if_ctx_t ctx);
static int	 ixl_if_detach(if_ctx_t ctx);
static int	 ixl_if_shutdown(if_ctx_t ctx);
static int	 ixl_if_suspend(if_ctx_t ctx);
static int	 ixl_if_resume(if_ctx_t ctx);
static int	 ixl_if_msix_intr_assign(if_ctx_t ctx, int msix);
static void	 ixl_if_enable_intr(if_ctx_t ctx);
static void	 ixl_if_disable_intr(if_ctx_t ctx);
static int	 ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
static int	 ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
static int	 ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
static int	 ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
static void	 ixl_if_queues_free(if_ctx_t ctx);
static void	 ixl_if_update_admin_status(if_ctx_t ctx);
static void	 ixl_if_multi_set(if_ctx_t ctx);
static int	 ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void	 ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
static int	 ixl_if_media_change(if_ctx_t ctx);
static int	 ixl_if_promisc_set(if_ctx_t ctx, int flags);
static void	 ixl_if_timer(if_ctx_t ctx, uint16_t qid);
static void	 ixl_if_vlan_register(if_ctx_t ctx, u16 vtag);
static void	 ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
static uint64_t	 ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt);
static int	 ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
static int	 ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
static bool	 ixl_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
#ifdef PCI_IOV
static void	 ixl_if_vflr_handle(if_ctx_t ctx);
#endif

/*** Other ***/
static u_int	 ixl_mc_filter_apply(void *, struct sockaddr_dl *, u_int);
static void	 ixl_save_pf_tunables(struct ixl_pf *);
static int	 ixl_allocate_pci_resources(struct ixl_pf *);
static void	 ixl_setup_ssctx(struct ixl_pf *pf);
static void	 ixl_admin_timer(void *arg);

133  *  FreeBSD Device Interface Entry Points
134  *********************************************************************/
135 
136 static device_method_t ixl_methods[] = {
137 	/* Device interface */
138 	DEVMETHOD(device_register, ixl_register),
139 	DEVMETHOD(device_probe, iflib_device_probe),
140 	DEVMETHOD(device_attach, iflib_device_attach),
141 	DEVMETHOD(device_detach, iflib_device_detach),
142 	DEVMETHOD(device_shutdown, iflib_device_shutdown),
143 #ifdef PCI_IOV
144 	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
145 	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
146 	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
147 #endif
148 	DEVMETHOD_END
149 };
150 
151 static driver_t ixl_driver = {
152 	"ixl", ixl_methods, sizeof(struct ixl_pf),
153 };
154 
155 devclass_t ixl_devclass;
156 DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
157 IFLIB_PNP_INFO(pci, ixl, ixl_vendor_info_array);
158 MODULE_VERSION(ixl, 3);
159 
160 MODULE_DEPEND(ixl, pci, 1, 1, 1);
161 MODULE_DEPEND(ixl, ether, 1, 1, 1);
162 MODULE_DEPEND(ixl, iflib, 1, 1, 1);
163 
static device_method_t ixl_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixl_if_attach_post),
	DEVMETHOD(ifdi_detach, ixl_if_detach),
	DEVMETHOD(ifdi_shutdown, ixl_if_shutdown),
	DEVMETHOD(ifdi_suspend, ixl_if_suspend),
	DEVMETHOD(ifdi_resume, ixl_if_resume),
	DEVMETHOD(ifdi_init, ixl_if_init),
	DEVMETHOD(ifdi_stop, ixl_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixl_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixl_if_disable_intr),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixl_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixl_if_tx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixl_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixl_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixl_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixl_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set),
	DEVMETHOD(ifdi_media_status, ixl_if_media_status),
	DEVMETHOD(ifdi_media_change, ixl_if_media_change),
	DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set),
	DEVMETHOD(ifdi_timer, ixl_if_timer),
	DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ixl_if_get_counter),
	DEVMETHOD(ifdi_i2c_req, ixl_if_i2c_req),
	DEVMETHOD(ifdi_priv_ioctl, ixl_if_priv_ioctl),
	DEVMETHOD(ifdi_needs_restart, ixl_if_needs_restart),
#ifdef PCI_IOV
	DEVMETHOD(ifdi_iov_init, ixl_if_iov_init),
	DEVMETHOD(ifdi_iov_uninit, ixl_if_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, ixl_if_iov_vf_add),
	DEVMETHOD(ifdi_vflr_handle, ixl_if_vflr_handle),
#endif
	// ifdi_led_func
	// ifdi_debug
	DEVMETHOD_END
};

static driver_t ixl_if_driver = {
	"ixl_if", ixl_if_methods, sizeof(struct ixl_pf)
};

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "ixl driver parameters");

#ifdef IXL_DEBUG_FC
/*
 * Leave this on unless you need to send flow control
 * frames (or other control frames) from software
 */
static int ixl_enable_tx_fc_filter = 1;
TUNABLE_INT("hw.ixl.enable_tx_fc_filter",
    &ixl_enable_tx_fc_filter);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
    &ixl_enable_tx_fc_filter, 0,
    "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");
#endif

#ifdef IXL_DEBUG
static int ixl_debug_recovery_mode = 0;
TUNABLE_INT("hw.ixl.debug_recovery_mode",
    &ixl_debug_recovery_mode);
SYSCTL_INT(_hw_ixl, OID_AUTO, debug_recovery_mode, CTLFLAG_RDTUN,
    &ixl_debug_recovery_mode, 0,
    "Act as if the FW had entered recovery mode (for debugging)");
#endif

static int ixl_i2c_access_method = 0;
TUNABLE_INT("hw.ixl.i2c_access_method",
    &ixl_i2c_access_method);
SYSCTL_INT(_hw_ixl, OID_AUTO, i2c_access_method, CTLFLAG_RDTUN,
    &ixl_i2c_access_method, 0,
    IXL_SYSCTL_HELP_I2C_METHOD);

static int ixl_enable_vf_loopback = 1;
TUNABLE_INT("hw.ixl.enable_vf_loopback",
    &ixl_enable_vf_loopback);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_vf_loopback, CTLFLAG_RDTUN,
    &ixl_enable_vf_loopback, 0,
    IXL_SYSCTL_HELP_VF_LOOPBACK);

/*
 * Selects the method used to process TX descriptor
 * completion.
 */
static int ixl_enable_head_writeback = 1;
TUNABLE_INT("hw.ixl.enable_head_writeback",
    &ixl_enable_head_writeback);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
    &ixl_enable_head_writeback, 0,
    "Detect the last completed TX descriptor from the head index the HW writes back instead of scanning the descriptors");

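/*
 * With head writeback the HW DMAs the ring's consumed head index into
 * host memory; with descriptor writeback, completion is instead marked
 * in the descriptors themselves.  The two modes are implemented by the
 * ixl_txrx_hwb and ixl_txrx_dwb if_txrx structures declared further
 * down.
 */
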
static int ixl_core_debug_mask = 0;
TUNABLE_INT("hw.ixl.core_debug_mask",
    &ixl_core_debug_mask);
SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
    &ixl_core_debug_mask, 0,
    "Display debug statements that are printed in non-shared code");

static int ixl_shared_debug_mask = 0;
TUNABLE_INT("hw.ixl.shared_debug_mask",
    &ixl_shared_debug_mask);
SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
    &ixl_shared_debug_mask, 0,
    "Display debug statements that are printed in shared code");

#if 0
/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
**	- default values for static ITR
*/
static int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

static int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
#endif

static int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

static int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");

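/*
 * The ITR tunables are interrupt throttle settings rather than raw
 * rates; the IXL_ITR_8K and IXL_ITR_4K defaults cap each queue at
 * roughly 8000 and 4000 interrupts per second, respectively.
 */
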
#ifdef IXL_IW
int ixl_enable_iwarp = 0;
TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN,
    &ixl_enable_iwarp, 0, "iWARP enabled");

#if __FreeBSD_version < 1100000
int ixl_limit_iwarp_msix = 1;
#else
int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX;
#endif
TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN,
    &ixl_limit_iwarp_msix, 0, "Limit MSI-X vectors assigned to iWARP");
#endif

extern struct if_txrx ixl_txrx_hwb;
extern struct if_txrx ixl_txrx_dwb;

static struct if_shared_ctx ixl_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_rx_maxsize = 16384,
	.isc_rx_nsegments = IXL_MAX_RX_SEGS,
	.isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixl_vendor_info_array,
	.isc_driver_version = IXL_DRIVER_VERSION_STRING,
	.isc_driver = &ixl_if_driver,
	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_ADMIN_ALWAYS_RUN,

	.isc_nrxd_min = {IXL_MIN_RING},
	.isc_ntxd_min = {IXL_MIN_RING},
	.isc_nrxd_max = {IXL_MAX_RING},
	.isc_ntxd_max = {IXL_MAX_RING},
	.isc_nrxd_default = {IXL_DEFAULT_RING},
	.isc_ntxd_default = {IXL_DEFAULT_RING},
};

if_shared_ctx_t ixl_sctx = &ixl_sctx_init;

/*** Functions ***/
static void *
ixl_register(device_t dev)
{
	return (ixl_sctx);
}

static int
ixl_allocate_pci_resources(struct ixl_pf *pf)
{
	device_t dev = iflib_get_dev(pf->vsi.ctx);
	struct i40e_hw *hw = &pf->hw;
	int             rid;

	/* Map BAR0 */
	rid = PCIR_BAR(0);
	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(pf->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
		return (ENXIO);
	}

	/* Save off the PCI information */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	/* Save off register access information */
	pf->osdep.mem_bus_space_tag =
		rman_get_bustag(pf->pci_mem);
	pf->osdep.mem_bus_space_handle =
		rman_get_bushandle(pf->pci_mem);
	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
	pf->osdep.flush_reg = I40E_GLGEN_STAT;
	pf->osdep.dev = dev;

	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
	pf->hw.back = &pf->osdep;

	return (0);
}

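/*
 * Note that hw->hw_addr set above is not a directly dereferenceable
 * CPU pointer: the shared code's register accessors are routed through
 * the bus_space tag/handle saved in the osdep structure.
 */
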
static void
ixl_setup_ssctx(struct ixl_pf *pf)
{
	if_softc_ctx_t scctx = pf->vsi.shared;
	struct i40e_hw *hw = &pf->hw;

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
		scctx->isc_ntxqsets = scctx->isc_nrxqsets = 1;
	} else if (hw->mac.type == I40E_MAC_X722)
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128;
	else
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;

	if (pf->vsi.enable_head_writeback) {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_hwb;
	} else {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_dwb;
	}

	scctx->isc_txrx->ift_legacy_intr = ixl_intr;
	scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
	    * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
	scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
	scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
	scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
	scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
	scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size;
	scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
	scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;
}

static void
ixl_admin_timer(void *arg)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg;

	/* Fire off the admin task */
	iflib_admin_intr_deferred(pf->vsi.ctx);

	/* Reschedule the admin timer */
	callout_schedule(&pf->admin_timer, hz/2);
}

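/*
 * The admin timer is self-rescheduling: each firing queues the iflib
 * admin task (which runs ixl_if_update_admin_status()) and then
 * re-arms the callout, so admin-queue work happens roughly twice per
 * second while the callout is active.
 */
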
static int
ixl_attach_pre_recovery_mode(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	device_printf(dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");

	i40e_get_mac_addr(hw, hw->mac.addr);

	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);
	}

	ixl_setup_ssctx(pf);

	return (0);
}

static int
ixl_if_attach_pre(if_ctx_t ctx)
{
	device_t dev;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	enum i40e_get_fw_lldp_status_resp lldp_status;
	struct i40e_filter_control_settings filter;
	enum i40e_status_code status;
	int error = 0;

	dev = iflib_get_dev(ctx);
	pf = iflib_get_softc(ctx);

	INIT_DBG_DEV(dev, "begin");

	vsi = &pf->vsi;
	vsi->back = pf;
	pf->dev = dev;
	hw = &pf->hw;

	vsi->dev = dev;
	vsi->hw = &pf->hw;
	vsi->id = 0;
	vsi->num_vlans = 0;
	vsi->ctx = ctx;
	vsi->media = iflib_get_media(ctx);
	vsi->shared = iflib_get_softc_ctx(ctx);

	snprintf(pf->admin_mtx_name, sizeof(pf->admin_mtx_name),
	    "%s:admin", device_get_nameunit(dev));
	mtx_init(&pf->admin_mtx, pf->admin_mtx_name, NULL, MTX_DEF);
	callout_init_mtx(&pf->admin_timer, &pf->admin_mtx, 0);

	/* Save tunable values */
	ixl_save_pf_tunables(pf);

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_pci_res;
	}

	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	i40e_set_mac_type(hw);

	error = ixl_pf_reset(pf);
	if (error)
		goto err_out;

	/* Initialize the shared code */
	status = i40e_init_shared_code(hw);
	if (status) {
		device_printf(dev, "Unable to initialize shared code, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}

	/* Set up the admin queue */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
	hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;

	status = i40e_init_adminq(hw);
	if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "Unable to initialize Admin Queue, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}
	ixl_print_nvm_version(pf);

	if (status == I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n");
		device_printf(dev, "You must install the most recent version of "
		    "the network driver.\n");
		error = EIO;
		goto err_out;
	}

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n");
		device_printf(dev, "Please install the most recent version "
		    "of the network driver.\n");
	} else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) {
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n");
		device_printf(dev, "Please update the NVM image.\n");
	}

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		error = ixl_attach_pre_recovery_mode(pf);
		if (error)
			goto err_out;
		return (error);
	}

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "get_hw_capabilities failed: %d\n",
		    error);
		goto err_get_cap;
	}

	/* Set up host memory cache */
	error = ixl_setup_hmc(pf);
	if (error)
		goto err_mac_hmc;

	/* Disable LLDP from the firmware for certain NVM versions */
	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	    (pf->hw.aq.fw_maj_ver < 4)) {
		i40e_aq_stop_lldp(hw, true, false, NULL);
		pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED;
	}

	/* Get MAC addresses from hardware */
	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	iflib_set_mac(ctx, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
	filter.enable_fdir = FALSE;
	filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "i40e_set_filter_control() failed\n");

	/* Query device FW LLDP status */
	if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) {
		if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) {
			atomic_set_32(&pf->state,
			    IXL_PF_STATE_FW_LLDP_DISABLED);
		} else {
			atomic_clear_32(&pf->state,
			    IXL_PF_STATE_FW_LLDP_DISABLED);
		}
	}

	/* Tell FW to apply DCB config on link up */
	i40e_aq_set_dcb_parameters(hw, true, NULL);

	/* Fill out iflib parameters */
	ixl_setup_ssctx(pf);

	INIT_DBG_DEV(dev, "end");
	return (0);

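/*
 * Error unwind: each label below releases the resources acquired
 * before the corresponding failure point, in reverse order.
 */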
err_mac_hmc:
	ixl_shutdown_hmc(pf);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	ixl_free_pci_resources(pf);
err_pci_res:
	mtx_lock(&pf->admin_mtx);
	callout_stop(&pf->admin_timer);
	mtx_unlock(&pf->admin_mtx);
	mtx_destroy(&pf->admin_mtx);
	return (error);
}

static int
ixl_if_attach_post(if_ctx_t ctx)
{
	device_t dev;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	int error = 0;
	enum i40e_status_code status;

	dev = iflib_get_dev(ctx);
	pf = iflib_get_softc(ctx);

	INIT_DBG_DEV(dev, "begin");

	vsi = &pf->vsi;
	vsi->ifp = iflib_get_ifp(ctx);
	hw = &pf->hw;

	/* Save off determined number of queues for interface */
	vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
	vsi->num_tx_queues = vsi->shared->isc_ntxqsets;

	/* Setup OS network interface / ifnet */
	if (ixl_setup_interface(dev, pf)) {
		device_printf(dev, "interface setup failed!\n");
		error = EIO;
		goto err;
	}

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		/* Keep admin queue interrupts active while driver is loaded */
		if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
			ixl_configure_intr0_msix(pf);
			ixl_enable_intr0(hw);
		}

		ixl_add_sysctls_recovery_mode(pf);

		/* Start the admin timer */
		mtx_lock(&pf->admin_mtx);
		callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
		mtx_unlock(&pf->admin_mtx);
		return (0);
	}

	/* Determine link state */
	if (ixl_attach_get_link_status(pf)) {
		error = EINVAL;
		goto err;
	}

	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
		     error);
		goto err;
	}

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Init queue allocation manager */
	error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp);
	if (error) {
		device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
		    error);
		goto err;
	}
	/* reserve a contiguous allocation for the PF's VSI */
	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr,
	    max(vsi->num_rx_queues, vsi->num_tx_queues), &pf->qtag);
	if (error) {
		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
		    error);
		goto err;
	}
	device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
	    pf->qtag.num_allocated, pf->qtag.num_active);

	/* Limit PHY interrupts to link, autoneg, and module failure */
	status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
	if (status) {
		device_printf(dev, "i40e_aq_set_phy_int_mask() failed: err %s,"
		    " aq_err %s\n", i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		goto err;
	}

	/* Get the bus configuration and set the shared code */
	ixl_get_bus_info(pf);

	/* Keep admin queue interrupts active while driver is loaded */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);
	}

	/* Set initial advertised speed sysctl value */
	ixl_set_initial_advertised_speeds(pf);

	/* Initialize statistics & add sysctls */
	ixl_add_device_sysctls(pf);
	ixl_pf_reset_stats(pf);
	ixl_update_stats_counters(pf);
	ixl_add_hw_stats(pf);

	hw->phy.get_link_info = true;
	i40e_get_link_status(hw, &pf->link_up);
	ixl_update_link_status(pf);

#ifdef PCI_IOV
	ixl_initialize_sriov(pf);
#endif

#ifdef IXL_IW
	if (hw->func_caps.iwarp && ixl_enable_iwarp) {
		pf->iw_enabled = (pf->iw_msix > 0) ? true : false;
		if (pf->iw_enabled) {
			error = ixl_iw_pf_attach(pf);
			if (error) {
				device_printf(dev,
				    "interfacing to iWARP driver failed: %d\n",
				    error);
				goto err;
			} else
				device_printf(dev, "iWARP ready\n");
		} else
			device_printf(dev, "iWARP disabled on this device "
			    "(no MSI-X vectors)\n");
	} else {
		pf->iw_enabled = false;
		device_printf(dev, "The device is not iWARP enabled\n");
	}
#endif
	/* Start the admin timer */
	mtx_lock(&pf->admin_mtx);
	callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
	mtx_unlock(&pf->admin_mtx);

	INIT_DBG_DEV(dev, "end");
	return (0);

err:
	INIT_DEBUGOUT("end: error %d", error);
	/* ixl_if_detach() is called on error from this */
	return (error);
}

/**
 * XXX: iflib always ignores the return value of detach(), so this
 * function is not allowed to fail.
 */
static int
ixl_if_detach(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code	status;
#ifdef IXL_IW
	int			error;
#endif

	INIT_DBG_DEV(dev, "begin");

	/* Stop the admin timer */
	mtx_lock(&pf->admin_mtx);
	callout_stop(&pf->admin_timer);
	mtx_unlock(&pf->admin_mtx);
	mtx_destroy(&pf->admin_mtx);

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		error = ixl_iw_pf_detach(pf);
		if (error == EBUSY) {
			device_printf(dev, "iwarp in use; stop it first.\n");
			//return (error);
		}
	}
#endif
	/* Remove all previously allocated media types */
	ifmedia_removeall(vsi->media);

	/* Shutdown LAN HMC */
	ixl_shutdown_hmc(pf);

	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "i40e_shutdown_adminq() failed with status %s\n",
		    i40e_stat_str(hw, status));

	ixl_pf_qmgr_destroy(&pf->qmgr);
	ixl_free_pci_resources(pf);
	ixl_free_mac_filters(vsi);
	INIT_DBG_DEV(dev, "end");
	return (0);
}

static int
ixl_if_shutdown(if_ctx_t ctx)
{
	int error = 0;

	INIT_DEBUGOUT("ixl_if_shutdown: begin");

	/* TODO: Call ixl_if_stop()? */

	/* TODO: Then setup low power mode */

	return (error);
}

static int
ixl_if_suspend(if_ctx_t ctx)
{
	int error = 0;

	INIT_DEBUGOUT("ixl_if_suspend: begin");

	/* TODO: Call ixl_if_stop()? */

	/* TODO: Then setup low power mode */

	return (error);
}

static int
ixl_if_resume(if_ctx_t ctx)
{
	struct ifnet *ifp = iflib_get_ifp(ctx);

	INIT_DEBUGOUT("ixl_if_resume: begin");

	/* Read & clear wake-up registers */

	/* Required after D3->D0 transition */
	if (ifp->if_flags & IFF_UP)
		ixl_if_init(ctx);

	return (0);
}

void
ixl_if_init(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw	*hw = &pf->hw;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	device_t 	dev = iflib_get_dev(ctx);
	u8		tmpaddr[ETHER_ADDR_LEN];
	int		ret;

	if (IXL_PF_IN_RECOVERY_MODE(pf))
		return;
	/*
	 * If the aq is dead here, it probably means something outside of the
	 * driver did something to the adapter, like a PF reset.
	 * So, rebuild the driver's state here if that occurs.
	 */
	if (!i40e_check_asq_alive(&pf->hw)) {
		device_printf(dev, "Admin Queue is down; resetting...\n");
		ixl_teardown_hw_structs(pf);
		ixl_rebuild_hw_structs_after_reset(pf, false);
	}

	/* Get the latest mac address... user might use a LAA */
	bcopy(IF_LLADDR(vsi->ifp), tmpaddr, ETH_ALEN);
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
		bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		if (ret) {
			device_printf(dev, "LLA address change failed!\n");
			return;
		}
		ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
	}

	iflib_set_mac(ctx, hw->mac.addr);

	/* Prepare the VSI: rings, hmc contexts, etc... */
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!\n");
		return;
	}

	/* Reconfigure multicast filters in HW */
	ixl_if_multi_set(ctx);

	/* Set up RSS */
	ixl_config_rss(pf);

	/* Set up MSI-X routing and the ITR settings */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_queue_intr_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	if (vsi->enable_head_writeback)
		ixl_init_tx_cidx(vsi);
	else
		ixl_init_tx_rsqs(vsi);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	/* Re-add configured filters to HW */
	ixl_reconfigure_filters(vsi);

	/* Configure promiscuous mode */
	ixl_if_promisc_set(ctx, if_getflags(ifp));

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		ret = ixl_iw_pf_init(pf);
		if (ret)
			device_printf(dev,
			    "initialize iwarp failed, code %d\n", ret);
	}
#endif
}

void
ixl_if_stop(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	INIT_DEBUGOUT("ixl_if_stop: begin\n");

	if (IXL_PF_IN_RECOVERY_MODE(pf))
		return;

	// TODO: This may need to be reworked
#ifdef IXL_IW
	/* Stop iWARP device */
	if (ixl_enable_iwarp && pf->iw_enabled)
		ixl_iw_pf_stop(pf);
#endif

	ixl_disable_rings_intr(vsi);
	ixl_disable_rings(pf, vsi, &pf->qtag);
}

static int
ixl_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *rx_que = vsi->rx_queues;
	struct ixl_tx_queue *tx_que = vsi->tx_queues;
	int err, i, rid, vector = 0;
	char buf[16];

	MPASS(vsi->shared->isc_nrxqsets > 0);
	MPASS(vsi->shared->isc_ntxqsets > 0);

	/* Admin Queue must use vector 0 */
	rid = vector + 1;
	err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
	    ixl_msix_adminq, pf, 0, "aq");
	if (err) {
		iflib_irq_free(ctx, &vsi->irq);
		device_printf(iflib_get_dev(ctx),
		    "Failed to register Admin Queue handler\n");
		return (err);
	}
	/* Create soft IRQ for handling VFLRs */
	iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_IOV, pf, 0, "iov");

	/* Now set up the stations */
	for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RX, ixl_msix_que, rx_que, rx_que->rxr.me, buf);
		/* XXX: Does the driver work as expected if there are fewer num_rx_queues than
		 * what's expected in the iflib context? */
		if (err) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
			vsi->num_rx_queues = i + 1;
			goto fail;
		}
		rx_que->msix = vector;
	}

	bzero(buf, sizeof(buf));

	for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		iflib_softirq_alloc_generic(ctx,
		    &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);

		/* TODO: Maybe call a strategy function for this to figure out which
		 * interrupts to map Tx queues to. I don't know if there's an immediately
		 * better way than this other than a user-supplied map, though. */
		tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
	}
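	/*
	 * Worked example of the modulo mapping above: with 4 RX vectors
	 * and 8 TX queues, txq0..txq7 share MSI-X vectors 1,2,3,4,1,2,3,4
	 * with their corresponding RX queues.
	 */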

	return (0);
fail:
	iflib_irq_free(ctx, &vsi->irq);
	rx_que = vsi->rx_queues;
	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);
	return (err);
}

/*
 * Enable all interrupts
 *
 * Called in:
 * iflib_init_locked, after ixl_if_init()
 */
static void
ixl_if_enable_intr(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_rx_queue	*que = vsi->rx_queues;

	ixl_enable_intr0(hw);
	/* Enable queue interrupts */
	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
		/* TODO: Queue index parameter is probably wrong */
		ixl_enable_queue(hw, que->rxr.me);
}

/*
 * Disable queue interrupts
 *
 * Other interrupt causes need to remain active.
 */
static void
ixl_if_disable_intr(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_rx_queue	*rx_que = vsi->rx_queues;

	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
			ixl_disable_queue(hw, rx_que->msix - 1);
	} else {
		// Setting PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF
		// stops queues from triggering interrupts
		wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	}
}

1124 ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
1125 {
1126 	struct ixl_pf *pf = iflib_get_softc(ctx);
1127 	struct ixl_vsi *vsi = &pf->vsi;
1128 	struct i40e_hw		*hw = vsi->hw;
1129 	struct ixl_rx_queue	*rx_que = &vsi->rx_queues[rxqid];
1130 
1131 	ixl_enable_queue(hw, rx_que->msix - 1);
1132 	return (0);
1133 }
1134 
1135 static int
1136 ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
1137 {
1138 	struct ixl_pf *pf = iflib_get_softc(ctx);
1139 	struct ixl_vsi *vsi = &pf->vsi;
1140 	struct i40e_hw *hw = vsi->hw;
1141 	struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];
1142 
1143 	ixl_enable_queue(hw, tx_que->msix - 1);
1144 	return (0);
1145 }
1146 
static int
ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	if_softc_ctx_t scctx = vsi->shared;
	struct ixl_tx_queue *que;
	int i, j, error = 0;

	MPASS(scctx->isc_ntxqsets > 0);
	MPASS(ntxqs == 1);
	MPASS(scctx->isc_ntxqsets == ntxqsets);

	/* Allocate queue structure memory */
	if (!(vsi->tx_queues =
	    (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) * ntxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		txr->me = i;
		que->vsi = vsi;

		if (!vsi->enable_head_writeback) {
			/* Allocate report status array */
			if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXL, M_NOWAIT))) {
				device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
				error = ENOMEM;
				goto fail;
			}
			/* Init report status array */
			for (j = 0; j < scctx->isc_ntxd[0]; j++)
				txr->tx_rsq[j] = QIDX_INVALID;
		}
		/* get the virtual and physical address of the hardware queues */
		txr->tail = I40E_QTX_TAIL(txr->me);
		txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs];
		txr->tx_paddr = paddrs[i * ntxqs];
		txr->que = que;
	}

	return (0);
fail:
	ixl_if_queues_free(ctx);
	return (error);
}

static int
ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *que;
	int i, error = 0;

#ifdef INVARIANTS
	if_softc_ctx_t scctx = vsi->shared;
	MPASS(scctx->isc_nrxqsets > 0);
	MPASS(nrxqs == 1);
	MPASS(scctx->isc_nrxqsets == nrxqsets);
#endif

	/* Allocate queue structure memory */
	if (!(vsi->rx_queues =
	    (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
	    nrxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto fail;
	}

	for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		rxr->me = i;
		que->vsi = vsi;

		/* get the virtual and physical address of the hardware queues */
		rxr->tail = I40E_QRX_TAIL(rxr->me);
		rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
		rxr->rx_paddr = paddrs[i * nrxqs];
		rxr->que = que;
	}

	return (0);
fail:
	ixl_if_queues_free(ctx);
	return (error);
}

static void
ixl_if_queues_free(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	if (!vsi->enable_head_writeback) {
		struct ixl_tx_queue *que;
		int i = 0;

		for (i = 0, que = vsi->tx_queues; i < vsi->num_tx_queues; i++, que++) {
			struct tx_ring *txr = &que->txr;
			if (txr->tx_rsq != NULL) {
				free(txr->tx_rsq, M_IXL);
				txr->tx_rsq = NULL;
			}
		}
	}

	if (vsi->tx_queues != NULL) {
		free(vsi->tx_queues, M_IXL);
		vsi->tx_queues = NULL;
	}
	if (vsi->rx_queues != NULL) {
		free(vsi->rx_queues, M_IXL);
		vsi->rx_queues = NULL;
	}

	if (!IXL_PF_IN_RECOVERY_MODE(pf))
		sysctl_ctx_free(&vsi->sysctl_ctx);
}

void
ixl_update_link_status(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	u64 baudrate;

	if (pf->link_up) {
		if (vsi->link_active == FALSE) {
			vsi->link_active = TRUE;
			baudrate = ixl_max_aq_speed_to_value(hw->phy.link_info.link_speed);
			iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
			ixl_link_up_msg(pf);
#ifdef PCI_IOV
			ixl_broadcast_link_state(pf);
#endif
		}
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			vsi->link_active = FALSE;
			iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
#ifdef PCI_IOV
			ixl_broadcast_link_state(pf);
#endif
		}
	}
}

static void
ixl_handle_lan_overflow_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
{
	device_t dev = pf->dev;
	u32 rxq_idx, qtx_ctl;

	rxq_idx = (e->desc.params.external.param0 & I40E_PRTDCB_RUPTQ_RXQNUM_MASK) >>
	    I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT;
	qtx_ctl = e->desc.params.external.param1;

	device_printf(dev, "LAN overflow event: global rxq_idx %d\n", rxq_idx);
	device_printf(dev, "LAN overflow event: QTX_CTL 0x%08x\n", qtx_ctl);
}

static int
ixl_process_adminq(struct ixl_pf *pf, u16 *pending)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	u16 opcode;
	u32 loop = 0, reg;

	event.buf_len = IXL_AQ_BUF_SZ;
	event.msg_buf = malloc(event.buf_len, M_IXL, M_NOWAIT | M_ZERO);
	if (!event.msg_buf) {
		device_printf(dev, "%s: Unable to allocate memory for Admin"
		    " Queue event!\n", __func__);
		return (ENOMEM);
	}

	/* clean and process any events */
	do {
		status = i40e_clean_arq_element(hw, &event, pending);
		if (status)
			break;
		opcode = LE16_TO_CPU(event.desc.opcode);
		ixl_dbg(pf, IXL_DBG_AQ,
		    "Admin Queue event: %#06x\n", opcode);
		switch (opcode) {
		case i40e_aqc_opc_get_link_status:
			ixl_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
#ifdef PCI_IOV
			ixl_handle_vf_msg(pf, &event);
#endif
			break;
		/*
		 * This should only occur on no-drop queues, which
		 * aren't currently configured.
		 */
		case i40e_aqc_opc_event_lan_overflow:
			ixl_handle_lan_overflow_event(pf, &event);
			break;
		default:
			break;
		}
	} while (*pending && (loop++ < IXL_ADM_LIMIT));

	free(event.msg_buf, M_IXL);

	/* Re-enable admin queue interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	return (status);
}

static void
ixl_if_update_admin_status(if_ctx_t ctx)
{
	struct ixl_pf	*pf = iflib_get_softc(ctx);
	struct i40e_hw	*hw = &pf->hw;
	u16		pending;

	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING)
		ixl_handle_empr_reset(pf);

	/*
	 * The Admin Queue is shut down while a reset is handled.
	 * Don't proceed if it hasn't been re-initialized, e.g. due to
	 * an issue with new FW.
	 */
	if (!i40e_check_asq_alive(&pf->hw))
		return;

	if (pf->state & IXL_PF_STATE_MDD_PENDING)
		ixl_handle_mdd_event(pf);

	ixl_process_adminq(pf, &pending);
	ixl_update_link_status(pf);

	/*
	 * If there are still messages to process, reschedule ourselves.
	 * Otherwise, re-enable our interrupt and go to sleep.
	 */
	if (pending > 0)
		iflib_admin_intr_deferred(ctx);
	else
		ixl_enable_intr0(hw);
}

static void
ixl_if_multi_set(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	int mcnt, flags;
	int del_mcnt;

	IOCTL_DEBUGOUT("ixl_if_multi_set: begin");

	mcnt = min(if_llmaddr_count(iflib_get_ifp(ctx)), MAX_MULTICAST_ADDR);
	/* Delete filters for removed multicast addresses */
	del_mcnt = ixl_del_multi(vsi);
	vsi->num_macs -= del_mcnt;

	if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		return;
	}
	/* (re-)install filters for all mcast addresses */
	/* XXX: This bypasses filter count tracking code! */
	mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixl_mc_filter_apply, vsi);
	if (mcnt > 0) {
		vsi->num_macs += mcnt;
		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
		ixl_add_hw_filters(vsi, flags, mcnt);
	}

	ixl_dbg_filter(pf, "%s: filter mac total: %d\n",
	    __func__, vsi->num_macs);
	IOCTL_DEBUGOUT("ixl_if_multi_set: end");
}

static int
ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
	if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
		ETHER_VLAN_ENCAP_LEN)
		return (EINVAL);

	vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
		ETHER_VLAN_ENCAP_LEN;

	return (0);
}

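/*
 * Worked example, assuming the usual IXL_MAX_FRAME of 9728 bytes: the
 * largest MTU accepted above is 9728 - 14 (Ethernet header) - 4 (CRC)
 * - 4 (VLAN tag) = 9706 bytes.
 */
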
static void
ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct i40e_hw  *hw = &pf->hw;

	INIT_DEBUGOUT("ixl_media_status: begin");

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!pf->link_up) {
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;

	switch (hw->phy.link_info.phy_type) {
		/* 100 M */
		case I40E_PHY_TYPE_100BASE_TX:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		/* 1 G */
		case I40E_PHY_TYPE_1000BASE_T:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		case I40E_PHY_TYPE_1000BASE_SX:
			ifmr->ifm_active |= IFM_1000_SX;
			break;
		case I40E_PHY_TYPE_1000BASE_LX:
			ifmr->ifm_active |= IFM_1000_LX;
			break;
		case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		/* 10 G */
		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_SR:
			ifmr->ifm_active |= IFM_10G_SR;
			break;
		case I40E_PHY_TYPE_10GBASE_LR:
			ifmr->ifm_active |= IFM_10G_LR;
			break;
		case I40E_PHY_TYPE_10GBASE_T:
			ifmr->ifm_active |= IFM_10G_T;
			break;
		case I40E_PHY_TYPE_XAUI:
		case I40E_PHY_TYPE_XFI:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_AOC:
			ifmr->ifm_active |= IFM_10G_AOC;
			break;
		/* 25 G */
		case I40E_PHY_TYPE_25GBASE_KR:
			ifmr->ifm_active |= IFM_25G_KR;
			break;
		case I40E_PHY_TYPE_25GBASE_CR:
			ifmr->ifm_active |= IFM_25G_CR;
			break;
		case I40E_PHY_TYPE_25GBASE_SR:
			ifmr->ifm_active |= IFM_25G_SR;
			break;
		case I40E_PHY_TYPE_25GBASE_LR:
			ifmr->ifm_active |= IFM_25G_LR;
			break;
		case I40E_PHY_TYPE_25GBASE_AOC:
			ifmr->ifm_active |= IFM_25G_AOC;
			break;
		case I40E_PHY_TYPE_25GBASE_ACC:
			ifmr->ifm_active |= IFM_25G_ACC;
			break;
		/* 40 G */
		case I40E_PHY_TYPE_40GBASE_CR4:
		case I40E_PHY_TYPE_40GBASE_CR4_CU:
			ifmr->ifm_active |= IFM_40G_CR4;
			break;
		case I40E_PHY_TYPE_40GBASE_SR4:
			ifmr->ifm_active |= IFM_40G_SR4;
			break;
		case I40E_PHY_TYPE_40GBASE_LR4:
			ifmr->ifm_active |= IFM_40G_LR4;
			break;
		case I40E_PHY_TYPE_XLAUI:
			ifmr->ifm_active |= IFM_OTHER;
			break;
		case I40E_PHY_TYPE_1000BASE_KX:
			ifmr->ifm_active |= IFM_1000_KX;
			break;
		case I40E_PHY_TYPE_SGMII:
			ifmr->ifm_active |= IFM_1000_SGMII;
			break;
		/* ERJ: What's the difference between these? */
		case I40E_PHY_TYPE_10GBASE_CR1_CU:
		case I40E_PHY_TYPE_10GBASE_CR1:
			ifmr->ifm_active |= IFM_10G_CR1;
			break;
		case I40E_PHY_TYPE_10GBASE_KX4:
			ifmr->ifm_active |= IFM_10G_KX4;
			break;
		case I40E_PHY_TYPE_10GBASE_KR:
			ifmr->ifm_active |= IFM_10G_KR;
			break;
		case I40E_PHY_TYPE_SFI:
			ifmr->ifm_active |= IFM_10G_SFI;
			break;
		/* Our single 20G media type */
		case I40E_PHY_TYPE_20GBASE_KR2:
			ifmr->ifm_active |= IFM_20G_KR2;
			break;
		case I40E_PHY_TYPE_40GBASE_KR4:
			ifmr->ifm_active |= IFM_40G_KR4;
			break;
		case I40E_PHY_TYPE_XLPPI:
		case I40E_PHY_TYPE_40GBASE_AOC:
			ifmr->ifm_active |= IFM_40G_XLPPI;
			break;
		/* Unknown to driver */
		default:
			ifmr->ifm_active |= IFM_UNKNOWN;
			break;
	}
	/* Report flow control status as well */
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
}

static int
ixl_if_media_change(if_ctx_t ctx)
{
	struct ifmedia *ifm = iflib_get_media(ctx);

	INIT_DEBUGOUT("ixl_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n");
	return (ENODEV);
}

static int
ixl_if_promisc_set(if_ctx_t ctx, int flags)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ifnet	*ifp = iflib_get_ifp(ctx);
	struct i40e_hw	*hw = vsi->hw;
	int		err;
	bool		uni = FALSE, multi = FALSE;

	if (flags & IFF_PROMISC)
		uni = multi = TRUE;
	else if (flags & IFF_ALLMULTI || if_llmaddr_count(ifp) >=
	    MAX_MULTICAST_ADDR)
		multi = TRUE;

	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
	    vsi->seid, uni, NULL, true);
	if (err)
		return (err);
	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
	    vsi->seid, multi, NULL);
	return (err);
}

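/*
 * Note that multicast promiscuous mode is also turned on above when
 * the interface carries more multicast addresses than the hardware can
 * filter (MAX_MULTICAST_ADDR), mirroring the overflow handling in
 * ixl_if_multi_set().
 */
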
static void
ixl_if_timer(if_ctx_t ctx, uint16_t qid)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);

	if (qid != 0)
		return;

	ixl_update_stats_counters(pf);
}

static void
ixl_if_vlan_register(if_ctx_t ctx, u16 vtag)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw	*hw = vsi->hw;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	++vsi->num_vlans;
	ixl_add_filter(vsi, hw->mac.addr, vtag);
}

static void
ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw	*hw = vsi->hw;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	--vsi->num_vlans;
	ixl_del_filter(vsi, hw->mac.addr, vtag);
}

static uint64_t
ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (vsi->ipackets);
	case IFCOUNTER_IERRORS:
		return (vsi->ierrors);
	case IFCOUNTER_OPACKETS:
		return (vsi->opackets);
	case IFCOUNTER_OERRORS:
		return (vsi->oerrors);
	case IFCOUNTER_COLLISIONS:
		/* Collisions are by standard impossible in 40G/10G Ethernet */
		return (0);
	case IFCOUNTER_IBYTES:
		return (vsi->ibytes);
	case IFCOUNTER_OBYTES:
		return (vsi->obytes);
	case IFCOUNTER_IMCASTS:
		return (vsi->imcasts);
	case IFCOUNTER_OMCASTS:
		return (vsi->omcasts);
	case IFCOUNTER_IQDROPS:
		return (vsi->iqdrops);
	case IFCOUNTER_OQDROPS:
		return (vsi->oqdrops);
	case IFCOUNTER_NOPROTO:
		return (vsi->noproto);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

#ifdef PCI_IOV
static void
ixl_if_vflr_handle(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);

	ixl_handle_vflr(pf);
}
#endif

static int
ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
{
	struct ixl_pf		*pf = iflib_get_softc(ctx);

	if (pf->read_i2c_byte == NULL)
		return (EINVAL);

	for (int i = 0; i < req->len; i++)
		if (pf->read_i2c_byte(pf, req->offset + i,
		    req->dev_addr, &req->data[i]))
			return (EIO);
	return (0);
}

static int
ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ifdrv *ifd = (struct ifdrv *)data;
	int error = 0;

	/*
	 * The iflib_if_ioctl forwards SIOCxDRVSPEC and SIOCGPRIVATE_0 without
	 * performing privilege checks. It is important that this function
	 * perform the necessary checks for commands which should only be
	 * executed by privileged threads.
	 */

	switch (command) {
	case SIOCGDRVSPEC:
	case SIOCSDRVSPEC:
		/* NVM update command */
		if (ifd->ifd_cmd == I40E_NVM_ACCESS) {
			error = priv_check(curthread, PRIV_DRIVER);
			if (error)
				break;
			error = ixl_handle_nvmupd_cmd(pf, ifd);
		} else {
			error = EINVAL;
		}
		break;
	default:
		error = EOPNOTSUPP;
	}

	return (error);
}

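/*
 * The driver-specific ioctl above is the hook used by Intel's NVM
 * update tooling: it issues SIOCSDRVSPEC with ifd_cmd set to
 * I40E_NVM_ACCESS, and ixl_handle_nvmupd_cmd() forwards the command
 * buffer to the shared code.
 */
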
/* ixl_if_needs_restart - Tell iflib when the driver needs to be reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning false for every event.
 *
 * @returns true if iflib needs to reinit the interface, false otherwise
 */
static bool
ixl_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
	default:
		return (false);
	}
}

static u_int
ixl_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int count __unused)
{
	struct ixl_vsi *vsi = arg;

	ixl_add_mc_filter(vsi, (u8*)LLADDR(sdl));
	return (1);
}

/*
 * Sanity check and save off tunable values.
 */
static void
ixl_save_pf_tunables(struct ixl_pf *pf)
{
	device_t dev = pf->dev;

	/* Save tunable information */
#ifdef IXL_DEBUG_FC
	pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
#endif
#ifdef IXL_DEBUG
	pf->recovery_mode = ixl_debug_recovery_mode;
#endif
	pf->dbg_mask = ixl_core_debug_mask;
	pf->hw.debug_mask = ixl_shared_debug_mask;
	pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback);
	pf->enable_vf_loopback = !!(ixl_enable_vf_loopback);
#if 0
	pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
	pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
#endif

	if (ixl_i2c_access_method > 3 || ixl_i2c_access_method < 0)
		pf->i2c_access_method = 0;
	else
		pf->i2c_access_method = ixl_i2c_access_method;

	if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) {
		device_printf(dev, "Invalid tx_itr value of %d set!\n",
		    ixl_tx_itr);
		device_printf(dev, "tx_itr must be between %d and %d, "
		    "inclusive\n",
		    0, IXL_MAX_ITR);
		device_printf(dev, "Using default value of %d instead\n",
		    IXL_ITR_4K);
		pf->tx_itr = IXL_ITR_4K;
	} else
		pf->tx_itr = ixl_tx_itr;

	if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) {
		device_printf(dev, "Invalid rx_itr value of %d set!\n",
		    ixl_rx_itr);
		device_printf(dev, "rx_itr must be between %d and %d, "
		    "inclusive\n",
		    0, IXL_MAX_ITR);
		device_printf(dev, "Using default value of %d instead\n",
		    IXL_ITR_8K);
		pf->rx_itr = IXL_ITR_8K;
	} else
		pf->rx_itr = ixl_rx_itr;
}

1846