/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "ixl.h"
#include "ixl_pf.h"

#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

/*********************************************************************
 *  Driver version
 *********************************************************************/
#define IXL_DRIVER_VERSION_MAJOR	2
#define IXL_DRIVER_VERSION_MINOR	1
#define IXL_DRIVER_VERSION_BUILD	0

#define IXL_DRIVER_VERSION_STRING			\
    __XSTRING(IXL_DRIVER_VERSION_MAJOR) "."		\
    __XSTRING(IXL_DRIVER_VERSION_MINOR) "."		\
    __XSTRING(IXL_DRIVER_VERSION_BUILD) "-k"
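
/*
 * __XSTRING() expands and stringifies the macros above, so this
 * produces the version string "2.1.0-k".
 */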

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select the devices to attach to
 *
 *  ( Vendor ID, Device ID, Branding String )
 *********************************************************************/

static pci_vendor_info_t ixl_vendor_info_array[] =
{
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Controller XL710 for 40GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Controller X710 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, "Intel(R) Ethernet Controller X710/X557-AT 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, "Intel(R) Ethernet Connection X722 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 1GbE"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, "Intel(R) Ethernet Controller XXV710 for 25GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, "Intel(R) Ethernet Controller XXV710 for 25GbE SFP28"),
	/* required last entry */
	PVID_END
};
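
/*
 * iflib consumes this table in two places: IFLIB_PNP_INFO() below exports
 * it so devmatch(8) can autoload the module, and iflib_device_probe()
 * walks it to match and brand the adapter at probe time.
 */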

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
/*** IFLIB interface ***/
static void	*ixl_register(device_t dev);
static int	 ixl_if_attach_pre(if_ctx_t ctx);
static int	 ixl_if_attach_post(if_ctx_t ctx);
static int	 ixl_if_detach(if_ctx_t ctx);
static int	 ixl_if_shutdown(if_ctx_t ctx);
static int	 ixl_if_suspend(if_ctx_t ctx);
static int	 ixl_if_resume(if_ctx_t ctx);
static int	 ixl_if_msix_intr_assign(if_ctx_t ctx, int msix);
static void	 ixl_if_enable_intr(if_ctx_t ctx);
static void	 ixl_if_disable_intr(if_ctx_t ctx);
static int	 ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
static int	 ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
static int	 ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
static int	 ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
static void	 ixl_if_queues_free(if_ctx_t ctx);
static void	 ixl_if_update_admin_status(if_ctx_t ctx);
static void	 ixl_if_multi_set(if_ctx_t ctx);
static int	 ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void	 ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
static int	 ixl_if_media_change(if_ctx_t ctx);
static int	 ixl_if_promisc_set(if_ctx_t ctx, int flags);
static void	 ixl_if_timer(if_ctx_t ctx, uint16_t qid);
static void	 ixl_if_vlan_register(if_ctx_t ctx, u16 vtag);
static void	 ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
static uint64_t	 ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt);
static int	 ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
static int	 ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
static bool	 ixl_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
#ifdef PCI_IOV
static void	 ixl_if_vflr_handle(if_ctx_t ctx);
#endif

/*** Other ***/
static u_int	 ixl_mc_filter_apply(void *, struct sockaddr_dl *, u_int);
static void	 ixl_save_pf_tunables(struct ixl_pf *);
static int	 ixl_allocate_pci_resources(struct ixl_pf *);

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixl_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif
	DEVMETHOD_END
};

static driver_t ixl_driver = {
	"ixl", ixl_methods, sizeof(struct ixl_pf),
};

devclass_t ixl_devclass;
DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
IFLIB_PNP_INFO(pci, ixl, ixl_vendor_info_array);
MODULE_VERSION(ixl, 3);

MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
MODULE_DEPEND(ixl, iflib, 1, 1, 1);
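
/*
 * The MODULE_DEPEND() entries above make the kernel linker pull in the
 * pci, ether and iflib modules before this one, so e.g. `kldload if_ixl`
 * loads iflib automatically.
 */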

static device_method_t ixl_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixl_if_attach_post),
	DEVMETHOD(ifdi_detach, ixl_if_detach),
	DEVMETHOD(ifdi_shutdown, ixl_if_shutdown),
	DEVMETHOD(ifdi_suspend, ixl_if_suspend),
	DEVMETHOD(ifdi_resume, ixl_if_resume),
	DEVMETHOD(ifdi_init, ixl_if_init),
	DEVMETHOD(ifdi_stop, ixl_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixl_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixl_if_disable_intr),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixl_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixl_if_tx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixl_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixl_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixl_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixl_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set),
	DEVMETHOD(ifdi_media_status, ixl_if_media_status),
	DEVMETHOD(ifdi_media_change, ixl_if_media_change),
	DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set),
	DEVMETHOD(ifdi_timer, ixl_if_timer),
	DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ixl_if_get_counter),
	DEVMETHOD(ifdi_i2c_req, ixl_if_i2c_req),
	DEVMETHOD(ifdi_priv_ioctl, ixl_if_priv_ioctl),
	DEVMETHOD(ifdi_needs_restart, ixl_if_needs_restart),
#ifdef PCI_IOV
	DEVMETHOD(ifdi_iov_init, ixl_if_iov_init),
	DEVMETHOD(ifdi_iov_uninit, ixl_if_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, ixl_if_iov_vf_add),
	DEVMETHOD(ifdi_vflr_handle, ixl_if_vflr_handle),
#endif
	// ifdi_led_func
	// ifdi_debug
	DEVMETHOD_END
};

static driver_t ixl_if_driver = {
	"ixl_if", ixl_if_methods, sizeof(struct ixl_pf)
};

/*
** TUNABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "ixl driver parameters");

/*
 * Leave this on unless you need to send flow control
 * frames (or other control frames) from software
 */
static int ixl_enable_tx_fc_filter = 1;
TUNABLE_INT("hw.ixl.enable_tx_fc_filter",
    &ixl_enable_tx_fc_filter);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
    &ixl_enable_tx_fc_filter, 0,
    "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");

static int ixl_i2c_access_method = 0;
TUNABLE_INT("hw.ixl.i2c_access_method",
    &ixl_i2c_access_method);
SYSCTL_INT(_hw_ixl, OID_AUTO, i2c_access_method, CTLFLAG_RDTUN,
    &ixl_i2c_access_method, 0,
    IXL_SYSCTL_HELP_I2C_METHOD);

static int ixl_enable_vf_loopback = 1;
TUNABLE_INT("hw.ixl.enable_vf_loopback",
    &ixl_enable_vf_loopback);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_vf_loopback, CTLFLAG_RDTUN,
    &ixl_enable_vf_loopback, 0,
    IXL_SYSCTL_HELP_VF_LOOPBACK);

/*
 * Different method for processing TX descriptor
 * completion.
 */
static int ixl_enable_head_writeback = 1;
TUNABLE_INT("hw.ixl.enable_head_writeback",
    &ixl_enable_head_writeback);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
    &ixl_enable_head_writeback, 0,
    "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");

static int ixl_core_debug_mask = 0;
TUNABLE_INT("hw.ixl.core_debug_mask",
    &ixl_core_debug_mask);
SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
    &ixl_core_debug_mask, 0,
    "Display debug statements that are printed in non-shared code");

static int ixl_shared_debug_mask = 0;
TUNABLE_INT("hw.ixl.shared_debug_mask",
    &ixl_shared_debug_mask);
SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
    &ixl_shared_debug_mask, 0,
    "Display debug statements that are printed in shared code");

#if 0
/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
** 	- default values for static ITR
*/
static int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

static int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
#endif

static int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

static int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");

#ifdef IXL_IW
int ixl_enable_iwarp = 0;
TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN,
    &ixl_enable_iwarp, 0, "iWARP enabled");

#if __FreeBSD_version < 1100000
int ixl_limit_iwarp_msix = 1;
#else
int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX;
#endif
TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN,
    &ixl_limit_iwarp_msix, 0, "Limit MSI-X vectors assigned to iWARP");
#endif

extern struct if_txrx ixl_txrx_hwb;
extern struct if_txrx ixl_txrx_dwb;
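
/*
 * Two sets of TX/RX routines are provided: ixl_txrx_hwb detects completed
 * TX descriptors via the head-writeback value, while ixl_txrx_dwb reads
 * the descriptor write-back; ixl_if_attach_pre() selects one based on
 * vsi->enable_head_writeback.
 */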

static struct if_shared_ctx ixl_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_rx_maxsize = 16384,
	.isc_rx_nsegments = IXL_MAX_RX_SEGS,
	.isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixl_vendor_info_array,
	.isc_driver_version = IXL_DRIVER_VERSION_STRING,
	.isc_driver = &ixl_if_driver,
	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_ADMIN_ALWAYS_RUN,

	.isc_nrxd_min = {IXL_MIN_RING},
	.isc_ntxd_min = {IXL_MIN_RING},
	.isc_nrxd_max = {IXL_MAX_RING},
	.isc_ntxd_max = {IXL_MAX_RING},
	.isc_nrxd_default = {IXL_DEFAULT_RING},
	.isc_ntxd_default = {IXL_DEFAULT_RING},
};
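
/*
 * iflib uses the bounds in this structure to size and DMA-allocate the
 * descriptor rings on the driver's behalf; the isc_n[tr]xd arrays hold a
 * single entry each because isc_ntxqs == isc_nrxqs == 1 (one descriptor
 * ring per queue set).
 */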

if_shared_ctx_t ixl_sctx = &ixl_sctx_init;

/*** Functions ***/
static void *
ixl_register(device_t dev)
{
	return (ixl_sctx);
}

static int
ixl_allocate_pci_resources(struct ixl_pf *pf)
{
	device_t dev = iflib_get_dev(pf->vsi.ctx);
	struct i40e_hw *hw = &pf->hw;
	int             rid;

	/* Map BAR0 */
	rid = PCIR_BAR(0);
	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(pf->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
		return (ENXIO);
	}

	/* Save off the PCI information */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	/* Save off register access information */
	pf->osdep.mem_bus_space_tag =
		rman_get_bustag(pf->pci_mem);
	pf->osdep.mem_bus_space_handle =
		rman_get_bushandle(pf->pci_mem);
	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
	pf->osdep.flush_reg = I40E_GLGEN_STAT;
	pf->osdep.dev = dev;

	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
	pf->hw.back = &pf->osdep;

	return (0);
}

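/*
 * Early attach: map BAR0, reset the PF, bring up the admin queue and the
 * LAN HMC, read the MAC address, and fill in the iflib softc context so
 * iflib can size the queues. iflib calls this before allocating queue
 * memory or interrupts.
 */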
static int
ixl_if_attach_pre(if_ctx_t ctx)
{
	device_t dev;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	if_softc_ctx_t scctx;
	struct i40e_filter_control_settings filter;
	enum i40e_status_code status;
	int error = 0;

	dev = iflib_get_dev(ctx);
	pf = iflib_get_softc(ctx);

	INIT_DBG_DEV(dev, "begin");

	vsi = &pf->vsi;
	vsi->back = pf;
	pf->dev = dev;
	hw = &pf->hw;

	vsi->dev = dev;
	vsi->hw = &pf->hw;
	vsi->id = 0;
	vsi->num_vlans = 0;
	vsi->ctx = ctx;
	vsi->media = iflib_get_media(ctx);
	vsi->shared = scctx = iflib_get_softc_ctx(ctx);

	/* Save tunable values */
	ixl_save_pf_tunables(pf);

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_pci_res;
	}

	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	status = i40e_pf_reset(hw);
	if (status) {
		device_printf(dev, "PF reset failure %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}

	/* Initialize the shared code */
	status = i40e_init_shared_code(hw);
	if (status) {
		device_printf(dev, "Unable to initialize shared code, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}

	/* Set up the admin queue */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
	hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;

	status = i40e_init_adminq(hw);
	if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "Unable to initialize Admin Queue, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}
	ixl_print_nvm_version(pf);

	if (status == I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n");
		device_printf(dev, "You must install the most recent version of "
		    "the network driver.\n");
		error = EIO;
		goto err_out;
	}

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n");
		device_printf(dev, "Please install the most recent version "
		    "of the network driver.\n");
	} else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) {
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n");
		device_printf(dev, "Please update the NVM image.\n");
	}

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "get_hw_capabilities failed: %d\n",
		    error);
		goto err_get_cap;
	}

	/* Set up host memory cache */
	status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (status) {
		device_printf(dev, "init_lan_hmc failed: %s\n",
		    i40e_stat_str(hw, status));
		goto err_get_cap;
	}
	status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (status) {
		device_printf(dev, "configure_lan_hmc failed: %s\n",
		    i40e_stat_str(hw, status));
		goto err_mac_hmc;
	}

	/* Disable LLDP from the firmware for certain NVM versions */
	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	    (pf->hw.aq.fw_maj_ver < 4)) {
		i40e_aq_stop_lldp(hw, TRUE, NULL);
		pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED;
	}

	/* Get MAC addresses from hardware */
	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	iflib_set_mac(ctx, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
	filter.enable_fdir = FALSE;
	filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "i40e_set_filter_control() failed\n");

	/* Query device FW LLDP status */
	ixl_get_fw_lldp_status(pf);
	/* Tell FW to apply DCB config on link up */
	i40e_aq_set_dcb_parameters(hw, true, NULL);

	/* Fill out iflib parameters */
	if (hw->mac.type == I40E_MAC_X722)
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128;
	else
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
	if (vsi->enable_head_writeback) {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_hwb;
	} else {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_dwb;
	}
	scctx->isc_txrx->ift_legacy_intr = ixl_intr;
	scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
	    * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
	scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
	scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
	scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
	scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
	scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size;
	scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
	scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;

	INIT_DBG_DEV(dev, "end");
	return (0);

err_mac_hmc:
	i40e_shutdown_lan_hmc(hw);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	ixl_free_pci_resources(pf);
err_pci_res:
	return (error);
}

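/*
 * Late attach: runs after iflib has settled the final queue counts and
 * allocated interrupts; finishes switch and filter setup, queue manager
 * init, sysctls/statistics, and the optional SR-IOV and iWARP pieces.
 */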
static int
ixl_if_attach_post(if_ctx_t ctx)
{
	device_t dev;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	int error = 0;
	enum i40e_status_code status;

	dev = iflib_get_dev(ctx);
	pf = iflib_get_softc(ctx);

	INIT_DBG_DEV(dev, "begin");

	vsi = &pf->vsi;
	vsi->ifp = iflib_get_ifp(ctx);
	hw = &pf->hw;

	/* Save off determined number of queues for interface */
	vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
	vsi->num_tx_queues = vsi->shared->isc_ntxqsets;

	/* Setup OS network interface / ifnet */
	if (ixl_setup_interface(dev, pf)) {
		device_printf(dev, "interface setup failed!\n");
		error = EIO;
		goto err;
	}

	/* Determine link state */
	if (ixl_attach_get_link_status(pf)) {
		error = EINVAL;
		goto err;
	}

	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
		     error);
		goto err;
	}

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Init queue allocation manager */
	error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp);
	if (error) {
		device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
		    error);
		goto err;
	}
	/* reserve a contiguous allocation for the PF's VSI */
	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr,
	    max(vsi->num_rx_queues, vsi->num_tx_queues), &pf->qtag);
	if (error) {
		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
		    error);
		goto err;
	}
	device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
	    pf->qtag.num_allocated, pf->qtag.num_active);

	/* Limit PHY interrupts to link, autoneg, and modules failure */
	status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
	if (status) {
		device_printf(dev, "i40e_aq_set_phy_int_mask() failed: err %s,"
		    " aq_err %s\n", i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		goto err;
	}

	/* Get the bus configuration and set the shared code */
	ixl_get_bus_info(pf);

	/* Keep admin queue interrupts active while driver is loaded */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);
	}

	/* Set initial advertised speed sysctl value */
	ixl_set_initial_advertised_speeds(pf);

	/* Initialize statistics & add sysctls */
	ixl_add_device_sysctls(pf);
	ixl_pf_reset_stats(pf);
	ixl_update_stats_counters(pf);
	ixl_add_hw_stats(pf);

	hw->phy.get_link_info = true;
	i40e_get_link_status(hw, &pf->link_up);
	ixl_update_link_status(pf);

#ifdef PCI_IOV
	ixl_initialize_sriov(pf);
#endif

#ifdef IXL_IW
	if (hw->func_caps.iwarp && ixl_enable_iwarp) {
		pf->iw_enabled = (pf->iw_msix > 0) ? true : false;
		if (pf->iw_enabled) {
			error = ixl_iw_pf_attach(pf);
			if (error) {
				device_printf(dev,
				    "interfacing to iWARP driver failed: %d\n",
				    error);
				goto err;
			} else
				device_printf(dev, "iWARP ready\n");
		} else
			device_printf(dev, "iWARP disabled on this device "
			    "(no MSI-X vectors)\n");
	} else {
		pf->iw_enabled = false;
		device_printf(dev, "The device is not iWARP enabled\n");
	}
#endif

	INIT_DBG_DEV(dev, "end");
	return (0);

err:
	INIT_DEBUGOUT("end: error %d", error);
	/* ixl_if_detach() is called on error from this */
	return (error);
}

/**
 * XXX: iflib always ignores the return value of detach(),
 * so this callback is not allowed to fail.
 */
static int
ixl_if_detach(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code	status;
#ifdef IXL_IW
	int			error;
#endif

	INIT_DBG_DEV(dev, "begin");

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		error = ixl_iw_pf_detach(pf);
		if (error == EBUSY) {
735 			device_printf(dev, "iwarp in use; stop it first.\n");
736 			//return (error);
737 		}
	}
#endif
	/* Remove all previously allocated media types */
	ifmedia_removeall(vsi->media);

	/* Shutdown LAN HMC */
	if (hw->hmc.hmc_obj) {
		status = i40e_shutdown_lan_hmc(hw);
		if (status)
			device_printf(dev,
			    "i40e_shutdown_lan_hmc() failed with status %s\n",
			    i40e_stat_str(hw, status));
	}

	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "i40e_shutdown_adminq() failed with status %s\n",
		    i40e_stat_str(hw, status));

	ixl_pf_qmgr_destroy(&pf->qmgr);
	ixl_free_pci_resources(pf);
	ixl_free_mac_filters(vsi);
	INIT_DBG_DEV(dev, "end");
	return (0);
}

static int
ixl_if_shutdown(if_ctx_t ctx)
{
	int error = 0;

	INIT_DEBUGOUT("ixl_if_shutdown: begin");

	/* TODO: Call ixl_if_stop()? */

	/* TODO: Then setup low power mode */

	return (error);
}

static int
ixl_if_suspend(if_ctx_t ctx)
{
	int error = 0;

	INIT_DEBUGOUT("ixl_if_suspend: begin");

	/* TODO: Call ixl_if_stop()? */

	/* TODO: Then setup low power mode */

	return (error);
}

static int
ixl_if_resume(if_ctx_t ctx)
{
	struct ifnet *ifp = iflib_get_ifp(ctx);

	INIT_DEBUGOUT("ixl_if_resume: begin");

	/* Read & clear wake-up registers */

	/* Required after D3->D0 transition */
	if (ifp->if_flags & IFF_UP)
		ixl_if_init(ctx);

	return (0);
}

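/*
 * Bring the interface up. If the admin queue has died (e.g. after an
 * externally triggered PF reset), the HW structures are rebuilt first;
 * then the current MAC address is programmed, the VSI rings and HMC
 * contexts are initialized, and RSS, interrupts and filters are
 * (re)configured before the rings are enabled.
 */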
void
ixl_if_init(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw	*hw = &pf->hw;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	device_t 	dev = iflib_get_dev(ctx);
	u8		tmpaddr[ETHER_ADDR_LEN];
	int		ret;
	/*
	 * If the admin queue is dead here, something outside of the driver
	 * likely reset the adapter (e.g. an externally triggered PF reset),
	 * so rebuild the driver's state if that occurs.
	 */
	if (!i40e_check_asq_alive(&pf->hw)) {
		device_printf(dev, "Admin Queue is down; resetting...\n");
		ixl_teardown_hw_structs(pf);
		ixl_rebuild_hw_structs_after_reset(pf);
	}

	/* Get the latest mac address... User might use a LAA */
	bcopy(IF_LLADDR(vsi->ifp), tmpaddr, ETH_ALEN);
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
		bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		if (ret) {
843 			device_printf(dev, "LLA address change failed!!\n");
			return;
		}
		ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
	}

	iflib_set_mac(ctx, hw->mac.addr);

	/* Prepare the VSI: rings, hmc contexts, etc... */
	if (ixl_initialize_vsi(vsi)) {
853 		device_printf(dev, "initialize vsi failed!!\n");
		return;
	}

	/* Reconfigure multicast filters in HW */
	ixl_if_multi_set(ctx);

	/* Set up RSS */
	ixl_config_rss(pf);

	/* Set up MSI-X routing and the ITR settings */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_queue_intr_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	if (vsi->enable_head_writeback)
		ixl_init_tx_cidx(vsi);
	else
		ixl_init_tx_rsqs(vsi);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	/* Re-add configure filters to HW */
	ixl_reconfigure_filters(vsi);

	/* Configure promiscuous mode */
	ixl_if_promisc_set(ctx, if_getflags(ifp));

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		ret = ixl_iw_pf_init(pf);
		if (ret)
			device_printf(dev,
			    "initialize iwarp failed, code %d\n", ret);
	}
#endif
}

void
ixl_if_stop(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	INIT_DEBUGOUT("ixl_if_stop: begin\n");

	// TODO: This may need to be reworked
#ifdef IXL_IW
	/* Stop iWARP device */
	if (ixl_enable_iwarp && pf->iw_enabled)
		ixl_iw_pf_stop(pf);
#endif

	ixl_disable_rings_intr(vsi);
	ixl_disable_rings(pf, vsi, &pf->qtag);
}

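/*
 * MSI-X layout: vector 0 is reserved for the admin queue, vectors 1..n
 * service the RX queues, and each TX queue is completed as a softirq on
 * the vector of the RX queue it is paired with (i % nrxqsets).
 */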
static int
ixl_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *rx_que = vsi->rx_queues;
	struct ixl_tx_queue *tx_que = vsi->tx_queues;
	int err, i, rid, vector = 0;
	char buf[16];

	MPASS(vsi->shared->isc_nrxqsets > 0);
	MPASS(vsi->shared->isc_ntxqsets > 0);

	/* Admin Queue must use vector 0 */
	rid = vector + 1;
	err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
	    ixl_msix_adminq, pf, 0, "aq");
	if (err) {
		iflib_irq_free(ctx, &vsi->irq);
		device_printf(iflib_get_dev(ctx),
		    "Failed to register Admin Queue handler\n");
		return (err);
	}
	/* Create soft IRQ for handling VFLRs */
	iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_IOV, pf, 0, "iov");

	/* Now set up the stations */
	for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RX, ixl_msix_que, rx_que, rx_que->rxr.me, buf);
		/* XXX: Does the driver work as expected if there are fewer num_rx_queues than
		 * what's expected in the iflib context? */
		if (err) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
			vsi->num_rx_queues = i + 1;
			goto fail;
		}
		rx_que->msix = vector;
	}

	bzero(buf, sizeof(buf));

	for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		iflib_softirq_alloc_generic(ctx,
		    &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);

		/* TODO: Maybe call a strategy function for this to figure out which
		 * interrupts to map Tx queues to. I don't know if there's an immediately
		 * better way than this other than a user-supplied map, though. */
		tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
	}

	return (0);
fail:
	iflib_irq_free(ctx, &vsi->irq);
	rx_que = vsi->rx_queues;
	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);
	return (err);
}

/*
 * Enable all interrupts
 *
 * Called in:
 * iflib_init_locked, after ixl_if_init()
 */
static void
ixl_if_enable_intr(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_rx_queue	*que = vsi->rx_queues;

	ixl_enable_intr0(hw);
	/* Enable queue interrupts */
	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
		/* TODO: Queue index parameter is probably wrong */
		ixl_enable_queue(hw, que->rxr.me);
}

/*
 * Disable queue interrupts
 *
 * Other interrupt causes need to remain active.
 */
static void
ixl_if_disable_intr(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_rx_queue	*rx_que = vsi->rx_queues;

	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
			ixl_disable_queue(hw, rx_que->msix - 1);
	} else {
		// Set PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF
		// stops queues from triggering interrupts
		wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	}
}

static int
ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_rx_queue	*rx_que = &vsi->rx_queues[rxqid];

	ixl_enable_queue(hw, rx_que->msix - 1);
	return (0);
}

static int
ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];

	ixl_enable_queue(hw, tx_que->msix - 1);
	return (0);
}

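/*
 * iflib hands over one virtual/physical address pair per descriptor ring;
 * since ntxqs == 1 there is exactly one TX ring per queue set, so
 * vaddrs[i * ntxqs] is the base of queue set i's ring. The tx_rsq report
 * status array is only needed in descriptor write-back mode.
 */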
static int
ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	if_softc_ctx_t scctx = vsi->shared;
	struct ixl_tx_queue *que;
	int i, j, error = 0;

	MPASS(scctx->isc_ntxqsets > 0);
	MPASS(ntxqs == 1);
	MPASS(scctx->isc_ntxqsets == ntxqsets);

	/* Allocate queue structure memory */
	if (!(vsi->tx_queues =
	    (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) *ntxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		txr->me = i;
		que->vsi = vsi;

		if (!vsi->enable_head_writeback) {
			/* Allocate report status array */
			if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXL, M_NOWAIT))) {
				device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
				error = ENOMEM;
				goto fail;
			}
			/* Init report status array */
			for (j = 0; j < scctx->isc_ntxd[0]; j++)
				txr->tx_rsq[j] = QIDX_INVALID;
		}
		/* get the virtual and physical address of the hardware queues */
		txr->tail = I40E_QTX_TAIL(txr->me);
		txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs];
		txr->tx_paddr = paddrs[i * ntxqs];
		txr->que = que;
	}

	return (0);
fail:
	ixl_if_queues_free(ctx);
	return (error);
}

static int
ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *que;
	int i, error = 0;

#ifdef INVARIANTS
	if_softc_ctx_t scctx = vsi->shared;
	MPASS(scctx->isc_nrxqsets > 0);
	MPASS(nrxqs == 1);
	MPASS(scctx->isc_nrxqsets == nrxqsets);
#endif

	/* Allocate queue structure memory */
	if (!(vsi->rx_queues =
	    (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
	    nrxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto fail;
	}

	for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		rxr->me = i;
		que->vsi = vsi;

		/* get the virtual and physical address of the hardware queues */
		rxr->tail = I40E_QRX_TAIL(rxr->me);
		rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
		rxr->rx_paddr = paddrs[i * nrxqs];
		rxr->que = que;
	}

	return (0);
fail:
	ixl_if_queues_free(ctx);
	return (error);
}

static void
ixl_if_queues_free(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	if (!vsi->enable_head_writeback) {
		struct ixl_tx_queue *que;
		int i = 0;

		for (i = 0, que = vsi->tx_queues; i < vsi->num_tx_queues; i++, que++) {
			struct tx_ring *txr = &que->txr;
			if (txr->tx_rsq != NULL) {
				free(txr->tx_rsq, M_IXL);
				txr->tx_rsq = NULL;
			}
		}
	}

	if (vsi->tx_queues != NULL) {
		free(vsi->tx_queues, M_IXL);
		vsi->tx_queues = NULL;
	}
	if (vsi->rx_queues != NULL) {
		free(vsi->rx_queues, M_IXL);
		vsi->rx_queues = NULL;
	}
}

void
ixl_update_link_status(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	u64 baudrate;

	if (pf->link_up) {
		if (vsi->link_active == FALSE) {
			vsi->link_active = TRUE;
			baudrate = ixl_max_aq_speed_to_value(hw->phy.link_info.link_speed);
			iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
			ixl_link_up_msg(pf);
#ifdef PCI_IOV
			ixl_broadcast_link_state(pf);
#endif

		}
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			vsi->link_active = FALSE;
			iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
#ifdef PCI_IOV
			ixl_broadcast_link_state(pf);
#endif
		}
	}
}

static void
ixl_handle_lan_overflow_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
{
	device_t dev = pf->dev;
	u32 rxq_idx, qtx_ctl;

	rxq_idx = (e->desc.params.external.param0 & I40E_PRTDCB_RUPTQ_RXQNUM_MASK) >>
	    I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT;
	qtx_ctl = e->desc.params.external.param1;

	device_printf(dev, "LAN overflow event: global rxq_idx %d\n", rxq_idx);
	device_printf(dev, "LAN overflow event: QTX_CTL 0x%08x\n", qtx_ctl);
}

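/*
 * Drain the admin receive queue and dispatch each event. The loop is
 * bounded by IXL_ADM_LIMIT so an event flood cannot monopolize the admin
 * task; *pending tells the caller whether a reschedule is needed.
 */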
static int
ixl_process_adminq(struct ixl_pf *pf, u16 *pending)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	u16 opcode;
	u32 loop = 0, reg;

	event.buf_len = IXL_AQ_BUF_SZ;
	event.msg_buf = malloc(event.buf_len, M_IXL, M_NOWAIT | M_ZERO);
	if (!event.msg_buf) {
		device_printf(dev, "%s: Unable to allocate memory for Admin"
		    " Queue event!\n", __func__);
		return (ENOMEM);
	}

	/* clean and process any events */
	do {
		status = i40e_clean_arq_element(hw, &event, pending);
		if (status)
			break;
		opcode = LE16_TO_CPU(event.desc.opcode);
		ixl_dbg(pf, IXL_DBG_AQ,
		    "Admin Queue event: %#06x\n", opcode);
		switch (opcode) {
		case i40e_aqc_opc_get_link_status:
			ixl_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
#ifdef PCI_IOV
			ixl_handle_vf_msg(pf, &event);
#endif
			break;
		/*
		 * This should only occur on no-drop queues, which
		 * aren't currently configured.
		 */
		case i40e_aqc_opc_event_lan_overflow:
			ixl_handle_lan_overflow_event(pf, &event);
			break;
		default:
			break;
		}
	} while (*pending && (loop++ < IXL_ADM_LIMIT));

	free(event.msg_buf, M_IXL);

	/* Re-enable admin queue interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	return (status);
}

static void
ixl_if_update_admin_status(if_ctx_t ctx)
{
	struct ixl_pf			*pf = iflib_get_softc(ctx);
	struct i40e_hw			*hw = &pf->hw;
	u16				pending;

	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING)
		ixl_handle_empr_reset(pf);

	if (pf->state & IXL_PF_STATE_MDD_PENDING)
		ixl_handle_mdd_event(pf);

	ixl_process_adminq(pf, &pending);
	ixl_update_link_status(pf);
	ixl_update_stats_counters(pf);

	/*
	 * If there are still messages to process, reschedule ourselves.
	 * Otherwise, re-enable our interrupt and go to sleep.
	 */
	if (pending > 0)
		iflib_admin_intr_deferred(ctx);
	else
		ixl_enable_intr0(hw);
}

static void
ixl_if_multi_set(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	int mcnt, flags;
	int del_mcnt;

	IOCTL_DEBUGOUT("ixl_if_multi_set: begin");

	mcnt = min(if_llmaddr_count(iflib_get_ifp(ctx)), MAX_MULTICAST_ADDR);
	/* Delete filters for removed multicast addresses */
	del_mcnt = ixl_del_multi(vsi);
	vsi->num_macs -= del_mcnt;

	if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		return;
	}
	/* (re-)install filters for all mcast addresses */
	/* XXX: This bypasses filter count tracking code! */
	mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixl_mc_filter_apply, vsi);
	if (mcnt > 0) {
		vsi->num_macs += mcnt;
		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
		ixl_add_hw_filters(vsi, flags, mcnt);
	}

	ixl_dbg_filter(pf, "%s: filter mac total: %d\n",
	    __func__, vsi->num_macs);
	IOCTL_DEBUGOUT("ixl_if_multi_set: end");
}

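/*
 * The MTU check below reserves room for the Ethernet header, CRC and one
 * VLAN tag; e.g. assuming IXL_MAX_FRAME is 9728 bytes, the largest
 * accepted MTU is 9728 - 14 - 4 - 4 = 9706.
 */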
static int
ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
	if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
		ETHER_VLAN_ENCAP_LEN)
		return (EINVAL);

	vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
		ETHER_VLAN_ENCAP_LEN;

	return (0);
}

static void
ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct i40e_hw  *hw = &pf->hw;

	INIT_DEBUGOUT("ixl_media_status: begin");

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!pf->link_up) {
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;

	switch (hw->phy.link_info.phy_type) {
		/* 100 M */
		case I40E_PHY_TYPE_100BASE_TX:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		/* 1 G */
		case I40E_PHY_TYPE_1000BASE_T:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		case I40E_PHY_TYPE_1000BASE_SX:
			ifmr->ifm_active |= IFM_1000_SX;
			break;
		case I40E_PHY_TYPE_1000BASE_LX:
			ifmr->ifm_active |= IFM_1000_LX;
			break;
		case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		/* 10 G */
		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_SR:
			ifmr->ifm_active |= IFM_10G_SR;
			break;
		case I40E_PHY_TYPE_10GBASE_LR:
			ifmr->ifm_active |= IFM_10G_LR;
			break;
		case I40E_PHY_TYPE_10GBASE_T:
			ifmr->ifm_active |= IFM_10G_T;
			break;
		case I40E_PHY_TYPE_XAUI:
		case I40E_PHY_TYPE_XFI:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_AOC:
			ifmr->ifm_active |= IFM_10G_AOC;
			break;
		/* 25 G */
		case I40E_PHY_TYPE_25GBASE_KR:
			ifmr->ifm_active |= IFM_25G_KR;
			break;
		case I40E_PHY_TYPE_25GBASE_CR:
			ifmr->ifm_active |= IFM_25G_CR;
			break;
		case I40E_PHY_TYPE_25GBASE_SR:
			ifmr->ifm_active |= IFM_25G_SR;
			break;
		case I40E_PHY_TYPE_25GBASE_LR:
			ifmr->ifm_active |= IFM_25G_LR;
			break;
		case I40E_PHY_TYPE_25GBASE_AOC:
			ifmr->ifm_active |= IFM_25G_AOC;
			break;
		case I40E_PHY_TYPE_25GBASE_ACC:
			ifmr->ifm_active |= IFM_25G_ACC;
			break;
		/* 40 G */
		case I40E_PHY_TYPE_40GBASE_CR4:
		case I40E_PHY_TYPE_40GBASE_CR4_CU:
			ifmr->ifm_active |= IFM_40G_CR4;
			break;
		case I40E_PHY_TYPE_40GBASE_SR4:
			ifmr->ifm_active |= IFM_40G_SR4;
			break;
		case I40E_PHY_TYPE_40GBASE_LR4:
			ifmr->ifm_active |= IFM_40G_LR4;
			break;
		case I40E_PHY_TYPE_XLAUI:
			ifmr->ifm_active |= IFM_OTHER;
			break;
		case I40E_PHY_TYPE_1000BASE_KX:
			ifmr->ifm_active |= IFM_1000_KX;
			break;
		case I40E_PHY_TYPE_SGMII:
			ifmr->ifm_active |= IFM_1000_SGMII;
			break;
		/* ERJ: What's the difference between these? */
		case I40E_PHY_TYPE_10GBASE_CR1_CU:
		case I40E_PHY_TYPE_10GBASE_CR1:
			ifmr->ifm_active |= IFM_10G_CR1;
			break;
		case I40E_PHY_TYPE_10GBASE_KX4:
			ifmr->ifm_active |= IFM_10G_KX4;
			break;
		case I40E_PHY_TYPE_10GBASE_KR:
			ifmr->ifm_active |= IFM_10G_KR;
			break;
		case I40E_PHY_TYPE_SFI:
			ifmr->ifm_active |= IFM_10G_SFI;
			break;
		/* Our single 20G media type */
		case I40E_PHY_TYPE_20GBASE_KR2:
			ifmr->ifm_active |= IFM_20G_KR2;
			break;
		case I40E_PHY_TYPE_40GBASE_KR4:
			ifmr->ifm_active |= IFM_40G_KR4;
			break;
		case I40E_PHY_TYPE_XLPPI:
		case I40E_PHY_TYPE_40GBASE_AOC:
			ifmr->ifm_active |= IFM_40G_XLPPI;
			break;
		/* Unknown to driver */
		default:
			ifmr->ifm_active |= IFM_UNKNOWN;
			break;
	}
	/* Report flow control status as well */
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
}

static int
ixl_if_media_change(if_ctx_t ctx)
{
	struct ifmedia *ifm = iflib_get_media(ctx);

	INIT_DEBUGOUT("ixl_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n");
	return (ENODEV);
}

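/*
 * IFF_PROMISC enables both unicast and multicast promiscuous mode, while
 * IFF_ALLMULTI (or overflowing the multicast filter table) enables only
 * multicast promiscuous mode.
 */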
static int
ixl_if_promisc_set(if_ctx_t ctx, int flags)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ifnet	*ifp = iflib_get_ifp(ctx);
	struct i40e_hw	*hw = vsi->hw;
	int		err;
	bool		uni = FALSE, multi = FALSE;

	if (flags & IFF_PROMISC)
		uni = multi = TRUE;
	else if (flags & IFF_ALLMULTI || if_llmaddr_count(ifp) >=
	    MAX_MULTICAST_ADDR)
		multi = TRUE;

	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
	    vsi->seid, uni, NULL, true);
	if (err)
		return (err);
	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
	    vsi->seid, multi, NULL);
	return (err);
}

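/*
 * iflib invokes this callback per queue on every timer tick; acting only
 * on qid 0 defers the admin task exactly once per tick.
 */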
static void
ixl_if_timer(if_ctx_t ctx, uint16_t qid)
{
	if (qid != 0)
		return;

	/* Fire off the adminq task */
	iflib_admin_intr_deferred(ctx);
}

static void
ixl_if_vlan_register(if_ctx_t ctx, u16 vtag)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw	*hw = vsi->hw;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	++vsi->num_vlans;
	ixl_add_filter(vsi, hw->mac.addr, vtag);
}

static void
ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw	*hw = vsi->hw;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	--vsi->num_vlans;
	ixl_del_filter(vsi, hw->mac.addr, vtag);
}

static uint64_t
ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (vsi->ipackets);
	case IFCOUNTER_IERRORS:
		return (vsi->ierrors);
	case IFCOUNTER_OPACKETS:
		return (vsi->opackets);
	case IFCOUNTER_OERRORS:
		return (vsi->oerrors);
	case IFCOUNTER_COLLISIONS:
		/* Collisions are not possible in full-duplex 40G/10G Ethernet */
		return (0);
	case IFCOUNTER_IBYTES:
		return (vsi->ibytes);
	case IFCOUNTER_OBYTES:
		return (vsi->obytes);
	case IFCOUNTER_IMCASTS:
		return (vsi->imcasts);
	case IFCOUNTER_OMCASTS:
		return (vsi->omcasts);
	case IFCOUNTER_IQDROPS:
		return (vsi->iqdrops);
	case IFCOUNTER_OQDROPS:
		return (vsi->oqdrops);
	case IFCOUNTER_NOPROTO:
		return (vsi->noproto);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

#ifdef PCI_IOV
static void
ixl_if_vflr_handle(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);

	ixl_handle_vflr(pf);
}
#endif

static int
ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
{
	struct ixl_pf		*pf = iflib_get_softc(ctx);

	if (pf->read_i2c_byte == NULL)
		return (EINVAL);

	for (int i = 0; i < req->len; i++)
		if (pf->read_i2c_byte(pf, req->offset + i,
		    req->dev_addr, &req->data[i]))
			return (EIO);
	return (0);
}

static int
ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ifdrv *ifd = (struct ifdrv *)data;
	int error = 0;

	/*
	 * The iflib_if_ioctl forwards SIOCxDRVSPEC and SIOCGPRIVATE_0 without
	 * performing privilege checks. It is important that this function
	 * perform the necessary checks for commands which should only be
	 * executed by privileged threads.
	 */

	switch(command) {
	case SIOCGDRVSPEC:
	case SIOCSDRVSPEC:
		/* NVM update command */
		if (ifd->ifd_cmd == I40E_NVM_ACCESS) {
			error = priv_check(curthread, PRIV_DRIVER);
			if (error)
				break;
			error = ixl_handle_nvmupd_cmd(pf, ifd);
		} else {
			error = EINVAL;
		}
		break;
	default:
		error = EOPNOTSUPP;
	}

	return (error);
}

/* ixl_if_needs_restart - Tell iflib when the driver needs to be reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning false for every event.
 *
 * @returns true if iflib needs to reinit the interface, false otherwise
 */
static bool
ixl_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
	default:
		return (false);
	}
}

static u_int
ixl_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int count __unused)
{
	struct ixl_vsi *vsi = arg;

	ixl_add_mc_filter(vsi, (u8*)LLADDR(sdl));
	return (1);
}

/*
 * Sanity check and save off tunable values.
 */
static void
ixl_save_pf_tunables(struct ixl_pf *pf)
{
	device_t dev = pf->dev;

	/* Save tunable information */
	pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
	pf->dbg_mask = ixl_core_debug_mask;
	pf->hw.debug_mask = ixl_shared_debug_mask;
	pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback);
	pf->enable_vf_loopback = !!(ixl_enable_vf_loopback);
#if 0
	pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
	pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
#endif

	if (ixl_i2c_access_method > 3 || ixl_i2c_access_method < 0)
		pf->i2c_access_method = 0;
	else
		pf->i2c_access_method = ixl_i2c_access_method;

	if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) {
		device_printf(dev, "Invalid tx_itr value of %d set!\n",
		    ixl_tx_itr);
		device_printf(dev, "tx_itr must be between %d and %d, "
		    "inclusive\n",
		    0, IXL_MAX_ITR);
		device_printf(dev, "Using default value of %d instead\n",
		    IXL_ITR_4K);
		pf->tx_itr = IXL_ITR_4K;
	} else
		pf->tx_itr = ixl_tx_itr;

	if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) {
		device_printf(dev, "Invalid rx_itr value of %d set!\n",
		    ixl_rx_itr);
		device_printf(dev, "rx_itr must be between %d and %d, "
		    "inclusive\n",
		    0, IXL_MAX_ITR);
		device_printf(dev, "Using default value of %d instead\n",
		    IXL_ITR_8K);
		pf->rx_itr = IXL_ITR_8K;
	} else
		pf->rx_itr = ixl_rx_itr;
}