xref: /freebsd/sys/dev/ixl/if_ixl.c (revision 2a01feab)
1 /******************************************************************************
2 
3   Copyright (c) 2013-2018, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #include "ixl.h"
36 #include "ixl_pf.h"
37 
38 #ifdef IXL_IW
39 #include "ixl_iw.h"
40 #include "ixl_iw_int.h"
41 #endif
42 
43 #ifdef PCI_IOV
44 #include "ixl_pf_iov.h"
45 #endif
46 
47 /*********************************************************************
48  *  Driver version
49  *********************************************************************/
50 #define IXL_DRIVER_VERSION_MAJOR	2
51 #define IXL_DRIVER_VERSION_MINOR	0
52 #define IXL_DRIVER_VERSION_BUILD	0
53 
54 #define IXL_DRIVER_VERSION_STRING			\
55     __XSTRING(IXL_DRIVER_VERSION_MAJOR) "."		\
56     __XSTRING(IXL_DRIVER_VERSION_MINOR) "."		\
57     __XSTRING(IXL_DRIVER_VERSION_BUILD) "-k"
58 
59 /*********************************************************************
60  *  PCI Device ID Table
61  *
62  *  Used by probe to select devices to load on
63  *
64  *  ( Vendor ID, Device ID, Branding String )
65  *********************************************************************/
66 
static pci_vendor_info_t ixl_vendor_info_array[] =
{
	/* XL710/X710 (Fortville) family */
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Controller XL710 for 40GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Controller X710 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, "Intel(R) Ethernet Controller X710/X557-AT 10GBASE-T"),
	/* X722 family */
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, "Intel(R) Ethernet Connection X722 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 1GbE"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	/* XXV710 25GbE devices */
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, "Intel(R) Ethernet Controller XXV710 for 25GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, "Intel(R) Ethernet Controller XXV710 for 25GbE SFP28"),
	/* required last entry */
	PVID_END
};
88 
89 /*********************************************************************
90  *  Function prototypes
91  *********************************************************************/
92 /*** IFLIB interface ***/
93 static void	*ixl_register(device_t dev);
94 static int	 ixl_if_attach_pre(if_ctx_t ctx);
95 static int	 ixl_if_attach_post(if_ctx_t ctx);
96 static int	 ixl_if_detach(if_ctx_t ctx);
97 static int	 ixl_if_shutdown(if_ctx_t ctx);
98 static int	 ixl_if_suspend(if_ctx_t ctx);
99 static int	 ixl_if_resume(if_ctx_t ctx);
100 static int	 ixl_if_msix_intr_assign(if_ctx_t ctx, int msix);
101 static void	 ixl_if_enable_intr(if_ctx_t ctx);
102 static void	 ixl_if_disable_intr(if_ctx_t ctx);
103 static int	 ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
104 static int	 ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
105 static int	 ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
106 static int	 ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
107 static void	 ixl_if_queues_free(if_ctx_t ctx);
108 static void	 ixl_if_update_admin_status(if_ctx_t ctx);
109 static void	 ixl_if_multi_set(if_ctx_t ctx);
110 static int	 ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
111 static void	 ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
112 static int	 ixl_if_media_change(if_ctx_t ctx);
113 static int	 ixl_if_promisc_set(if_ctx_t ctx, int flags);
114 static void	 ixl_if_timer(if_ctx_t ctx, uint16_t qid);
115 static void	 ixl_if_vlan_register(if_ctx_t ctx, u16 vtag);
116 static void	 ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
117 static uint64_t	 ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt);
118 static void	 ixl_if_vflr_handle(if_ctx_t ctx);
119 // static void	 ixl_if_link_intr_enable(if_ctx_t ctx);
120 static int	 ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
121 static int	 ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
122 
123 /*** Other ***/
124 static int	 ixl_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int);
125 static void	 ixl_save_pf_tunables(struct ixl_pf *);
126 static int	 ixl_allocate_pci_resources(struct ixl_pf *);
127 
128 /*********************************************************************
129  *  FreeBSD Device Interface Entry Points
130  *********************************************************************/
131 
/*
 * Newbus device methods.  probe/attach/detach/shutdown are delegated to
 * iflib, which calls back into this driver through ixl_if_methods.
 */
static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixl_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
#ifdef PCI_IOV
	/* SR-IOV virtual-function management (PCI_IOV kernels only) */
	DEVMETHOD(pci_iov_init, ixl_iov_init),
	DEVMETHOD(pci_iov_uninit, ixl_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, ixl_add_vf),
#endif
	DEVMETHOD_END
};
146 
147 static driver_t ixl_driver = {
148 	"ixl", ixl_methods, sizeof(struct ixl_pf),
149 };
150 
151 devclass_t ixl_devclass;
152 DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
153 IFLIB_PNP_INFO(pci, ixl, ixl_vendor_info_array);
154 MODULE_VERSION(ixl, 3);
155 
156 MODULE_DEPEND(ixl, pci, 1, 1, 1);
157 MODULE_DEPEND(ixl, ether, 1, 1, 1);
158 MODULE_DEPEND(ixl, iflib, 1, 1, 1);
159 
/*
 * iflib driver methods (ifdi_*): the driver-specific callbacks iflib
 * invokes for attach/detach, queue setup, interrupt management, and
 * ifnet operations.
 */
static device_method_t ixl_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixl_if_attach_post),
	DEVMETHOD(ifdi_detach, ixl_if_detach),
	DEVMETHOD(ifdi_shutdown, ixl_if_shutdown),
	DEVMETHOD(ifdi_suspend, ixl_if_suspend),
	DEVMETHOD(ifdi_resume, ixl_if_resume),
	DEVMETHOD(ifdi_init, ixl_if_init),
	DEVMETHOD(ifdi_stop, ixl_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixl_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixl_if_disable_intr),
	//DEVMETHOD(ifdi_link_intr_enable, ixl_if_link_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixl_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixl_if_tx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixl_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixl_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixl_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixl_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set),
	DEVMETHOD(ifdi_media_status, ixl_if_media_status),
	DEVMETHOD(ifdi_media_change, ixl_if_media_change),
	DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set),
	DEVMETHOD(ifdi_timer, ixl_if_timer),
	DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ixl_if_get_counter),
	DEVMETHOD(ifdi_vflr_handle, ixl_if_vflr_handle),
	DEVMETHOD(ifdi_i2c_req, ixl_if_i2c_req),
	DEVMETHOD(ifdi_priv_ioctl, ixl_if_priv_ioctl),
	// ifdi_led_func
	// ifdi_debug
	DEVMETHOD_END
};
195 
196 static driver_t ixl_if_driver = {
197 	"ixl_if", ixl_if_methods, sizeof(struct ixl_pf)
198 };
199 
200 /*
201 ** TUNEABLE PARAMETERS:
202 */
203 
204 static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
205                    "IXL driver parameters");
206 
207 /*
208  * Leave this on unless you need to send flow control
209  * frames (or other control frames) from software
210  */
211 static int ixl_enable_tx_fc_filter = 1;
212 TUNABLE_INT("hw.ixl.enable_tx_fc_filter",
213     &ixl_enable_tx_fc_filter);
214 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
215     &ixl_enable_tx_fc_filter, 0,
216     "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");
217 
218 static int ixl_i2c_access_method = 0;
219 TUNABLE_INT("hw.ixl.i2c_access_method",
220     &ixl_i2c_access_method);
221 SYSCTL_INT(_hw_ixl, OID_AUTO, i2c_access_method, CTLFLAG_RDTUN,
222     &ixl_i2c_access_method, 0,
223     IXL_SYSCTL_HELP_I2C_METHOD);
224 
225 /*
226  * Different method for processing TX descriptor
227  * completion.
228  */
229 static int ixl_enable_head_writeback = 1;
230 TUNABLE_INT("hw.ixl.enable_head_writeback",
231     &ixl_enable_head_writeback);
232 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
233     &ixl_enable_head_writeback, 0,
234     "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");
235 
236 static int ixl_core_debug_mask = 0;
237 TUNABLE_INT("hw.ixl.core_debug_mask",
238     &ixl_core_debug_mask);
239 SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
240     &ixl_core_debug_mask, 0,
241     "Display debug statements that are printed in non-shared code");
242 
243 static int ixl_shared_debug_mask = 0;
244 TUNABLE_INT("hw.ixl.shared_debug_mask",
245     &ixl_shared_debug_mask);
246 SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
247     &ixl_shared_debug_mask, 0,
248     "Display debug statements that are printed in shared code");
249 
250 #if 0
251 /*
252 ** Controls for Interrupt Throttling
253 **	- true/false for dynamic adjustment
254 ** 	- default values for static ITR
255 */
256 static int ixl_dynamic_rx_itr = 0;
257 TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
258 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
259     &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
260 
261 static int ixl_dynamic_tx_itr = 0;
262 TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
263 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
264     &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
265 #endif
266 
267 static int ixl_rx_itr = IXL_ITR_8K;
268 TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
269 SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
270     &ixl_rx_itr, 0, "RX Interrupt Rate");
271 
272 static int ixl_tx_itr = IXL_ITR_4K;
273 TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
274 SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
275     &ixl_tx_itr, 0, "TX Interrupt Rate");
276 
277 #ifdef IXL_IW
278 int ixl_enable_iwarp = 0;
279 TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp);
280 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN,
281     &ixl_enable_iwarp, 0, "iWARP enabled");
282 
283 #if __FreeBSD_version < 1100000
284 int ixl_limit_iwarp_msix = 1;
285 #else
286 int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX;
287 #endif
288 TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix);
289 SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN,
290     &ixl_limit_iwarp_msix, 0, "Limit MSIX vectors assigned to iWARP");
291 #endif
292 
293 extern struct if_txrx ixl_txrx_hwb;
294 extern struct if_txrx ixl_txrx_dwb;
295 
/*
 * iflib shared context template: DMA limits, queue geometry, and driver
 * identification handed to iflib by ixl_register().
 */
static struct if_shared_ctx ixl_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,	/* descriptor rings are page-aligned */
	.isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_rx_maxsize = 16384,
	.isc_rx_nsegments = IXL_MAX_RX_SEGS,
	.isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	/* one free list and one TX/RX descriptor queue per qset */
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	.isc_admin_intrcnt = 1,		/* one vector reserved for the admin queue */
	.isc_vendor_info = ixl_vendor_info_array,
	.isc_driver_version = IXL_DRIVER_VERSION_STRING,
	.isc_driver = &ixl_if_driver,
	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_ADMIN_ALWAYS_RUN,

	/* per-queue descriptor count bounds and defaults */
	.isc_nrxd_min = {IXL_MIN_RING},
	.isc_ntxd_min = {IXL_MIN_RING},
	.isc_nrxd_max = {IXL_MAX_RING},
	.isc_ntxd_max = {IXL_MAX_RING},
	.isc_nrxd_default = {IXL_DEFAULT_RING},
	.isc_ntxd_default = {IXL_DEFAULT_RING},
};
323 
324 if_shared_ctx_t ixl_sctx = &ixl_sctx_init;
325 
326 /*** Functions ***/
/*
 * device_register method: hands iflib the driver's shared context
 * template.  The device argument is unused.
 */
static void *
ixl_register(device_t dev)
{
	return (ixl_sctx);
}
332 
333 static int
334 ixl_allocate_pci_resources(struct ixl_pf *pf)
335 {
336 	int             rid;
337 	struct i40e_hw *hw = &pf->hw;
338 	device_t dev = iflib_get_dev(pf->vsi.ctx);
339 
340 	/* Map BAR0 */
341 	rid = PCIR_BAR(0);
342 	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
343 	    &rid, RF_ACTIVE);
344 
345 	if (!(pf->pci_mem)) {
346 		device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
347 		return (ENXIO);
348 	}
349 
350 	/* Save off the PCI information */
351 	hw->vendor_id = pci_get_vendor(dev);
352 	hw->device_id = pci_get_device(dev);
353 	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
354 	hw->subsystem_vendor_id =
355 	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
356 	hw->subsystem_device_id =
357 	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
358 
359 	hw->bus.device = pci_get_slot(dev);
360 	hw->bus.func = pci_get_function(dev);
361 
362 	/* Save off register access information */
363 	pf->osdep.mem_bus_space_tag =
364 		rman_get_bustag(pf->pci_mem);
365 	pf->osdep.mem_bus_space_handle =
366 		rman_get_bushandle(pf->pci_mem);
367 	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
368 	pf->osdep.flush_reg = I40E_GLGEN_STAT;
369 	pf->osdep.dev = dev;
370 
371 	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
372 	pf->hw.back = &pf->osdep;
373 
374  	return (0);
375  }
376 
/*
 * ixl_if_attach_pre - first-stage attach (ifdi_attach_pre)
 *
 * Runs before iflib allocates queues: maps BAR0, resets the PF,
 * initializes the shared code, Admin Queue, and LAN HMC, reads the MAC
 * address, sets device filtering, and fills in the iflib softc context
 * (queue counts/sizes, TSO limits, capabilities).  On failure, unwinds
 * in reverse order through the goto labels and returns a nonzero errno.
 */
static int
ixl_if_attach_pre(if_ctx_t ctx)
{
	device_t dev;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	if_softc_ctx_t scctx;
	struct i40e_filter_control_settings filter;
	enum i40e_status_code status;
	int error = 0;

	INIT_DEBUGOUT("ixl_if_attach_pre: begin");

	/* Allocate, clear, and link in our primary soft structure */
	dev = iflib_get_dev(ctx);
	pf = iflib_get_softc(ctx);
	vsi = &pf->vsi;
	vsi->back = pf;
	pf->dev = dev;
	hw = &pf->hw;

	/*
	** Note this assumes we have a single embedded VSI,
	** this could be enhanced later to allocate multiple
	*/
	//vsi->dev = pf->dev;
	vsi->hw = &pf->hw;
	vsi->id = 0;
	vsi->num_vlans = 0;
	vsi->ctx = ctx;
	vsi->media = iflib_get_media(ctx);
	vsi->shared = scctx = iflib_get_softc_ctx(ctx);

	/* Save tunable values */
	ixl_save_pf_tunables(pf);

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_pci_res;
	}

	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	status = i40e_pf_reset(hw);
	if (status) {
		device_printf(dev, "PF reset failure %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}

	/* Initialize the shared code */
	status = i40e_init_shared_code(hw);
	if (status) {
		device_printf(dev, "Unable to initialize shared code, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}

	/* Set up the admin queue */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
	hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;

	/*
	 * A FW-API-version mismatch is tolerated here so the NVM version
	 * can still be printed before failing the attach below.
	 */
	status = i40e_init_adminq(hw);
	if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "Unable to initialize Admin Queue, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}
	ixl_print_nvm_version(pf);

	if (status == I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n");
		device_printf(dev, "You must install the most recent version of "
		    "the network driver.\n");
		error = EIO;
		goto err_out;
	}

	/* Warn (but continue) on minor-version skew between driver and NVM */
        if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n");
		device_printf(dev, "Please install the most recent version "
		    "of the network driver.\n");
	} else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) {
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n");
		device_printf(dev, "Please update the NVM image.\n");
	}

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "get_hw_capabilities failed: %d\n",
		    error);
		goto err_get_cap;
	}

	/* Set up host memory cache */
	status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (status) {
		device_printf(dev, "init_lan_hmc failed: %s\n",
		    i40e_stat_str(hw, status));
		goto err_get_cap;
	}
	status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (status) {
		device_printf(dev, "configure_lan_hmc failed: %s\n",
		    i40e_stat_str(hw, status));
		goto err_mac_hmc;
	}

	/* Disable LLDP from the firmware for certain NVM versions */
	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	    (pf->hw.aq.fw_maj_ver < 4)) {
		i40e_aq_stop_lldp(hw, TRUE, NULL);
		pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED;
	}

	/* Get MAC addresses from hardware */
	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	iflib_set_mac(ctx, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
	filter.enable_fdir = FALSE;
	filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "i40e_set_filter_control() failed\n");

	/* Query device FW LLDP status */
	ixl_get_fw_lldp_status(pf);
	/* Tell FW to apply DCB config on link up */
	i40e_aq_set_dcb_parameters(hw, true, NULL);

	/* Fill out iflib parameters */
	if (hw->mac.type == I40E_MAC_X722)
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128;
	else
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
	/*
	 * Head-writeback mode appends a u32 head-index slot to the TX
	 * ring and uses a different txrx ops vector than descriptor
	 * write-back mode.
	 */
	if (vsi->enable_head_writeback) {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_hwb;
	} else {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_dwb;
	}
	scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
	    * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
	scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
	scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
	scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
	scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
	scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size;
	scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
	scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;

	INIT_DEBUGOUT("ixl_if_attach_pre: end");
	return (0);

/* Unwind in reverse order of setup */
err_mac_hmc:
	i40e_shutdown_lan_hmc(hw);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	ixl_free_pci_resources(pf);
err_pci_res:
	return (error);
}
571 
/*
 * ixl_if_attach_post - second-stage attach (ifdi_attach_post)
 *
 * Runs after iflib has created the ifnet and queues: sets up the OS
 * interface, initial link state, switch/VSI configuration, filter
 * lists, queue allocation manager, PHY interrupt mask, admin-queue
 * MSI-X interrupt, sysctls/statistics, and optional SR-IOV and iWARP
 * support.  On error, returns nonzero and relies on iflib to invoke
 * ixl_if_detach() for cleanup (see the err label).
 */
static int
ixl_if_attach_post(if_ctx_t ctx)
{
	device_t dev;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	int error = 0;
	enum i40e_status_code status;

	INIT_DEBUGOUT("ixl_if_attach_post: begin");

	dev = iflib_get_dev(ctx);
	pf = iflib_get_softc(ctx);
	vsi = &pf->vsi;
	vsi->ifp = iflib_get_ifp(ctx);
	hw = &pf->hw;

	/* Setup OS network interface / ifnet */
	if (ixl_setup_interface(dev, pf)) {
		device_printf(dev, "interface setup failed!\n");
		error = EIO;
		goto err;
	}

	/* Determine link state */
	if (ixl_attach_get_link_status(pf)) {
		error = EINVAL;
		goto err;
	}

	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
		     error);
		goto err;
	}

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Init queue allocation manager */
	error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp);
	if (error) {
		device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
		    error);
		goto err;
	}
	/* reserve a contiguous allocation for the PF's VSI */
	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr,
	    max(vsi->num_rx_queues, vsi->num_tx_queues), &pf->qtag);
	if (error) {
		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
		    error);
		goto err;
	}
	device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
	    pf->qtag.num_allocated, pf->qtag.num_active);

	/* Limit PHY interrupts to link, autoneg, and modules failure */
	status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
        if (status) {
		device_printf(dev, "i40e_aq_set_phy_mask() failed: err %s,"
		    " aq_err %s\n", i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		goto err;
	}

	/* Get the bus configuration and set the shared code */
	ixl_get_bus_info(pf);

	/* Keep admin queue interrupts active while driver is loaded */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
 		ixl_configure_intr0_msix(pf);
 		ixl_enable_intr0(hw);
	}

	/* Set initial advertised speed sysctl value */
	ixl_set_initial_advertised_speeds(pf);

	/* Initialize statistics & add sysctls */
	ixl_add_device_sysctls(pf);
	ixl_pf_reset_stats(pf);
	ixl_update_stats_counters(pf);
	ixl_add_hw_stats(pf);

	/* Force a fresh link query and publish the result */
	hw->phy.get_link_info = true;
	i40e_get_link_status(hw, &pf->link_up);
	ixl_update_link_status(pf);

#ifdef PCI_IOV
	ixl_initialize_sriov(pf);
#endif

#ifdef IXL_IW
	/* iWARP needs both device capability and MSI-X vectors to spare */
	if (hw->func_caps.iwarp && ixl_enable_iwarp) {
		pf->iw_enabled = (pf->iw_msix > 0) ? true : false;
		if (pf->iw_enabled) {
			error = ixl_iw_pf_attach(pf);
			if (error) {
				device_printf(dev,
				    "interfacing to iwarp driver failed: %d\n",
				    error);
				goto err;
			} else
				device_printf(dev, "iWARP ready\n");
		} else
			device_printf(dev,
			    "iwarp disabled on this device (no msix vectors)\n");
	} else {
		pf->iw_enabled = false;
		device_printf(dev, "The device is not iWARP enabled\n");
	}
#endif

	INIT_DBG_DEV(dev, "end");
	return (0);

err:
	INIT_DEBUGOUT("end: error %d", error);
	/* ixl_if_detach() is called on error from this */
	return (error);
}
696 
/*
 * ixl_if_detach - device detach (ifdi_detach)
 *
 * Refuses to detach while the iWARP client or SR-IOV VFs are still in
 * use (returns EBUSY-class errors), then tears down media types, the
 * LAN HMC, and the Admin Queue, and finally releases queue-manager
 * state, PCI resources, and the MAC filter list.  Returns 0 on
 * success.
 */
static int
ixl_if_detach(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code	status;
#if defined(PCI_IOV) || defined(IXL_IW)
	int			error;
#endif

	INIT_DBG_DEV(dev, "begin");

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		error = ixl_iw_pf_detach(pf);
		if (error == EBUSY) {
			device_printf(dev, "iwarp in use; stop it first.\n");
			return (error);
		}
	}
#endif
#ifdef PCI_IOV
	error = pci_iov_detach(dev);
	if (error != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (error);
	}
#endif
	/* Remove all previously allocated media types */
	ifmedia_removeall(vsi->media);

	/* Shutdown LAN HMC; skip if it was never brought up */
	if (hw->hmc.hmc_obj) {
		status = i40e_shutdown_lan_hmc(hw);
		if (status)
			device_printf(dev,
			    "i40e_shutdown_lan_hmc() failed with status %s\n",
			    i40e_stat_str(hw, status));
	}

	/* Shutdown admin queue (mask its interrupt first) */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "i40e_shutdown_adminq() failed with status %s\n",
		    i40e_stat_str(hw, status));

	ixl_pf_qmgr_destroy(&pf->qmgr);
	ixl_free_pci_resources(pf);
	ixl_free_mac_filters(vsi);
	INIT_DBG_DEV(dev, "end");
	return (0);
}
753 
754 /* TODO: Do shutdown-specific stuff here */
755 static int
756 ixl_if_shutdown(if_ctx_t ctx)
757 {
758 	int error = 0;
759 
760 	INIT_DEBUGOUT("ixl_if_shutdown: begin");
761 
762 	/* TODO: Call ixl_if_stop()? */
763 
764 	/* TODO: Then setup low power mode */
765 
766 	return (error);
767 }
768 
769 static int
770 ixl_if_suspend(if_ctx_t ctx)
771 {
772 	int error = 0;
773 
774 	INIT_DEBUGOUT("ixl_if_suspend: begin");
775 
776 	/* TODO: Call ixl_if_stop()? */
777 
778 	/* TODO: Then setup low power mode */
779 
780 	return (error);
781 }
782 
783 static int
784 ixl_if_resume(if_ctx_t ctx)
785 {
786 	struct ifnet *ifp = iflib_get_ifp(ctx);
787 
788 	INIT_DEBUGOUT("ixl_if_resume: begin");
789 
790 	/* Read & clear wake-up registers */
791 
792 	/* Required after D3->D0 transition */
793 	if (ifp->if_flags & IFF_UP)
794 		ixl_if_init(ctx);
795 
796 	return (0);
797 }
798 
799 /* Set Report Status queue fields to 0 */
800 static void
801 ixl_init_tx_rsqs(struct ixl_vsi *vsi)
802 {
803 	if_softc_ctx_t scctx = vsi->shared;
804 	struct ixl_tx_queue *tx_que;
805 	int i, j;
806 
807 	for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) {
808 		struct tx_ring *txr = &tx_que->txr;
809 
810 		txr->tx_rs_cidx = txr->tx_rs_pidx = txr->tx_cidx_processed = 0;
811 
812 		for (j = 0; j < scctx->isc_ntxd[0]; j++)
813 			txr->tx_rsq[j] = QIDX_INVALID;
814 	}
815 }
816 
817 static void
818 ixl_init_tx_cidx(struct ixl_vsi *vsi)
819 {
820 	struct ixl_tx_queue *tx_que;
821 	int i;
822 
823 	for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) {
824 		struct tx_ring *txr = &tx_que->txr;
825 
826 		txr->tx_cidx_processed = 0;
827 	}
828 }
829 
/*
 * ixl_if_init - bring the interface up (ifdi_init)
 *
 * Rebuilds driver/HW state if the Admin Queue has died, programs a
 * possibly user-changed (locally administered) MAC address, prepares
 * the VSI rings and HMC contexts, applies multicast and RSS settings,
 * routes interrupts, resets TX completion bookkeeping for the active
 * write-back mode, enables the rings, and re-applies the filter list.
 * Errors are reported via device_printf and abort the init early.
 */
void
ixl_if_init(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw	*hw = &pf->hw;
	device_t 	dev = iflib_get_dev(ctx);
	u8		tmpaddr[ETHER_ADDR_LEN];
	int		ret;

	/*
	 * If the aq is dead here, it probably means something outside of the driver
	 * did something to the adapter, like a PF reset.
	 * So rebuild the driver's state here if that occurs.
	 */
	if (!i40e_check_asq_alive(&pf->hw)) {
		device_printf(dev, "Admin Queue is down; resetting...\n");
		ixl_teardown_hw_structs(pf);
		ixl_reset(pf);
	}

	/* Get the latest mac address... User might use a LAA */
	bcopy(IF_LLADDR(vsi->ifp), tmpaddr, ETH_ALEN);
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
		/* Swap the HW filter entry over to the new address */
		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
		bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		if (ret) {
			device_printf(dev, "LLA address change failed!!\n");
			return;
		}
		ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
	}

	iflib_set_mac(ctx, hw->mac.addr);

	/* Prepare the VSI: rings, hmc contexts, etc... */
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!!\n");
		return;
	}

	// TODO: Call iflib setup multicast filters here?
	// It's called in ixgbe in D5213
	ixl_if_multi_set(ctx);

	/* Set up RSS */
	ixl_config_rss(pf);

	/* Set up MSI/X routing and the ITR settings */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_queue_intr_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	/* Reset TX completion state for the configured write-back mode */
	if (vsi->enable_head_writeback)
		ixl_init_tx_cidx(vsi);
	else
		ixl_init_tx_rsqs(vsi);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	ixl_reconfigure_filters(vsi);

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		ret = ixl_iw_pf_init(pf);
		if (ret)
			device_printf(dev,
			    "initialize iwarp failed, code %d\n", ret);
	}
#endif
}
909 
910 void
911 ixl_if_stop(if_ctx_t ctx)
912 {
913 	struct ixl_pf *pf = iflib_get_softc(ctx);
914 	struct ixl_vsi *vsi = &pf->vsi;
915 
916 	INIT_DEBUGOUT("ixl_if_stop: begin\n");
917 
918 	// TODO: This may need to be reworked
919 #ifdef IXL_IW
920 	/* Stop iWARP device */
921 	if (ixl_enable_iwarp && pf->iw_enabled)
922 		ixl_iw_pf_stop(pf);
923 #endif
924 
925 	ixl_disable_rings_intr(vsi);
926 	ixl_disable_rings(vsi);
927 }
928 
/*
 * Assign MSI-X vectors: vector 0 (rid 1) is reserved for the Admin Queue;
 * RX queue i gets vector i + 1 (rid i + 2).  TX queues do not get their
 * own hardware vectors -- each TX queue's softirq is attached to an RX
 * queue's interrupt, round-robin by index.
 *
 * Returns 0 on success or the errno from iflib_irq_alloc_generic();
 * on failure, all IRQs allocated so far are released.
 */
static int
ixl_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *rx_que = vsi->rx_queues;
	struct ixl_tx_queue *tx_que = vsi->tx_queues;
	int err, i, rid, vector = 0;
	char buf[16];

	/* Admin Que must use vector 0*/
	rid = vector + 1;
	err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
	    ixl_msix_adminq, pf, 0, "aq");
	if (err) {
		iflib_irq_free(ctx, &vsi->irq);
		device_printf(iflib_get_dev(ctx),
		    "Failed to register Admin que handler");
		return (err);
	}
	// TODO: Re-enable this at some point
	// iflib_softirq_alloc_generic(ctx, rid, IFLIB_INTR_IOV, pf, 0, "ixl_iov");

	/* Now set up the stations */
	for (i = 0, vector = 1; i < vsi->num_rx_queues; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RX, ixl_msix_que, rx_que, rx_que->rxr.me, buf);
		/* XXX: Does the driver work as expected if there are fewer num_rx_queues than
		 * what's expected in the iflib context? */
		if (err) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate q int %d err: %d", i, err);
			/*
			 * NOTE(review): this count includes queue i, whose irq
			 * allocation just failed, so the fail path below also
			 * calls iflib_irq_free() on that never-allocated irq --
			 * confirm iflib tolerates freeing an unallocated irq.
			 */
			vsi->num_rx_queues = i + 1;
			goto fail;
		}
		/* Remember which vector services this RX queue */
		rx_que->msix = vector;
	}

	bzero(buf, sizeof(buf));

	/* Attach each TX queue's softirq to an RX vector, round-robin */
	for (i = 0; i < vsi->num_tx_queues; i++, tx_que++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		iflib_softirq_alloc_generic(ctx,
		    &vsi->rx_queues[i % vsi->num_rx_queues].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);

		/* TODO: Maybe call a strategy function for this to figure out which
		* interrupts to map Tx queues to. I don't know if there's an immediately
		* better way than this other than a user-supplied map, though. */
		tx_que->msix = (i % vsi->num_rx_queues) + 1;
	}

	return (0);
fail:
	/* Release the admin queue irq and every RX queue irq allocated so far */
	iflib_irq_free(ctx, &vsi->irq);
	rx_que = vsi->rx_queues;
	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);
	return (err);
}
992 
993 /*
994  * Enable all interrupts
995  *
996  * Called in:
997  * iflib_init_locked, after ixl_if_init()
998  */
999 static void
1000 ixl_if_enable_intr(if_ctx_t ctx)
1001 {
1002 	struct ixl_pf *pf = iflib_get_softc(ctx);
1003 	struct ixl_vsi *vsi = &pf->vsi;
1004 	struct i40e_hw		*hw = vsi->hw;
1005 	struct ixl_rx_queue	*que = vsi->rx_queues;
1006 
1007 	ixl_enable_intr0(hw);
1008 	/* Enable queue interrupts */
1009 	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
1010 		/* TODO: Queue index parameter is probably wrong */
1011 		ixl_enable_queue(hw, que->rxr.me);
1012 }
1013 
1014 /*
1015  * Disable queue interrupts
1016  *
1017  * Other interrupt causes need to remain active.
1018  */
1019 static void
1020 ixl_if_disable_intr(if_ctx_t ctx)
1021 {
1022 	struct ixl_pf *pf = iflib_get_softc(ctx);
1023 	struct ixl_vsi *vsi = &pf->vsi;
1024 	struct i40e_hw		*hw = vsi->hw;
1025 	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
1026 
1027 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
1028 		for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
1029 			ixl_disable_queue(hw, rx_que->msix - 1);
1030 	} else {
1031 		// Set PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF
1032 		// stops queues from triggering interrupts
1033 		wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
1034 	}
1035 }
1036 
1037 static int
1038 ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
1039 {
1040 	struct ixl_pf *pf = iflib_get_softc(ctx);
1041 	struct ixl_vsi *vsi = &pf->vsi;
1042 	struct i40e_hw		*hw = vsi->hw;
1043 	struct ixl_rx_queue	*rx_que = &vsi->rx_queues[rxqid];
1044 
1045 	ixl_enable_queue(hw, rx_que->msix - 1);
1046 	return (0);
1047 }
1048 
1049 static int
1050 ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
1051 {
1052 	struct ixl_pf *pf = iflib_get_softc(ctx);
1053 	struct ixl_vsi *vsi = &pf->vsi;
1054 	struct i40e_hw		*hw = vsi->hw;
1055 	struct ixl_tx_queue	*tx_que = &vsi->tx_queues[txqid];
1056 
1057 	ixl_enable_queue(hw, tx_que->msix - 1);
1058 
1059 	return (0);
1060 }
1061 
1062 static int
1063 ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
1064 {
1065 	struct ixl_pf *pf = iflib_get_softc(ctx);
1066 	struct ixl_vsi *vsi = &pf->vsi;
1067 	if_softc_ctx_t scctx = vsi->shared;
1068 	struct ixl_tx_queue *que;
1069 	// int i;
1070 	int i, j, error = 0;
1071 
1072 	MPASS(vsi->num_tx_queues > 0);
1073 	MPASS(ntxqs == 1);
1074 	MPASS(vsi->num_tx_queues == ntxqsets);
1075 
1076 	/* Allocate queue structure memory */
1077 	if (!(vsi->tx_queues =
1078 	    (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) *ntxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
1079 		device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
1080 		return (ENOMEM);
1081 	}
1082 
1083 	for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
1084 		struct tx_ring *txr = &que->txr;
1085 
1086 		txr->me = i;
1087 		que->vsi = vsi;
1088 
1089 		if (!vsi->enable_head_writeback) {
1090 			/* Allocate report status array */
1091 			if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXL, M_NOWAIT))) {
1092 				device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
1093 				error = ENOMEM;
1094 				goto fail;
1095 			}
1096 			/* Init report status array */
1097 			for (j = 0; j < scctx->isc_ntxd[0]; j++)
1098 				txr->tx_rsq[j] = QIDX_INVALID;
1099 		}
1100 		/* get the virtual and physical address of the hardware queues */
1101 		txr->tail = I40E_QTX_TAIL(txr->me);
1102 		txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs];
1103 		txr->tx_paddr = paddrs[i * ntxqs];
1104 		txr->que = que;
1105 	}
1106 
1107 	return (0);
1108 fail:
1109 	ixl_if_queues_free(ctx);
1110 	return (error);
1111 }
1112 
1113 static int
1114 ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
1115 {
1116 	struct ixl_pf *pf = iflib_get_softc(ctx);
1117 	struct ixl_vsi *vsi = &pf->vsi;
1118 	struct ixl_rx_queue *que;
1119 	int i, error = 0;
1120 
1121 	MPASS(vsi->num_rx_queues > 0);
1122 	MPASS(nrxqs == 1);
1123 	MPASS(vsi->num_rx_queues == nrxqsets);
1124 
1125 	/* Allocate queue structure memory */
1126 	if (!(vsi->rx_queues =
1127 	    (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
1128 	    nrxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
1129 		device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
1130 		error = ENOMEM;
1131 		goto fail;
1132 	}
1133 
1134 	for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
1135 		struct rx_ring *rxr = &que->rxr;
1136 
1137 		rxr->me = i;
1138 		que->vsi = vsi;
1139 
1140 		/* get the virtual and physical address of the hardware queues */
1141 		rxr->tail = I40E_QRX_TAIL(rxr->me);
1142 		rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
1143 		rxr->rx_paddr = paddrs[i * nrxqs];
1144 		rxr->que = que;
1145 	}
1146 
1147 	return (0);
1148 fail:
1149 	ixl_if_queues_free(ctx);
1150 	return (error);
1151 }
1152 
1153 static void
1154 ixl_if_queues_free(if_ctx_t ctx)
1155 {
1156 	struct ixl_pf *pf = iflib_get_softc(ctx);
1157 	struct ixl_vsi *vsi = &pf->vsi;
1158 
1159 	if (vsi->enable_head_writeback) {
1160 		struct ixl_tx_queue *que;
1161 		int i = 0;
1162 
1163 		for (i = 0, que = vsi->tx_queues; i < vsi->num_tx_queues; i++, que++) {
1164 			struct tx_ring *txr = &que->txr;
1165 			if (txr->tx_rsq != NULL) {
1166 				free(txr->tx_rsq, M_IXL);
1167 				txr->tx_rsq = NULL;
1168 			}
1169 		}
1170 	}
1171 
1172 	if (vsi->tx_queues != NULL) {
1173 		free(vsi->tx_queues, M_IXL);
1174 		vsi->tx_queues = NULL;
1175 	}
1176 	if (vsi->rx_queues != NULL) {
1177 		free(vsi->rx_queues, M_IXL);
1178 		vsi->rx_queues = NULL;
1179 	}
1180 }
1181 
1182 void
1183 ixl_update_link_status(struct ixl_pf *pf)
1184 {
1185 	struct ixl_vsi *vsi = &pf->vsi;
1186 	struct i40e_hw *hw = &pf->hw;
1187 	u64 baudrate;
1188 
1189 	if (pf->link_up) {
1190 		if (vsi->link_active == FALSE) {
1191 			vsi->link_active = TRUE;
1192 			baudrate = ixl_max_aq_speed_to_value(hw->phy.link_info.link_speed);
1193 			iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
1194 			ixl_link_up_msg(pf);
1195 #ifdef PCI_IOV
1196 			ixl_broadcast_link_state(pf);
1197 #endif
1198 
1199 		}
1200 	} else { /* Link down */
1201 		if (vsi->link_active == TRUE) {
1202 			vsi->link_active = FALSE;
1203 			iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
1204 #ifdef PCI_IOV
1205 			ixl_broadcast_link_state(pf);
1206 #endif
1207 		}
1208 	}
1209 }
1210 
1211 static int
1212 ixl_process_adminq(struct ixl_pf *pf, u16 *pending)
1213 {
1214 	enum i40e_status_code status = I40E_SUCCESS;
1215 	struct i40e_arq_event_info event;
1216 	struct i40e_hw *hw = &pf->hw;
1217 	device_t dev = pf->dev;
1218 	u16 opcode;
1219 	u32 loop = 0, reg;
1220 
1221 	event.buf_len = IXL_AQ_BUF_SZ;
1222 	event.msg_buf = malloc(event.buf_len, M_IXL, M_NOWAIT | M_ZERO);
1223 	if (!event.msg_buf) {
1224 		device_printf(dev, "%s: Unable to allocate memory for Admin"
1225 		    " Queue event!\n", __func__);
1226 		return (ENOMEM);
1227 	}
1228 
1229 	/* clean and process any events */
1230 	do {
1231 		status = i40e_clean_arq_element(hw, &event, pending);
1232 		if (status)
1233 			break;
1234 		opcode = LE16_TO_CPU(event.desc.opcode);
1235 		ixl_dbg(pf, IXL_DBG_AQ,
1236 		    "Admin Queue event: %#06x\n", opcode);
1237 		switch (opcode) {
1238 		case i40e_aqc_opc_get_link_status:
1239 			ixl_link_event(pf, &event);
1240 			break;
1241 		case i40e_aqc_opc_send_msg_to_pf:
1242 #ifdef PCI_IOV
1243 			ixl_handle_vf_msg(pf, &event);
1244 #endif
1245 			break;
1246 		/*
1247 		 * This should only occur on no-drop queues, which
1248 		 * aren't currently configured.
1249 		 */
1250 		case i40e_aqc_opc_event_lan_overflow:
1251 			device_printf(dev, "LAN overflow event\n");
1252 			break;
1253 		default:
1254 			break;
1255 		}
1256 	} while (*pending && (loop++ < IXL_ADM_LIMIT));
1257 
1258 	free(event.msg_buf, M_IXL);
1259 
1260 	/* Re-enable admin queue interrupt cause */
1261 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
1262 	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
1263 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
1264 
1265 	return (status);
1266 }
1267 
1268 static void
1269 ixl_if_update_admin_status(if_ctx_t ctx)
1270 {
1271 	struct ixl_pf			*pf = iflib_get_softc(ctx);
1272 	struct i40e_hw			*hw = &pf->hw;
1273 	u16				pending;
1274 
1275 	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING)
1276 		ixl_handle_empr_reset(pf);
1277 
1278 	if (pf->state & IXL_PF_STATE_MDD_PENDING)
1279 		ixl_handle_mdd_event(pf);
1280 
1281 #ifdef PCI_IOV
1282 	if (pf->state & IXL_PF_STATE_VF_RESET_REQ)
1283 		iflib_iov_intr_deferred(ctx);
1284 #endif
1285 
1286 	ixl_process_adminq(pf, &pending);
1287 	ixl_update_link_status(pf);
1288 
1289 	/*
1290 	 * If there are still messages to process, reschedule ourselves.
1291 	 * Otherwise, re-enable our interrupt and go to sleep.
1292 	 */
1293 	if (pending > 0)
1294 		iflib_admin_intr_deferred(ctx);
1295 	else
1296 		ixl_enable_intr0(hw);
1297 }
1298 
/*
 * Sync the hardware multicast filter list with the interface's current
 * multicast membership.  All existing MC filters are deleted first, then
 * either multicast-promiscuous mode is enabled (when the address list is
 * at its maximum) or filters are re-installed one per address.
 */
static void
ixl_if_multi_set(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw		*hw = vsi->hw;
	int			mcnt = 0, flags;

	IOCTL_DEBUGOUT("ixl_if_multi_set: begin");

	mcnt = if_multiaddr_count(iflib_get_ifp(ctx), MAX_MULTICAST_ADDR);
	/* delete existing MC filters */
	ixl_del_multi(vsi);

	/* Too many addresses to filter individually: go promiscuous */
	if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		return;
	}
	/* (re-)install filters for all mcast addresses */
	mcnt = if_multi_apply(iflib_get_ifp(ctx), ixl_mc_filter_apply, vsi);

	/* Push the accumulated software filters down to the hardware */
	if (mcnt > 0) {
		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
		ixl_add_hw_filters(vsi, flags, mcnt);
	}

	IOCTL_DEBUGOUT("ixl_if_multi_set: end");
}
1328 
1329 static int
1330 ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
1331 {
1332 	struct ixl_pf *pf = iflib_get_softc(ctx);
1333 	struct ixl_vsi *vsi = &pf->vsi;
1334 
1335 	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
1336 	if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
1337 		ETHER_VLAN_ENCAP_LEN)
1338 		return (EINVAL);
1339 
1340 	vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
1341 		ETHER_VLAN_ENCAP_LEN;
1342 
1343 	return (0);
1344 }
1345 
/*
 * Report the current media status to ifconfig: link validity/activity,
 * duplex, the active media subtype (mapped from the firmware-reported PHY
 * type), and the negotiated flow-control pause settings.
 */
static void
ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct i40e_hw  *hw = &pf->hw;

	INIT_DEBUGOUT("ixl_media_status: begin");

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* Without link there is no active media to report */
	if (!pf->link_up) {
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;

	/* Translate the firmware PHY type into an ifmedia subtype */
	switch (hw->phy.link_info.phy_type) {
		/* 100 M */
		case I40E_PHY_TYPE_100BASE_TX:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		/* 1 G */
		case I40E_PHY_TYPE_1000BASE_T:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		case I40E_PHY_TYPE_1000BASE_SX:
			ifmr->ifm_active |= IFM_1000_SX;
			break;
		case I40E_PHY_TYPE_1000BASE_LX:
			ifmr->ifm_active |= IFM_1000_LX;
			break;
		case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		/* 10 G */
		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_SR:
			ifmr->ifm_active |= IFM_10G_SR;
			break;
		case I40E_PHY_TYPE_10GBASE_LR:
			ifmr->ifm_active |= IFM_10G_LR;
			break;
		case I40E_PHY_TYPE_10GBASE_T:
			ifmr->ifm_active |= IFM_10G_T;
			break;
		case I40E_PHY_TYPE_XAUI:
		case I40E_PHY_TYPE_XFI:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_AOC:
			ifmr->ifm_active |= IFM_10G_AOC;
			break;
		/* 25 G */
		case I40E_PHY_TYPE_25GBASE_KR:
			ifmr->ifm_active |= IFM_25G_KR;
			break;
		case I40E_PHY_TYPE_25GBASE_CR:
			ifmr->ifm_active |= IFM_25G_CR;
			break;
		case I40E_PHY_TYPE_25GBASE_SR:
			ifmr->ifm_active |= IFM_25G_SR;
			break;
		case I40E_PHY_TYPE_25GBASE_LR:
			ifmr->ifm_active |= IFM_25G_LR;
			break;
		case I40E_PHY_TYPE_25GBASE_AOC:
			ifmr->ifm_active |= IFM_25G_AOC;
			break;
		case I40E_PHY_TYPE_25GBASE_ACC:
			ifmr->ifm_active |= IFM_25G_ACC;
			break;
		/* 40 G */
		case I40E_PHY_TYPE_40GBASE_CR4:
		case I40E_PHY_TYPE_40GBASE_CR4_CU:
			ifmr->ifm_active |= IFM_40G_CR4;
			break;
		case I40E_PHY_TYPE_40GBASE_SR4:
			ifmr->ifm_active |= IFM_40G_SR4;
			break;
		case I40E_PHY_TYPE_40GBASE_LR4:
			ifmr->ifm_active |= IFM_40G_LR4;
			break;
		case I40E_PHY_TYPE_XLAUI:
			ifmr->ifm_active |= IFM_OTHER;
			break;
		case I40E_PHY_TYPE_1000BASE_KX:
			ifmr->ifm_active |= IFM_1000_KX;
			break;
		case I40E_PHY_TYPE_SGMII:
			ifmr->ifm_active |= IFM_1000_SGMII;
			break;
		/* ERJ: What's the difference between these? */
		case I40E_PHY_TYPE_10GBASE_CR1_CU:
		case I40E_PHY_TYPE_10GBASE_CR1:
			ifmr->ifm_active |= IFM_10G_CR1;
			break;
		case I40E_PHY_TYPE_10GBASE_KX4:
			ifmr->ifm_active |= IFM_10G_KX4;
			break;
		case I40E_PHY_TYPE_10GBASE_KR:
			ifmr->ifm_active |= IFM_10G_KR;
			break;
		case I40E_PHY_TYPE_SFI:
			ifmr->ifm_active |= IFM_10G_SFI;
			break;
		/* Our single 20G media type */
		case I40E_PHY_TYPE_20GBASE_KR2:
			ifmr->ifm_active |= IFM_20G_KR2;
			break;
		case I40E_PHY_TYPE_40GBASE_KR4:
			ifmr->ifm_active |= IFM_40G_KR4;
			break;
		case I40E_PHY_TYPE_XLPPI:
		case I40E_PHY_TYPE_40GBASE_AOC:
			ifmr->ifm_active |= IFM_40G_XLPPI;
			break;
		/* Unknown to driver */
		default:
			ifmr->ifm_active |= IFM_UNKNOWN;
			break;
	}
	/* Report flow control status as well */
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
}
1478 
1479 static int
1480 ixl_if_media_change(if_ctx_t ctx)
1481 {
1482 	struct ifmedia *ifm = iflib_get_media(ctx);
1483 
1484 	INIT_DEBUGOUT("ixl_media_change: begin");
1485 
1486 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1487 		return (EINVAL);
1488 
1489 	if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n");
1490 	return (ENODEV);
1491 }
1492 
1493 static int
1494 ixl_if_promisc_set(if_ctx_t ctx, int flags)
1495 {
1496 	struct ixl_pf *pf = iflib_get_softc(ctx);
1497 	struct ixl_vsi *vsi = &pf->vsi;
1498 	struct ifnet	*ifp = iflib_get_ifp(ctx);
1499 	struct i40e_hw	*hw = vsi->hw;
1500 	int		err;
1501 	bool		uni = FALSE, multi = FALSE;
1502 
1503 	if (flags & IFF_PROMISC)
1504 		uni = multi = TRUE;
1505 	else if (flags & IFF_ALLMULTI ||
1506 		if_multiaddr_count(ifp, MAX_MULTICAST_ADDR) == MAX_MULTICAST_ADDR)
1507 		multi = TRUE;
1508 
1509 	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1510 	    vsi->seid, uni, NULL, true);
1511 	if (err)
1512 		return (err);
1513 	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1514 	    vsi->seid, multi, NULL);
1515 	return (err);
1516 }
1517 
1518 static void
1519 ixl_if_timer(if_ctx_t ctx, uint16_t qid)
1520 {
1521 	struct ixl_pf			*pf = iflib_get_softc(ctx);
1522 	//struct i40e_hw		*hw = &pf->hw;
1523 	//struct ixl_tx_queue	*que = &vsi->tx_queues[qid];
1524  #if 0
1525 	u32			mask;
1526 
1527 	/*
1528 	** Check status of the queues
1529 	*/
1530 	mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
1531 		I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
1532 
1533 	/* If queue param has outstanding work, trigger sw irq */
1534 	// TODO: TX queues in iflib don't use HW interrupts; does this do anything?
1535 	if (que->busy)
1536 		wr32(hw, I40E_PFINT_DYN_CTLN(que->txr.me), mask);
1537 #endif
1538 
1539 	if (qid != 0)
1540 		return;
1541 
1542 	/* Fire off the adminq task */
1543 	iflib_admin_intr_deferred(ctx);
1544 
1545 	/* Update stats */
1546 	ixl_update_stats_counters(pf);
1547 }
1548 
1549 static void
1550 ixl_if_vlan_register(if_ctx_t ctx, u16 vtag)
1551 {
1552 	struct ixl_pf *pf = iflib_get_softc(ctx);
1553 	struct ixl_vsi *vsi = &pf->vsi;
1554 	struct i40e_hw	*hw = vsi->hw;
1555 
1556 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
1557 		return;
1558 
1559 	++vsi->num_vlans;
1560 	ixl_add_filter(vsi, hw->mac.addr, vtag);
1561 }
1562 
1563 static void
1564 ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1565 {
1566 	struct ixl_pf *pf = iflib_get_softc(ctx);
1567 	struct ixl_vsi *vsi = &pf->vsi;
1568 	struct i40e_hw	*hw = vsi->hw;
1569 
1570 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
1571 		return;
1572 
1573 	--vsi->num_vlans;
1574 	ixl_del_filter(vsi, hw->mac.addr, vtag);
1575 }
1576 
/*
 * Return interface statistics from the VSI's software counters; anything
 * not tracked per-VSI falls through to the stack's default accounting.
 */
static uint64_t
ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (vsi->ipackets);
	case IFCOUNTER_IERRORS:
		return (vsi->ierrors);
	case IFCOUNTER_OPACKETS:
		return (vsi->opackets);
	case IFCOUNTER_OERRORS:
		return (vsi->oerrors);
	case IFCOUNTER_COLLISIONS:
		/* Collisions are by standard impossible in 40G/10G Ethernet */
		return (0);
	case IFCOUNTER_IBYTES:
		return (vsi->ibytes);
	case IFCOUNTER_OBYTES:
		return (vsi->obytes);
	case IFCOUNTER_IMCASTS:
		return (vsi->imcasts);
	case IFCOUNTER_OMCASTS:
		return (vsi->omcasts);
	case IFCOUNTER_IQDROPS:
		return (vsi->iqdrops);
	case IFCOUNTER_OQDROPS:
		return (vsi->oqdrops);
	case IFCOUNTER_NOPROTO:
		return (vsi->noproto);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}
1614 
/*
 * VF Level Reset (VFLR) notification hook from iflib.
 * Currently a stub that only logs; the actual reset processing
 * (ixl_handle_vflr) is not wired up yet.
 */
static void
ixl_if_vflr_handle(if_ctx_t ctx)
{
	IXL_DEV_ERR(iflib_get_dev(ctx), "");

	// TODO: call ixl_handle_vflr()
}
1622 
1623 static int
1624 ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1625 {
1626 	struct ixl_pf		*pf = iflib_get_softc(ctx);
1627 
1628 	if (pf->read_i2c_byte == NULL)
1629 		return (EINVAL);
1630 
1631 	for (int i = 0; i < req->len; i++)
1632 		if (pf->read_i2c_byte(pf, req->offset + i,
1633 		    req->dev_addr, &req->data[i]))
1634 			return (EIO);
1635 	return (0);
1636 }
1637 
1638 static int
1639 ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
1640 {
1641 	struct ixl_pf *pf = iflib_get_softc(ctx);
1642 	struct ifdrv *ifd = (struct ifdrv *)data;
1643 	int error = 0;
1644 
1645 	/* NVM update command */
1646 	if (ifd->ifd_cmd == I40E_NVM_ACCESS)
1647 		error = ixl_handle_nvmupd_cmd(pf, ifd);
1648 	else
1649 		error = EINVAL;
1650 
1651 	return (error);
1652 }
1653 
1654 static int
1655 ixl_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count __unused)
1656 {
1657 	struct ixl_vsi *vsi = arg;
1658 
1659 	if (ifma->ifma_addr->sa_family != AF_LINK)
1660 		return (0);
1661 	ixl_add_mc_filter(vsi,
1662 	    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
1663 	return (1);
1664 }
1665 
1666 /*
1667  * Sanity check and save off tunable values.
1668  */
1669 static void
1670 ixl_save_pf_tunables(struct ixl_pf *pf)
1671 {
1672 	device_t dev = pf->dev;
1673 
1674 	/* Save tunable information */
1675 	pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
1676 	pf->dbg_mask = ixl_core_debug_mask;
1677 	pf->hw.debug_mask = ixl_shared_debug_mask;
1678 	pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback);
1679 #if 0
1680 	pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
1681 	pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
1682 #endif
1683 
1684 	if (ixl_i2c_access_method > 3 || ixl_i2c_access_method < 0)
1685 		pf->i2c_access_method = 0;
1686 	else
1687 		pf->i2c_access_method = ixl_i2c_access_method;
1688 
1689 	if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) {
1690 		device_printf(dev, "Invalid tx_itr value of %d set!\n",
1691 		    ixl_tx_itr);
1692 		device_printf(dev, "tx_itr must be between %d and %d, "
1693 		    "inclusive\n",
1694 		    0, IXL_MAX_ITR);
1695 		device_printf(dev, "Using default value of %d instead\n",
1696 		    IXL_ITR_4K);
1697 		pf->tx_itr = IXL_ITR_4K;
1698 	} else
1699 		pf->tx_itr = ixl_tx_itr;
1700 
1701 	if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) {
1702 		device_printf(dev, "Invalid rx_itr value of %d set!\n",
1703 		    ixl_rx_itr);
1704 		device_printf(dev, "rx_itr must be between %d and %d, "
1705 		    "inclusive\n",
1706 		    0, IXL_MAX_ITR);
1707 		device_printf(dev, "Using default value of %d instead\n",
1708 		    IXL_ITR_8K);
1709 		pf->rx_itr = IXL_ITR_8K;
1710 	} else
1711 		pf->rx_itr = ixl_rx_itr;
1712 }
1713 
1714