xref: /freebsd/sys/dev/ixgbe/if_ix.c (revision 53b70c86)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #include "opt_rss.h"
39 
40 #include "ixgbe.h"
41 #include "ixgbe_sriov.h"
42 #include "ifdi_if.h"
43 
44 #include <net/netmap.h>
45 #include <dev/netmap/netmap_kern.h>
46 
47 /************************************************************************
48  * Driver version
49  ************************************************************************/
50 char ixgbe_driver_version[] = "4.0.1-k";
51 
52 
53 /************************************************************************
54  * PCI Device ID Table
55  *
56  *   Used by probe to select which devices the driver attaches to
57  *   Each entry carries the device's description string
58  *   Last entry must be PVID_END (all zeros)
59  *
60  *   { Vendor ID, Device ID, Description String }
61  ************************************************************************/
62 static pci_vendor_info_t ixgbe_vendor_info_array[] =
63 {
64   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT,  "Intel(R) 82598EB AF (Dual Fiber)"),
65   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT,  "Intel(R) 82598EB AF (Fiber)"),
66   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4,  "Intel(R) 82598EB AT (CX4)"),
67   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT,  "Intel(R) 82598EB AT"),
68   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2,  "Intel(R) 82598EB AT2"),
69   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598,  "Intel(R) 82598"),
70   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT,  "Intel(R) 82598EB AF DA (Dual Fiber)"),
71   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT,  "Intel(R) 82598EB AT (Dual CX4)"),
72   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR,  "Intel(R) 82598EB AF (Dual Fiber LR)"),
73   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM,  "Intel(R) 82598EB AF (Dual Fiber SR)"),
74   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM,  "Intel(R) 82598EB LOM"),
75   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4,  "Intel(R) X520 82599 (KX4)"),
76   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ,  "Intel(R) X520 82599 (KX4 Mezzanine)"),
77   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP,  "Intel(R) X520 82599ES (SFI/SFP+)"),
78   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM,  "Intel(R) X520 82599 (XAUI/BX4)"),
79   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4,  "Intel(R) X520 82599 (Dual CX4)"),
80   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM,  "Intel(R) X520-T 82599 LOM"),
81   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE,  "Intel(R) X520 82599 (Combined Backplane)"),
82   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE,  "Intel(R) X520 82599 (Backplane w/FCoE)"),
83   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2,  "Intel(R) X520 82599 (Dual SFP+)"),
84   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE,  "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"),
85   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP,  "Intel(R) X520-1 82599EN (SFP+)"),
86   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP,  "Intel(R) X520-4 82599 (Quad SFP+)"),
87   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP,  "Intel(R) X520-Q1 82599 (QSFP+)"),
88   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T,  "Intel(R) X540-AT2"),
89   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1,  "Intel(R) X540-T1"),
90   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T,  "Intel(R) X550-T2"),
91   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"),
92   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR,  "Intel(R) X552 (KR Backplane)"),
93   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4,  "Intel(R) X552 (KX4 Backplane)"),
94   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T,  "Intel(R) X552/X557-AT (10GBASE-T)"),
95   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T,  "Intel(R) X552 (1000BASE-T)"),
96   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) X552 (SFP+)"),
97   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) X553 (KR Backplane)"),
98   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) X553 L (KR Backplane)"),
99   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) X553 (SFP+)"),
100   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) X553 N (SFP+)"),
101   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) X553 (1GbE SGMII)"),
102   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) X553 L (1GbE SGMII)"),
103   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) X553/X557-AT (10GBASE-T)"),
104   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) X553 (1GbE)"),
105   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) X553 L (1GbE)"),
106   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) X540-T2 (Bypass)"),
107   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) X520 82599 (Bypass)"),
108 	/* required last entry */
109   PVID_END
110 };
111 
112 static void *ixgbe_register(device_t dev);
113 static int  ixgbe_if_attach_pre(if_ctx_t ctx);
114 static int  ixgbe_if_attach_post(if_ctx_t ctx);
115 static int  ixgbe_if_detach(if_ctx_t ctx);
116 static int  ixgbe_if_shutdown(if_ctx_t ctx);
117 static int  ixgbe_if_suspend(if_ctx_t ctx);
118 static int  ixgbe_if_resume(if_ctx_t ctx);
119 
120 static void ixgbe_if_stop(if_ctx_t ctx);
121 void ixgbe_if_enable_intr(if_ctx_t ctx);
122 static void ixgbe_if_disable_intr(if_ctx_t ctx);
123 static void ixgbe_link_intr_enable(if_ctx_t ctx);
124 static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
125 static void ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
126 static int  ixgbe_if_media_change(if_ctx_t ctx);
127 static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
128 static int  ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
129 static void ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int strip);
130 static void ixgbe_if_multi_set(if_ctx_t ctx);
131 static int  ixgbe_if_promisc_set(if_ctx_t ctx, int flags);
132 static int  ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
133                                      uint64_t *paddrs, int ntxqs, int ntxqsets);
134 static int  ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
135                                      uint64_t *paddrs, int nrxqs, int nrxqsets);
136 static void ixgbe_if_queues_free(if_ctx_t ctx);
137 static void ixgbe_if_timer(if_ctx_t ctx, uint16_t);
138 static void ixgbe_if_update_admin_status(if_ctx_t ctx);
139 static void ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag);
140 static void ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
141 static int  ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
142 static bool ixgbe_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
143 int ixgbe_intr(void *arg);
144 
145 /************************************************************************
146  * Function prototypes
147  ************************************************************************/
148 #if __FreeBSD_version >= 1100036
149 static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
150 #endif
151 
152 static void ixgbe_enable_queue(struct adapter *adapter, u32 vector);
153 static void ixgbe_disable_queue(struct adapter *adapter, u32 vector);
154 static void ixgbe_add_device_sysctls(if_ctx_t ctx);
155 static int  ixgbe_allocate_pci_resources(if_ctx_t ctx);
156 static int  ixgbe_setup_low_power_mode(if_ctx_t ctx);
157 
158 static void ixgbe_config_dmac(struct adapter *adapter);
159 static void ixgbe_configure_ivars(struct adapter *adapter);
160 static void ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector,
161                            s8 type);
162 static u8   *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
163 static bool ixgbe_sfp_probe(if_ctx_t ctx);
164 
165 static void ixgbe_free_pci_resources(if_ctx_t ctx);
166 
167 static int  ixgbe_msix_link(void *arg);
168 static int  ixgbe_msix_que(void *arg);
169 static void ixgbe_initialize_rss_mapping(struct adapter *adapter);
170 static void ixgbe_initialize_receive_units(if_ctx_t ctx);
171 static void ixgbe_initialize_transmit_units(if_ctx_t ctx);
172 
173 static int  ixgbe_setup_interface(if_ctx_t ctx);
174 static void ixgbe_init_device_features(struct adapter *adapter);
175 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
176 static void ixgbe_sbuf_fw_version(struct ixgbe_hw *, struct sbuf *);
177 static void ixgbe_print_fw_version(if_ctx_t ctx);
178 static void ixgbe_add_media_types(if_ctx_t ctx);
179 static void ixgbe_update_stats_counters(struct adapter *adapter);
180 static void ixgbe_config_link(if_ctx_t ctx);
181 static void ixgbe_get_slot_info(struct adapter *);
182 static void ixgbe_check_wol_support(struct adapter *adapter);
183 static void ixgbe_enable_rx_drop(struct adapter *);
184 static void ixgbe_disable_rx_drop(struct adapter *);
185 
186 static void ixgbe_add_hw_stats(struct adapter *adapter);
187 static int  ixgbe_set_flowcntl(struct adapter *, int);
188 static int  ixgbe_set_advertise(struct adapter *, int);
189 static int  ixgbe_get_advertise(struct adapter *);
190 static void ixgbe_setup_vlan_hw_support(if_ctx_t ctx);
191 static void ixgbe_config_gpie(struct adapter *adapter);
192 static void ixgbe_config_delay_values(struct adapter *adapter);
193 
194 /* Sysctl handlers */
195 static int  ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
196 static int  ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
197 static int  ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
198 static int  ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
199 static int  ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
200 static int  ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
201 static int  ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS);
202 #ifdef IXGBE_DEBUG
203 static int  ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
204 static int  ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
205 #endif
206 static int  ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
207 static int  ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
208 static int  ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
209 static int  ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
210 static int  ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
211 static int  ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
212 static int  ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
213 
214 /* Deferred interrupt tasklets */
215 static void ixgbe_handle_msf(void *);
216 static void ixgbe_handle_mod(void *);
217 static void ixgbe_handle_phy(void *);
218 
219 /************************************************************************
220  *  FreeBSD Device Interface Entry Points
221  ************************************************************************/
222 static device_method_t ix_methods[] = {
223 	/* Device interface */
224 	DEVMETHOD(device_register, ixgbe_register),
225 	DEVMETHOD(device_probe, iflib_device_probe),
226 	DEVMETHOD(device_attach, iflib_device_attach),
227 	DEVMETHOD(device_detach, iflib_device_detach),
228 	DEVMETHOD(device_shutdown, iflib_device_shutdown),
229 	DEVMETHOD(device_suspend, iflib_device_suspend),
230 	DEVMETHOD(device_resume, iflib_device_resume),
231 #ifdef PCI_IOV
232 	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
233 	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
234 	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
235 #endif /* PCI_IOV */
236 	DEVMETHOD_END
237 };
238 
239 static driver_t ix_driver = {
240 	"ix", ix_methods, sizeof(struct adapter),
241 };
242 
243 devclass_t ix_devclass;
244 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
245 IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
246 MODULE_DEPEND(ix, pci, 1, 1, 1);
247 MODULE_DEPEND(ix, ether, 1, 1, 1);
248 MODULE_DEPEND(ix, iflib, 1, 1, 1);
249 
250 static device_method_t ixgbe_if_methods[] = {
251 	DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
252 	DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
253 	DEVMETHOD(ifdi_detach, ixgbe_if_detach),
254 	DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
255 	DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
256 	DEVMETHOD(ifdi_resume, ixgbe_if_resume),
257 	DEVMETHOD(ifdi_init, ixgbe_if_init),
258 	DEVMETHOD(ifdi_stop, ixgbe_if_stop),
259 	DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
260 	DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
261 	DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
262 	DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
263 	DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
264 	DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
265 	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
266 	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
267 	DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
268 	DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
269 	DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
270 	DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
271 	DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
272 	DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
273 	DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
274 	DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
275 	DEVMETHOD(ifdi_timer, ixgbe_if_timer),
276 	DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
277 	DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
278 	DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
279 	DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
280 	DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart),
281 #ifdef PCI_IOV
282 	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
283 	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
284 	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
285 #endif /* PCI_IOV */
286 	DEVMETHOD_END
287 };
288 
289 /*
290  * TUNEABLE PARAMETERS:
291  */
292 
293 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
294     "IXGBE driver parameters");
295 static driver_t ixgbe_if_driver = {
296   "ixgbe_if", ixgbe_if_methods, sizeof(struct adapter)
297 };
298 
299 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
300 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
301     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
302 
303 /* Flow control setting, default to full */
304 static int ixgbe_flow_control = ixgbe_fc_full;
305 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
306     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
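/*
 * Being CTLFLAG_RDTUN, this default is taken from the loader environment
 * at boot, e.g. in /boot/loader.conf:
 *     hw.ix.flow_control="0"
 * where the value follows enum ixgbe_fc_mode in the shared code
 * (0 = none, 1 = rx pause, 2 = tx pause, 3 = full).
 */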
307 
308 /* Advertise Speed, default to 0 (auto) */
309 static int ixgbe_advertise_speed = 0;
310 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
311     &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
312 
313 /*
314  * Smart speed setting, default to on.
315  * This currently works only as a compile-time
316  * option because it is applied during attach;
317  * set this to 'ixgbe_smart_speed_off' to
318  * disable.
319  */
320 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
321 
322 /*
323  * MSI-X should be the default for best performance,
324  * but this allows it to be forced off for testing.
325  */
326 static int ixgbe_enable_msix = 1;
327 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
328     "Enable MSI-X interrupts");
329 
330 /*
331  * Setting this allows the use of
332  * unsupported SFP+ modules; note that
333  * doing so is at your own risk :)
334  */
335 static int allow_unsupported_sfp = false;
336 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
337     &allow_unsupported_sfp, 0,
338     "Allow unsupported SFP modules...use at your own risk");
339 
340 /*
341  * Not sure if Flow Director is fully baked,
342  * so we'll default to turning it off.
343  */
344 static int ixgbe_enable_fdir = 0;
345 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
346     "Enable Flow Director");
347 
348 /* Receive-Side Scaling */
349 static int ixgbe_enable_rss = 1;
350 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
351     "Enable Receive-Side Scaling (RSS)");
352 
353 /*
354  * AIM: Adaptive Interrupt Moderation.
355  * The interrupt rate is varied over time
356  * based on the traffic seen on that
357  * interrupt vector.
358  */
359 static int ixgbe_enable_aim = false;
360 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
361     "Enable adaptive interrupt moderation");
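/*
 * Note: unlike the CTLFLAG_RDTUN tunables above, enable_aim is
 * CTLFLAG_RWTUN and may also be toggled at runtime, e.g. with
 * "sysctl hw.ix.enable_aim=1".
 */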
362 
363 #if 0
364 /* Keep running tab on them for sanity check */
365 static int ixgbe_total_ports;
366 #endif
367 
368 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
369 
370 /*
371  * For Flow Director: this is the TX packet sample rate for the
372  * filter pool; a value of 20 means every 20th packet is probed.
373  *
374  * This feature can be disabled by setting this to 0.
375  */
376 static int atr_sample_rate = 20;
377 
378 extern struct if_txrx ixgbe_txrx;
379 
380 static struct if_shared_ctx ixgbe_sctx_init = {
381 	.isc_magic = IFLIB_MAGIC,
382 	.isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
383 	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
384 	.isc_tx_maxsegsize = PAGE_SIZE,
385 	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
386 	.isc_tso_maxsegsize = PAGE_SIZE,
387 	.isc_rx_maxsize = PAGE_SIZE*4,
388 	.isc_rx_nsegments = 1,
389 	.isc_rx_maxsegsize = PAGE_SIZE*4,
390 	.isc_nfl = 1,
391 	.isc_ntxqs = 1,
392 	.isc_nrxqs = 1,
393 
394 	.isc_admin_intrcnt = 1,
395 	.isc_vendor_info = ixgbe_vendor_info_array,
396 	.isc_driver_version = ixgbe_driver_version,
397 	.isc_driver = &ixgbe_if_driver,
398 	.isc_flags = IFLIB_TSO_INIT_IP,
399 
400 	.isc_nrxd_min = {MIN_RXD},
401 	.isc_ntxd_min = {MIN_TXD},
402 	.isc_nrxd_max = {MAX_RXD},
403 	.isc_ntxd_max = {MAX_TXD},
404 	.isc_nrxd_default = {DEFAULT_RXD},
405 	.isc_ntxd_default = {DEFAULT_TXD},
406 };
407 
408 /************************************************************************
409  * ixgbe_if_tx_queues_alloc
410  ************************************************************************/
411 static int
412 ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
413                          int ntxqs, int ntxqsets)
414 {
415 	struct adapter     *adapter = iflib_get_softc(ctx);
416 	if_softc_ctx_t     scctx = adapter->shared;
417 	struct ix_tx_queue *que;
418 	int                i, j, error;
419 
420 	MPASS(adapter->num_tx_queues > 0);
421 	MPASS(adapter->num_tx_queues == ntxqsets);
422 	MPASS(ntxqs == 1);
423 
424 	/* Allocate queue structure memory */
425 	adapter->tx_queues =
426 	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
427 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
428 	if (!adapter->tx_queues) {
429 		device_printf(iflib_get_dev(ctx),
430 		    "Unable to allocate TX ring memory\n");
431 		return (ENOMEM);
432 	}
433 
434 	for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
435 		struct tx_ring *txr = &que->txr;
436 
437 		/* In case SR-IOV is enabled, align the index properly */
438 		txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
439 		    i);
440 
441 		txr->adapter = que->adapter = adapter;
442 
443 		/* Allocate report status array */
444 		txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
445 		if (txr->tx_rsq == NULL) {
446 			error = ENOMEM;
447 			goto fail;
448 		}
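		/* Mark all report-status slots unused until descriptors are queued. */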
449 		for (j = 0; j < scctx->isc_ntxd[0]; j++)
450 			txr->tx_rsq[j] = QIDX_INVALID;
451 		/* get the virtual and physical address of the hardware queues */
452 		txr->tail = IXGBE_TDT(txr->me);
453 		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
454 		txr->tx_paddr = paddrs[i];
455 
456 		txr->bytes = 0;
457 		txr->total_packets = 0;
458 
459 		/* Set the rate at which we sample packets */
460 		if (adapter->feat_en & IXGBE_FEATURE_FDIR)
461 			txr->atr_sample = atr_sample_rate;
462 
463 	}
464 
465 	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
466 	    adapter->num_tx_queues);
467 
468 	return (0);
469 
470 fail:
471 	ixgbe_if_queues_free(ctx);
472 
473 	return (error);
474 } /* ixgbe_if_tx_queues_alloc */
475 
476 /************************************************************************
477  * ixgbe_if_rx_queues_alloc
478  ************************************************************************/
479 static int
480 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
481                          int nrxqs, int nrxqsets)
482 {
483 	struct adapter     *adapter = iflib_get_softc(ctx);
484 	struct ix_rx_queue *que;
485 	int                i;
486 
487 	MPASS(adapter->num_rx_queues > 0);
488 	MPASS(adapter->num_rx_queues == nrxqsets);
489 	MPASS(nrxqs == 1);
490 
491 	/* Allocate queue structure memory */
492 	adapter->rx_queues =
493 	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
494 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
495 	if (!adapter->rx_queues) {
496 		device_printf(iflib_get_dev(ctx),
497 		    "Unable to allocate RX ring memory\n");
498 		return (ENOMEM);
499 	}
500 
501 	for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
502 		struct rx_ring *rxr = &que->rxr;
503 
504 		/* In case SR-IOV is enabled, align the index properly */
505 		rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
506 		    i);
507 
508 		rxr->adapter = que->adapter = adapter;
509 
510 		/* get the virtual and physical address of the hw queues */
511 		rxr->tail = IXGBE_RDT(rxr->me);
512 		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
513 		rxr->rx_paddr = paddrs[i];
514 		rxr->bytes = 0;
515 		rxr->que = que;
516 	}
517 
518 	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
519 	    adapter->num_rx_queues);
520 
521 	return (0);
522 } /* ixgbe_if_rx_queues_alloc */
523 
524 /************************************************************************
525  * ixgbe_if_queues_free
526  ************************************************************************/
527 static void
528 ixgbe_if_queues_free(if_ctx_t ctx)
529 {
530 	struct adapter     *adapter = iflib_get_softc(ctx);
531 	struct ix_tx_queue *tx_que = adapter->tx_queues;
532 	struct ix_rx_queue *rx_que = adapter->rx_queues;
533 	int                i;
534 
535 	if (tx_que != NULL) {
536 		for (i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
537 			struct tx_ring *txr = &tx_que->txr;
538 			if (txr->tx_rsq == NULL)
539 				break;
540 
541 			free(txr->tx_rsq, M_IXGBE);
542 			txr->tx_rsq = NULL;
543 		}
544 
545 		free(adapter->tx_queues, M_IXGBE);
546 		adapter->tx_queues = NULL;
547 	}
548 	if (rx_que != NULL) {
549 		free(adapter->rx_queues, M_IXGBE);
550 		adapter->rx_queues = NULL;
551 	}
552 } /* ixgbe_if_queues_free */
553 
554 /************************************************************************
555  * ixgbe_initialize_rss_mapping
556  ************************************************************************/
557 static void
558 ixgbe_initialize_rss_mapping(struct adapter *adapter)
559 {
560 	struct ixgbe_hw *hw = &adapter->hw;
561 	u32             reta = 0, mrqc, rss_key[10];
562 	int             queue_id, table_size, index_mult;
563 	int             i, j;
564 	u32             rss_hash_config;
565 
566 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
567 		/* Fetch the configured RSS key */
568 		rss_getkey((uint8_t *)&rss_key);
569 	} else {
570 		/* set up random bits */
571 		arc4rand(&rss_key, sizeof(rss_key), 0);
572 	}
573 
574 	/* Set multiplier for RETA setup and table size based on MAC */
575 	index_mult = 0x1;
576 	table_size = 128;
577 	switch (adapter->hw.mac.type) {
578 	case ixgbe_mac_82598EB:
579 		index_mult = 0x11;
580 		break;
581 	case ixgbe_mac_X550:
582 	case ixgbe_mac_X550EM_x:
583 	case ixgbe_mac_X550EM_a:
584 		table_size = 512;
585 		break;
586 	default:
587 		break;
588 	}
589 
590 	/* Set up the redirection table */
591 	for (i = 0, j = 0; i < table_size; i++, j++) {
592 		if (j == adapter->num_rx_queues)
593 			j = 0;
594 
595 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
596 			/*
597 			 * Fetch the RSS bucket id for the given indirection
598 			 * entry. Cap it at the number of configured buckets
599 			 * (which is num_rx_queues.)
600 			 */
601 			queue_id = rss_get_indirection_to_bucket(i);
602 			queue_id = queue_id % adapter->num_rx_queues;
603 		} else
604 			queue_id = (j * index_mult);
605 
606 		/*
607 		 * The low 8 bits are for hash value (n+0);
608 		 * The next 8 bits are for hash value (n+1), etc.
609 		 */
610 		reta = reta >> 8;
611 		reta = reta | (((uint32_t)queue_id) << 24);
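		/*
		 * Every fourth entry completes a 32-bit RETA word; entries
		 * 0-127 are written to RETA, the remainder (on the larger
		 * X550-class tables) to ERETA.
		 */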
612 		if ((i & 3) == 3) {
613 			if (i < 128)
614 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
615 			else
616 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
617 				    reta);
618 			reta = 0;
619 		}
620 	}
621 
622 	/* Now fill our hash function seeds */
623 	for (i = 0; i < 10; i++)
624 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
625 
626 	/* Perform hash on these packet types */
627 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
628 		rss_hash_config = rss_gethashconfig();
629 	else {
630 		/*
631 		 * Disable UDP - IP fragments aren't currently being handled
632 		 * and so we end up with a mix of 2-tuple and 4-tuple
633 		 * traffic.
634 		 */
635 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
636 		                | RSS_HASHTYPE_RSS_TCP_IPV4
637 		                | RSS_HASHTYPE_RSS_IPV6
638 		                | RSS_HASHTYPE_RSS_TCP_IPV6
639 		                | RSS_HASHTYPE_RSS_IPV6_EX
640 		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
641 	}
642 
643 	mrqc = IXGBE_MRQC_RSSEN;
644 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
645 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
646 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
647 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
648 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
649 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
650 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
651 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
652 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
653 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
654 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
655 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
656 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
657 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
658 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
659 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
660 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
661 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
662 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
663 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
664 } /* ixgbe_initialize_rss_mapping */
665 
666 /************************************************************************
667  * ixgbe_initialize_receive_units - Setup receive registers and features.
668  ************************************************************************/
669 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
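/*
 * BSIZEPKT_ROUNDUP rounds the Rx buffer size up to the 1 KB granularity
 * used by the SRRCTL.BSIZEPKT field below.
 */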
670 
671 static void
672 ixgbe_initialize_receive_units(if_ctx_t ctx)
673 {
674 	struct adapter     *adapter = iflib_get_softc(ctx);
675 	if_softc_ctx_t     scctx = adapter->shared;
676 	struct ixgbe_hw    *hw = &adapter->hw;
677 	struct ifnet       *ifp = iflib_get_ifp(ctx);
678 	struct ix_rx_queue *que;
679 	int                i, j;
680 	u32                bufsz, fctrl, srrctl, rxcsum;
681 	u32                hlreg;
682 
683 	/*
684 	 * Make sure receives are disabled while
685 	 * setting up the descriptor ring
686 	 */
687 	ixgbe_disable_rx(hw);
688 
689 	/* Enable broadcasts */
690 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
691 	fctrl |= IXGBE_FCTRL_BAM;
692 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
693 		fctrl |= IXGBE_FCTRL_DPF;
694 		fctrl |= IXGBE_FCTRL_PMCF;
695 	}
696 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
697 
698 	/* Set for Jumbo Frames? */
699 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
700 	if (ifp->if_mtu > ETHERMTU)
701 		hlreg |= IXGBE_HLREG0_JUMBOEN;
702 	else
703 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
704 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
705 
706 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
707 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
708 
709 	/* Setup the Base and Length of the Rx Descriptor Ring */
710 	for (i = 0, que = adapter->rx_queues; i < adapter->num_rx_queues; i++, que++) {
711 		struct rx_ring *rxr = &que->rxr;
712 		u64            rdba = rxr->rx_paddr;
713 
714 		j = rxr->me;
715 
716 		/* Setup the Base and Length of the Rx Descriptor Ring */
717 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
718 		    (rdba & 0x00000000ffffffffULL));
719 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
720 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
721 		     scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
722 
723 		/* Set up the SRRCTL register */
724 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
725 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
726 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
727 		srrctl |= bufsz;
728 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
729 
730 		/*
731 		 * Set DROP_EN iff we have no flow control and >1 queue.
732 		 * Note that srrctl was cleared shortly before during reset,
733 		 * so we do not need to clear the bit, but do it just in case
734 		 * this code is moved elsewhere.
735 		 */
736 		if (adapter->num_rx_queues > 1 &&
737 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
738 			srrctl |= IXGBE_SRRCTL_DROP_EN;
739 		} else {
740 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
741 		}
742 
743 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
744 
745 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
746 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
747 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
748 
749 		/* Set the driver rx tail address */
750 		rxr->tail =  IXGBE_RDT(rxr->me);
751 	}
752 
753 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
754 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
755 		            | IXGBE_PSRTYPE_UDPHDR
756 		            | IXGBE_PSRTYPE_IPV4HDR
757 		            | IXGBE_PSRTYPE_IPV6HDR;
758 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
759 	}
760 
761 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
762 
763 	ixgbe_initialize_rss_mapping(adapter);
764 
765 	if (adapter->num_rx_queues > 1) {
766 		/* RSS and RX IPP Checksum are mutually exclusive */
767 		rxcsum |= IXGBE_RXCSUM_PCSD;
768 	}
769 
770 	if (ifp->if_capenable & IFCAP_RXCSUM)
771 		rxcsum |= IXGBE_RXCSUM_PCSD;
772 
773 	/* This is useful for calculating UDP/IP fragment checksums */
774 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
775 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
776 
777 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
778 
779 } /* ixgbe_initialize_receive_units */
780 
781 /************************************************************************
782  * ixgbe_initialize_transmit_units - Enable transmit units.
783  ************************************************************************/
784 static void
785 ixgbe_initialize_transmit_units(if_ctx_t ctx)
786 {
787 	struct adapter     *adapter = iflib_get_softc(ctx);
788 	struct ixgbe_hw    *hw = &adapter->hw;
789 	if_softc_ctx_t     scctx = adapter->shared;
790 	struct ix_tx_queue *que;
791 	int i;
792 
793 	/* Setup the Base and Length of the Tx Descriptor Ring */
794 	for (i = 0, que = adapter->tx_queues; i < adapter->num_tx_queues;
795 	    i++, que++) {
796 		struct tx_ring	   *txr = &que->txr;
797 		u64 tdba = txr->tx_paddr;
798 		u32 txctrl = 0;
799 		int j = txr->me;
800 
801 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
802 		    (tdba & 0x00000000ffffffffULL));
803 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
804 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
805 		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));
806 
807 		/* Setup the HW Tx Head and Tail descriptor pointers */
808 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
809 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
810 
811 		/* Cache the tail address */
812 		txr->tail = IXGBE_TDT(txr->me);
813 
814 		txr->tx_rs_cidx = txr->tx_rs_pidx;
815 		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
816 		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
817 			txr->tx_rsq[k] = QIDX_INVALID;
818 
819 		/* Disable Head Writeback */
820 		/*
821 		 * Note: for X550 series devices, these registers are actually
822 		 * prefixed with TPH_ instead of DCA_, but the addresses and
823 		 * fields remain the same.
824 		 */
825 		switch (hw->mac.type) {
826 		case ixgbe_mac_82598EB:
827 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
828 			break;
829 		default:
830 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
831 			break;
832 		}
833 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
834 		switch (hw->mac.type) {
835 		case ixgbe_mac_82598EB:
836 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
837 			break;
838 		default:
839 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
840 			break;
841 		}
842 
843 	}
844 
845 	if (hw->mac.type != ixgbe_mac_82598EB) {
846 		u32 dmatxctl, rttdcs;
847 
848 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
849 		dmatxctl |= IXGBE_DMATXCTL_TE;
850 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
851 		/* Disable arbiter to set MTQC */
852 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
853 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
854 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
855 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
856 		    ixgbe_get_mtqc(adapter->iov_mode));
857 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
858 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
859 	}
860 
861 } /* ixgbe_initialize_transmit_units */
862 
863 /************************************************************************
864  * ixgbe_register
865  ************************************************************************/
866 static void *
867 ixgbe_register(device_t dev)
868 {
869 	return (&ixgbe_sctx_init);
870 } /* ixgbe_register */
871 
872 /************************************************************************
873  * ixgbe_if_attach_pre - Device initialization routine, part 1
874  *
875  *   Called when the driver is being loaded.
876  *   Identifies the type of hardware, initializes the hardware,
877  *   and initializes iflib structures.
878  *
879  *   return 0 on success, positive on failure
880  ************************************************************************/
881 static int
882 ixgbe_if_attach_pre(if_ctx_t ctx)
883 {
884 	struct adapter  *adapter;
885 	device_t        dev;
886 	if_softc_ctx_t  scctx;
887 	struct ixgbe_hw *hw;
888 	int             error = 0;
889 	u32             ctrl_ext;
890 
891 	INIT_DEBUGOUT("ixgbe_attach: begin");
892 
893 	/* Allocate, clear, and link in our adapter structure */
894 	dev = iflib_get_dev(ctx);
895 	adapter = iflib_get_softc(ctx);
896 	adapter->hw.back = adapter;
897 	adapter->ctx = ctx;
898 	adapter->dev = dev;
899 	scctx = adapter->shared = iflib_get_softc_ctx(ctx);
900 	adapter->media = iflib_get_media(ctx);
901 	hw = &adapter->hw;
902 
903 	/* Determine hardware revision */
904 	hw->vendor_id = pci_get_vendor(dev);
905 	hw->device_id = pci_get_device(dev);
906 	hw->revision_id = pci_get_revid(dev);
907 	hw->subsystem_vendor_id = pci_get_subvendor(dev);
908 	hw->subsystem_device_id = pci_get_subdevice(dev);
909 
910 	/* Do base PCI setup - map BAR0 */
911 	if (ixgbe_allocate_pci_resources(ctx)) {
912 		device_printf(dev, "Allocation of PCI resources failed\n");
913 		return (ENXIO);
914 	}
915 
916 	/* let hardware know driver is loaded */
917 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
918 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
919 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
920 
921 	/*
922 	 * Initialize the shared code
923 	 */
924 	if (ixgbe_init_shared_code(hw) != 0) {
925 		device_printf(dev, "Unable to initialize the shared code\n");
926 		error = ENXIO;
927 		goto err_pci;
928 	}
929 
930 	if (hw->mbx.ops.init_params)
931 		hw->mbx.ops.init_params(hw);
932 
933 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
934 
935 	if (hw->mac.type != ixgbe_mac_82598EB)
936 		hw->phy.smart_speed = ixgbe_smart_speed;
937 
938 	ixgbe_init_device_features(adapter);
939 
940 	/* Enable WoL (if supported) */
941 	ixgbe_check_wol_support(adapter);
942 
943 	/* Verify adapter fan is still functional (if applicable) */
944 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
945 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
946 		ixgbe_check_fan_failure(adapter, esdp, false);
947 	}
948 
949 	/* Ensure SW/FW semaphore is free */
950 	ixgbe_init_swfw_semaphore(hw);
951 
952 	/* Set an initial default flow control value */
953 	hw->fc.requested_mode = ixgbe_flow_control;
954 
955 	hw->phy.reset_if_overtemp = true;
956 	error = ixgbe_reset_hw(hw);
957 	hw->phy.reset_if_overtemp = false;
958 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
959 		/*
960 		 * No optics in this port, set up
961 		 * so the timer routine will probe
962 		 * for later insertion.
963 		 */
964 		adapter->sfp_probe = true;
965 		error = 0;
966 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
967 		device_printf(dev, "Unsupported SFP+ module detected!\n");
968 		error = EIO;
969 		goto err_pci;
970 	} else if (error) {
971 		device_printf(dev, "Hardware initialization failed\n");
972 		error = EIO;
973 		goto err_pci;
974 	}
975 
976 	/* Make sure we have a good EEPROM before we read from it */
977 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
978 		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
979 		error = EIO;
980 		goto err_pci;
981 	}
982 
983 	error = ixgbe_start_hw(hw);
984 	switch (error) {
985 	case IXGBE_ERR_EEPROM_VERSION:
986 		device_printf(dev, "This device is a pre-production adapter/LOM.  Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
987 		break;
988 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
989 		device_printf(dev, "Unsupported SFP+ Module\n");
990 		error = EIO;
991 		goto err_pci;
992 	case IXGBE_ERR_SFP_NOT_PRESENT:
993 		device_printf(dev, "No SFP+ Module found\n");
994 		/* falls thru */
995 	default:
996 		break;
997 	}
998 
999 	/* Most of the iflib initialization... */
1000 
1001 	iflib_set_mac(ctx, hw->mac.addr);
1002 	switch (adapter->hw.mac.type) {
1003 	case ixgbe_mac_X550:
1004 	case ixgbe_mac_X550EM_x:
1005 	case ixgbe_mac_X550EM_a:
1006 		scctx->isc_rss_table_size = 512;
1007 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
1008 		break;
1009 	default:
1010 		scctx->isc_rss_table_size = 128;
1011 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
1012 	}
1013 
1014 	/* Allow legacy interrupts */
1015 	ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
1016 
1017 	scctx->isc_txqsizes[0] =
1018 	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
1019 	    sizeof(u32), DBA_ALIGN);
1020 	scctx->isc_rxqsizes[0] =
1021 	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
1022 	    DBA_ALIGN);
1023 
1024 	/* XXX */
1025 	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1026 	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
1027 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1028 		scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
1029 	} else {
1030 		scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
1031 		scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
1032 	}
1033 
1034 	scctx->isc_msix_bar = pci_msix_table_bar(dev);
1035 
1036 	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
1037 	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
1038 	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
1039 
1040 	scctx->isc_txrx = &ixgbe_txrx;
1041 
1042 	scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;
1043 
1044 	return (0);
1045 
1046 err_pci:
1047 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1048 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1049 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1050 	ixgbe_free_pci_resources(ctx);
1051 
1052 	return (error);
1053 } /* ixgbe_if_attach_pre */
1054 
1055  /*********************************************************************
1056  * ixgbe_if_attach_post - Device initialization routine, part 2
1057  *
1058  *   Called during driver load, but after interrupts and
1059  *   resources have been allocated and configured.
1060  *   Sets up some data structures not relevant to iflib.
1061  *
1062  *   return 0 on success, positive on failure
1063  *********************************************************************/
1064 static int
1065 ixgbe_if_attach_post(if_ctx_t ctx)
1066 {
1067 	device_t dev;
1068 	struct adapter  *adapter;
1069 	struct ixgbe_hw *hw;
1070 	int             error = 0;
1071 
1072 	dev = iflib_get_dev(ctx);
1073 	adapter = iflib_get_softc(ctx);
1074 	hw = &adapter->hw;
1075 
1076 
1077 	if (adapter->intr_type == IFLIB_INTR_LEGACY &&
1078 		(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
1079 		device_printf(dev, "Device does not support legacy interrupts");
1080 		error = ENXIO;
1081 		goto err;
1082 	}
1083 
1084 	/* Allocate multicast array memory. */
1085 	adapter->mta = malloc(sizeof(*adapter->mta) *
1086 	                      MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
1087 	if (adapter->mta == NULL) {
1088 		device_printf(dev, "Can not allocate multicast setup array\n");
1089 		error = ENOMEM;
1090 		goto err;
1091 	}
1092 
1093 	/* hw.ix defaults init */
1094 	ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
1095 
1096 	/* Enable the optics for 82599 SFP+ fiber */
1097 	ixgbe_enable_tx_laser(hw);
1098 
1099 	/* Enable power to the phy. */
1100 	ixgbe_set_phy_power(hw, true);
1101 
1102 	ixgbe_initialize_iov(adapter);
1103 
1104 	error = ixgbe_setup_interface(ctx);
1105 	if (error) {
1106 		device_printf(dev, "Interface setup failed: %d\n", error);
1107 		goto err;
1108 	}
1109 
1110 	ixgbe_if_update_admin_status(ctx);
1111 
1112 	/* Initialize statistics */
1113 	ixgbe_update_stats_counters(adapter);
1114 	ixgbe_add_hw_stats(adapter);
1115 
1116 	/* Check PCIE slot type/speed/width */
1117 	ixgbe_get_slot_info(adapter);
1118 
1119 	/*
1120 	 * Do time init and sysctl init here, but
1121 	 * only on the first port of a bypass adapter.
1122 	 */
1123 	ixgbe_bypass_init(adapter);
1124 
1125 	/* Display NVM and Option ROM versions */
1126 	ixgbe_print_fw_version(ctx);
1127 
1128 	/* Set an initial dmac value */
1129 	adapter->dmac = 0;
1130 	/* Set initial advertised speeds (if applicable) */
1131 	adapter->advertise = ixgbe_get_advertise(adapter);
1132 
1133 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1134 		ixgbe_define_iov_schemas(dev, &error);
1135 
1136 	/* Add sysctls */
1137 	ixgbe_add_device_sysctls(ctx);
1138 
1139 	return (0);
1140 err:
1141 	return (error);
1142 } /* ixgbe_if_attach_post */
1143 
1144 /************************************************************************
1145  * ixgbe_check_wol_support
1146  *
1147  *   Checks whether the adapter's ports are capable of
1148  *   Wake On LAN by reading the adapter's NVM.
1149  *
1150  *   Sets each port's hw->wol_enabled value depending
1151  *   on the value read here.
1152  ************************************************************************/
1153 static void
1154 ixgbe_check_wol_support(struct adapter *adapter)
1155 {
1156 	struct ixgbe_hw *hw = &adapter->hw;
1157 	u16             dev_caps = 0;
1158 
1159 	/* Find out WoL support for port */
1160 	adapter->wol_support = hw->wol_enabled = 0;
1161 	ixgbe_get_device_caps(hw, &dev_caps);
1162 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1163 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1164 	     hw->bus.func == 0))
1165 		adapter->wol_support = hw->wol_enabled = 1;
1166 
1167 	/* Save initial wake up filter configuration */
1168 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1169 
1170 	return;
1171 } /* ixgbe_check_wol_support */
1172 
1173 /************************************************************************
1174  * ixgbe_setup_interface
1175  *
1176  *   Setup networking device structure and register an interface.
1177  ************************************************************************/
1178 static int
1179 ixgbe_setup_interface(if_ctx_t ctx)
1180 {
1181 	struct ifnet   *ifp = iflib_get_ifp(ctx);
1182 	struct adapter *adapter = iflib_get_softc(ctx);
1183 
1184 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1185 
1186 	if_setbaudrate(ifp, IF_Gbps(10));
1187 
1188 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1189 
1190 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1191 
1192 	ixgbe_add_media_types(ctx);
1193 
1194 	/* Autoselect media by default */
1195 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
1196 
1197 	return (0);
1198 } /* ixgbe_setup_interface */
1199 
1200 /************************************************************************
1201  * ixgbe_if_get_counter
1202  ************************************************************************/
1203 static uint64_t
1204 ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1205 {
1206 	struct adapter *adapter = iflib_get_softc(ctx);
1207 	if_t           ifp = iflib_get_ifp(ctx);
1208 
1209 	switch (cnt) {
1210 	case IFCOUNTER_IPACKETS:
1211 		return (adapter->ipackets);
1212 	case IFCOUNTER_OPACKETS:
1213 		return (adapter->opackets);
1214 	case IFCOUNTER_IBYTES:
1215 		return (adapter->ibytes);
1216 	case IFCOUNTER_OBYTES:
1217 		return (adapter->obytes);
1218 	case IFCOUNTER_IMCASTS:
1219 		return (adapter->imcasts);
1220 	case IFCOUNTER_OMCASTS:
1221 		return (adapter->omcasts);
1222 	case IFCOUNTER_COLLISIONS:
1223 		return (0);
1224 	case IFCOUNTER_IQDROPS:
1225 		return (adapter->iqdrops);
1226 	case IFCOUNTER_OQDROPS:
1227 		return (0);
1228 	case IFCOUNTER_IERRORS:
1229 		return (adapter->ierrors);
1230 	default:
1231 		return (if_get_counter_default(ifp, cnt));
1232 	}
1233 } /* ixgbe_if_get_counter */
1234 
1235 /************************************************************************
1236  * ixgbe_if_i2c_req
1237  ************************************************************************/
1238 static int
1239 ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1240 {
1241 	struct adapter		*adapter = iflib_get_softc(ctx);
1242 	struct ixgbe_hw 	*hw = &adapter->hw;
1243 	int 			i;
1244 
1245 
1246 	if (hw->phy.ops.read_i2c_byte == NULL)
1247 		return (ENXIO);
1248 	for (i = 0; i < req->len; i++)
1249 		hw->phy.ops.read_i2c_byte(hw, req->offset + i,
1250 		    req->dev_addr, &req->data[i]);
1251 	return (0);
1252 } /* ixgbe_if_i2c_req */
1253 
1254 /* ixgbe_if_needs_restart - Tell iflib when the driver needs to be reinitialized
1255  * @ctx: iflib context
1256  * @event: event code to check
1257  *
1258  * Defaults to returning true for unknown events.
1259  *
1260  * @returns true if iflib needs to reinit the interface
1261  */
1262 static bool
1263 ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
1264 {
1265 	switch (event) {
1266 	case IFLIB_RESTART_VLAN_CONFIG:
1267 		return (false);
1268 	default:
1269 		return (true);
1270 	}
1271 }
1272 
1273 /************************************************************************
1274  * ixgbe_add_media_types
1275  ************************************************************************/
1276 static void
1277 ixgbe_add_media_types(if_ctx_t ctx)
1278 {
1279 	struct adapter  *adapter = iflib_get_softc(ctx);
1280 	struct ixgbe_hw *hw = &adapter->hw;
1281 	device_t        dev = iflib_get_dev(ctx);
1282 	u64             layer;
1283 
1284 	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
1285 
1286 	/* Media types with matching FreeBSD media defines */
1287 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1288 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1289 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1290 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1291 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1292 		ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1293 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1294 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1295 
1296 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1297 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1298 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
1299 		    NULL);
1300 
1301 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1302 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1303 		if (hw->phy.multispeed_fiber)
1304 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_LX, 0,
1305 			    NULL);
1306 	}
1307 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1308 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1309 		if (hw->phy.multispeed_fiber)
1310 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0,
1311 			    NULL);
1312 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1313 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1314 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1315 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1316 
1317 #ifdef IFM_ETH_XTYPE
1318 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1319 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1320 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1321 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1322 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1323 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1324 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1325 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
1326 #else
1327 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1328 		device_printf(dev, "Media supported: 10GbaseKR\n");
1329 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1330 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1331 	}
1332 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1333 		device_printf(dev, "Media supported: 10GbaseKX4\n");
1334 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1335 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1336 	}
1337 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1338 		device_printf(dev, "Media supported: 1000baseKX\n");
1339 		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1340 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1341 	}
1342 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1343 		device_printf(dev, "Media supported: 2500baseKX\n");
1344 		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1345 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
1346 	}
1347 #endif
1348 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1349 		device_printf(dev, "Media supported: 1000baseBX\n");
1350 
1351 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1352 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1353 		    0, NULL);
1354 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1355 	}
1356 
1357 	ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1358 } /* ixgbe_add_media_types */
1359 
1360 /************************************************************************
1361  * ixgbe_is_sfp
1362  ************************************************************************/
1363 static inline bool
1364 ixgbe_is_sfp(struct ixgbe_hw *hw)
1365 {
1366 	switch (hw->mac.type) {
1367 	case ixgbe_mac_82598EB:
1368 		if (hw->phy.type == ixgbe_phy_nl)
1369 			return (true);
1370 		return (false);
1371 	case ixgbe_mac_82599EB:
1372 		switch (hw->mac.ops.get_media_type(hw)) {
1373 		case ixgbe_media_type_fiber:
1374 		case ixgbe_media_type_fiber_qsfp:
1375 			return (true);
1376 		default:
1377 			return (false);
1378 		}
1379 	case ixgbe_mac_X550EM_x:
1380 	case ixgbe_mac_X550EM_a:
1381 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1382 			return (true);
1383 		return (false);
1384 	default:
1385 		return (false);
1386 	}
1387 } /* ixgbe_is_sfp */
1388 
1389 /************************************************************************
1390  * ixgbe_config_link
1391  ************************************************************************/
1392 static void
1393 ixgbe_config_link(if_ctx_t ctx)
1394 {
1395 	struct adapter  *adapter = iflib_get_softc(ctx);
1396 	struct ixgbe_hw *hw = &adapter->hw;
1397 	u32             autoneg, err = 0;
1398 	bool            sfp, negotiate;
1399 
1400 	sfp = ixgbe_is_sfp(hw);
1401 
1402 	if (sfp) {
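		/* Defer SFP module identification/setup to the admin (mod) task. */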
1403 		adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
1404 		iflib_admin_intr_deferred(ctx);
1405 	} else {
1406 		if (hw->mac.ops.check_link)
1407 			err = ixgbe_check_link(hw, &adapter->link_speed,
1408 			    &adapter->link_up, false);
1409 		if (err)
1410 			return;
1411 		autoneg = hw->phy.autoneg_advertised;
1412 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1413 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1414 			    &negotiate);
1415 		if (err)
1416 			return;
1417 		if (hw->mac.ops.setup_link)
1418 			err = hw->mac.ops.setup_link(hw, autoneg,
1419 			    adapter->link_up);
1420 	}
1421 } /* ixgbe_config_link */
1422 
1423 /************************************************************************
1424  * ixgbe_update_stats_counters - Update board statistics counters.
1425  ************************************************************************/
1426 static void
1427 ixgbe_update_stats_counters(struct adapter *adapter)
1428 {
1429 	struct ixgbe_hw       *hw = &adapter->hw;
1430 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1431 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
1432 	u32                   lxoffrxc;
1433 	u64                   total_missed_rx = 0;
1434 
1435 	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1436 	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1437 	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1438 	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1439 	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1440 
1441 	for (int i = 0; i < 16; i++) {
1442 		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1443 		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1444 		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1445 	}
1446 	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1447 	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1448 	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1449 
1450 	/* Hardware workaround, gprc counts missed packets */
1451 	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1452 	stats->gprc -= missed_rx;
1453 
1454 	if (hw->mac.type != ixgbe_mac_82598EB) {
1455 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1456 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1457 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1458 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1459 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1460 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1461 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1462 		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1463 		stats->lxoffrxc += lxoffrxc;
1464 	} else {
1465 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1466 		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1467 		stats->lxoffrxc += lxoffrxc;
1468 		/* 82598 only has a counter in the high register */
1469 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1470 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1471 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1472 	}
1473 
1474 	/*
1475 	 * For watchdog management we need to know if we have been paused
1476 	 * during the last interval, so capture that here.
1477 	 */
1478 	if (lxoffrxc)
1479 		adapter->shared->isc_pause_frames = 1;
1480 
1481 	/*
1482 	 * Workaround: mprc hardware is incorrectly counting
1483 	 * broadcasts, so for now we subtract those.
1484 	 */
1485 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1486 	stats->bprc += bprc;
1487 	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1488 	if (hw->mac.type == ixgbe_mac_82598EB)
1489 		stats->mprc -= bprc;
1490 
1491 	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1492 	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1493 	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1494 	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1495 	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1496 	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1497 
1498 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1499 	stats->lxontxc += lxon;
1500 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1501 	stats->lxofftxc += lxoff;
1502 	total = lxon + lxoff;
1503 
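	/* XON/XOFF pause frames are counted in the TX good/size counters; back them out below (a min-length frame each) */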
1504 	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1505 	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1506 	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1507 	stats->gptc -= total;
1508 	stats->mptc -= total;
1509 	stats->ptc64 -= total;
1510 	stats->gotc -= total * ETHER_MIN_LEN;
1511 
1512 	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1513 	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1514 	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1515 	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1516 	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1517 	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1518 	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1519 	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1520 	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1521 	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1522 	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1523 	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1524 	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1525 	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1526 	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1527 	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1528 	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1529 	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1530 	/* Only read FCOE registers on non-82598 devices */
1531 	if (hw->mac.type != ixgbe_mac_82598EB) {
1532 		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1533 		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1534 		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1535 		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1536 		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1537 	}
1538 
1539 	/* Fill out the OS statistics structure */
1540 	IXGBE_SET_IPACKETS(adapter, stats->gprc);
1541 	IXGBE_SET_OPACKETS(adapter, stats->gptc);
1542 	IXGBE_SET_IBYTES(adapter, stats->gorc);
1543 	IXGBE_SET_OBYTES(adapter, stats->gotc);
1544 	IXGBE_SET_IMCASTS(adapter, stats->mprc);
1545 	IXGBE_SET_OMCASTS(adapter, stats->mptc);
1546 	IXGBE_SET_COLLISIONS(adapter, 0);
1547 	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
1548 
1549 	/*
1550 	 * Aggregate following types of errors as RX errors:
1551 	 * - CRC error count,
1552 	 * - illegal byte error count,
1553 	 * - checksum error count,
1554 	 * - missed packets count,
1555 	 * - length error count,
1556 	 * - undersized packets count,
1557 	 * - fragmented packets count,
1558 	 * - oversized packets count,
1559 	 * - jabber count.
1560 	 */
1561 	IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->illerrc + stats->xec +
1562 	    stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc + stats->roc +
1563 	    stats->rjc);
1564 } /* ixgbe_update_stats_counters */
1565 
1566 /************************************************************************
1567  * ixgbe_add_hw_stats
1568  *
1569  *   Add sysctl variables, one per statistic, to the system.
1570  ************************************************************************/
1571 static void
1572 ixgbe_add_hw_stats(struct adapter *adapter)
1573 {
1574 	device_t               dev = iflib_get_dev(adapter->ctx);
1575 	struct ix_rx_queue     *rx_que;
1576 	struct ix_tx_queue     *tx_que;
1577 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1578 	struct sysctl_oid      *tree = device_get_sysctl_tree(dev);
1579 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1580 	struct ixgbe_hw_stats  *stats = &adapter->stats.pf;
1581 	struct sysctl_oid      *stat_node, *queue_node;
1582 	struct sysctl_oid_list *stat_list, *queue_list;
1583 	int                    i;
1584 
1585 #define QUEUE_NAME_LEN 32
1586 	char                   namebuf[QUEUE_NAME_LEN];
1587 
1588 	/* Driver Statistics */
1589 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1590 	    CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1591 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1592 	    CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1593 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1594 	    CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1595 
1596 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
1597 		struct tx_ring *txr = &tx_que->txr;
1598 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1599 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1600 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1601 		queue_list = SYSCTL_CHILDREN(queue_node);
1602 
1603 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1604 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0,
1605 		    ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1606 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1607 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0,
1608 		    ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1609 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1610 		    CTLFLAG_RD, &txr->tso_tx, "TSO");
1611 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1612 		    CTLFLAG_RD, &txr->total_packets,
1613 		    "Queue Packets Transmitted");
1614 	}
1615 
1616 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
1617 		struct rx_ring *rxr = &rx_que->rxr;
1618 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1619 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1620 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1621 		queue_list = SYSCTL_CHILDREN(queue_node);
1622 
1623 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1624 		    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
1625 		    &adapter->rx_queues[i], 0,
1626 		    ixgbe_sysctl_interrupt_rate_handler, "IU",
1627 		    "Interrupt Rate");
1628 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1629 		    CTLFLAG_RD, &(adapter->rx_queues[i].irqs),
1630 		    "irqs on this queue");
1631 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1632 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0,
1633 		    ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1634 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1635 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0,
1636 		    ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1637 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1638 		    CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1639 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1640 		    CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1641 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1642 		    CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1643 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1644 		    CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1645 	}
1646 
1647 	/* MAC stats get their own sub node */
1648 
1649 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1650 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
1651 	stat_list = SYSCTL_CHILDREN(stat_node);
1652 
1653 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_errs",
1654 	    CTLFLAG_RD, &adapter->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS);
1655 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1656 	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1657 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1658 	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1659 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1660 	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
1661 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1662 	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1663 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1664 	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1665 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1666 	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1667 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1668 	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1669 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1670 	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1671 
1672 	/* Flow Control stats */
1673 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1674 	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1675 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1676 	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1677 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1678 	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1679 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1680 	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1681 
1682 	/* Packet Reception Stats */
1683 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1684 	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
1685 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1686 	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1687 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1688 	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1689 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1690 	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1691 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1692 	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1693 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1694 	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1695 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1696 	    CTLFLAG_RD, &stats->prc64, "64 byte frames received ");
1697 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1698 	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1699 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1700 	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1701 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1702 	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1703 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1704 	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1705 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1706 	    CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
1707 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1708 	    CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1709 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1710 	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received ");
1711 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1712 	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1713 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1714 	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
1715 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1716 	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1717 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1718 	    CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
1719 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1720 	    CTLFLAG_RD, &stats->xec, "Checksum Errors");
1721 
1722 	/* Packet Transmission Stats */
1723 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1724 	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1725 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1726 	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1727 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1728 	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1729 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1730 	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1731 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1732 	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1733 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1734 	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1735 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1736 	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted ");
1737 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1738 	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1739 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1740 	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1741 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1742 	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1743 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1744 	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1745 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1746 	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1747 } /* ixgbe_add_hw_stats */
1748 
1749 /************************************************************************
1750  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1751  *
1752  *   Retrieves the TDH value from the hardware
1753  ************************************************************************/
1754 static int
1755 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1756 {
1757 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1758 	int            error;
1759 	unsigned int   val;
1760 
1761 	if (!txr)
1762 		return (0);
1763 
1764 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1765 	error = sysctl_handle_int(oidp, &val, 0, req);
1766 	if (error || !req->newptr)
1767 		return error;
1768 
1769 	return (0);
1770 } /* ixgbe_sysctl_tdh_handler */
1771 
1772 /************************************************************************
1773  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1774  *
1775  *   Retrieves the TDT value from the hardware
1776  ************************************************************************/
1777 static int
1778 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1779 {
1780 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1781 	int            error;
1782 	unsigned int   val;
1783 
1784 	if (!txr)
1785 		return (0);
1786 
1787 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1788 	error = sysctl_handle_int(oidp, &val, 0, req);
1789 	if (error || !req->newptr)
1790 		return error;
1791 
1792 	return (0);
1793 } /* ixgbe_sysctl_tdt_handler */
1794 
1795 /************************************************************************
1796  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1797  *
1798  *   Retrieves the RDH value from the hardware
1799  ************************************************************************/
1800 static int
1801 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1802 {
1803 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1804 	int            error;
1805 	unsigned int   val;
1806 
1807 	if (!rxr)
1808 		return (0);
1809 
1810 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
1811 	error = sysctl_handle_int(oidp, &val, 0, req);
1812 	if (error || !req->newptr)
1813 		return error;
1814 
1815 	return (0);
1816 } /* ixgbe_sysctl_rdh_handler */
1817 
1818 /************************************************************************
1819  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1820  *
1821  *   Retrieves the RDT value from the hardware
1822  ************************************************************************/
1823 static int
1824 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1825 {
1826 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1827 	int            error;
1828 	unsigned int   val;
1829 
1830 	if (!rxr)
1831 		return (0);
1832 
1833 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
1834 	error = sysctl_handle_int(oidp, &val, 0, req);
1835 	if (error || !req->newptr)
1836 		return error;
1837 
1838 	return (0);
1839 } /* ixgbe_sysctl_rdt_handler */
1840 
1841 /************************************************************************
1842  * ixgbe_if_vlan_register
1843  *
1844  *   Run via the vlan config EVENT; it enables us to use the
1845  *   HW filter table since we can get the vlan id. This
1846  *   just creates the entry in the soft version of the
1847  *   VFTA; init will repopulate the real table.
1848  ************************************************************************/
1849 static void
1850 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
1851 {
1852 	struct adapter *adapter = iflib_get_softc(ctx);
1853 	u16            index, bit;
1854 
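	/* shadow_vfta mirrors the 128 32-bit VFTA words: bits 5-11 of the VLAN ID select the word, bits 0-4 the bit */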
1855 	index = (vtag >> 5) & 0x7F;
1856 	bit = vtag & 0x1F;
1857 	adapter->shadow_vfta[index] |= (1 << bit);
1858 	++adapter->num_vlans;
1859 	ixgbe_setup_vlan_hw_support(ctx);
1860 } /* ixgbe_if_vlan_register */
1861 
1862 /************************************************************************
1863  * ixgbe_if_vlan_unregister
1864  *
1865  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1866  ************************************************************************/
1867 static void
1868 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1869 {
1870 	struct adapter *adapter = iflib_get_softc(ctx);
1871 	u16            index, bit;
1872 
1873 	index = (vtag >> 5) & 0x7F;
1874 	bit = vtag & 0x1F;
1875 	adapter->shadow_vfta[index] &= ~(1 << bit);
1876 	--adapter->num_vlans;
1877 	/* Re-init to load the changes */
1878 	ixgbe_setup_vlan_hw_support(ctx);
1879 } /* ixgbe_if_vlan_unregister */
1880 
1881 /************************************************************************
1882  * ixgbe_setup_vlan_hw_support
1883  ************************************************************************/
1884 static void
1885 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
1886 {
1887 	struct ifnet	*ifp = iflib_get_ifp(ctx);
1888 	struct adapter  *adapter = iflib_get_softc(ctx);
1889 	struct ixgbe_hw *hw = &adapter->hw;
1890 	struct rx_ring  *rxr;
1891 	int             i;
1892 	u32             ctrl;
1893 
1894 
1895 	/*
1896 	 * We get here through init_locked, meaning
1897 	 * a soft reset; this has already cleared
1898 	 * the VFTA and other state, so if no
1899 	 * VLANs have been registered, do nothing.
1900 	 */
1901 	if (adapter->num_vlans == 0)
1902 		return;
1903 
1904 	/* Setup the queues for vlans */
1905 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1906 		for (i = 0; i < adapter->num_rx_queues; i++) {
1907 			rxr = &adapter->rx_queues[i].rxr;
1908 			/* On 82599 and later the VLAN enable is per-queue in RXDCTL */
1909 			if (hw->mac.type != ixgbe_mac_82598EB) {
1910 				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1911 				ctrl |= IXGBE_RXDCTL_VME;
1912 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1913 			}
1914 			rxr->vtag_strip = true;
1915 		}
1916 	}
1917 
1918 	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1919 		return;
1920 	/*
1921 	 * A soft reset zeroes out the VFTA, so
1922 	 * we need to repopulate it now.
1923 	 */
1924 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1925 		if (adapter->shadow_vfta[i] != 0)
1926 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1927 			    adapter->shadow_vfta[i]);
1928 
1929 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1930 	/* Enable the filter table if VLAN hardware filtering is enabled */
1931 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1932 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1933 		ctrl |= IXGBE_VLNCTRL_VFE;
1934 	}
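	/* On 82598 VLAN stripping is controlled globally via VLNCTRL rather than per-queue */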
1935 	if (hw->mac.type == ixgbe_mac_82598EB)
1936 		ctrl |= IXGBE_VLNCTRL_VME;
1937 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1938 } /* ixgbe_setup_vlan_hw_support */
1939 
1940 /************************************************************************
1941  * ixgbe_get_slot_info
1942  *
1943  *   Get the width and transaction speed of
1944  *   the slot this adapter is plugged into.
1945  ************************************************************************/
1946 static void
1947 ixgbe_get_slot_info(struct adapter *adapter)
1948 {
1949 	device_t        dev = iflib_get_dev(adapter->ctx);
1950 	struct ixgbe_hw *hw = &adapter->hw;
1951 	int             bus_info_valid = true;
1952 	u32             offset;
1953 	u16             link;
1954 
1955 	/* Some devices are behind an internal bridge */
1956 	switch (hw->device_id) {
1957 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
1958 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
1959 		goto get_parent_info;
1960 	default:
1961 		break;
1962 	}
1963 
1964 	ixgbe_get_bus_info(hw);
1965 
1966 	/*
1967 	 * Some devices don't use PCI-E; for them there is no need to
1968 	 * display "Unknown" for bus speed and width, so just return.
1969 	 */
1970 	switch (hw->mac.type) {
1971 	case ixgbe_mac_X550EM_x:
1972 	case ixgbe_mac_X550EM_a:
1973 		return;
1974 	default:
1975 		goto display;
1976 	}
1977 
1978 get_parent_info:
1979 	/*
1980 	 * For the Quad port adapter we need to parse back
1981 	 * up the PCI tree to find the speed of the expansion
1982 	 * slot into which this adapter is plugged. A bit more work.
1983 	 */
1984 	dev = device_get_parent(device_get_parent(dev));
1985 #ifdef IXGBE_DEBUG
1986 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
1987 	    pci_get_slot(dev), pci_get_function(dev));
1988 #endif
1989 	dev = device_get_parent(device_get_parent(dev));
1990 #ifdef IXGBE_DEBUG
1991 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
1992 	    pci_get_slot(dev), pci_get_function(dev));
1993 #endif
1994 	/* Now get the PCI Express Capabilities offset */
1995 	if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
1996 		/*
1997 		 * Hmm...can't get PCI-Express capabilities.
1998 		 * Falling back to default method.
1999 		 */
2000 		bus_info_valid = false;
2001 		ixgbe_get_bus_info(hw);
2002 		goto display;
2003 	}
2004 	/* ...and read the Link Status Register */
2005 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
2006 	ixgbe_set_pci_config_data_generic(hw, link);
2007 
2008 display:
2009 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
2010 	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s"  :
2011 	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s"  :
2012 	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s"  :
2013 	     "Unknown"),
2014 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
2015 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
2016 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
2017 	     "Unknown"));
2018 
2019 	if (bus_info_valid) {
2020 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2021 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2022 		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
2023 			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
2024 			device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
2025 		}
2026 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2027 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2028 		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
2029 			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
2030 			device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
2031 		}
2032 	} else
2033 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
2034 
2035 	return;
2036 } /* ixgbe_get_slot_info */
2037 
2038 /************************************************************************
2039  * ixgbe_if_msix_intr_assign
2040  *
2041  *   Setup MSI-X Interrupt resources and handlers
2042  ************************************************************************/
2043 static int
2044 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
2045 {
2046 	struct adapter     *adapter = iflib_get_softc(ctx);
2047 	struct ix_rx_queue *rx_que = adapter->rx_queues;
2048 	struct ix_tx_queue *tx_que;
2049 	int                error, rid, vector = 0;
2050 	int                cpu_id = 0;
2051 	char               buf[16];
2052 
2053 	/* RX queue vectors are assigned first; the admin vector is allocated last */
2054 	rid = vector + 1;
2055 	for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
2056 		rid = vector + 1;
2057 
2058 		snprintf(buf, sizeof(buf), "rxq%d", i);
2059 		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
2060 		    IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);
2061 
2062 		if (error) {
2063 			device_printf(iflib_get_dev(ctx),
2064 			    "Failed to allocate que int %d err: %d", i, error);
2065 			adapter->num_rx_queues = i + 1;
2066 			goto fail;
2067 		}
2068 
2069 		rx_que->msix = vector;
2070 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
2071 			/*
2072 			 * The queue ID is used as the RSS layer bucket ID.
2073 			 * We look up the queue ID -> RSS CPU ID and select
2074 			 * that.
2075 			 */
2076 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
2077 		} else {
2078 			/*
2079 			 * Bind the MSI-X vector, and thus the
2080 			 * rings to the corresponding cpu.
2081 			 *
2082 			 * This just happens to match the default RSS
2083 			 * round-robin bucket -> queue -> CPU allocation.
2084 			 */
2085 			if (adapter->num_rx_queues > 1)
2086 				cpu_id = i;
2087 		}
2088 
2089 	}
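	/* TX queues don't get dedicated vectors; each is serviced as a softirq on its paired RX queue's interrupt */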
2090 	for (int i = 0; i < adapter->num_tx_queues; i++) {
2091 		snprintf(buf, sizeof(buf), "txq%d", i);
2092 		tx_que = &adapter->tx_queues[i];
2093 		tx_que->msix = i % adapter->num_rx_queues;
2094 		iflib_softirq_alloc_generic(ctx,
2095 		    &adapter->rx_queues[tx_que->msix].que_irq,
2096 		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
2097 	}
2098 	rid = vector + 1;
2099 	error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
2100 	    IFLIB_INTR_ADMIN, ixgbe_msix_link, adapter, 0, "aq");
2101 	if (error) {
2102 		device_printf(iflib_get_dev(ctx),
2103 		    "Failed to register admin handler");
2104 		return (error);
2105 	}
2106 
2107 	adapter->vector = vector;
2108 
2109 	return (0);
2110 fail:
2111 	iflib_irq_free(ctx, &adapter->irq);
2112 	rx_que = adapter->rx_queues;
2113 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
2114 		iflib_irq_free(ctx, &rx_que->que_irq);
2115 
2116 	return (error);
2117 } /* ixgbe_if_msix_intr_assign */
2118 
2119 static inline void
2120 ixgbe_perform_aim(struct adapter *adapter, struct ix_rx_queue *que)
2121 {
2122 	uint32_t newitr = 0;
2123 	struct rx_ring *rxr = &que->rxr;
2124 
2125 	/*
2126 	 * Do Adaptive Interrupt Moderation:
2127 	 *  - Write out last calculated setting
2128 	 *  - Calculate based on average size over
2129 	 *    the last interval.
2130 	 */
2131 	if (que->eitr_setting) {
2132 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
2133 		    que->eitr_setting);
2134 	}
2135 
2136 	que->eitr_setting = 0;
2137 	/* Idle, do nothing */
2138 	if (rxr->bytes == 0) {
2139 		return;
2140 	}
2141 
2142 	if ((rxr->bytes) && (rxr->packets)) {
2143 		newitr = (rxr->bytes / rxr->packets);
2144 	}
2145 
2146 	newitr += 24; /* account for hardware frame, crc */
2147 	/* set an upper boundary */
2148 	newitr = min(newitr, 3000);
2149 
2150 	/* Be nice to the mid range */
2151 	if ((newitr > 300) && (newitr < 1200)) {
2152 		newitr = (newitr / 3);
2153 	} else {
2154 		newitr = (newitr / 2);
2155 	}
2156 
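	/* 82598 expects the setting replicated in the upper 16 bits of EITR; newer MACs use the CNT_WDIS flag instead */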
2157 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2158 		newitr |= newitr << 16;
2159 	} else {
2160 		newitr |= IXGBE_EITR_CNT_WDIS;
2161 	}
2162 
2163 	/* save for next interrupt */
2164 	que->eitr_setting = newitr;
2165 
2166 	/* Reset state */
2167 	rxr->bytes = 0;
2168 	rxr->packets = 0;
2169 
2170 	return;
2171 }
2172 
2173 /*********************************************************************
2174  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2175  **********************************************************************/
2176 static int
2177 ixgbe_msix_que(void *arg)
2178 {
2179 	struct ix_rx_queue *que = arg;
2180 	struct adapter     *adapter = que->adapter;
2181 	struct ifnet       *ifp = iflib_get_ifp(que->adapter->ctx);
2182 
2183 	/* Protect against spurious interrupts */
2184 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2185 		return (FILTER_HANDLED);
2186 
2187 	ixgbe_disable_queue(adapter, que->msix);
2188 	++que->irqs;
2189 
2190 	/* Check for AIM */
2191 	if (adapter->enable_aim) {
2192 		ixgbe_perform_aim(adapter, que);
2193 	}
2194 
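	/* Hand the actual RX/TX processing to the iflib queue task */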
2195 	return (FILTER_SCHEDULE_THREAD);
2196 } /* ixgbe_msix_que */
2197 
2198 /************************************************************************
2199  * ixgbe_if_media_status - Media Ioctl callback
2200  *
2201  *   Called whenever the user queries the status of
2202  *   the interface using ifconfig.
2203  ************************************************************************/
2204 static void
2205 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
2206 {
2207 	struct adapter  *adapter = iflib_get_softc(ctx);
2208 	struct ixgbe_hw *hw = &adapter->hw;
2209 	int             layer;
2210 
2211 	INIT_DEBUGOUT("ixgbe_if_media_status: begin");
2212 
2213 	ifmr->ifm_status = IFM_AVALID;
2214 	ifmr->ifm_active = IFM_ETHER;
2215 
2216 	if (!adapter->link_active)
2217 		return;
2218 
2219 	ifmr->ifm_status |= IFM_ACTIVE;
2220 	layer = adapter->phy_layer;
2221 
2222 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2223 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2224 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2225 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2226 		switch (adapter->link_speed) {
2227 		case IXGBE_LINK_SPEED_10GB_FULL:
2228 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2229 			break;
2230 		case IXGBE_LINK_SPEED_1GB_FULL:
2231 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2232 			break;
2233 		case IXGBE_LINK_SPEED_100_FULL:
2234 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2235 			break;
2236 		case IXGBE_LINK_SPEED_10_FULL:
2237 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2238 			break;
2239 		}
2240 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2241 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2242 		switch (adapter->link_speed) {
2243 		case IXGBE_LINK_SPEED_10GB_FULL:
2244 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2245 			break;
2246 		}
2247 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2248 		switch (adapter->link_speed) {
2249 		case IXGBE_LINK_SPEED_10GB_FULL:
2250 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2251 			break;
2252 		case IXGBE_LINK_SPEED_1GB_FULL:
2253 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2254 			break;
2255 		}
2256 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2257 		switch (adapter->link_speed) {
2258 		case IXGBE_LINK_SPEED_10GB_FULL:
2259 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2260 			break;
2261 		case IXGBE_LINK_SPEED_1GB_FULL:
2262 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2263 			break;
2264 		}
2265 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2266 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2267 		switch (adapter->link_speed) {
2268 		case IXGBE_LINK_SPEED_10GB_FULL:
2269 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2270 			break;
2271 		case IXGBE_LINK_SPEED_1GB_FULL:
2272 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2273 			break;
2274 		}
2275 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2276 		switch (adapter->link_speed) {
2277 		case IXGBE_LINK_SPEED_10GB_FULL:
2278 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2279 			break;
2280 		}
2281 	/*
2282 	 * XXX: These need to use the proper media types once
2283 	 * they're added.
2284 	 */
2285 #ifndef IFM_ETH_XTYPE
2286 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2287 		switch (adapter->link_speed) {
2288 		case IXGBE_LINK_SPEED_10GB_FULL:
2289 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2290 			break;
2291 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2292 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2293 			break;
2294 		case IXGBE_LINK_SPEED_1GB_FULL:
2295 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2296 			break;
2297 		}
2298 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2299 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2300 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2301 		switch (adapter->link_speed) {
2302 		case IXGBE_LINK_SPEED_10GB_FULL:
2303 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2304 			break;
2305 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2306 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2307 			break;
2308 		case IXGBE_LINK_SPEED_1GB_FULL:
2309 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2310 			break;
2311 		}
2312 #else
2313 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2314 		switch (adapter->link_speed) {
2315 		case IXGBE_LINK_SPEED_10GB_FULL:
2316 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2317 			break;
2318 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2319 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2320 			break;
2321 		case IXGBE_LINK_SPEED_1GB_FULL:
2322 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2323 			break;
2324 		}
2325 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2326 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2327 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2328 		switch (adapter->link_speed) {
2329 		case IXGBE_LINK_SPEED_10GB_FULL:
2330 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2331 			break;
2332 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2333 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2334 			break;
2335 		case IXGBE_LINK_SPEED_1GB_FULL:
2336 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2337 			break;
2338 		}
2339 #endif
2340 
2341 	/* If nothing is recognized... */
2342 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2343 		ifmr->ifm_active |= IFM_UNKNOWN;
2344 
2345 	/* Display current flow control setting used on link */
2346 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2347 	    hw->fc.current_mode == ixgbe_fc_full)
2348 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2349 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2350 	    hw->fc.current_mode == ixgbe_fc_full)
2351 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2352 } /* ixgbe_if_media_status */
2353 
2354 /************************************************************************
2355  * ixgbe_if_media_change - Media Ioctl callback
2356  *
2357  *   Called when the user changes speed/duplex using
2358  *   media/mediaopt options with ifconfig.
2359  ************************************************************************/
2360 static int
2361 ixgbe_if_media_change(if_ctx_t ctx)
2362 {
2363 	struct adapter   *adapter = iflib_get_softc(ctx);
2364 	struct ifmedia   *ifm = iflib_get_media(ctx);
2365 	struct ixgbe_hw  *hw = &adapter->hw;
2366 	ixgbe_link_speed speed = 0;
2367 
2368 	INIT_DEBUGOUT("ixgbe_if_media_change: begin");
2369 
2370 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2371 		return (EINVAL);
2372 
2373 	if (hw->phy.media_type == ixgbe_media_type_backplane)
2374 		return (EPERM);
2375 
2376 	/*
2377 	 * We don't actually need to check against the supported
2378 	 * media types of the adapter; ifmedia will take care of
2379 	 * that for us.
2380 	 */
2381 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
2382 	case IFM_AUTO:
2383 	case IFM_10G_T:
2384 		speed |= IXGBE_LINK_SPEED_100_FULL;
2385 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2386 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2387 		break;
2388 	case IFM_10G_LRM:
2389 	case IFM_10G_LR:
2390 #ifndef IFM_ETH_XTYPE
2391 	case IFM_10G_SR: /* KR, too */
2392 	case IFM_10G_CX4: /* KX4 */
2393 #else
2394 	case IFM_10G_KR:
2395 	case IFM_10G_KX4:
2396 #endif
2397 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2398 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2399 		break;
2400 #ifndef IFM_ETH_XTYPE
2401 	case IFM_1000_CX: /* KX */
2402 #else
2403 	case IFM_1000_KX:
2404 #endif
2405 	case IFM_1000_LX:
2406 	case IFM_1000_SX:
2407 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2408 		break;
2409 	case IFM_1000_T:
2410 		speed |= IXGBE_LINK_SPEED_100_FULL;
2411 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2412 		break;
2413 	case IFM_10G_TWINAX:
2414 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2415 		break;
2416 	case IFM_100_TX:
2417 		speed |= IXGBE_LINK_SPEED_100_FULL;
2418 		break;
2419 	case IFM_10_T:
2420 		speed |= IXGBE_LINK_SPEED_10_FULL;
2421 		break;
2422 	default:
2423 		goto invalid;
2424 	}
2425 
2426 	hw->mac.autotry_restart = true;
2427 	hw->mac.ops.setup_link(hw, speed, true);
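	/* Record the new advertise bitmask: 0x1 = 100M, 0x2 = 1G, 0x4 = 10G, 0x8 = 10M */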
2428 	adapter->advertise =
2429 	    ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
2430 	    ((speed & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
2431 	    ((speed & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
2432 	    ((speed & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
2433 
2434 	return (0);
2435 
2436 invalid:
2437 	device_printf(iflib_get_dev(ctx), "Invalid media type!\n");
2438 
2439 	return (EINVAL);
2440 } /* ixgbe_if_media_change */
2441 
2442 /************************************************************************
2443  * ixgbe_if_promisc_set
2444  ************************************************************************/
2445 static int
2446 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2447 {
2448 	struct adapter *adapter = iflib_get_softc(ctx);
2449 	struct ifnet   *ifp = iflib_get_ifp(ctx);
2450 	u32            rctl;
2451 	int            mcnt = 0;
2452 
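	/* FCTRL.UPE/MPE are the unicast/multicast promiscuous enables */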
2453 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2454 	rctl &= (~IXGBE_FCTRL_UPE);
2455 	if (ifp->if_flags & IFF_ALLMULTI)
2456 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2457 	else {
2458 		mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES);
2459 	}
2460 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2461 		rctl &= (~IXGBE_FCTRL_MPE);
2462 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2463 
2464 	if (ifp->if_flags & IFF_PROMISC) {
2465 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2466 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2467 	} else if (ifp->if_flags & IFF_ALLMULTI) {
2468 		rctl |= IXGBE_FCTRL_MPE;
2469 		rctl &= ~IXGBE_FCTRL_UPE;
2470 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2471 	}
2472 	return (0);
2473 } /* ixgbe_if_promisc_set */
2474 
2475 /************************************************************************
2476  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2477  ************************************************************************/
2478 static int
2479 ixgbe_msix_link(void *arg)
2480 {
2481 	struct adapter  *adapter = arg;
2482 	struct ixgbe_hw *hw = &adapter->hw;
2483 	u32             eicr, eicr_mask;
2484 	s32             retval;
2485 
2486 	++adapter->link_irq;
2487 
2488 	/* Pause other interrupts */
2489 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2490 
2491 	/* First get the cause */
2492 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2493 	/* Be sure the queue bits are not cleared */
2494 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
2495 	/* Clear interrupt with write */
2496 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2497 
2498 	/* Link status change */
2499 	if (eicr & IXGBE_EICR_LSC) {
2500 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2501 		adapter->task_requests |= IXGBE_REQUEST_TASK_LSC;
2502 	}
2503 
2504 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2505 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
2506 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
2507 			/* This is probably overkill :) */
2508 			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
2509 				return (FILTER_HANDLED);
2510 			/* Disable the interrupt */
2511 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
2512 			adapter->task_requests |= IXGBE_REQUEST_TASK_FDIR;
2513 		} else
2514 			if (eicr & IXGBE_EICR_ECC) {
2515 				device_printf(iflib_get_dev(adapter->ctx),
2516 				   "\nCRITICAL: ECC ERROR!! Please Reboot!!\n");
2517 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2518 			}
2519 
2520 		/* Check for over temp condition */
2521 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2522 			switch (adapter->hw.mac.type) {
2523 			case ixgbe_mac_X550EM_a:
2524 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2525 					break;
2526 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2527 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2528 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
2529 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2530 				retval = hw->phy.ops.check_overtemp(hw);
2531 				if (retval != IXGBE_ERR_OVERTEMP)
2532 					break;
2533 				device_printf(iflib_get_dev(adapter->ctx),
2534 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2535 				device_printf(iflib_get_dev(adapter->ctx),
2536 				    "System shutdown required!\n");
2537 				break;
2538 			default:
2539 				if (!(eicr & IXGBE_EICR_TS))
2540 					break;
2541 				retval = hw->phy.ops.check_overtemp(hw);
2542 				if (retval != IXGBE_ERR_OVERTEMP)
2543 					break;
2544 				device_printf(iflib_get_dev(adapter->ctx),
2545 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2546 				device_printf(iflib_get_dev(adapter->ctx),
2547 				    "System shutdown required!\n");
2548 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2549 				break;
2550 			}
2551 		}
2552 
2553 		/* Check for VF message */
2554 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
2555 		    (eicr & IXGBE_EICR_MAILBOX))
2556 			adapter->task_requests |= IXGBE_REQUEST_TASK_MBX;
2557 	}
2558 
2559 	if (ixgbe_is_sfp(hw)) {
2560 		/* Pluggable optics-related interrupt */
2561 		if (hw->mac.type >= ixgbe_mac_X540)
2562 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2563 		else
2564 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2565 
2566 		if (eicr & eicr_mask) {
2567 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2568 			adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
2569 		}
2570 
2571 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
2572 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2573 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
2574 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2575 			adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
2576 		}
2577 	}
2578 
2579 	/* Check for fan failure */
2580 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2581 		ixgbe_check_fan_failure(adapter, eicr, true);
2582 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2583 	}
2584 
2585 	/* External PHY interrupt */
2586 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2587 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2588 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2589 		adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
2590 	}
2591 
2592 	return (adapter->task_requests != 0) ? FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
2593 } /* ixgbe_msix_link */
2594 
2595 /************************************************************************
2596  * ixgbe_sysctl_interrupt_rate_handler
2597  ************************************************************************/
2598 static int
2599 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2600 {
2601 	struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2602 	int                error;
2603 	unsigned int       reg, usec, rate;
2604 
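	/* EITR holds the interval in bits 3:11; convert it to an interrupt rate (the 500000 constant reflects 2 usec per unit) */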
2605 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
2606 	usec = ((reg & 0x0FF8) >> 3);
2607 	if (usec > 0)
2608 		rate = 500000 / usec;
2609 	else
2610 		rate = 0;
2611 	error = sysctl_handle_int(oidp, &rate, 0, req);
2612 	if (error || !req->newptr)
2613 		return error;
2614 	reg &= ~0xfff; /* default, no limitation */
2615 	ixgbe_max_interrupt_rate = 0;
2616 	if (rate > 0 && rate < 500000) {
2617 		if (rate < 1000)
2618 			rate = 1000;
2619 		ixgbe_max_interrupt_rate = rate;
2620 		reg |= ((4000000/rate) & 0xff8);
2621 	}
2622 	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2623 
2624 	return (0);
2625 } /* ixgbe_sysctl_interrupt_rate_handler */
2626 
2627 /************************************************************************
2628  * ixgbe_add_device_sysctls
2629  ************************************************************************/
2630 static void
2631 ixgbe_add_device_sysctls(if_ctx_t ctx)
2632 {
2633 	struct adapter         *adapter = iflib_get_softc(ctx);
2634 	device_t               dev = iflib_get_dev(ctx);
2635 	struct ixgbe_hw        *hw = &adapter->hw;
2636 	struct sysctl_oid_list *child;
2637 	struct sysctl_ctx_list *ctx_list;
2638 
2639 	ctx_list = device_get_sysctl_ctx(dev);
2640 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2641 
2642 	/* Sysctls for all devices */
2643 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
2644 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2645 	    adapter, 0, ixgbe_sysctl_flowcntl, "I",
2646 	    IXGBE_SYSCTL_DESC_SET_FC);
2647 
2648 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
2649 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2650 	    adapter, 0, ixgbe_sysctl_advertise, "I",
2651 	    IXGBE_SYSCTL_DESC_ADV_SPEED);
2652 
2653 	adapter->enable_aim = ixgbe_enable_aim;
2654 	SYSCTL_ADD_INT(ctx_list, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
2655 	    &adapter->enable_aim, 0, "Interrupt Moderation");
2656 
2657 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fw_version",
2658 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 0,
2659 	    ixgbe_sysctl_print_fw_version, "A", "Prints FW/NVM Versions");
2660 
2661 #ifdef IXGBE_DEBUG
2662 	/* testing sysctls (for all devices) */
2663 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
2664 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2665 	    adapter, 0, ixgbe_sysctl_power_state,
2666 	    "I", "PCI Power State");
2667 
2668 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
2669 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 0,
2670 	    ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2671 #endif
2672 	/* for X550 series devices */
2673 	if (hw->mac.type >= ixgbe_mac_X550)
2674 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
2675 		    CTLTYPE_U16 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2676 		    adapter, 0, ixgbe_sysctl_dmac,
2677 		    "I", "DMA Coalesce");
2678 
2679 	/* for WoL-capable devices */
2680 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2681 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
2682 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0,
2683 		    ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2684 
2685 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
2686 		    CTLTYPE_U32 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2687 		    adapter, 0, ixgbe_sysctl_wufc,
2688 		    "I", "Enable/Disable Wake Up Filters");
2689 	}
2690 
2691 	/* for X552/X557-AT devices */
2692 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2693 		struct sysctl_oid *phy_node;
2694 		struct sysctl_oid_list *phy_list;
2695 
2696 		phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
2697 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "External PHY sysctls");
2698 		phy_list = SYSCTL_CHILDREN(phy_node);
2699 
2700 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
2701 		    CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2702 		    adapter, 0, ixgbe_sysctl_phy_temp,
2703 		    "I", "Current External PHY Temperature (Celsius)");
2704 
2705 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
2706 		    "overtemp_occurred",
2707 		    CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 0,
2708 		    ixgbe_sysctl_phy_overtemp_occurred, "I",
2709 		    "External PHY High Temperature Event Occurred");
2710 	}
2711 
2712 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
2713 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
2714 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0,
2715 		    ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2716 	}
2717 } /* ixgbe_add_device_sysctls */
2718 
2719 /************************************************************************
2720  * ixgbe_allocate_pci_resources
2721  ************************************************************************/
2722 static int
2723 ixgbe_allocate_pci_resources(if_ctx_t ctx)
2724 {
2725 	struct adapter *adapter = iflib_get_softc(ctx);
2726 	device_t        dev = iflib_get_dev(ctx);
2727 	int             rid;
2728 
2729 	rid = PCIR_BAR(0);
2730 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2731 	    RF_ACTIVE);
2732 
2733 	if (!(adapter->pci_mem)) {
2734 		device_printf(dev, "Unable to allocate bus resource: memory\n");
2735 		return (ENXIO);
2736 	}
2737 
2738 	/* Save bus_space values for READ/WRITE_REG macros */
2739 	adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2740 	adapter->osdep.mem_bus_space_handle =
2741 	    rman_get_bushandle(adapter->pci_mem);
2742 	/* Set hw values for shared code */
2743 	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2744 
2745 	return (0);
2746 } /* ixgbe_allocate_pci_resources */
2747 
2748 /************************************************************************
2749  * ixgbe_detach - Device removal routine
2750  * ixgbe_if_detach - Device removal routine
2751  *   Called when the driver is being removed.
2752  *   Stops the adapter and deallocates all the resources
2753  *   that were allocated for driver operation.
2754  *
2755  *   return 0 on success, positive on failure
2756  ************************************************************************/
2757 static int
2758 ixgbe_if_detach(if_ctx_t ctx)
2759 {
2760 	struct adapter *adapter = iflib_get_softc(ctx);
2761 	device_t       dev = iflib_get_dev(ctx);
2762 	u32            ctrl_ext;
2763 
2764 	INIT_DEBUGOUT("ixgbe_detach: begin");
2765 
2766 	if (ixgbe_pci_iov_detach(dev) != 0) {
2767 		device_printf(dev, "SR-IOV in use; detach first.\n");
2768 		return (EBUSY);
2769 	}
2770 
2771 	ixgbe_setup_low_power_mode(ctx);
2772 
2773 	/* let hardware know driver is unloading */
2774 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
2775 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2776 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
2777 
2778 	ixgbe_free_pci_resources(ctx);
2779 	free(adapter->mta, M_IXGBE);
2780 
2781 	return (0);
2782 } /* ixgbe_if_detach */
2783 
2784 /************************************************************************
2785  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2786  *
2787  *   Prepare the adapter/port for LPLU and/or WoL
2788  ************************************************************************/
2789 static int
2790 ixgbe_setup_low_power_mode(if_ctx_t ctx)
2791 {
2792 	struct adapter  *adapter = iflib_get_softc(ctx);
2793 	struct ixgbe_hw *hw = &adapter->hw;
2794 	device_t        dev = iflib_get_dev(ctx);
2795 	s32             error = 0;
2796 
2797 	if (!hw->wol_enabled)
2798 		ixgbe_set_phy_power(hw, false);
2799 
2800 	/* Limit power management flow to X550EM baseT */
2801 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2802 	    hw->phy.ops.enter_lplu) {
2803 		/* Turn off support for APM wakeup. (Using ACPI instead) */
2804 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
2805 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
2806 
2807 		/*
2808 		 * Clear Wake Up Status register to prevent any previous wakeup
2809 		 * events from waking us up immediately after we suspend.
2810 		 */
2811 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2812 
2813 		/*
2814 		 * Program the Wakeup Filter Control register with user filter
2815 		 * settings
2816 		 */
2817 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
2818 
2819 		/* Enable wakeups and power management in Wakeup Control */
2820 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
2821 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2822 
2823 		/* X550EM baseT adapters need a special LPLU flow */
2824 		hw->phy.reset_disable = true;
2825 		ixgbe_if_stop(ctx);
2826 		error = hw->phy.ops.enter_lplu(hw);
2827 		if (error)
2828 			device_printf(dev, "Error entering LPLU: %d\n", error);
2829 		hw->phy.reset_disable = false;
2830 	} else {
2831 		/* Just stop for other adapters */
2832 		ixgbe_if_stop(ctx);
2833 	}
2834 
2835 	return error;
2836 } /* ixgbe_setup_low_power_mode */
2837 
2838 /************************************************************************
2839  * ixgbe_if_shutdown - Shutdown entry point
2840  ************************************************************************/
2841 static int
2842 ixgbe_if_shutdown(if_ctx_t ctx)
2843 {
2844 	int error = 0;
2845 
2846 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
2847 
2848 	error = ixgbe_setup_low_power_mode(ctx);
2849 
2850 	return (error);
2851 } /* ixgbe_if_shutdown */
2852 
2853 /************************************************************************
2854  * ixgbe_if_suspend
2855  *
2856  *   From D0 to D3
2857  ************************************************************************/
2858 static int
2859 ixgbe_if_suspend(if_ctx_t ctx)
2860 {
2861 	int error = 0;
2862 
2863 	INIT_DEBUGOUT("ixgbe_suspend: begin");
2864 
2865 	error = ixgbe_setup_low_power_mode(ctx);
2866 
2867 	return (error);
2868 } /* ixgbe_if_suspend */
2869 
2870 /************************************************************************
2871  * ixgbe_if_resume
2872  *
2873  *   From D3 to D0
2874  ************************************************************************/
2875 static int
2876 ixgbe_if_resume(if_ctx_t ctx)
2877 {
2878 	struct adapter  *adapter = iflib_get_softc(ctx);
2879 	device_t        dev = iflib_get_dev(ctx);
2880 	struct ifnet    *ifp = iflib_get_ifp(ctx);
2881 	struct ixgbe_hw *hw = &adapter->hw;
2882 	u32             wus;
2883 
2884 	INIT_DEBUGOUT("ixgbe_resume: begin");
2885 
2886 	/* Read & clear WUS register */
2887 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2888 	if (wus)
2889 		device_printf(dev, "Woken up by (WUS): %#010x\n",
2890 		    IXGBE_READ_REG(hw, IXGBE_WUS));
2891 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2892 	/* And clear WUFC until next low-power transition */
2893 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2894 
2895 	/*
2896 	 * Required after D3->D0 transition;
2897 	 * will re-advertise all previous advertised speeds
2898 	 */
2899 	if (ifp->if_flags & IFF_UP)
2900 		ixgbe_if_init(ctx);
2901 
2902 	return (0);
2903 } /* ixgbe_if_resume */
2904 
2905 /************************************************************************
2906  * ixgbe_if_mtu_set - Ioctl mtu entry point
2907  *
2908  *   Return 0 on success, EINVAL on failure
2909  ************************************************************************/
2910 static int
2911 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
2912 {
2913 	struct adapter *adapter = iflib_get_softc(ctx);
2914 	int error = 0;
2915 
2916 	IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
2917 
2918 	if (mtu > IXGBE_MAX_MTU) {
2919 		error = EINVAL;
2920 	} else {
2921 		adapter->max_frame_size = mtu + IXGBE_MTU_HDR;
2922 	}
2923 
2924 	return error;
2925 } /* ixgbe_if_mtu_set */
2926 
2927 /************************************************************************
2928  * ixgbe_if_crcstrip_set
2929  ************************************************************************/
2930 static void
2931 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
2932 {
2933 	struct adapter *sc = iflib_get_softc(ctx);
2934 	struct ixgbe_hw *hw = &sc->hw;
2935 	/* crc stripping is set in two places:
2936 	 * IXGBE_HLREG0 (modified on init_locked and hw reset)
2937 	 * IXGBE_RDRXCTL (set by the original driver in
2938 	 *	ixgbe_setup_hw_rsc() called in init_locked.
2939 	 *	We disable the setting when netmap is compiled in).
2940 	 * We update the values here, but also in ixgbe.c because
2941 	 * init_locked sometimes is called outside our control.
2942 	 */
2943 	uint32_t hl, rxc;
2944 
2945 	hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2946 	rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2947 #ifdef NETMAP
2948 	if (netmap_verbose)
2949 		D("%s read  HLREG 0x%x rxc 0x%x",
2950 			onoff ? "enter" : "exit", hl, rxc);
2951 #endif
2952 	/* hw requirements ... */
2953 	rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2954 	rxc |= IXGBE_RDRXCTL_RSCACKC;
2955 	if (onoff && !crcstrip) {
2956 		/* keep the crc. Fast rx */
2957 		hl &= ~IXGBE_HLREG0_RXCRCSTRP;
2958 		rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
2959 	} else {
2960 		/* reset default mode */
2961 		hl |= IXGBE_HLREG0_RXCRCSTRP;
2962 		rxc |= IXGBE_RDRXCTL_CRCSTRIP;
2963 	}
2964 #ifdef NETMAP
2965 	if (netmap_verbose)
2966 		D("%s write HLREG 0x%x rxc 0x%x",
2967 			onoff ? "enter" : "exit", hl, rxc);
2968 #endif
2969 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
2970 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
2971 } /* ixgbe_if_crcstrip_set */
2972 
2973 /*********************************************************************
2974  * ixgbe_if_init - Init entry point
2975  *
2976  *   Used in two ways: It is used by the stack as an init
2977  *   entry point in network interface structure. It is also
2978  *   used by the driver as a hw/sw initialization routine to
2979  *   get to a consistent state.
2980  *
2981  *   Returns nothing; failures are reported via device_printf.
2982  **********************************************************************/
2983 void
2984 ixgbe_if_init(if_ctx_t ctx)
2985 {
2986 	struct adapter     *adapter = iflib_get_softc(ctx);
2987 	struct ifnet       *ifp = iflib_get_ifp(ctx);
2988 	device_t           dev = iflib_get_dev(ctx);
2989 	struct ixgbe_hw *hw = &adapter->hw;
2990 	struct ix_rx_queue *rx_que;
2991 	struct ix_tx_queue *tx_que;
2992 	u32             txdctl, mhadd;
2993 	u32             rxdctl, rxctrl;
2994 	u32             ctrl_ext;
2995 
2996 	int             i, j, err;
2997 
2998 	INIT_DEBUGOUT("ixgbe_if_init: begin");
2999 
3000 	/* Queue indices may change with IOV mode */
3001 	ixgbe_align_all_queue_indices(adapter);
3002 
3003 	/* reprogram the RAR[0] in case user changed it. */
3004 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
3005 
3006 	/* Get the latest mac address, User can use a LAA */
3007 	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3008 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
3009 	hw->addr_ctrl.rar_used_count = 1;
3010 
3011 	ixgbe_init_hw(hw);
3012 
3013 	ixgbe_initialize_iov(adapter);
3014 
3015 	ixgbe_initialize_transmit_units(ctx);
3016 
3017 	/* Setup Multicast table */
3018 	ixgbe_if_multi_set(ctx);
3019 
3020 	/* Determine the correct mbuf pool, based on frame size */
3021 	adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
3022 
3023 	/* Configure RX settings */
3024 	ixgbe_initialize_receive_units(ctx);
3025 
3026 	/*
3027 	 * Initialize variable holding task enqueue requests
3028 	 * from MSI-X interrupts
3029 	 */
3030 	adapter->task_requests = 0;
3031 
3032 	/* Enable SDP & MSI-X interrupts based on adapter */
3033 	ixgbe_config_gpie(adapter);
3034 
3035 	/* Set MTU size */
3036 	if (ifp->if_mtu > ETHERMTU) {
3037 		/* aka IXGBE_MAXFRS on 82599 and newer */
3038 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3039 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
3040 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
3041 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3042 	}
3043 
3044 	/* Now enable all the queues */
3045 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
3046 		struct tx_ring *txr = &tx_que->txr;
3047 
3048 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
3049 		txdctl |= IXGBE_TXDCTL_ENABLE;
3050 		/* Set WTHRESH to 8, burst writeback */
3051 		txdctl |= (8 << 16);
3052 		/*
3053 		 * When the internal queue falls below PTHRESH (32),
3054 		 * start prefetching as long as there are at least
3055 		 * HTHRESH (1) buffers ready. The values are taken
3056 		 * from the Intel linux driver 3.8.21.
3057 		 * Prefetching enables tx line rate even with 1 queue.
3058 		 */
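		/* TXDCTL fields: PTHRESH bits 6:0, HTHRESH bits 14:8, WTHRESH bits 22:16 */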
3059 		txdctl |= (32 << 0) | (1 << 8);
3060 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
3061 	}
3062 
3063 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
3064 		struct rx_ring *rxr = &rx_que->rxr;
3065 
3066 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
3067 		if (hw->mac.type == ixgbe_mac_82598EB) {
3068 			/*
3069 			 * PTHRESH = 32 (0x20, to match 0x080420 below)
3070 			 * HTHRESH = 4
3071 			 * WTHRESH = 8
3072 			 */
3073 			rxdctl &= ~0x3FFFFF;
3074 			rxdctl |= 0x080420;
3075 		}
3076 		rxdctl |= IXGBE_RXDCTL_ENABLE;
3077 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
3078 		for (j = 0; j < 10; j++) {
3079 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
3080 			    IXGBE_RXDCTL_ENABLE)
3081 				break;
3082 			else
3083 				msec_delay(1);
3084 		}
3085 		wmb();
3086 	}
3087 
3088 	/* Enable Receive engine */
3089 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3090 	if (hw->mac.type == ixgbe_mac_82598EB)
3091 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
3092 	rxctrl |= IXGBE_RXCTRL_RXEN;
3093 	ixgbe_enable_rx_dma(hw, rxctrl);
3094 
3095 	/* Set up MSI/MSI-X routing */
3096 	if (ixgbe_enable_msix)  {
3097 		ixgbe_configure_ivars(adapter);
3098 		/* Set up auto-mask */
3099 		if (hw->mac.type == ixgbe_mac_82598EB)
3100 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3101 		else {
3102 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3103 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3104 		}
3105 	} else {  /* Simple settings for Legacy/MSI */
3106 		ixgbe_set_ivar(adapter, 0, 0, 0);
3107 		ixgbe_set_ivar(adapter, 0, 0, 1);
3108 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3109 	}
3110 
3111 	ixgbe_init_fdir(adapter);
3112 
3113 	/*
3114 	 * Check on any SFP devices that
3115 	 * need to be kick-started
3116 	 */
3117 	if (hw->phy.type == ixgbe_phy_none) {
3118 		err = hw->phy.ops.identify(hw);
3119 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3120 			device_printf(dev,
3121 			    "Unsupported SFP+ module type was detected.\n");
3122 			return;
3123 		}
3124 	}
3125 
3126 	/* Set moderation on the Link interrupt */
3127 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
3128 
3129 	/* Enable power to the phy. */
3130 	ixgbe_set_phy_power(hw, true);
3131 
3132 	/* Config/Enable Link */
3133 	ixgbe_config_link(ctx);
3134 
3135 	/* Hardware Packet Buffer & Flow Control setup */
3136 	ixgbe_config_delay_values(adapter);
3137 
3138 	/* Initialize the FC settings */
3139 	ixgbe_start_hw(hw);
3140 
3141 	/* Set up VLAN support and filter */
3142 	ixgbe_setup_vlan_hw_support(ctx);
3143 
3144 	/* Setup DMA Coalescing */
3145 	ixgbe_config_dmac(adapter);
3146 
3147 	/* And now turn on interrupts */
3148 	ixgbe_if_enable_intr(ctx);
3149 
3150 	/* Enable the use of the MBX by the VF's */
3151 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
3152 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3153 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3154 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3155 	}
3156 
3157 } /* ixgbe_if_init */
3158 
3159 /************************************************************************
3160  * ixgbe_set_ivar
3161  *
3162  *   Setup the correct IVAR register for a particular MSI-X interrupt
3163  *     (yes this is all very magic and confusing :)
3164  *    - entry is the register array entry
3165  *    - vector is the MSI-X vector for this queue
3166  *    - type is RX/TX/MISC
3167  ************************************************************************/
3168 static void
3169 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3170 {
3171 	struct ixgbe_hw *hw = &adapter->hw;
3172 	u32 ivar, index;
3173 
3174 	vector |= IXGBE_IVAR_ALLOC_VAL;
3175 
3176 	switch (hw->mac.type) {
3177 	case ixgbe_mac_82598EB:
3178 		if (type == -1)
3179 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3180 		else
3181 			entry += (type * 64);
3182 		index = (entry >> 2) & 0x1F;
3183 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3184 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3185 		ivar |= (vector << (8 * (entry & 0x3)));
3186 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3187 		break;
3188 	case ixgbe_mac_82599EB:
3189 	case ixgbe_mac_X540:
3190 	case ixgbe_mac_X550:
3191 	case ixgbe_mac_X550EM_x:
3192 	case ixgbe_mac_X550EM_a:
3193 		if (type == -1) { /* MISC IVAR */
3194 			index = (entry & 1) * 8;
3195 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3196 			ivar &= ~(0xFF << index);
3197 			ivar |= (vector << index);
3198 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3199 		} else {          /* RX/TX IVARS */
3200 			index = (16 * (entry & 1)) + (8 * type);
3201 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3202 			ivar &= ~(0xFF << index);
3203 			ivar |= (vector << index);
3204 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3205 		}
3206 	default:
3207 		break;
3208 	}
3209 } /* ixgbe_set_ivar */
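
/************************************************************************
 * Editor's sketch -- IVAR byte-lane math (illustrative only, not called)
 *
 *   A minimal example of the arithmetic used above for 82599 and newer
 *   MACs: each 32-bit IVAR register carries four 8-bit vector fields,
 *   two (RX/TX) per queue entry, so an entry selects IVAR(entry >> 1)
 *   and byte lane 16*(entry & 1) + 8*type.  The function name and the
 *   values below are hypothetical.
 ************************************************************************/
static __unused void
ixgbe_ivar_layout_example(void)
{
	u8  entry = 5, vector = 3;                   /* example queue entry/vector */
	s8  type = 0;                                /* 0 = RX, 1 = TX */
	u32 reg = entry >> 1;                        /* -> IVAR(2) */
	u32 shift = (16 * (entry & 1)) + (8 * type); /* -> bits 23:16 */
	u32 field = (u32)(vector | IXGBE_IVAR_ALLOC_VAL) << shift;

	(void)reg;
	(void)field;
} /* ixgbe_ivar_layout_example */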
3210 
3211 /************************************************************************
3212  * ixgbe_configure_ivars
3213  ************************************************************************/
3214 static void
3215 ixgbe_configure_ivars(struct adapter *adapter)
3216 {
3217 	struct ix_rx_queue *rx_que = adapter->rx_queues;
3218 	struct ix_tx_queue *tx_que = adapter->tx_queues;
3219 	u32                newitr;
3220 
3221 	if (ixgbe_max_interrupt_rate > 0)
3222 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3223 	else {
3224 		/*
3225 		 * Disable DMA coalescing if interrupt moderation is
3226 		 * disabled.
3227 		 */
3228 		adapter->dmac = 0;
3229 		newitr = 0;
3230 	}
3231 
3232 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
3233 		struct rx_ring *rxr = &rx_que->rxr;
3234 
3235 		/* First the RX queue entry */
3236 		ixgbe_set_ivar(adapter, rxr->me, rx_que->msix, 0);
3237 
3238 		/* Set an Initial EITR value */
3239 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rx_que->msix), newitr);
3240 	}
3241 	for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
3242 		struct tx_ring *txr = &tx_que->txr;
3243 
3244 		/* ... and the TX */
3245 		ixgbe_set_ivar(adapter, txr->me, tx_que->msix, 1);
3246 	}
3247 	/* For the Link interrupt */
3248 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3249 } /* ixgbe_configure_ivars */
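
/*
 * Editor's note -- a worked example of the EITR seeding above: for a
 * maximum interrupt rate of 31250 interrupts/s, newitr =
 * (4000000 / 31250) & 0x0FF8 = 128 (0x80); for 8000 interrupts/s,
 * 4000000 / 8000 = 500, masked to 0x1F0 (496).  The mask clears bits
 * 2:0 so the value stays within the EITR interval field (bits 11:3 on
 * 82599 and later MACs).
 */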
3250 
3251 /************************************************************************
3252  * ixgbe_config_gpie
3253  ************************************************************************/
3254 static void
3255 ixgbe_config_gpie(struct adapter *adapter)
3256 {
3257 	struct ixgbe_hw *hw = &adapter->hw;
3258 	u32             gpie;
3259 
3260 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3261 
3262 	if (adapter->intr_type == IFLIB_INTR_MSIX) {
3263 		/* Enable Enhanced MSI-X mode */
3264 		gpie |= IXGBE_GPIE_MSIX_MODE
3265 		     |  IXGBE_GPIE_EIAME
3266 		     |  IXGBE_GPIE_PBA_SUPPORT
3267 		     |  IXGBE_GPIE_OCD;
3268 	}
3269 
3270 	/* Fan Failure Interrupt */
3271 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3272 		gpie |= IXGBE_SDP1_GPIEN;
3273 
3274 	/* Thermal Sensor Interrupt */
3275 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3276 		gpie |= IXGBE_SDP0_GPIEN_X540;
3277 
3278 	/* Link detection */
3279 	switch (hw->mac.type) {
3280 	case ixgbe_mac_82599EB:
3281 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3282 		break;
3283 	case ixgbe_mac_X550EM_x:
3284 	case ixgbe_mac_X550EM_a:
3285 		gpie |= IXGBE_SDP0_GPIEN_X540;
3286 		break;
3287 	default:
3288 		break;
3289 	}
3290 
3291 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3292 
3293 } /* ixgbe_config_gpie */
3294 
3295 /************************************************************************
3296  * ixgbe_config_delay_values
3297  *
3298  *   Requires adapter->max_frame_size to be set.
3299  ************************************************************************/
3300 static void
3301 ixgbe_config_delay_values(struct adapter *adapter)
3302 {
3303 	struct ixgbe_hw *hw = &adapter->hw;
3304 	u32             rxpb, frame, size, tmp;
3305 
3306 	frame = adapter->max_frame_size;
3307 
3308 	/* Calculate High Water */
3309 	switch (hw->mac.type) {
3310 	case ixgbe_mac_X540:
3311 	case ixgbe_mac_X550:
3312 	case ixgbe_mac_X550EM_x:
3313 	case ixgbe_mac_X550EM_a:
3314 		tmp = IXGBE_DV_X540(frame, frame);
3315 		break;
3316 	default:
3317 		tmp = IXGBE_DV(frame, frame);
3318 		break;
3319 	}
3320 	size = IXGBE_BT2KB(tmp);
3321 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3322 	hw->fc.high_water[0] = rxpb - size;
3323 
3324 	/* Now calculate Low Water */
3325 	switch (hw->mac.type) {
3326 	case ixgbe_mac_X540:
3327 	case ixgbe_mac_X550:
3328 	case ixgbe_mac_X550EM_x:
3329 	case ixgbe_mac_X550EM_a:
3330 		tmp = IXGBE_LOW_DV_X540(frame);
3331 		break;
3332 	default:
3333 		tmp = IXGBE_LOW_DV(frame);
3334 		break;
3335 	}
3336 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3337 
3338 	hw->fc.pause_time = IXGBE_FC_PAUSE;
3339 	hw->fc.send_xon = true;
3340 } /* ixgbe_config_delay_values */
3341 
3342 /************************************************************************
3343  * ixgbe_if_multi_set - Multicast Update
3344  *
3345  *   Called whenever multicast address list is updated.
3346  ************************************************************************/
3347 static u_int
3348 ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int idx)
3349 {
3350 	struct adapter *adapter = arg;
3351 	struct ixgbe_mc_addr *mta = adapter->mta;
3352 
3353 	if (idx == MAX_NUM_MULTICAST_ADDRESSES)
3354 		return (0);
3355 	bcopy(LLADDR(sdl), mta[idx].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3356 	mta[idx].vmdq = adapter->pool;
3357 
3358 	return (1);
3359 } /* ixgbe_mc_filter_apply */
3360 
3361 static void
3362 ixgbe_if_multi_set(if_ctx_t ctx)
3363 {
3364 	struct adapter       *adapter = iflib_get_softc(ctx);
3365 	struct ixgbe_mc_addr *mta;
3366 	struct ifnet         *ifp = iflib_get_ifp(ctx);
3367 	u8                   *update_ptr;
3368 	u32                  fctrl;
3369 	u_int		     mcnt;
3370 
3371 	IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
3372 
3373 	mta = adapter->mta;
3374 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3375 
3376 	mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixgbe_mc_filter_apply,
3377 	    adapter);
3378 
3379 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3380 
3381 	if (ifp->if_flags & IFF_PROMISC)
3382 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3383 	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3384 	    ifp->if_flags & IFF_ALLMULTI) {
3385 		fctrl |= IXGBE_FCTRL_MPE;
3386 		fctrl &= ~IXGBE_FCTRL_UPE;
3387 	} else
3388 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3389 
3390 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
3391 
3392 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3393 		update_ptr = (u8 *)mta;
3394 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3395 		    ixgbe_mc_array_itr, true);
3396 	}
3397 
3398 } /* ixgbe_if_multi_set */
3399 
3400 /************************************************************************
3401  * ixgbe_mc_array_itr
3402  *
3403  *   An iterator function needed by the multicast shared code.
3404  *   It feeds the shared code routine the addresses in the
3405  *   array built by ixgbe_if_multi_set() one by one.
3406  ************************************************************************/
3407 static u8 *
3408 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3409 {
3410 	struct ixgbe_mc_addr *mta;
3411 
3412 	mta = (struct ixgbe_mc_addr *)*update_ptr;
3413 	*vmdq = mta->vmdq;
3414 
3415 	*update_ptr = (u8*)(mta + 1);
3416 
3417 	return (mta->addr);
3418 } /* ixgbe_mc_array_itr */
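
/*
 * Editor's note -- each call to ixgbe_mc_array_itr() returns one MAC
 * address and advances *update_ptr by sizeof(struct ixgbe_mc_addr), so
 * the shared code walks the mta[] array built by ixgbe_mc_filter_apply()
 * without needing to know its element layout.
 */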
3419 
3420 /************************************************************************
3421  * ixgbe_if_timer - Timer routine
3422  *
3423  *   Checks for pluggable optics and link status, then defers
3424  *   statistics updates and other work to the admin task.
3425  ************************************************************************/
3426 static void
3427 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3428 {
3429 	struct adapter *adapter = iflib_get_softc(ctx);
3430 
3431 	if (qid != 0)
3432 		return;
3433 
3434 	/* Check for pluggable optics */
3435 	if (adapter->sfp_probe)
3436 		if (!ixgbe_sfp_probe(ctx))
3437 			return; /* Nothing to do */
3438 
3439 	ixgbe_check_link(&adapter->hw, &adapter->link_speed,
3440 	    &adapter->link_up, 0);
3441 
3442 	/* Fire off the adminq task */
3443 	iflib_admin_intr_deferred(ctx);
3444 
3445 } /* ixgbe_if_timer */
3446 
3447 /************************************************************************
3448  * ixgbe_sfp_probe
3449  *
3450  *   Determine if a port had optics inserted.
3451  ************************************************************************/
3452 static bool
3453 ixgbe_sfp_probe(if_ctx_t ctx)
3454 {
3455 	struct adapter  *adapter = iflib_get_softc(ctx);
3456 	struct ixgbe_hw *hw = &adapter->hw;
3457 	device_t        dev = iflib_get_dev(ctx);
3458 	bool            result = false;
3459 
3460 	if ((hw->phy.type == ixgbe_phy_nl) &&
3461 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3462 		s32 ret = hw->phy.ops.identify_sfp(hw);
3463 		if (ret)
3464 			goto out;
3465 		ret = hw->phy.ops.reset(hw);
3466 		adapter->sfp_probe = false;
3467 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3468 			device_printf(dev, "Unsupported SFP+ module detected!\n");
3469 			device_printf(dev,
3470 			    "Reload driver with supported module.\n");
3471 			goto out;
3472 		} else
3473 			device_printf(dev, "SFP+ module detected!\n");
3474 		/* We now have supported optics */
3475 		result = true;
3476 	}
3477 out:
3478 
3479 	return (result);
3480 } /* ixgbe_sfp_probe */
3481 
3482 /************************************************************************
3483  * ixgbe_handle_mod - Tasklet for SFP module interrupts
3484  ************************************************************************/
3485 static void
3486 ixgbe_handle_mod(void *context)
3487 {
3488 	if_ctx_t        ctx = context;
3489 	struct adapter  *adapter = iflib_get_softc(ctx);
3490 	struct ixgbe_hw *hw = &adapter->hw;
3491 	device_t        dev = iflib_get_dev(ctx);
3492 	u32             err, cage_full = 0;
3493 
3494 	if (adapter->hw.need_crosstalk_fix) {
3495 		switch (hw->mac.type) {
3496 		case ixgbe_mac_82599EB:
3497 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3498 			    IXGBE_ESDP_SDP2;
3499 			break;
3500 		case ixgbe_mac_X550EM_x:
3501 		case ixgbe_mac_X550EM_a:
3502 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3503 			    IXGBE_ESDP_SDP0;
3504 			break;
3505 		default:
3506 			break;
3507 		}
3508 
3509 		if (!cage_full)
3510 			goto handle_mod_out;
3511 	}
3512 
3513 	err = hw->phy.ops.identify_sfp(hw);
3514 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3515 		device_printf(dev,
3516 		    "Unsupported SFP+ module type was detected.\n");
3517 		goto handle_mod_out;
3518 	}
3519 
3520 	if (hw->mac.type == ixgbe_mac_82598EB)
3521 		err = hw->phy.ops.reset(hw);
3522 	else
3523 		err = hw->mac.ops.setup_sfp(hw);
3524 
3525 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3526 		device_printf(dev,
3527 		    "Setup failure - unsupported SFP+ module type.\n");
3528 		goto handle_mod_out;
3529 	}
3530 	adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
3531 	return;
3532 
3533 handle_mod_out:
3534 	adapter->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
3535 } /* ixgbe_handle_mod */
3536 
3537 
3538 /************************************************************************
3539  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3540  ************************************************************************/
3541 static void
3542 ixgbe_handle_msf(void *context)
3543 {
3544 	if_ctx_t        ctx = context;
3545 	struct adapter  *adapter = iflib_get_softc(ctx);
3546 	struct ixgbe_hw *hw = &adapter->hw;
3547 	u32             autoneg;
3548 	bool            negotiate;
3549 
3550 	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3551 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3552 
3553 	autoneg = hw->phy.autoneg_advertised;
3554 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3555 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3556 	if (hw->mac.ops.setup_link)
3557 		hw->mac.ops.setup_link(hw, autoneg, true);
3558 
3559 	/* Adjust media types shown in ifconfig */
3560 	ifmedia_removeall(adapter->media);
3561 	ixgbe_add_media_types(adapter->ctx);
3562 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
3563 } /* ixgbe_handle_msf */
3564 
3565 /************************************************************************
3566  * ixgbe_handle_phy - Tasklet for external PHY interrupts
3567  ************************************************************************/
3568 static void
3569 ixgbe_handle_phy(void *context)
3570 {
3571 	if_ctx_t        ctx = context;
3572 	struct adapter  *adapter = iflib_get_softc(ctx);
3573 	struct ixgbe_hw *hw = &adapter->hw;
3574 	int             error;
3575 
3576 	error = hw->phy.ops.handle_lasi(hw);
3577 	if (error == IXGBE_ERR_OVERTEMP)
3578 		device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!!  PHY will downshift to lower power state!\n");
3579 	else if (error)
3580 		device_printf(adapter->dev,
3581 		    "Error handling LASI interrupt: %d\n", error);
3582 } /* ixgbe_handle_phy */
3583 
3584 /************************************************************************
3585  * ixgbe_if_stop - Stop the hardware
3586  *
3587  *   Disables all traffic on the adapter by issuing a
3588  *   global reset on the MAC and deallocates TX/RX buffers.
3589  ************************************************************************/
3590 static void
3591 ixgbe_if_stop(if_ctx_t ctx)
3592 {
3593 	struct adapter  *adapter = iflib_get_softc(ctx);
3594 	struct ixgbe_hw *hw = &adapter->hw;
3595 
3596 	INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
3597 
3598 	ixgbe_reset_hw(hw);
3599 	hw->adapter_stopped = false;
3600 	ixgbe_stop_adapter(hw);
3601 	if (hw->mac.type == ixgbe_mac_82599EB)
3602 		ixgbe_stop_mac_link_on_d3_82599(hw);
3603 	/* Turn off the laser - noop with no optics */
3604 	ixgbe_disable_tx_laser(hw);
3605 
3606 	/* Update the stack */
3607 	adapter->link_up = false;
3608 	ixgbe_if_update_admin_status(ctx);
3609 
3610 	/* reprogram the RAR[0] in case user changed it. */
3611 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3612 
3613 	return;
3614 } /* ixgbe_if_stop */
3615 
3616 /************************************************************************
3617  * ixgbe_if_update_admin_status - Update OS on link state
3618  *
3619  * Note: Only updates the OS on the cached link state.
3620  *       The real check of the hardware only happens with
3621  *       a link interrupt.
3622  ************************************************************************/
3623 static void
3624 ixgbe_if_update_admin_status(if_ctx_t ctx)
3625 {
3626 	struct adapter *adapter = iflib_get_softc(ctx);
3627 	device_t       dev = iflib_get_dev(ctx);
3628 
3629 	if (adapter->link_up) {
3630 		if (adapter->link_active == false) {
3631 			if (bootverbose)
3632 				device_printf(dev, "Link is up %d Gbps %s\n",
3633 				    ((adapter->link_speed == 128) ? 10 : 1),
3634 				    "Full Duplex");
3635 			adapter->link_active = true;
3636 			/* Update any Flow Control changes */
3637 			ixgbe_fc_enable(&adapter->hw);
3638 			/* Update DMA coalescing config */
3639 			ixgbe_config_dmac(adapter);
3640 			/* should actually be negotiated value */
3641 			iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));
3642 
3643 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3644 				ixgbe_ping_all_vfs(adapter);
3645 		}
3646 	} else { /* Link down */
3647 		if (adapter->link_active == true) {
3648 			if (bootverbose)
3649 				device_printf(dev, "Link is Down\n");
3650 			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
3651 			adapter->link_active = false;
3652 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3653 				ixgbe_ping_all_vfs(adapter);
3654 		}
3655 	}
3656 
3657 	/* Handle task requests from msix_link() */
3658 	if (adapter->task_requests & IXGBE_REQUEST_TASK_MOD)
3659 		ixgbe_handle_mod(ctx);
3660 	if (adapter->task_requests & IXGBE_REQUEST_TASK_MSF)
3661 		ixgbe_handle_msf(ctx);
3662 	if (adapter->task_requests & IXGBE_REQUEST_TASK_MBX)
3663 		ixgbe_handle_mbx(ctx);
3664 	if (adapter->task_requests & IXGBE_REQUEST_TASK_FDIR)
3665 		ixgbe_reinit_fdir(ctx);
3666 	if (adapter->task_requests & IXGBE_REQUEST_TASK_PHY)
3667 		ixgbe_handle_phy(ctx);
3668 	adapter->task_requests = 0;
3669 
3670 	ixgbe_update_stats_counters(adapter);
3671 } /* ixgbe_if_update_admin_status */
3672 
3673 /************************************************************************
3674  * ixgbe_config_dmac - Configure DMA Coalescing
3675  ************************************************************************/
3676 static void
3677 ixgbe_config_dmac(struct adapter *adapter)
3678 {
3679 	struct ixgbe_hw          *hw = &adapter->hw;
3680 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3681 
3682 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3683 		return;
3684 
3685 	if (dcfg->watchdog_timer ^ adapter->dmac ||
3686 	    dcfg->link_speed ^ adapter->link_speed) {
3687 		dcfg->watchdog_timer = adapter->dmac;
3688 		dcfg->fcoe_en = false;
3689 		dcfg->link_speed = adapter->link_speed;
3690 		dcfg->num_tcs = 1;
3691 
3692 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3693 		    dcfg->watchdog_timer, dcfg->link_speed);
3694 
3695 		hw->mac.ops.dmac_config(hw);
3696 	}
3697 } /* ixgbe_config_dmac */
3698 
3699 /************************************************************************
3700  * ixgbe_if_enable_intr
3701  ************************************************************************/
3702 void
3703 ixgbe_if_enable_intr(if_ctx_t ctx)
3704 {
3705 	struct adapter     *adapter = iflib_get_softc(ctx);
3706 	struct ixgbe_hw    *hw = &adapter->hw;
3707 	struct ix_rx_queue *que = adapter->rx_queues;
3708 	u32                mask, fwsm;
3709 
3710 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3711 
3712 	switch (adapter->hw.mac.type) {
3713 	case ixgbe_mac_82599EB:
3714 		mask |= IXGBE_EIMS_ECC;
3715 		/* Temperature sensor on some adapters */
3716 		mask |= IXGBE_EIMS_GPI_SDP0;
3717 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
3718 		mask |= IXGBE_EIMS_GPI_SDP1;
3719 		mask |= IXGBE_EIMS_GPI_SDP2;
3720 		break;
3721 	case ixgbe_mac_X540:
3722 		/* Detect if Thermal Sensor is enabled */
3723 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3724 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
3725 			mask |= IXGBE_EIMS_TS;
3726 		mask |= IXGBE_EIMS_ECC;
3727 		break;
3728 	case ixgbe_mac_X550:
3729 		/* MAC thermal sensor is automatically enabled */
3730 		mask |= IXGBE_EIMS_TS;
3731 		mask |= IXGBE_EIMS_ECC;
3732 		break;
3733 	case ixgbe_mac_X550EM_x:
3734 	case ixgbe_mac_X550EM_a:
3735 		/* Some devices use SDP0 for important information */
3736 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3737 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3738 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3739 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3740 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3741 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3742 			mask |= IXGBE_EICR_GPI_SDP0_X540;
3743 		mask |= IXGBE_EIMS_ECC;
3744 		break;
3745 	default:
3746 		break;
3747 	}
3748 
3749 	/* Enable Fan Failure detection */
3750 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3751 		mask |= IXGBE_EIMS_GPI_SDP1;
3752 	/* Enable SR-IOV */
3753 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3754 		mask |= IXGBE_EIMS_MAILBOX;
3755 	/* Enable Flow Director */
3756 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
3757 		mask |= IXGBE_EIMS_FLOW_DIR;
3758 
3759 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3760 
3761 	/* With MSI-X we use auto clear */
3762 	if (adapter->intr_type == IFLIB_INTR_MSIX) {
3763 		mask = IXGBE_EIMS_ENABLE_MASK;
3764 		/* Don't autoclear Link */
3765 		mask &= ~IXGBE_EIMS_OTHER;
3766 		mask &= ~IXGBE_EIMS_LSC;
3767 		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
3768 			mask &= ~IXGBE_EIMS_MAILBOX;
3769 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3770 	}
3771 
3772 	/*
3773 	 * Now enable all queues, this is done separately to
3774 	 * allow for handling the extended (beyond 32) MSI-X
3775 	 * vectors that can be used by 82599
3776 	 */
3777 	for (int i = 0; i < adapter->num_rx_queues; i++, que++)
3778 		ixgbe_enable_queue(adapter, que->msix);
3779 
3780 	IXGBE_WRITE_FLUSH(hw);
3781 
3782 } /* ixgbe_if_enable_intr */
3783 
3784 /************************************************************************
3785  * ixgbe_if_disable_intr
3786  ************************************************************************/
3787 static void
3788 ixgbe_if_disable_intr(if_ctx_t ctx)
3789 {
3790 	struct adapter *adapter = iflib_get_softc(ctx);
3791 
3792 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3793 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3794 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3795 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3796 	} else {
3797 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3798 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3799 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3800 	}
3801 	IXGBE_WRITE_FLUSH(&adapter->hw);
3802 
3803 } /* ixgbe_if_disable_intr */
3804 
3805 /************************************************************************
3806  * ixgbe_link_intr_enable
3807  ************************************************************************/
3808 static void
3809 ixgbe_link_intr_enable(if_ctx_t ctx)
3810 {
3811 	struct ixgbe_hw *hw = &((struct adapter *)iflib_get_softc(ctx))->hw;
3812 
3813 	/* Re-enable other interrupts */
3814 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
3815 } /* ixgbe_link_intr_enable */
3816 
3817 /************************************************************************
3818  * ixgbe_if_rx_queue_intr_enable
3819  ************************************************************************/
3820 static int
3821 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
3822 {
3823 	struct adapter     *adapter = iflib_get_softc(ctx);
3824 	struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
3825 
3826 	ixgbe_enable_queue(adapter, que->msix);
3827 
3828 	return (0);
3829 } /* ixgbe_if_rx_queue_intr_enable */
3830 
3831 /************************************************************************
3832  * ixgbe_enable_queue
3833  ************************************************************************/
3834 static void
3835 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
3836 {
3837 	struct ixgbe_hw *hw = &adapter->hw;
3838 	u64             queue = 1ULL << vector;
3839 	u32             mask;
3840 
3841 	if (hw->mac.type == ixgbe_mac_82598EB) {
3842 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3843 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3844 	} else {
3845 		mask = (queue & 0xFFFFFFFF);
3846 		if (mask)
3847 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
3848 		mask = (queue >> 32);
3849 		if (mask)
3850 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
3851 	}
3852 } /* ixgbe_enable_queue */
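
/************************************************************************
 * Editor's sketch -- EIMS_EX mask split (illustrative only, not called)
 *
 *   Shows how the 64-bit queue mask used above and in
 *   ixgbe_disable_queue() splits across the two EIMS_EX/EIMC_EX
 *   registers on 82599 and newer parts.  The function name and vector
 *   value are hypothetical.
 ************************************************************************/
static __unused void
ixgbe_eims_ex_split_example(void)
{
	u32 vector = 35;                    /* example MSI-X vector */
	u64 queue = 1ULL << vector;
	u32 lo_mask = queue & 0xFFFFFFFF;   /* -> EIMS_EX(0): 0x0 */
	u32 hi_mask = queue >> 32;          /* -> EIMS_EX(1): 1 << 3 */

	(void)lo_mask;
	(void)hi_mask;
} /* ixgbe_eims_ex_split_example */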
3853 
3854 /************************************************************************
3855  * ixgbe_disable_queue
3856  ************************************************************************/
3857 static void
3858 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
3859 {
3860 	struct ixgbe_hw *hw = &adapter->hw;
3861 	u64             queue = 1ULL << vector;
3862 	u32             mask;
3863 
3864 	if (hw->mac.type == ixgbe_mac_82598EB) {
3865 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3866 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
3867 	} else {
3868 		mask = (queue & 0xFFFFFFFF);
3869 		if (mask)
3870 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
3871 		mask = (queue >> 32);
3872 		if (mask)
3873 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
3874 	}
3875 } /* ixgbe_disable_queue */
3876 
3877 /************************************************************************
3878  * ixgbe_intr - Legacy Interrupt Service Routine
3879  ************************************************************************/
3880 int
3881 ixgbe_intr(void *arg)
3882 {
3883 	struct adapter     *adapter = arg;
3884 	struct ix_rx_queue *que = adapter->rx_queues;
3885 	struct ixgbe_hw    *hw = &adapter->hw;
3886 	if_ctx_t           ctx = adapter->ctx;
3887 	u32                eicr, eicr_mask;
3888 
3889 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3890 
3891 	++que->irqs;
3892 	if (eicr == 0) {
3893 		ixgbe_if_enable_intr(ctx);
3894 		return (FILTER_HANDLED);
3895 	}
3896 
3897 	/* Check for fan failure */
3898 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
3899 	    (eicr & IXGBE_EICR_GPI_SDP1)) {
3900 		device_printf(adapter->dev,
3901 		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
3902 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3903 	}
3904 
3905 	/* Link status change */
3906 	if (eicr & IXGBE_EICR_LSC) {
3907 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3908 		iflib_admin_intr_deferred(ctx);
3909 	}
3910 
3911 	if (ixgbe_is_sfp(hw)) {
3912 		/* Pluggable optics-related interrupt */
3913 		if (hw->mac.type >= ixgbe_mac_X540)
3914 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3915 		else
3916 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3917 
3918 		if (eicr & eicr_mask) {
3919 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3920 			adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
3921 		}
3922 
3923 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
3924 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3925 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
3926 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3927 			adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
3928 		}
3929 	}
3930 
3931 	/* External PHY interrupt */
3932 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3933 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
3934 		adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
3935 
3936 	return (FILTER_SCHEDULE_THREAD);
3937 } /* ixgbe_intr */
3938 
3939 /************************************************************************
3940  * ixgbe_free_pci_resources
3941  ************************************************************************/
3942 static void
3943 ixgbe_free_pci_resources(if_ctx_t ctx)
3944 {
3945 	struct adapter *adapter = iflib_get_softc(ctx);
3946 	struct ix_rx_queue *que = adapter->rx_queues;
3947 	device_t       dev = iflib_get_dev(ctx);
3948 
3949 	/* Release all MSI-X queue resources */
3950 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3951 		iflib_irq_free(ctx, &adapter->irq);
3952 
3953 	if (que != NULL) {
3954 		for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
3955 			iflib_irq_free(ctx, &que->que_irq);
3956 		}
3957 	}
3958 
3959 	if (adapter->pci_mem != NULL)
3960 		bus_release_resource(dev, SYS_RES_MEMORY,
3961 		    rman_get_rid(adapter->pci_mem), adapter->pci_mem);
3962 } /* ixgbe_free_pci_resources */
3963 
3964 /************************************************************************
3965  * ixgbe_sysctl_flowcntl
3966  *
3967  *   SYSCTL wrapper around setting Flow Control
3968  ************************************************************************/
3969 static int
3970 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3971 {
3972 	struct adapter *adapter;
3973 	int            error, fc;
3974 
3975 	adapter = (struct adapter *)arg1;
3976 	fc = adapter->hw.fc.current_mode;
3977 
3978 	error = sysctl_handle_int(oidp, &fc, 0, req);
3979 	if ((error) || (req->newptr == NULL))
3980 		return (error);
3981 
3982 	/* Don't bother if it's not changed */
3983 	if (fc == adapter->hw.fc.current_mode)
3984 		return (0);
3985 
3986 	return ixgbe_set_flowcntl(adapter, fc);
3987 } /* ixgbe_sysctl_flowcntl */
3988 
3989 /************************************************************************
3990  * ixgbe_set_flowcntl - Set flow control
3991  *
3992  *   Flow control values:
3993  *     0 - off
3994  *     1 - rx pause
3995  *     2 - tx pause
3996  *     3 - full
3997  ************************************************************************/
3998 static int
3999 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
4000 {
4001 	switch (fc) {
4002 	case ixgbe_fc_rx_pause:
4003 	case ixgbe_fc_tx_pause:
4004 	case ixgbe_fc_full:
4005 		adapter->hw.fc.requested_mode = fc;
4006 		if (adapter->num_rx_queues > 1)
4007 			ixgbe_disable_rx_drop(adapter);
4008 		break;
4009 	case ixgbe_fc_none:
4010 		adapter->hw.fc.requested_mode = ixgbe_fc_none;
4011 		if (adapter->num_rx_queues > 1)
4012 			ixgbe_enable_rx_drop(adapter);
4013 		break;
4014 	default:
4015 		return (EINVAL);
4016 	}
4017 
4018 	/* Don't autoneg if forcing a value */
4019 	adapter->hw.fc.disable_fc_autoneg = true;
4020 	ixgbe_fc_enable(&adapter->hw);
4021 
4022 	return (0);
4023 } /* ixgbe_set_flowcntl */
4024 
4025 /************************************************************************
4026  * ixgbe_enable_rx_drop
4027  *
4028  *   Enable the hardware to drop packets when the buffer is
4029  *   full. This is useful with multiqueue, so that no single
4030  *   queue being full stalls the entire RX engine. We only
4031  *   enable this when Multiqueue is enabled AND Flow Control
4032  *   is disabled.
4033  ************************************************************************/
4034 static void
4035 ixgbe_enable_rx_drop(struct adapter *adapter)
4036 {
4037 	struct ixgbe_hw *hw = &adapter->hw;
4038 	struct rx_ring  *rxr;
4039 	u32             srrctl;
4040 
4041 	for (int i = 0; i < adapter->num_rx_queues; i++) {
4042 		rxr = &adapter->rx_queues[i].rxr;
4043 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4044 		srrctl |= IXGBE_SRRCTL_DROP_EN;
4045 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4046 	}
4047 
4048 	/* enable drop for each vf */
4049 	for (int i = 0; i < adapter->num_vfs; i++) {
4050 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
4051 		                (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
4052 		                IXGBE_QDE_ENABLE));
4053 	}
4054 } /* ixgbe_enable_rx_drop */
4055 
4056 /************************************************************************
4057  * ixgbe_disable_rx_drop
4058  ************************************************************************/
4059 static void
4060 ixgbe_disable_rx_drop(struct adapter *adapter)
4061 {
4062 	struct ixgbe_hw *hw = &adapter->hw;
4063 	struct rx_ring  *rxr;
4064 	u32             srrctl;
4065 
4066 	for (int i = 0; i < adapter->num_rx_queues; i++) {
4067 		rxr = &adapter->rx_queues[i].rxr;
4068 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4069 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4070 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4071 	}
4072 
4073 	/* disable drop for each vf */
4074 	for (int i = 0; i < adapter->num_vfs; i++) {
4075 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
4076 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
4077 	}
4078 } /* ixgbe_disable_rx_drop */
4079 
4080 /************************************************************************
4081  * ixgbe_sysctl_advertise
4082  *
4083  *   SYSCTL wrapper around setting advertised speed
4084  ************************************************************************/
4085 static int
4086 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4087 {
4088 	struct adapter *adapter;
4089 	int            error, advertise;
4090 
4091 	adapter = (struct adapter *)arg1;
4092 	advertise = adapter->advertise;
4093 
4094 	error = sysctl_handle_int(oidp, &advertise, 0, req);
4095 	if ((error) || (req->newptr == NULL))
4096 		return (error);
4097 
4098 	return ixgbe_set_advertise(adapter, advertise);
4099 } /* ixgbe_sysctl_advertise */
4100 
4101 /************************************************************************
4102  * ixgbe_set_advertise - Control advertised link speed
4103  *
4104  *   Flags:
4105  *     0x1 - advertise 100 Mb
4106  *     0x2 - advertise 1G
4107  *     0x4 - advertise 10G
4108  *     0x8 - advertise 10 Mb (yes, Mb)
4109  ************************************************************************/
4110 static int
4111 ixgbe_set_advertise(struct adapter *adapter, int advertise)
4112 {
4113 	device_t         dev = iflib_get_dev(adapter->ctx);
4114 	struct ixgbe_hw  *hw;
4115 	ixgbe_link_speed speed = 0;
4116 	ixgbe_link_speed link_caps = 0;
4117 	s32              err = IXGBE_NOT_IMPLEMENTED;
4118 	bool             negotiate = false;
4119 
4120 	/* Checks to validate new value */
4121 	if (adapter->advertise == advertise) /* no change */
4122 		return (0);
4123 
4124 	hw = &adapter->hw;
4125 
4126 	/* No speed changes for backplane media */
4127 	if (hw->phy.media_type == ixgbe_media_type_backplane)
4128 		return (ENODEV);
4129 
4130 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4131 	      (hw->phy.multispeed_fiber))) {
4132 		device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
4133 		return (EINVAL);
4134 	}
4135 
4136 	if (advertise < 0x1 || advertise > 0xF) {
4137 		device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4138 		return (EINVAL);
4139 	}
4140 
4141 	if (hw->mac.ops.get_link_capabilities) {
4142 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4143 		    &negotiate);
4144 		if (err != IXGBE_SUCCESS) {
4145 			device_printf(dev, "Unable to determine supported advertise speeds\n");
4146 			return (ENODEV);
4147 		}
4148 	}
4149 
4150 	/* Set new value and report new advertised mode */
4151 	if (advertise & 0x1) {
4152 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4153 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4154 			return (EINVAL);
4155 		}
4156 		speed |= IXGBE_LINK_SPEED_100_FULL;
4157 	}
4158 	if (advertise & 0x2) {
4159 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4160 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4161 			return (EINVAL);
4162 		}
4163 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
4164 	}
4165 	if (advertise & 0x4) {
4166 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4167 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4168 			return (EINVAL);
4169 		}
4170 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
4171 	}
4172 	if (advertise & 0x8) {
4173 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4174 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4175 			return (EINVAL);
4176 		}
4177 		speed |= IXGBE_LINK_SPEED_10_FULL;
4178 	}
4179 
4180 	hw->mac.autotry_restart = true;
4181 	hw->mac.ops.setup_link(hw, speed, true);
4182 	adapter->advertise = advertise;
4183 
4184 	return (0);
4185 } /* ixgbe_set_advertise */
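
/*
 * Editor's note -- example of the advertise bitmask: 0x2 | 0x4 = 0x6
 * requests 1G and 10G only, while 0x1 alone limits advertisement to
 * 100 Mb.  The value normally arrives through the per-device
 * "advertise_speed" sysctl (name assumed; the sysctl tree is created
 * elsewhere in this driver).
 */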
4186 
4187 /************************************************************************
4188  * ixgbe_get_advertise - Get current advertised speed settings
4189  *
4190  *   Formatted for sysctl usage.
4191  *   Flags:
4192  *     0x1 - advertise 100 Mb
4193  *     0x2 - advertise 1G
4194  *     0x4 - advertise 10G
4195  *     0x8 - advertise 10 Mb (yes, Mb)
4196  ************************************************************************/
4197 static int
4198 ixgbe_get_advertise(struct adapter *adapter)
4199 {
4200 	struct ixgbe_hw  *hw = &adapter->hw;
4201 	int              speed;
4202 	ixgbe_link_speed link_caps = 0;
4203 	s32              err;
4204 	bool             negotiate = false;
4205 
4206 	/*
4207 	 * Advertised speed means nothing unless it's copper or
4208 	 * multi-speed fiber
4209 	 */
4210 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4211 	    !(hw->phy.multispeed_fiber))
4212 		return (0);
4213 
4214 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4215 	if (err != IXGBE_SUCCESS)
4216 		return (0);
4217 
4218 	speed =
4219 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4220 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
4221 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
4222 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
4223 
4224 	return speed;
4225 } /* ixgbe_get_advertise */
4226 
4227 /************************************************************************
4228  * ixgbe_sysctl_dmac - Manage DMA Coalescing
4229  *
4230  *   Control values:
4231  *     0/1 - off / on (use default value of 1000)
4232  *
4233  *     Legal timer values are:
4234  *     50,100,250,500,1000,2000,5000,10000
4235  *
4236  *     Turning off interrupt moderation will also turn this off.
4237  ************************************************************************/
4238 static int
4239 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4240 {
4241 	struct adapter *adapter = (struct adapter *)arg1;
4242 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4243 	int            error;
4244 	u16            newval;
4245 
4246 	newval = adapter->dmac;
4247 	error = sysctl_handle_16(oidp, &newval, 0, req);
4248 	if ((error) || (req->newptr == NULL))
4249 		return (error);
4250 
4251 	switch (newval) {
4252 	case 0:
4253 		/* Disabled */
4254 		adapter->dmac = 0;
4255 		break;
4256 	case 1:
4257 		/* Enable and use default */
4258 		adapter->dmac = 1000;
4259 		break;
4260 	case 50:
4261 	case 100:
4262 	case 250:
4263 	case 500:
4264 	case 1000:
4265 	case 2000:
4266 	case 5000:
4267 	case 10000:
4268 		/* Legal values - allow */
4269 		adapter->dmac = newval;
4270 		break;
4271 	default:
4272 		/* Do nothing, illegal value */
4273 		return (EINVAL);
4274 	}
4275 
4276 	/* Re-initialize hardware if it's already running */
4277 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4278 		ifp->if_init(ifp);
4279 
4280 	return (0);
4281 } /* ixgbe_sysctl_dmac */
4282 
4283 #ifdef IXGBE_DEBUG
4284 /************************************************************************
4285  * ixgbe_sysctl_power_state
4286  *
4287  *   Sysctl to test power states
4288  *   Values:
4289  *     0      - set device to D0
4290  *     3      - set device to D3
4291  *     (none) - get current device power state
4292  ************************************************************************/
4293 static int
4294 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4295 {
4296 	struct adapter *adapter = (struct adapter *)arg1;
4297 	device_t       dev = adapter->dev;
4298 	int            curr_ps, new_ps, error = 0;
4299 
4300 	curr_ps = new_ps = pci_get_powerstate(dev);
4301 
4302 	error = sysctl_handle_int(oidp, &new_ps, 0, req);
4303 	if ((error) || (req->newptr == NULL))
4304 		return (error);
4305 
4306 	if (new_ps == curr_ps)
4307 		return (0);
4308 
4309 	if (new_ps == 3 && curr_ps == 0)
4310 		error = DEVICE_SUSPEND(dev);
4311 	else if (new_ps == 0 && curr_ps == 3)
4312 		error = DEVICE_RESUME(dev);
4313 	else
4314 		return (EINVAL);
4315 
4316 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4317 
4318 	return (error);
4319 } /* ixgbe_sysctl_power_state */
4320 #endif
4321 
4322 /************************************************************************
4323  * ixgbe_sysctl_wol_enable
4324  *
4325  *   Sysctl to enable/disable the WoL capability,
4326  *   if supported by the adapter.
4327  *
4328  *   Values:
4329  *     0 - disabled
4330  *     1 - enabled
4331  ************************************************************************/
4332 static int
4333 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4334 {
4335 	struct adapter  *adapter = (struct adapter *)arg1;
4336 	struct ixgbe_hw *hw = &adapter->hw;
4337 	int             new_wol_enabled;
4338 	int             error = 0;
4339 
4340 	new_wol_enabled = hw->wol_enabled;
4341 	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4342 	if ((error) || (req->newptr == NULL))
4343 		return (error);
4344 	new_wol_enabled = !!(new_wol_enabled);
4345 	if (new_wol_enabled == hw->wol_enabled)
4346 		return (0);
4347 
4348 	if (new_wol_enabled > 0 && !adapter->wol_support)
4349 		return (ENODEV);
4350 	else
4351 		hw->wol_enabled = new_wol_enabled;
4352 
4353 	return (0);
4354 } /* ixgbe_sysctl_wol_enable */
4355 
4356 /************************************************************************
4357  * ixgbe_sysctl_wufc - Wake Up Filter Control
4358  *
4359  *   Sysctl to enable/disable the types of packets that the
4360  *   adapter will wake up on upon receipt.
4361  *   Flags:
4362  *     0x1  - Link Status Change
4363  *     0x2  - Magic Packet
4364  *     0x4  - Direct Exact
4365  *     0x8  - Directed Multicast
4366  *     0x10 - Broadcast
4367  *     0x20 - ARP/IPv4 Request Packet
4368  *     0x40 - Direct IPv4 Packet
4369  *     0x80 - Direct IPv6 Packet
4370  *
4371  *   Settings not listed above will cause the sysctl to return an error.
4372  ************************************************************************/
4373 static int
4374 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4375 {
4376 	struct adapter *adapter = (struct adapter *)arg1;
4377 	int            error = 0;
4378 	u32            new_wufc;
4379 
4380 	new_wufc = adapter->wufc;
4381 
4382 	error = sysctl_handle_32(oidp, &new_wufc, 0, req);
4383 	if ((error) || (req->newptr == NULL))
4384 		return (error);
4385 	if (new_wufc == adapter->wufc)
4386 		return (0);
4387 
4388 	if (new_wufc & 0xffffff00)
4389 		return (EINVAL);
4390 
4391 	new_wufc &= 0xff;
4392 	new_wufc |= (0xffffff00 & adapter->wufc);	/* preserve bits not settable here */
4393 	adapter->wufc = new_wufc;
4394 
4395 	return (0);
4396 } /* ixgbe_sysctl_wufc */
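
/*
 * Editor's note -- example of the wake-up filter flags listed above:
 * 0x2 | 0x10 = 0x12 arms wake on magic packet and broadcast, while a
 * value with any bit outside the low byte set is rejected with EINVAL.
 */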
4397 
4398 #ifdef IXGBE_DEBUG
4399 /************************************************************************
4400  * ixgbe_sysctl_print_rss_config
4401  ************************************************************************/
4402 static int
4403 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4404 {
4405 	struct adapter  *adapter = (struct adapter *)arg1;
4406 	struct ixgbe_hw *hw = &adapter->hw;
4407 	device_t        dev = adapter->dev;
4408 	struct sbuf     *buf;
4409 	int             error = 0, reta_size;
4410 	u32             reg;
4411 
4412 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4413 	if (!buf) {
4414 		device_printf(dev, "Could not allocate sbuf for output.\n");
4415 		return (ENOMEM);
4416 	}
4417 
4418 	// TODO: use sbufs to make a string to print out
4419 	/* Set multiplier for RETA setup and table size based on MAC */
4420 	switch (adapter->hw.mac.type) {
4421 	case ixgbe_mac_X550:
4422 	case ixgbe_mac_X550EM_x:
4423 	case ixgbe_mac_X550EM_a:
4424 		reta_size = 128;
4425 		break;
4426 	default:
4427 		reta_size = 32;
4428 		break;
4429 	}
4430 
4431 	/* Print out the redirection table */
4432 	sbuf_cat(buf, "\n");
4433 	for (int i = 0; i < reta_size; i++) {
4434 		if (i < 32) {
4435 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4436 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4437 		} else {
4438 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4439 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4440 		}
4441 	}
4442 
4443 	// TODO: print more config
4444 
4445 	error = sbuf_finish(buf);
4446 	if (error)
4447 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4448 
4449 	sbuf_delete(buf);
4450 
4451 	return (0);
4452 } /* ixgbe_sysctl_print_rss_config */
4453 #endif /* IXGBE_DEBUG */
4454 
4455 /************************************************************************
4456  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4457  *
4458  *   For X552/X557-AT devices using an external PHY
4459  ************************************************************************/
4460 static int
4461 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4462 {
4463 	struct adapter  *adapter = (struct adapter *)arg1;
4464 	struct ixgbe_hw *hw = &adapter->hw;
4465 	u16             reg;
4466 
4467 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4468 		device_printf(iflib_get_dev(adapter->ctx),
4469 		    "Device has no supported external thermal sensor.\n");
4470 		return (ENODEV);
4471 	}
4472 
4473 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4474 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4475 		device_printf(iflib_get_dev(adapter->ctx),
4476 		    "Error reading from PHY's current temperature register\n");
4477 		return (EAGAIN);
4478 	}
4479 
4480 	/* Shift temp for output */
4481 	reg = reg >> 8;
4482 
4483 	return (sysctl_handle_16(oidp, NULL, reg, req));
4484 } /* ixgbe_sysctl_phy_temp */
4485 
4486 /************************************************************************
4487  * ixgbe_sysctl_phy_overtemp_occurred
4488  *
4489  *   Reports (directly from the PHY) whether the current PHY
4490  *   temperature is over the overtemp threshold.
4491  ************************************************************************/
4492 static int
4493 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4494 {
4495 	struct adapter  *adapter = (struct adapter *)arg1;
4496 	struct ixgbe_hw *hw = &adapter->hw;
4497 	u16             reg;
4498 
4499 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4500 		device_printf(iflib_get_dev(adapter->ctx),
4501 		    "Device has no supported external thermal sensor.\n");
4502 		return (ENODEV);
4503 	}
4504 
4505 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4506 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4507 		device_printf(iflib_get_dev(adapter->ctx),
4508 		    "Error reading from PHY's temperature status register\n");
4509 		return (EAGAIN);
4510 	}
4511 
4512 	/* Get occurrence bit */
4513 	reg = !!(reg & 0x4000);
4514 
4515 	return (sysctl_handle_16(oidp, NULL, reg, req));
4516 } /* ixgbe_sysctl_phy_overtemp_occurred */
4517 
4518 /************************************************************************
4519  * ixgbe_sysctl_eee_state
4520  *
4521  *   Sysctl to set EEE power saving feature
4522  *   Values:
4523  *     0      - disable EEE
4524  *     1      - enable EEE
4525  *     (none) - get current device EEE state
4526  ************************************************************************/
4527 static int
4528 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4529 {
4530 	struct adapter *adapter = (struct adapter *)arg1;
4531 	device_t       dev = adapter->dev;
4532 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4533 	int            curr_eee, new_eee, error = 0;
4534 	s32            retval;
4535 
4536 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
4537 
4538 	error = sysctl_handle_int(oidp, &new_eee, 0, req);
4539 	if ((error) || (req->newptr == NULL))
4540 		return (error);
4541 
4542 	/* Nothing to do */
4543 	if (new_eee == curr_eee)
4544 		return (0);
4545 
4546 	/* Not supported */
4547 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4548 		return (EINVAL);
4549 
4550 	/* Bounds checking */
4551 	if ((new_eee < 0) || (new_eee > 1))
4552 		return (EINVAL);
4553 
4554 	retval = ixgbe_setup_eee(&adapter->hw, new_eee);
4555 	if (retval) {
4556 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4557 		return (EINVAL);
4558 	}
4559 
4560 	/* Restart auto-neg */
4561 	ifp->if_init(ifp);
4562 
4563 	device_printf(dev, "New EEE state: %d\n", new_eee);
4564 
4565 	/* Cache new value */
4566 	if (new_eee)
4567 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4568 	else
4569 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
4570 
4571 	return (error);
4572 } /* ixgbe_sysctl_eee_state */
4573 
4574 /************************************************************************
4575  * ixgbe_init_device_features
4576  ************************************************************************/
4577 static void
4578 ixgbe_init_device_features(struct adapter *adapter)
4579 {
4580 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
4581 	                  | IXGBE_FEATURE_RSS
4582 	                  | IXGBE_FEATURE_MSI
4583 	                  | IXGBE_FEATURE_MSIX
4584 	                  | IXGBE_FEATURE_LEGACY_IRQ;
4585 
4586 	/* Set capabilities first... */
4587 	switch (adapter->hw.mac.type) {
4588 	case ixgbe_mac_82598EB:
4589 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
4590 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4591 		break;
4592 	case ixgbe_mac_X540:
4593 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4594 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4595 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4596 		    (adapter->hw.bus.func == 0))
4597 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4598 		break;
4599 	case ixgbe_mac_X550:
4600 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4601 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4602 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4603 		break;
4604 	case ixgbe_mac_X550EM_x:
4605 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4606 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4607 		break;
4608 	case ixgbe_mac_X550EM_a:
4609 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4610 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4611 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4612 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4613 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4614 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4615 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
4616 		}
4617 		break;
4618 	case ixgbe_mac_82599EB:
4619 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4620 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4621 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4622 		    (adapter->hw.bus.func == 0))
4623 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4624 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4625 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4626 		break;
4627 	default:
4628 		break;
4629 	}
4630 
4631 	/* Enabled by default... */
4632 	/* Fan failure detection */
4633 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4634 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4635 	/* Netmap */
4636 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
4637 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
4638 	/* EEE */
4639 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4640 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4641 	/* Thermal Sensor */
4642 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4643 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4644 
4645 	/* Enabled via global sysctl... */
4646 	/* Flow Director */
4647 	if (ixgbe_enable_fdir) {
4648 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
4649 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
4650 		else
4651 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.\n");
4652 	}
4653 	/*
4654 	 * Message Signaled Interrupts - Extended (MSI-X)
4655 	 * Normal MSI is only enabled if MSI-X calls fail.
4656 	 */
4657 	if (!ixgbe_enable_msix)
4658 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
4659 	/* Receive-Side Scaling (RSS) */
4660 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4661 		adapter->feat_en |= IXGBE_FEATURE_RSS;
4662 
4663 	/* Disable features with unmet dependencies... */
4664 	/* No MSI-X */
4665 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
4666 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
4667 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4668 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
4669 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
4670 	}
4671 } /* ixgbe_init_device_features */
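/*
 * Minimal sketch (illustrative only): the rest of the driver gates optional
 * functionality on the enable mask computed above, along the lines of the
 * following; the helper name is assumed here for illustration.
 *
 *	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
 *		ixgbe_init_fdir(adapter);
 */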
4672 
4673 /************************************************************************
4674  * ixgbe_check_fan_failure
4675  ************************************************************************/
4676 static void
4677 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
4678 {
4679 	u32 mask;
4680 
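	/*
	 * SDP1 signals fan failure.  From the interrupt path `reg' is the
	 * EICR interrupt-cause register, so check the MAC-specific GPI SDP1
	 * cause bit; otherwise `reg' is the ESDP pin register, so check the
	 * raw SDP1 pin state.
	 */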
4681 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4682 	    IXGBE_ESDP_SDP1;
4683 
4684 	if (reg & mask)
4685 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4686 } /* ixgbe_check_fan_failure */
4687 
4688 /************************************************************************
4689  * ixgbe_sbuf_fw_version
4690  ************************************************************************/
4691 static void
4692 ixgbe_sbuf_fw_version(struct ixgbe_hw *hw, struct sbuf *buf)
4693 {
4694 	struct ixgbe_nvm_version nvm_ver = {0};
4695 	uint16_t phyfw = 0;
4696 	int status;
4697 	const char *space = "";
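	/* `space' becomes a separator only after the first component prints. */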
4698 
4699 	ixgbe_get_oem_prod_version(hw, &nvm_ver); /* OEM's NVM version */
4700 	ixgbe_get_orom_version(hw, &nvm_ver); /* Option ROM */
4701 	ixgbe_get_etk_id(hw, &nvm_ver); /* eTrack identifies a build in Intel's SCM */
4702 	status = ixgbe_get_phy_firmware_version(hw, &phyfw);
4703 
4704 	if (nvm_ver.oem_valid) {
4705 		sbuf_printf(buf, "NVM OEM V%d.%d R%d", nvm_ver.oem_major,
4706 		    nvm_ver.oem_minor, nvm_ver.oem_release);
4707 		space = " ";
4708 	}
4709 
4710 	if (nvm_ver.or_valid) {
4711 		sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d",
4712 		    space, nvm_ver.or_major, nvm_ver.or_build, nvm_ver.or_patch);
4713 		space = " ";
4714 	}
4715 
4716 	if (nvm_ver.etk_id != ((NVM_VER_INVALID << NVM_ETK_SHIFT) |
4717 	    NVM_VER_INVALID)) {
4718 		sbuf_printf(buf, "%seTrack 0x%08x", space, nvm_ver.etk_id);
4719 		space = " ";
4720 	}
4721 
4722 	if (phyfw != 0 && status == IXGBE_SUCCESS)
4723 		sbuf_printf(buf, "%sPHY FW V%d", space, phyfw);
4724 } /* ixgbe_sbuf_fw_version */
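/*
 * Put together, the resulting string looks along the lines of the following
 * (the values here are made up for illustration):
 *
 *	NVM OEM V1.2 R3 Option ROM V1-b4-p2 eTrack 0x80000abc PHY FW V5
 */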
4725 
4726 /************************************************************************
4727  * ixgbe_print_fw_version
4728  ************************************************************************/
4729 static void
4730 ixgbe_print_fw_version(if_ctx_t ctx)
4731 {
4732 	struct adapter *adapter = iflib_get_softc(ctx);
4733 	struct ixgbe_hw *hw = &adapter->hw;
4734 	device_t dev = adapter->dev;
4735 	struct sbuf *buf;
4736 	int error = 0;
4737 
4738 	buf = sbuf_new_auto();
4739 	if (!buf) {
4740 		device_printf(dev, "Could not allocate sbuf for output.\n");
4741 		return;
4742 	}
4743 
4744 	ixgbe_sbuf_fw_version(hw, buf);
4745 
4746 	error = sbuf_finish(buf);
4747 	if (error)
4748 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4749 	else if (sbuf_len(buf))
4750 		device_printf(dev, "%s\n", sbuf_data(buf));
4751 
4752 	sbuf_delete(buf);
4753 } /* ixgbe_print_fw_version */
4754 
4755 /************************************************************************
4756  * ixgbe_sysctl_print_fw_version
4757  ************************************************************************/
4758 static int
4759 ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS)
4760 {
4761 	struct adapter  *adapter = (struct adapter *)arg1;
4762 	struct ixgbe_hw *hw = &adapter->hw;
4763 	device_t dev = adapter->dev;
4764 	struct sbuf *buf;
4765 	int error = 0;
4766 
4767 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4768 	if (!buf) {
4769 		device_printf(dev, "Could not allocate sbuf for output.\n");
4770 		return (ENOMEM);
4771 	}
4772 
4773 	ixgbe_sbuf_fw_version(hw, buf);
4774 
4775 	error = sbuf_finish(buf);
4776 	if (error)
4777 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4778 
4779 	sbuf_delete(buf);
4780 
4781 	return (error);
4782 } /* ixgbe_sysctl_print_fw_version */
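/*
 * Illustrative usage only: assuming the handler above is registered as a
 * read-only "fw_version" node elsewhere in this file, the firmware versions
 * can be queried from userland with something like:
 *
 *	sysctl dev.ix.0.fw_version
 */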
4783